hexsha (stringlengths 40-40) | size (int64 3-1.03M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3-972) | max_stars_repo_name (stringlengths 6-130) | max_stars_repo_head_hexsha (stringlengths 40-78) | max_stars_repo_licenses (listlengths 1-10) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 3-972) | max_issues_repo_name (stringlengths 6-130) | max_issues_repo_head_hexsha (stringlengths 40-78) | max_issues_repo_licenses (listlengths 1-10) | max_issues_count (int64 1-116k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 3-972) | max_forks_repo_name (stringlengths 6-130) | max_forks_repo_head_hexsha (stringlengths 40-78) | max_forks_repo_licenses (listlengths 1-10) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | content (stringlengths 3-1.03M) | avg_line_length (float64 1.13-941k) | max_line_length (int64 2-941k) | alphanum_fraction (float64 0-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
24fa87e32337391171ba0bdb801e62259d0c5966 | 2,535 | py | Python | parsl/dataflow/task_status_poller.py | jameshcorbett/parsl | 2475a4c5743f3336967c5fe48b84f497336780fe | ["Apache-2.0"] | 1 | 2020-11-21T17:32:09.000Z | 2020-11-21T17:32:09.000Z | parsl/dataflow/task_status_poller.py | jameshcorbett/parsl | 2475a4c5743f3336967c5fe48b84f497336780fe | ["Apache-2.0"] | null | null | null | parsl/dataflow/task_status_poller.py | jameshcorbett/parsl | 2475a4c5743f3336967c5fe48b84f497336780fe | ["Apache-2.0"] | 1 | 2022-03-09T10:51:12.000Z | 2022-03-09T10:51:12.000Z |
import logging
import parsl # noqa F401 (used in string type annotation)
import time
from typing import Dict, Sequence
from typing import List # noqa F401 (used in type annotation)
from parsl.dataflow.executor_status import ExecutorStatus
from parsl.dataflow.job_error_handler import JobErrorHandler
from parsl.dataflow.strategy import Strategy
from parsl.executors.base import ParslExecutor
from parsl.providers.provider_base import JobStatus, JobState
logger = logging.getLogger(__name__)
class PollItem(ExecutorStatus):
def __init__(self, executor: ParslExecutor):
self._executor = executor
self._interval = executor.status_polling_interval
self._last_poll_time = 0.0
self._status = {} # type: Dict[object, JobStatus]
def _should_poll(self, now: float):
return now >= self._last_poll_time + self._interval
def poll(self, now: float):
if self._should_poll(now):
self._status = self._executor.status()
self._last_poll_time = now
@property
def status(self) -> Dict[object, JobStatus]:
return self._status
@property
def executor(self) -> ParslExecutor:
return self._executor
def scale_in(self, n):
ids = self._executor.scale_in(n)
if ids is not None:
for id in ids:
del self._status[id]
return ids
def scale_out(self, n):
ids = self._executor.scale_out(n)
if ids is not None:
for id in ids:
self._status[id] = JobStatus(JobState.PENDING)
return ids
def __repr__(self):
return self._status.__repr__()
class TaskStatusPoller(object):
def __init__(self, dfk: "parsl.dataflow.dflow.DataFlowKernel"):
self._poll_items = [] # type: List[PollItem]
self._strategy = Strategy(dfk)
self._error_handler = JobErrorHandler()
def poll(self, tasks=None, kind=None):
self._update_state()
self._error_handler.run(self._poll_items)
self._strategy.strategize(self._poll_items, tasks)
def _update_state(self):
now = time.time()
for item in self._poll_items:
item.poll(now)
def add_executors(self, executors: Sequence[ParslExecutor]):
for executor in executors:
if executor.status_polling_interval > 0:
logger.debug("Adding executor {}".format(executor.label))
self._poll_items.append(PollItem(executor))
self._strategy.add_executors(executors)
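# Illustrative usage sketch (not part of the original module). How a DataFlowKernel might
# drive the poller; `dfk.config.executors`, `dfk.tasks_outstanding()` and the 5 s interval
# are assumptions, not parsl API guarantees:
#
#   poller = TaskStatusPoller(dfk)
#   poller.add_executors(dfk.config.executors)
#   while dfk.tasks_outstanding():
#       poller.poll()
#       time.sleep(5)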
| 32.088608 | 73 | 0.666667 |
c1c32cae7b8de910663d67389397d31542b26a07 | 50,801 | py | Python | diameter/node/Node.py | netch80/i1dk-PythonDiameter | ddc6cf76269379105f260c2ecab2c6930d7f32cf | ["Zlib"] | null | null | null | diameter/node/Node.py | netch80/i1dk-PythonDiameter | ddc6cf76269379105f260c2ecab2c6930d7f32cf | ["Zlib"] | null | null | null | diameter/node/Node.py | netch80/i1dk-PythonDiameter | ddc6cf76269379105f260c2ecab2c6930d7f32cf | ["Zlib"] | null | null | null |
import socket
import threading
import time
import diameter.ProtocolConstants
from diameter.node.NodeSettings import NodeSettings
from diameter.node.NodeState import NodeState
from diameter.node.Peer import Peer
from diameter.node.Connection import Connection
from diameter.node.ConnectionTimers import ConnectionTimers
from diameter.node.Capability import Capability
from diameter import *
from diameter.node.Error import *
import struct
import xdrlib
import select
import errno
import logging
class SelectThread(threading.Thread):
def __init__(self,node):
threading.Thread.__init__(self,name="Diameter node thread")
self.node=node
def run(self):
self.node.run_select()
class ReconnectThread(threading.Thread):
def __init__(self,node):
threading.Thread.__init__(self,name="Diameter node reconnect thread")
self.node=node
def run(self):
self.node.run_reconnect()
class Node:
"""A Diameter node
The Node class manages diameter transport connections and peers. It handles
the low-level messages itself (CER/CEA/DPR/DPA/DWR/DWA). The rest is sent to
the message dispatcher. When connections are established or closed the
connection listener is notified. Messages can be sent and received through
the node but no state is maintained per message.
Node is quite low-level. You probably want to use NodeManager instead.
"""
def __init__(self,message_dispatcher,connection_listener,settings):
"""
Constructor for Node.
Constructs a Node instance with the specified parameters.
The node is not automatically started.
message_dispatcher A message dispatcher. Must have a member
handle_message(msg,connkey,peer)
connection_listener A connection observer. Can be null. Must have a
member handle_connection(connkey,peer,up_down)
settings The node settings.
"""
self.message_dispatcher = message_dispatcher
self.connection_listener = connection_listener
self.settings = settings
self.node_state = NodeState()
self.map_key_conn_lock = threading.Lock()
self.map_key_conn_cv = threading.Condition(self.map_key_conn_lock)
self.obj_conn_wait = threading.Condition()
self.fd_pipe = socket.socketpair()
self.persistent_peers = set([])
self.persistent_peers_lock = threading.Lock()
self.node_thread = None
self.reconnect_thread = None
self.map_key_conn = {}
self.map_fd_conn = {}
self.logger = logging.getLogger("dk.i1.diameter.node")
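# Sketch of the duck-typed collaborators the constructor expects (illustrative only,
# not part of the original file); the class names are placeholders:
#
#   class MyDispatcher:
#       def handle_message(self, msg, connkey, peer):
#           return False   # return True once the message has been handled
#
#   class MyListener:
#       def handle_connection(self, connkey, peer, up_down):
#           pass           # up_down is True when a connection becomes ready, False on close
#
#   node = Node(MyDispatcher(), MyListener(), settings)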
def start(self):
"""
Start the node.
The node is started. If the port to listen on is already used by
another application or some other initial network error occurs a
StartError is raised.
"""
self.logger.log(logging.INFO,"Starting Diameter node")
self.please_stop = False
self.shutdown_deadline = None
self.__prepare()
self.node_thread = SelectThread(self)
self.node_thread.setDaemon(True)
self.node_thread.start()
self.reconnect_thread = ReconnectThread(self)
self.reconnect_thread.setDaemon(True)
self.reconnect_thread.start()
self.logger.log(logging.INFO,"Diameter node started")
def stop(self,grace_time=0):
"""
Stop the node.
All connections are closed. A DPR is sent to each connected
peer unless the transport connection's buffers are full.
Threads waiting in Node.waitForConnection() are woken.
Graceful connection close is not guaranteed in all cases.
grace_time Maximum time to wait for connections to close gracefully.
"""
self.logger.log(logging.INFO,"Stopping Diameter node")
with self.map_key_conn_cv:
self.shutdown_deadline = time.time() + grace_time
self.please_stop = True
for connkey in self.map_key_conn.keys():
conn = self.map_key_conn[connkey]
if conn.state==Connection.state_connecting or \
conn.state==Connection.state_connected_in or \
conn.state==Connection.state_connected_out:
self.logger.log(logging.INFO,"Closing connection to %s because we are shutting down"%conn.host_id)
del self.map_fd_conn[conn.fd.fileno()]
del self.map_key_conn[connkey]
conn.fd.close()
elif conn.state==Connection.state_tls:
pass #todo
elif conn.state==Connection.state_ready:
#self.__sendDPR(conn,ProtocolConstants.DI_DISCONNECT_CAUSE_REBOOTING)
#self.__closeConnection_unlocked(conn,True)
self.__initiateConnectionClose(conn,ProtocolConstants.DI_DISCONNECT_CAUSE_REBOOTING)
elif conn.state==Connection.state_closing:
pass #nothing to do
elif conn.state==Connection.state_closed:
pass #nothing to do
self.__wakeSelectThread()
with self.map_key_conn_cv:
self.map_key_conn_cv.notify()
self.node_thread.join()
self.node_thread = None
self.reconnect_thread.join()
self.reconnect_thread = None
if self.sock_listen:
self.sock_listen.close()
self.sock_listen = None
self.map_key_conn = {}
self.map_fd_conn = {}
self.logger.log(logging.INFO,"Diameter node stopped")
def __prepare(self):
if self.settings.port!=0:
sock_listen = None
for addr in socket.getaddrinfo(None, self.settings.port, 0, socket.SOCK_STREAM,socket.IPPROTO_TCP, socket.AI_PASSIVE):
try:
sock_listen = socket.socket(addr[0], addr[1], addr[2])
except socket.error:
#most likely error: server has IPv6 capability, but IPv6 not enabled locally
sock_listen = None
continue
try:
sock_listen.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,struct.pack("i",1))
sock_listen.bind(addr[4])
sock_listen.listen(10)
except socket.error:
#most likely error: IPv6 enabled, but no interface has IPv6 address(es)
sock_listen.close()
sock_listen = None
continue
#It worked...
break
if not sock_listen:
raise StartError("Could not create listen-socket")
self.sock_listen = sock_listen
else:
self.sock_listen = None
self.map_key_conn = {}
def __anyReadyConnection(self):
rc=False
with self.map_key_conn_lock:
for conn in self.map_key_conn.itervalues():
if conn.state==Connection.state_ready:
rc=True
break
return rc
def waitForConnection(self,timeout=None):
"""Wait until at least one connection has been established or until the timeout expires.
Waits until at least one connection to a peer has been established
and capability-exchange has finished, or the specified timeout has expired.
timeout The maximum time to wait in seconds.
"""
with self.obj_conn_wait:
if timeout==None:
while not self.__anyReadyConnection():
self.obj_conn_wait.wait(10)
else:
now = time.time()
end = now+timeout
while not self.__anyReadyConnection():
now = time.time()
w = end - now
if w<0.0: break
self.obj_conn_wait.wait(w)
def findConnection(self,peer):
"""Returns the connection key for a peer.
Returns: The connection key. None if there is no connection to the peer.
"""
self.logger.log(logging.DEBUG,"Finding '" + peer.host +"'")
connkey=None
with self.map_key_conn_lock:
for connkey2,conn in self.map_key_conn.iteritems():
if conn.peer and conn.peer==peer:
connkey=connkey2
break
if not connkey:
self.logger.log(logging.DEBUG,peer.host+" NOT found")
return connkey
def isConnectionKeyValid(self,connkey):
"""Returns if the connection is still valid.
This method is usually only of interest to programs that do lengthy
processing of requests and are located in a poor network. It is
usually much easier to just call sendMessage() and catch the
exception if the connection has gone stale.
"""
with self.map_key_conn_lock:
rc=connkey in self.map_key_conn
return rc
def connectionKey2Peer(self,connkey):
with self.map_key_conn_lock:
try:
peer = self.map_key_conn[connkey].peer
except KeyError:
peer = None
return peer
def connectionKey2InetAddress(self,connkey):
with self.map_key_conn_lock:
try:
conn = self.map_key_conn[connkey]
except KeyError:
return None
a = conn.fd.getpeername()
return a
def nextHopByHopIdentifier(self,connkey):
"Returns the next hop-by-hop identifier for a connection"
with self.map_key_conn_lock:
try:
conn = self.map_key_conn[connkey]
except KeyError:
raise StaleConnectionError()
rc = conn.nextHopByHopIdentifier()
return rc
def sendMessage(self,msg,connkey):
"""Send a message.
Send the specified message on the specified connection.
msg The message to be sent
connkey The connection to use. If the connection has been closed in
the meantime StaleConnectionError is thrown.
"""
with self.map_key_conn_lock:
try:
conn = self.map_key_conn[connkey]
except KeyError:
raise StaleConnectionError()
if conn.state!=Connection.state_ready:
raise StaleConnectionError()
self.__sendMessage_unlocked(msg,conn)
def __sendMessage_unlocked(self,msg,conn):
self.logger.log(logging.DEBUG, "command=%d, to=%s" % (
msg.hdr.command_code,
conn.peer.host if conn.peer else '(WRONG_PEER)'))
p = xdrlib.Packer()
msg.encode(p)
raw = p.get_buffer()
self.__hexDump(logging.DEBUG,"Sending to "+conn.host_id,raw)
was_empty = not conn.hasNetOutput()
conn.appendAppOutputBuffer(raw)
conn.processAppOutBuffer()
if was_empty:
self.__handleWritable(conn)
if conn.hasNetOutput():
# still some output. Wake select thread so it re-evaluates fdsets
self.__wakeSelectThread()
def initiateConnection(self,peer,persistent=False):
"""Initiate a connection to a peer.
A connection (if not already present) will be initiated to the peer.
On return, the connection is probably not established and it may
take a few seconds before it is. It is safe to call multiple times.
If <code>persistent</code> is true then the peer is added to a list of
persistent peers and if the connection is lost it will automatically
be re-established. There is no way to change a peer from persistent
to non-persistent.
If/when the connection has been established and capability-exchange
has finished threads waiting in {@link #waitForConnection} are woken.
peer The peer that the node should try to establish a connection to.
persistent If true the Node will try to keep a connection open to the peer.
"""
if persistent:
with self.persistent_peers_lock:
self.persistent_peers.add(peer)
with self.map_key_conn_lock:
for conn in self.map_key_conn.itervalues():
if conn.peer and \
conn.peer == peer:
#already has a connection to that peer
return
#what if we are connecting and the host_id matches?
#look up the ip-address first without the large mutex held
#We should really try all the possible addresses...
try:
ai = socket.getaddrinfo(peer.host,peer.port,0,socket.SOCK_STREAM,socket.IPPROTO_TCP)
except socket.gaierror as ex:
self.logger.log(logging.INFO,"getaddrinfo(%s/%d) failed"%(peer.host,peer.port),exc_info=ex)
return
self.logger.log(logging.INFO,"Initiating connection to '" + peer.host +"'")
conn = Connection()
conn.host_id = peer.host
conn.peer = peer
try:
fd = socket.socket(ai[0][0], ai[0][1], ai[0][2])
fd.setblocking(False)
fd.connect(ai[0][4])
except socket.error as ex:
(err,errstr) = ex.args
if err!=errno.EINPROGRESS:
#real error
self.logger.log(logging.ERROR,"socket() or connect() failed: %s"%errstr,exc_info=err)
return
conn.state = Connection.state_connecting
else:
self.logger.log(logging.DEBUG,"Connection to %s succeeded immediately"%peer.host)
conn.state = Connection.state_connected_out
conn.fd = fd
with self.map_key_conn_lock:
self.map_key_conn[conn.key] = conn
self.map_fd_conn[conn.fd.fileno()] = conn
if conn.state == Connection.state_connected_out:
self.__sendCER(conn)
else:
self.__wakeSelectThread()
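# Illustrative call sequence (not part of the original file; the Peer constructor
# arguments are assumptions): establish a persistent connection and wait for
# capability-exchange to finish before looking up the connection key.
#
#   peer = Peer(host="peer.example.net", port=3868)
#   node.initiateConnection(peer, persistent=True)
#   node.waitForConnection(timeout=30)
#   connkey = node.findConnection(peer)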
def run_select(self):
if self.sock_listen:
self.sock_listen.setblocking(False)
while True:
if self.please_stop:
if time.time()>=self.shutdown_deadline:
break
with self.map_key_conn_lock:
isempty = len(self.map_key_conn)==0
if isempty:
break
#build FD sets
with self.map_key_conn_lock:
iwtd=[]
owtd=[]
for conn in self.map_key_conn.itervalues():
if conn.state!=Connection.state_closed:
iwtd.append(conn.fd)
if conn.hasNetOutput() or conn.state == Connection.state_connecting:
owtd.append(conn.fd)
if self.sock_listen:
iwtd.append(self.sock_listen)
iwtd.append(self.fd_pipe[0])
#calc timeout
timeout = self.__calcNextTimeout()
#do select
if timeout:
now=time.time()
if timeout>now:
ready_fds = select.select(iwtd,owtd,[],timeout - now)
else:
ready_fds = select.select(iwtd,owtd,[])
else:
ready_fds = select.select(iwtd,owtd,[])
for fd in ready_fds[0]:
if fd==self.sock_listen:
#accept
self.logger.log(logging.DEBUG,"Got an inbound connection (key is acceptable)")
client = self.sock_listen.accept()
if client:
self.logger.log(logging.INFO,"Got an inbound connection from %s on %d"%(str(client[1]),client[0].fileno()))
if not self.please_stop:
conn = Connection()
conn.fd = client[0]
conn.fd.setblocking(False)
conn.host_id = client[1][0]
conn.state = Connection.state_connected_in
with self.map_key_conn_lock:
self.map_key_conn[conn.key] = conn
self.map_fd_conn[conn.fd.fileno()] = conn
else:
#We don't want to add the connection if we are shutting down.
client[0].close()
else:
self.logger.log(logging.DEBUG,"Spurious wakeup on listen socket")
elif fd==self.fd_pipe[0]:
self.logger.log(logging.DEBUG,"wake-up pipe ready")
self.fd_pipe[0].recv(16)
else:
#readable
self.logger.log(logging.DEBUG,"fd is readable")
with self.map_key_conn_lock:
conn = self.map_fd_conn[fd.fileno()]
self.__handleReadable(conn)
if conn.state==Connection.state_closed:
#remove from ready_fds[1] to avoid silly exception in fileno() call
for i in range(0,len(ready_fds[1])):
if ready_fds[1][i]==conn.fd:
del ready_fds[1][i]
break
for fd in ready_fds[1]:
with self.map_key_conn_lock:
conn = self.map_fd_conn[fd.fileno()]
if conn.state==Connection.state_connecting:
#connection status ready
self.logger.log(logging.DEBUG,"An outbound connection is ready (key is connectable)")
err = fd.getsockopt(socket.SOL_SOCKET,socket.SO_ERROR)
if err==0:
self.logger.log(logging.DEBUG,"Connected!")
conn.state = Connection.state_connected_out
self.__sendCER(conn)
else:
self.logger.log(logging.WARNING,"Connection to '"+conn.host_id+"' failed")
fd.close()
del self.map_key_conn[conn.key]
del self.map_fd_conn[fd.fileno()]
else:
#plain writable
self.logger.log(logging.DEBUG,"fd is writable")
self.__handleWritable(conn)
self.__runTimers()
#close all connections
self.logger.log(logging.DEBUG,"Closing all transport connections")
with self.map_key_conn_lock:
for connkey in self.map_key_conn.keys():
conn = self.map_key_conn[connkey]
self.__closeConnection_unlocked(conn,True)
def __wakeSelectThread(self):
self.fd_pipe[1].send("d")
def __calcNextTimeout(self):
timeout = None
with self.map_key_conn_lock:
for conn in self.map_key_conn.itervalues():
ready = (conn.state==Connection.state_ready)
conn_timeout = conn.timers.calcNextTimeout(ready)
if conn_timeout and ((not timeout) or conn_timeout<timeout):
timeout = conn_timeout
if self.please_stop:
if (not timeout) or self.shutdown_deadline<timeout:
timeout = self.shutdown_deadline
return timeout
def __runTimers(self):
with self.map_key_conn_lock:
for connkey in self.map_key_conn.keys():
conn = self.map_key_conn[connkey]
ready = (conn.state==Connection.state_ready)
action=conn.timers.calcAction(ready)
if action==ConnectionTimers.timer_action_none:
pass
elif action==ConnectionTimers.timer_action_disconnect_no_cer:
self.logger.log(logging.WARNING,"Disconnecting due to no CER/CEA")
self.__closeConnection_unlocked(conn)
elif action==ConnectionTimers.timer_action_disconnect_idle:
self.logger.log(logging.WARNING,"Disconnecting due to idle")
#busy is the closest thing to "no traffic for a long time. No point in keeping the connection"
self.__sendDPR(conn,ProtocolConstants.DI_DISCONNECT_CAUSE_BUSY)
self.__closeConnection_unlocked(conn)
elif action==ConnectionTimers.timer_action_disconnect_no_dw:
self.logger.log(logging.WARNING,"Disconnecting due to no DWA")
self.__closeConnection_unlocked(conn)
elif action==ConnectionTimers.timer_action_dwr:
self.__sendDWR(conn)
def run_reconnect(self):
while True:
with self.map_key_conn_cv:
if self.please_stop:
break
self.map_key_conn_cv.wait(30.0)
with self.persistent_peers_lock:
for pp in self.persistent_peers:
self.initiateConnection(pp,False)
def __handleReadable(self,conn):
self.logger.log(logging.DEBUG,"handlereadable()...")
try:
stuff = conn.fd.recv(32768)
except socket.error as ex:
(err,errstr) = ex.args
if isTransientError(err):
#Not a real error
self.logger.log(logging.DEBUG,"recv() failed, err=%d, errstr=%s"%(err,errstr))
return
#hard error
self.logger.log(logging.INFO,"recv() failed, err=%d, errstr=%s"%(err,errstr))
self.__closeConnection(conn)
return
if len(stuff)==0:
#peer closed connection
self.logger.log(logging.DEBUG,"Read 0 bytes from peer")
self.__closeConnection(conn)
return
conn.appendNetInBuffer(stuff)
conn.processNetInBuffer()
self.__processInBuffer(conn)
def __hexDump(self,level,msg,raw):
if not self.logger.isEnabledFor(level): return
#todo: there must be a faster way of doing this...
s=msg+'\n'
for i in range(0,len(raw),16):
l = "%04x " % i
for j in range(i,i+16):
if (j % 4)==0:
l += " "
if j<len(raw):
l += "%02x" % ord(raw[j])
else:
l += ' '
l += " "
for j in range(i,min(i+16,len(raw))):
b = ord(raw[j])
if b>=32 and b<127:
l += raw[j]
else:
l += '.'
s += l + '\n'
self.logger.log(level,s)
def __processInBuffer(self,conn):
self.logger.log(logging.DEBUG,"Node.__processInBuffer()")
raw = conn.getAppInBuffer()
self.logger.log(logging.DEBUG,"len(raw)=%d"%len(raw))
u = xdrlib.Unpacker(raw)
while u.get_position()<len(raw):
msg_start = u.get_position()
bytes_left = len(raw)-msg_start
#print " msg_start=",msg_start," bytes_left=",bytes_left
if bytes_left<4:
break
msg_size = Message.decodeSize(u)
if bytes_left<msg_size:
break
msg = Message()
status = msg.decode(u,msg_size)
#print " state=",status
if status==Message.decode_status_decoded:
self.__hexDump(logging.DEBUG,"Got message "+conn.host_id,raw[msg_start:msg_start+msg_size])
b = self.__handleMessage(msg,conn)
if not b:
self.logger.log(logging.DEBUG,"handle error")
self.__closeConnection(conn)
return
elif status==Message.decode_status_not_enough:
break #?
elif status==Message.decode_status_garbage:
self.__hexDump(logging.WARNING,"Garbage from "+conn.host_id,raw[msg_start:msg_start+msg_size])
#self.__hexDump(logging.INFO,"Complete inbuffer: ",raw,0,raw_bytes)
self.__closeConnection(conn,reset=True)
return
conn.consumeAppInBuffer(u.get_position())
def __handleWritable(self,conn):
self.logger.log(logging.DEBUG,"__handleWritable():")
raw = conn.getNetOutBuffer()
if len(raw)==0: return
try:
bytes_sent = conn.fd.send(raw)
except socket.error as ex:
(err,errstr) = ex.args
if isTransientError(err):
#Not a real error
self.logger.log(logging.DEBUG,"send() failed, err=%d, errstr=%s"%(err,errstr))
return
#hard error
self.logger.log(logging.INFO,"send() failed, err=%d, errstr=%s"%(err,errstr))
self.__closeConnection_unlocked(conn)
return
conn.consumeNetOutBuffer(bytes_sent)
def __closeConnection_unlocked(self,conn,reset=False):
if conn.state==Connection.state_closed:
return
del self.map_key_conn[conn.key]
del self.map_fd_conn[conn.fd.fileno()]
if reset:
#Set lingertime to zero to force a RST when closing the socket
#rfc3588, section 2.1
conn.fd.setsockopt(socket.SOL_SOCKET,socket.SO_LINGER,struct.pack("ii",1,0))
pass
conn.fd.close()
if self.connection_listener:
self.connection_listener.handle_connection(conn.key,conn.peer,False)
conn.state = Connection.state_closed
def __closeConnection(self,conn,reset=False):
self.logger.log(logging.INFO,"Closing connection to " + conn.host_id)
with self.map_key_conn_lock:
self.__closeConnection_unlocked(conn,reset)
self.logger.log(logging.DEBUG,"Closed connection to " + conn.host_id)
def __initiateConnectionClose(self,conn,why):
if conn.state!=Connection.state_ready:
return #Should probably never happen
self.__sendDPR(conn,why)
conn.state = Connection.state_closing
def __handleMessage(self,msg,conn):
if self.logger.isEnabledFor(logging.DEBUG):
self.logger.log(logging.DEBUG,"command_code=%d application_id=%d connection_state=%d"%(msg.hdr.command_code,msg.hdr.application_id,conn.state))
conn.timers.markActivity()
if conn.state==Connection.state_connected_in:
#only CER allowed
if (not msg.hdr.isRequest()) or \
msg.hdr.command_code!=ProtocolConstants.DIAMETER_COMMAND_CAPABILITIES_EXCHANGE or \
msg.hdr.application_id!=ProtocolConstants.DIAMETER_APPLICATION_COMMON:
self.logger.log(logging.WARNING,"Got something that wasn't a CER")
return False
conn.timers.markRealActivity()
return self.__handleCER(msg,conn)
elif conn.state==Connection.state_connected_out:
#only CEA allowed
if msg.hdr.isRequest() or \
msg.hdr.command_code!=ProtocolConstants.DIAMETER_COMMAND_CAPABILITIES_EXCHANGE or \
msg.hdr.application_id!=ProtocolConstants.DIAMETER_APPLICATION_COMMON:
self.logger.log(logging.WARNING,"Got something that wasn't a CEA")
return False
conn.timers.markRealActivity()
return self.__handleCEA(msg,conn)
else:
if msg.hdr.command_code==ProtocolConstants.DIAMETER_COMMAND_CAPABILITIES_EXCHANGE:
self.logger.log(logging.WARNING,"Got CER from "+conn.host_id+" after initial capability-exchange")
#not allowed in this state
return False
elif msg.hdr.command_code==ProtocolConstants.DIAMETER_COMMAND_DEVICE_WATCHDOG:
if msg.hdr.isRequest():
return self.__handleDWR(msg,conn)
else:
return self.__handleDWA(msg,conn)
elif msg.hdr.command_code==ProtocolConstants.DIAMETER_COMMAND_DISCONNECT_PEER:
if msg.hdr.isRequest():
return self.__handleDPR(msg,conn)
else:
return self.__handleDPA(msg,conn)
else:
conn.timers.markRealActivity()
if msg.hdr.isRequest():
if self.__isLoopedMessage(msg):
self.__rejectLoopedRequest(msg,conn)
return True
if not self.isAllowedApplication(msg,conn.peer):
self.__rejectDisallowedRequest(msg,conn)
return True
if not self.message_dispatcher.handle_message(msg,conn.key,conn.peer):
if msg.hdr.isRequest():
return self.__handleUnknownRequest(msg,conn)
else:
return True #unusual, but not impossible
else:
return True
def __isLoopedMessage(self,msg):
#6.1.3
for a in msg.subset(ProtocolConstants.DI_ROUTE_RECORD):
avp = AVP_UTF8String.narrow(a)
if avp.queryValue() == self.settings.host_id:
return True
return False
def __rejectLoopedRequest(self,msg,conn):
self.logger.log(logging.WARNING,"Rejecting looped request from %s (command=%d)"%(conn.peer.host,msg.hdr.command_code))
self.__rejectRequest(msg,conn,ProtocolConstants.DIAMETER_RESULT_LOOP_DETECTED)
def isAllowedApplication(self,msg,peer):
"""Determine if a message is supported by a peer.
The auth-application-id, acct-application-id or
vendor-specific-application AVP is extracted and tested against the
peer's capabilities.
msg The message
peer The peer
Returns: True if the peer should be able to handle the message.
"""
try:
avp = msg.find(ProtocolConstants.DI_AUTH_APPLICATION_ID)
if avp:
app = AVP_Unsigned32.narrow(avp).queryValue()
self.logger.log(logging.DEBUG,"auth-application-id=%d"%app)
return peer.capabilities.isAllowedAuthApp(app)
avp = msg.find(ProtocolConstants.DI_ACCT_APPLICATION_ID)
if avp:
app = AVP_Unsigned32.narrow(avp).queryValue()
self.logger.log(logging.DEBUG,"acct-application-id=%d"%app)
return peer.capabilities.isAllowedAcctApp(app)
avp = msg.find(ProtocolConstants.DI_VENDOR_SPECIFIC_APPLICATION_ID)
if avp:
g = AVP_Grouped.narrow(avp).getAVPs()
if len(g)==2 and \
g[0].code==ProtocolConstants.DI_VENDOR_ID:
vendor_id = AVP_Unsigned32.narrow(g[0]).queryValue()
app = AVP_Unsigned32.narrow(g[1]).queryValue()
self.logger.log(logging.DEBUG,"vendor-id=%d, app=%d"%(vendor_id,app))
if g[1].code==ProtocolConstants.DI_AUTH_APPLICATION_ID:
return peer.capabilities.isAllowedVendorAuthApp(vendor_id,app)
if g[1].code==ProtocolConstants.DI_ACCT_APPLICATION_ID:
return peer.capabilities.isAllowedVendorAcctApp(vendor_id,app)
return False
self.logger.log(logging.WARNING,"No auth-app-id, acct-app-id nor vendor-app in packet")
except InvalidAVPLengthError as ex:
self.logger.log(logging.INFO,"Encountered invalid AVP length while looking at application-id",exc_info=ex)
return False
def __rejectDisallowedRequest(self,msg,conn):
self.logger.log(logging.WARNING,"Rejecting request from " + conn.peer.host + " (command=" + msg.hdr.command_code + ") because it is not allowed.")
self.__rejectRequest(msg,conn,ProtocolConstants.DIAMETER_RESULT_APPLICATION_UNSUPPORTED)
def __rejectRequest(self,msg,conn,result_code):
response = Message()
response.prepareResponse(msg)
response.append(AVP_Unsigned32(ProtocolConstants.DI_RESULT_CODE, result_code))
self.addOurHostAndRealm(response)
Utils.copyProxyInfo(msg,response)
Utils.setMandatory_RFC3588(response)
self.sendMessage(response,conn.key)
def addOurHostAndRealm(self,msg):
"""Add origin-host and origin-realm to a message.
The configured host and realm is added to the message as origin-host
and origin-realm AVPs
"""
msg.append(AVP_UTF8String(ProtocolConstants.DI_ORIGIN_HOST,self.settings.host_id))
msg.append(AVP_UTF8String(ProtocolConstants.DI_ORIGIN_REALM,self.settings.realm))
def nextEndToEndIdentifier(self):
"""Returns an end-to-end identifier that is unique.
The initial value is generated as described in RFC 3588 section 3 page 34.
"""
return self.node_state.nextEndToEndIdentifier()
def __doElection(self,cer_host_id):
#5.6.4
c = cmp(self.settings.host_id,cer_host_id)
if c==0:
#this is a misconfigured peer or ourselves.
return False
close_other_connection = c>0
rc = True
with self.map_key_conn_cv:
for connkey,conn in self.map_key_conn.iteritems():
if conn.host_id and conn.host_id==cer_host_id:
if close_other_connection:
self.__closeConnection(conn)
rc = True
break
else:
rc = False #close this one
break
return rc
def __handleCER(self,msg,conn):
self.logger.log(logging.DEBUG,"CER received from " + conn.host_id)
#Handle election
avp = msg.find(ProtocolConstants.DI_ORIGIN_HOST)
if not avp:
#Origin-Host-Id is missing
error_response = Message()
error_response.prepareResponse(msg)
error_response.append(AVP_Unsigned32(ProtocolConstants.DI_RESULT_CODE, ProtocolConstants.DIAMETER_RESULT_MISSING_AVP))
self.addOurHostAndRealm(error_response)
error_response.append(AVP_FailedAVP(AVP_UTF8String(ProtocolConstants.DI_ORIGIN_HOST,"")))
Utils.setMandatory_RFC3588(error_response)
self.__sendMessage_unlocked(error_response,conn)
return False
host_id = AVP_UTF8String.narrow(avp).queryValue()
self.logger.log(logging.DEBUG,"Peer's origin-host-id is " + host_id)
if not self.__doElection(host_id):
error_response = Message()
error_response.prepareResponse(msg)
error_response.append(AVP_Unsigned32(ProtocolConstants.DI_RESULT_CODE, ProtocolConstants.DIAMETER_RESULT_ELECTION_LOST))
self.addOurHostAndRealm(error_response)
Utils.setMandatory_RFC3588(error_response)
self.__sendMessage_unlocked(error_response,conn)
return False
conn.peer = Peer(socket_address=conn.fd.getpeername())
conn.peer.host = host_id
conn.host_id = host_id
if self.__handleCEx(msg,conn):
#todo: check inband-security
cea = Message()
cea.prepareResponse(msg)
#Result-Code
cea.append(AVP_Unsigned32(ProtocolConstants.DI_RESULT_CODE, ProtocolConstants.DIAMETER_RESULT_SUCCESS))
self.__addCEStuff(cea,conn.peer.capabilities,conn)
self.logger.log(logging.INFO,"Connection to " +conn.host_id + " is now ready")
Utils.setMandatory_RFC3588(cea)
self.__sendMessage_unlocked(cea,conn)
conn.state=Connection.state_ready
if self.connection_listener:
self.connection_listener.handle_connection(conn.key, conn.peer, True)
with self.obj_conn_wait:
self.obj_conn_wait.notifyAll()
return True
else:
return False
def __handleCEA(self,msg,conn):
self.logger.log(logging.DEBUG,"CEA received from "+conn.host_id)
avp = msg.find(ProtocolConstants.DI_ORIGIN_HOST)
if not avp:
self.logger.log(logging.WARNING,"Peer did not include origin-host-id in CEA")
return False
host_id = AVP_UTF8String.narrow(avp).queryValue()
self.logger.log(logging.DEBUG,"Node:Peer's origin-host-id is '"+host_id+"'")
conn.peer = Peer(socket_address=conn.fd.getpeername())
conn.peer.host = host_id
conn.host_id = host_id
rc = self.__handleCEx(msg,conn)
if rc:
conn.state=Connection.state_ready
self.logger.log(logging.INFO,"Connection to " +conn.host_id + " is now ready")
if self.connection_listener:
self.connection_listener.handle_connection(conn.key, conn.peer, True)
with self.obj_conn_wait:
self.obj_conn_wait.notifyAll()
return True
else:
return False
def __handleCEx(self,msg,conn):
self.logger.log(logging.DEBUG,"Processing CER/CEA")
#calculate capabilities and allowed applications
try:
reported_capabilities = Capability()
for a in msg.subset(ProtocolConstants.DI_SUPPORTED_VENDOR_ID):
vendor_id = AVP_Unsigned32.narrow(a).queryValue()
self.logger.log(logging.DEBUG,"peer supports vendor %d"%vendor_id)
reported_capabilities.addSupportedVendor(vendor_id)
for a in msg.subset(ProtocolConstants.DI_AUTH_APPLICATION_ID):
app = AVP_Unsigned32.narrow(a).queryValue()
self.logger.log(logging.DEBUG,"peer supports auth-app %d"%app)
if app!=ProtocolConstants.DIAMETER_APPLICATION_COMMON:
reported_capabilities.addAuthApp(app)
for a in msg.subset(ProtocolConstants.DI_ACCT_APPLICATION_ID):
app = AVP_Unsigned32.narrow(a).queryValue()
self.logger.log(logging.DEBUG,"peer supports acct-app %d"%app)
if app!=ProtocolConstants.DIAMETER_APPLICATION_COMMON:
reported_capabilities.addAcctApp(app)
for a in msg.subset(ProtocolConstants.DI_VENDOR_SPECIFIC_APPLICATION_ID):
ag = AVP_Grouped.narrow(a)
g = ag.getAVPs()
if len(g)>=2 and len(g)<=3:
#Some non-compliant implementations add both
#auth-application-id and acct-application-id,
#probably due to a weakly ambiguous 6.11 in rfc3588
vendor_id = None
auth_app_id = None
acct_app_id = None
for ga in g:
if ga.code==ProtocolConstants.DI_VENDOR_ID:
vendor_id = AVP_Unsigned32.narrow(ga).queryValue()
elif ga.code==ProtocolConstants.DI_AUTH_APPLICATION_ID:
auth_app_id = AVP_Unsigned32.narrow(ga).queryValue()
elif ga.code==ProtocolConstants.DI_ACCT_APPLICATION_ID:
acct_app_id = AVP_Unsigned32.narrow(ga).queryValue()
else:
raise InvalidAVPValueError(a)
if (not vendor_id) or not (auth_app_id or acct_app_id):
raise InvalidAVPValueError(a)
if auth_app_id!=None:
reported_capabilities.addVendorAuthApp(vendor_id,auth_app_id)
if acct_app_id!=None:
reported_capabilities.addVendorAcctApp(vendor_id,acct_app_id)
else:
raise InvalidAVPValueError(a)
result_capabilities = Capability.calculateIntersection(self.settings.capabilities, reported_capabilities)
if self.logger.isEnabledFor(logging.DEBUG):
s = ""
for i in result_capabilities.supported_vendor:
s += " supported_vendor %d\n"%i
for i in result_capabilities.auth_app:
s += " auth_app %d\n"%i
for i in result_capabilities.acct_app:
s += " acct_app %d\n"%i
self.logger.log(logging.DEBUG,"Resulting capabilities:\n"+s)
if result_capabilities.isEmpty():
self.logger.log(logging.INFO,"No applications in common with %s"%conn.host_id)
if msg.hdr.isRequest():
error_response = Message()
error_response.prepareResponse(msg)
self.__addCEStuff(error_response,self.settings.capabilities,conn)
error_response.append(AVP_Unsigned32(ProtocolConstants.DI_RESULT_CODE, ProtocolConstants.DIAMETER_RESULT_NO_COMMON_APPLICATION))
Utils.setMandatory_RFC3588(error_response)
self.__sendMessage_unlocked(error_response,conn)
return False
conn.peer.capabilities = result_capabilities
except InvalidAVPLengthError as ex:
self.logger.log(logging.WARNING,"Invalid AVP in CER/CEA",exc_info=ex)
if msg.hdr.isRequest():
error_response = Message()
error_response.prepareResponse(msg)
self.__addCEStuff(error_response,self.settings.capabilities,conn)
error_response.append(AVP_Unsigned32(ProtocolConstants.DI_RESULT_CODE, ProtocolConstants.DIAMETER_RESULT_INVALID_AVP_LENGTH))
error_response.append(AVP_FailedAVP(ex.avp))
Utils.setMandatory_RFC3588(error_response)
self.__sendMessage_unlocked(error_response,conn)
return False
except InvalidAVPValueError as ex:
self.logger.log(logging.WARNING,"Invalid AVP in CER/CEA",exc_info=ex)
if msg.hdr.isRequest():
error_response = Message()
error_response.prepareResponse(msg)
self.__addCEStuff(error_response,self.settings.capabilities,conn)
error_response.append(AVP_Unsigned32(ProtocolConstants.DI_RESULT_CODE, ProtocolConstants.DIAMETER_RESULT_INVALID_AVP_VALUE))
error_response.append(AVP_FailedAVP(ex.avp))
Utils.setMandatory_RFC3588(error_response)
self.__sendMessage_unlocked(error_response,conn)
return False
return True
def __sendCER(self,conn):
self.logger.log(logging.DEBUG,"Sending CER to "+conn.host_id)
cer = Message()
cer.hdr.setRequest(True)
cer.hdr.command_code = ProtocolConstants.DIAMETER_COMMAND_CAPABILITIES_EXCHANGE
cer.hdr.application_id = ProtocolConstants.DIAMETER_APPLICATION_COMMON
cer.hdr.hop_by_hop_identifier = conn.nextHopByHopIdentifier()
cer.hdr.end_to_end_identifier = self.node_state.nextEndToEndIdentifier()
self.__addCEStuff(cer,self.settings.capabilities,conn)
Utils.setMandatory_RFC3588(cer)
self.__sendMessage_unlocked(cer,conn)
def __addCEStuff(self,msg,capabilities,conn):
#Origin-Host, Origin-Realm
self.addOurHostAndRealm(msg)
#Host-IP-Address
# This is not really that good...
if conn.peer and conn.peer.use_ericsson_host_ip_address_format:
#Some servers (ericsson) require a non-compliant payload in the host-ip-address AVP
tmp_avp = AVP_Address(ProtocolConstants.DI_HOST_IP_ADDRESS, conn.fd.getsockname()[0])
msg.append(AVP(ProtocolConstants.DI_HOST_IP_ADDRESS, tmp_avp.payload[2:]))
else:
msg.append(AVP_Address(ProtocolConstants.DI_HOST_IP_ADDRESS, conn.fd.getsockname()[0]))
#Vendor-Id
msg.append(AVP_Unsigned32(ProtocolConstants.DI_VENDOR_ID, self.settings.vendor_id))
#Product-Name
msg.append(AVP_UTF8String(ProtocolConstants.DI_PRODUCT_NAME, self.settings.product_name))
#Origin-State-Id
msg.append(AVP_Unsigned32(ProtocolConstants.DI_ORIGIN_STATE_ID, self.node_state.state_id))
#Error-Message, Failed-AVP: not in success
#Supported-Vendor-Id
for i in capabilities.supported_vendor:
msg.append(AVP_Unsigned32(ProtocolConstants.DI_SUPPORTED_VENDOR_ID,i))
#Auth-Application-Id
for i in capabilities.auth_app:
msg.append(AVP_Unsigned32(ProtocolConstants.DI_AUTH_APPLICATION_ID,i))
#Inband-Security-Id
# todo
#Acct-Application-Id
for i in capabilities.acct_app:
msg.append(AVP_Unsigned32(ProtocolConstants.DI_ACCT_APPLICATION_ID,i))
#Vendor-Specific-Application-Id
for va in capabilities.auth_vendor:
g = []
g.append(AVP_Unsigned32(ProtocolConstants.DI_VENDOR_ID,va.vendor_id))
g[-1].setMandatory(True)
g.append(AVP_Unsigned32(ProtocolConstants.DI_AUTH_APPLICATION_ID,va.application_id))
g[-1].setMandatory(True)
msg.append(AVP_Grouped(ProtocolConstants.DI_VENDOR_SPECIFIC_APPLICATION_ID,g))
for va in capabilities.acct_vendor:
g = []
g.append(AVP_Unsigned32(ProtocolConstants.DI_VENDOR_ID,va.vendor_id))
g[-1].setMandatory(True)
g.append(AVP_Unsigned32(ProtocolConstants.DI_ACCT_APPLICATION_ID,va.application_id))
g[-1].setMandatory(True)
msg.append(AVP_Grouped(ProtocolConstants.DI_VENDOR_SPECIFIC_APPLICATION_ID,g))
#Firmware-Revision
if self.settings.firmware_revision!=0:
msg.append(AVP_Unsigned32(ProtocolConstants.DI_FIRMWARE_REVISION,self.settings.firmware_revision))
def __handleDWR(self,msg,conn):
self.logger.log(logging.INFO,"DWR received from "+conn.host_id)
conn.timers.markDWR()
dwa = Message()
dwa.prepareResponse(msg)
dwa.append(AVP_Unsigned32(ProtocolConstants.DI_RESULT_CODE, ProtocolConstants.DIAMETER_RESULT_SUCCESS))
self.addOurHostAndRealm(dwa)
dwa.append(AVP_Unsigned32(ProtocolConstants.DI_ORIGIN_STATE_ID, self.node_state.state_id))
Utils.setMandatory_RFC3588(dwa)
self.sendMessage(dwa,conn.key)
return True
def __handleDWA(self,msg,conn):
self.logger.log(logging.DEBUG,"DWA received from "+conn.host_id)
conn.timers.markDWA()
return True
def __handleDPR(self,msg,conn):
self.logger.log(logging.DEBUG,"DPR received from "+conn.host_id)
dpa = Message()
dpa.prepareResponse(msg)
dpa.append(AVP_Unsigned32(ProtocolConstants.DI_RESULT_CODE, ProtocolConstants.DIAMETER_RESULT_SUCCESS))
self.addOurHostAndRealm(dpa)
Utils.setMandatory_RFC3588(dpa)
self.sendMessage(dpa,conn.key)
return False
def __handleDPA(self,msg,conn):
if conn.state==Connection.state_closing:
self.logger.log(logging.INFO,"Got a DPA from %s"%conn.host_id)
else:
self.logger.log(logging.WARNING,"Got a DPA. This is not expected")
return False #in any case close the connection
def __handleUnknownRequest(self,msg,conn):
self.logger.log(logging.INFO,"Unknown request received from "+conn.host_id)
answer = Message()
answer.prepareResponse(msg)
answer.append(AVP_Unsigned32(ProtocolConstants.DI_RESULT_CODE, ProtocolConstants.DIAMETER_RESULT_UNABLE_TO_DELIVER))
self.addOurHostAndRealm(answer)
Utils.setMandatory_RFC3588(answer)
self.sendMessage(answer,conn.key)
return True
def __sendDWR(self,conn):
self.logger.log(logging.DEBUG,"Sending DWR to "+conn.host_id)
dwr = Message()
dwr.hdr.setRequest(True)
dwr.hdr.command_code = ProtocolConstants.DIAMETER_COMMAND_DEVICE_WATCHDOG
dwr.hdr.application_id = ProtocolConstants.DIAMETER_APPLICATION_COMMON
dwr.hdr.hop_by_hop_identifier = conn.nextHopByHopIdentifier()
dwr.hdr.end_to_end_identifier = self.node_state.nextEndToEndIdentifier()
self.addOurHostAndRealm(dwr)
dwr.append(AVP_Unsigned32(ProtocolConstants.DI_ORIGIN_STATE_ID, self.node_state.state_id))
Utils.setMandatory_RFC3588(dwr)
self.__sendMessage_unlocked(dwr,conn)
conn.timers.markDWR_out()
def __sendDPR(self,conn,why):
self.logger.log(logging.DEBUG,"Sending DPR to "+conn.host_id)
dpr = Message()
dpr.hdr.setRequest(True)
dpr.hdr.command_code = ProtocolConstants.DIAMETER_COMMAND_DISCONNECT_PEER
dpr.hdr.application_id = ProtocolConstants.DIAMETER_APPLICATION_COMMON
dpr.hdr.hop_by_hop_identifier = conn.nextHopByHopIdentifier()
dpr.hdr.end_to_end_identifier = self.node_state.nextEndToEndIdentifier()
self.addOurHostAndRealm(dpr)
dpr.append(AVP_Unsigned32(ProtocolConstants.DI_DISCONNECT_CAUSE, why))
Utils.setMandatory_RFC3588(dpr)
self.__sendMessage_unlocked(dpr,conn)
def makeNewSessionId(self,optional_part=None):
"""Generate a new session-id
A Session-Id consists of a mandatory part and an optional part.
The mandatory part consists of the host-id and two sequencers.
The optional part can be anything. The caller can provide some
information that will be helpful in debugging in production
environments, such as user-name or calling-station-id.
"""
mandatory_part = self.settings.host_id + ";" + self.node_state.nextSessionId_second_part()
if not optional_part:
return mandatory_part
else:
return mandatory_part + ";" + optional_part
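# Example of the resulting format (values are illustrative; the middle part comes from
# NodeState.nextSessionId_second_part()):
#   makeNewSessionId()              -> "node.example.net;1234567890;17"
#   makeNewSessionId("user@realm")  -> "node.example.net;1234567890;17;user@realm"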
def isTransientError(err):
return err==errno.EAGAIN or \
err==errno.EWOULDBLOCK or \
err==errno.ENOBUFS or \
err==errno.ENOSR or \
err==errno.EINTR
def _unittest():
class CL:
def handle(self,connkey,peer,updown):
pass
class MD:
def handle(self,msg,connkey,peer):
return False
logging.basicConfig(level=logging.DEBUG,format='%(asctime)s %(name)s %(levelname)s %(message)s')
cap = Capability()
cap.addAuthApp(ProtocolConstants.DIAMETER_APPLICATION_NASREQ)
settings = NodeSettings("isjsys.int.i1.dk","i1.dk",1,cap,3868,"pythondiameter",1)
n = Node(MD(),CL(),settings)
n.start()
time.sleep(1)
n.stop()
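# Sketch of sending an application request once a connection key is available
# (illustrative, not part of the original file; command code 999, DI_SESSION_ID and the
# AVP contents are placeholders/assumptions):
#
#   msg = Message()
#   msg.hdr.setRequest(True)
#   msg.hdr.command_code = 999
#   msg.hdr.application_id = ProtocolConstants.DIAMETER_APPLICATION_COMMON
#   msg.hdr.hop_by_hop_identifier = node.nextHopByHopIdentifier(connkey)
#   msg.hdr.end_to_end_identifier = node.nextEndToEndIdentifier()
#   node.addOurHostAndRealm(msg)
#   msg.append(AVP_UTF8String(ProtocolConstants.DI_SESSION_ID, node.makeNewSessionId()))
#   node.sendMessage(msg, connkey)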
| 45.236866 | 155 | 0.607311 |
74a4e6d20123cb907d00b6834955d1f5ca26d57e | 362 | py | Python | FacialRecognition/Media Manipulation/open_video.py | markgacoka/micro-projects | e8115c8270a115282e7dfda6e24620b3333f8c6b | ["MIT"] | 1 | 2021-03-19T10:42:07.000Z | 2021-03-19T10:42:07.000Z | Media Manipulation/open_video.py | markgacoka/FacialRecognition | af3e4e37f40f7995f2e276c35283bbe3b73a2a27 | ["MIT"] | null | null | null | Media Manipulation/open_video.py | markgacoka/FacialRecognition | af3e4e37f40f7995f2e276c35283bbe3b73a2a27 | ["MIT"] | null | null | null |
import cv2
import numpy as np
cap = cv2.VideoCapture('hassan.mp4')
# Report an error if the video could not be opened.
if not cap.isOpened():
    print("Error opening video stream or file")
# Read and display frames until the stream ends or 'q' is pressed.
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    cv2.imshow('Frame', frame)
    if cv2.waitKey(25) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
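# Optional refinement (assumption: the container reports its frame rate via
# cv2.CAP_PROP_FPS): derive the waitKey delay from the video instead of the fixed 25 ms,
# then pass `delay` to cv2.waitKey in the loop above.
#
#   fps = cap.get(cv2.CAP_PROP_FPS)
#   delay = int(1000 / fps) if fps and fps > 0 else 25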
| 14.48 | 45 | 0.629834 |
aa3ddcc16c7bc56b842da5056de4fd44ebe6cc64 | 55 | py | Python | mdsearch/__init__.py | lydia07/mdsearch | a328e822d6d66869aeefef687887b0a39d4f4512 | ["MIT"] | null | null | null | mdsearch/__init__.py | lydia07/mdsearch | a328e822d6d66869aeefef687887b0a39d4f4512 | ["MIT"] | null | null | null | mdsearch/__init__.py | lydia07/mdsearch | a328e822d6d66869aeefef687887b0a39d4f4512 | ["MIT"] | null | null | null |
from .searcher import Searcher
__all__ = ["Searcher"]
| 13.75 | 30 | 0.745455 |
e5c2c70a26bcbc953c188a59f61030cc44b787fb | 2,693 | py | Python | index.py | daxinniu/data1050_dash_app | 451fe05ae56b6f7d1585d6f0e0526395b9b7f16d | ["MIT"] | null | null | null | index.py | daxinniu/data1050_dash_app | 451fe05ae56b6f7d1585d6f0e0526395b9b7f16d | ["MIT"] | null | null | null | index.py | daxinniu/data1050_dash_app | 451fe05ae56b6f7d1585d6f0e0526395b9b7f16d | ["MIT"] | 2 | 2021-12-08T16:06:53.000Z | 2021-12-10T06:38:23.000Z |
import dash_bootstrap_components as dbc
import dash
from dash import html
from dash import dcc
from dash.dependencies import Output, Input
from app import app, historical_data
# Connect to the layout and callbacks of each tab
from all_state_graph import all_state_graph_layout
from by_state_graph import by_state_graph_layout
from Approval import Approval_US_layout
from map import map_layout
from concern import concern_US_layout
from image import image_layout
# from trends import trends_layout
# from other import other_layout
# our app's Tabs *********************************************************
app_tabs = html.Div(
[
dbc.Tabs(
[
dbc.Tab(label="Overview", tab_id="tab-all_state_graph", labelClassName="text-success font-weight-bold", activeLabelClassName="text-danger"),
dbc.Tab(label="By State", tab_id="tab-by_state_graph", labelClassName="text-success font-weight-bold", activeLabelClassName="text-danger"),
dbc.Tab(label="Map", tab_id="tab-map", labelClassName="text-success font-weight-bold", activeLabelClassName="text-danger"),
dbc.Tab(label="Gov Admin Approval", tab_id="tab-approval", labelClassName="text-success font-weight-bold", activeLabelClassName="text-danger"),
dbc.Tab(label="Concern Level", tab_id="tab-concern", labelClassName="text-success font-weight-bold", activeLabelClassName="text-danger"),
dbc.Tab(label="About Us", tab_id="tab-about", labelClassName="text-success font-weight-bold", activeLabelClassName="text-danger"),
],
id="tabs",
active_tab="tab-all_state_graph",
),
], className="mt-3"
)
app.layout = dbc.Container([
html.Br(),
dbc.Row(dbc.Col(html.H1("Covid Data Analysis Dashboard",
style={"textAlign": "center"}), width=12)),
html.Hr(),
dbc.Row(dbc.Col(app_tabs, width=12), className="mb-3"),
html.Div(id='content', children=[])
])
@app.callback(
Output("content", "children"),
[Input("tabs", "active_tab")]
)
def switch_tab(tab_chosen):
if tab_chosen == "tab-all_state_graph":
return all_state_graph_layout
elif tab_chosen == "tab-by_state_graph":
return by_state_graph_layout
elif tab_chosen == "tab-map":
return map_layout
elif tab_chosen == 'tab-approval':
return Approval_US_layout
elif tab_chosen == 'tab-concern':
return concern_US_layout
elif tab_chosen == 'tab-about':
return image_layout
return html.P("This shouldn't be displayed for now...")
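# Equivalent table-driven dispatch (illustrative alternative, not used above):
#
#   TAB_LAYOUTS = {
#       "tab-all_state_graph": all_state_graph_layout,
#       "tab-by_state_graph": by_state_graph_layout,
#       "tab-map": map_layout,
#       "tab-approval": Approval_US_layout,
#       "tab-concern": concern_US_layout,
#       "tab-about": image_layout,
#   }
#   return TAB_LAYOUTS.get(tab_chosen, html.P("This shouldn't be displayed for now..."))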
if __name__=='__main__':
app.run_server(debug=False, host="0.0.0.0", port=8080)
| 38.471429 | 159 | 0.6684 |
a1018d6aceefd84656fcfb9694757de53f51f41b | 7,201 | py | Python | kvirt/kubeadm/__init__.py | toozej/kcli | c43d212576e1bf26ee1a0a1aa2f3d2ff8e34254b | ["Apache-2.0"] | null | null | null | kvirt/kubeadm/__init__.py | toozej/kcli | c43d212576e1bf26ee1a0a1aa2f3d2ff8e34254b | ["Apache-2.0"] | null | null | null | kvirt/kubeadm/__init__.py | toozej/kcli | c43d212576e1bf26ee1a0a1aa2f3d2ff8e34254b | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
from kvirt.common import success, error, pprint, warning, info2, container_mode
from kvirt.common import get_kubectl, kube_create_app, get_ssh_pub_key
from kvirt.defaults import UBUNTUS
from random import choice
from shutil import which
from string import ascii_letters, digits
import os
import sys
import yaml
# virtplatforms = ['kvm', 'kubevirt', 'ovirt', 'openstack', 'vsphere']
cloudplatforms = ['aws', 'gcp']
def scale(config, plandir, cluster, overrides):
plan = cluster
data = {'cluster': cluster, 'nip': False, 'kube': cluster, 'kubetype': 'generic', 'image': 'centos8stream'}
data['basedir'] = '/workdir' if container_mode() else '.'
cluster = data.get('cluster')
clusterdir = os.path.expanduser("~/.kcli/clusters/%s" % cluster)
if os.path.exists("%s/kcli_parameters.yml" % clusterdir):
with open("%s/kcli_parameters.yml" % clusterdir, 'r') as install:
installparam = yaml.safe_load(install)
data.update(installparam)
plan = installparam.get('plan', plan)
data.update(overrides)
if os.path.exists(clusterdir):
with open("%s/kcli_parameters.yml" % clusterdir, 'w') as paramfile:
yaml.safe_dump(data, paramfile)
client = config.client
pprint("Scaling on client %s" % client)
image = data.get('image')
data['ubuntu'] = True if 'ubuntu' in image.lower() or [entry for entry in UBUNTUS if entry in image] else False
os.chdir(os.path.expanduser("~/.kcli"))
for role in ['masters', 'workers']:
overrides = data.copy()
if overrides.get(role, 0) == 0:
continue
threaded = data.get('threaded', False) or data.get(f'{role}_threaded', False)
config.plan(plan, inputfile='%s/%s.yml' % (plandir, role), overrides=overrides, threaded=threaded)
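# Illustrative call (the cluster name and counts are placeholders); the overrides keys
# mirror those read above:
#   scale(config, plandir, 'mycluster', {'workers': 3, 'threaded': True})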
def create(config, plandir, cluster, overrides):
platform = config.type
k = config.k
data = {'kubetype': 'generic', 'nip': False, 'domain': 'karmalabs.com'}
data.update(overrides)
if 'keys' not in overrides and get_ssh_pub_key() is None:
error("No usable public key found, which is required for the deployment")
sys.exit(1)
data['cluster'] = overrides.get('cluster', cluster if cluster is not None else 'testk')
plan = cluster if cluster is not None else data['cluster']
data['kube'] = data['cluster']
masters = data.get('masters', 1)
if masters == 0:
error("Invalid number of masters")
sys.exit(1)
network = data.get('network', 'default')
nip = data['nip']
api_ip = data.get('api_ip')
if platform in cloudplatforms:
domain = data.get('domain', 'karmalabs.com')
api_ip = "%s-master.%s" % (cluster, domain)
elif api_ip is None:
if network == 'default' and platform == 'kvm':
warning("Using 192.168.122.253 as api_ip")
data['api_ip'] = "192.168.122.253"
api_ip = "192.168.122.253"
elif platform == 'kubevirt':
selector = {'kcli/plan': plan, 'kcli/role': 'master'}
service_type = "LoadBalancer" if k.access_mode == 'LoadBalancer' else 'ClusterIP'
api_ip = config.k.create_service("%s-api" % cluster, config.k.namespace, selector, _type=service_type,
ports=[6443])
if api_ip is None:
sys.exit(1)
else:
pprint("Using api_ip %s" % api_ip)
data['api_ip'] = api_ip
else:
error("You need to define api_ip in your parameters file")
sys.exit(1)
if nip and platform not in cloudplatforms:
data['domain'] = "%s.nip.io" % api_ip
if data.get('virtual_router_id') is None:
data['virtual_router_id'] = hash(data['cluster']) % 254 + 1
virtual_router_id = data['virtual_router_id']
pprint(f"Using keepalived virtual_router_id {virtual_router_id}")
auth_pass = ''.join(choice(ascii_letters + digits) for i in range(5))
data['auth_pass'] = auth_pass
version = data.get('version')
if version is not None and not str(version).startswith('1.'):
error("Invalid version %s" % version)
sys.exit(1)
if data.get('eksd', False) and data.get('engine', 'containerd') != 'docker':
warning("Forcing engine to docker for eksd")
data['engine'] = 'docker'
data['basedir'] = '/workdir' if container_mode() else '.'
cluster = data.get('cluster')
image = data.get('image', 'centos8stream')
data['ubuntu'] = True if 'ubuntu' in image.lower() or [entry for entry in UBUNTUS if entry in image] else False
clusterdir = os.path.expanduser("~/.kcli/clusters/%s" % cluster)
if os.path.exists(clusterdir):
error("Please remove existing directory %s first..." % clusterdir)
sys.exit(1)
if which('kubectl') is None:
get_kubectl()
if not os.path.exists(clusterdir):
os.makedirs(clusterdir)
os.mkdir("%s/auth" % clusterdir)
with open("%s/kcli_parameters.yml" % clusterdir, 'w') as p:
installparam = overrides.copy()
installparam['api_ip'] = api_ip
installparam['virtual_router_id'] = data['virtual_router_id']
installparam['plan'] = plan
installparam['kubetype'] = 'generic'
installparam['image'] = image
installparam['auth_pass'] = auth_pass
yaml.safe_dump(installparam, p, default_flow_style=False, encoding='utf-8', allow_unicode=True)
master_threaded = data.get('threaded', False) or data.get('masters_threaded', False)
result = config.plan(plan, inputfile='%s/masters.yml' % plandir, overrides=data, threaded=master_threaded)
if result['result'] != "success":
sys.exit(1)
workers = data.get('workers', 0)
if workers > 0:
pprint("Deploying workers")
if 'name' in data:
del data['name']
os.chdir(os.path.expanduser("~/.kcli"))
worker_threaded = data.get('threaded', False) or data.get('workers_threaded', False)
config.plan(plan, inputfile='%s/workers.yml' % plandir, overrides=data, threaded=worker_threaded)
success("Kubernetes cluster %s deployed!!!" % cluster)
masters = data.get('masters', 1)
info2("export KUBECONFIG=$HOME/.kcli/clusters/%s/auth/kubeconfig" % cluster)
info2("export PATH=$PWD:$PATH")
prefile = 'pre_ubuntu.sh' if data['ubuntu'] else 'pre_el.sh'
predata = config.process_inputfile(plan, "%s/%s" % (plandir, prefile), overrides=data)
with open("%s/pre.sh" % clusterdir, 'w') as f:
f.write(predata)
os.environ['KUBECONFIG'] = "%s/auth/kubeconfig" % clusterdir
apps = data.get('apps', [])
if apps:
os.environ["PATH"] += ":%s" % os.getcwd()
for app in apps:
appdir = "%s/apps/%s" % (plandir, app)
if not os.path.exists(appdir):
warning("Skipping unsupported app %s" % app)
else:
pprint("Adding app %s" % app)
if '%s_version' % app not in overrides:
data['%s_version' % app] = 'latest'
kube_create_app(config, appdir, overrides=data)
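# Illustrative invocation (assumptions: a kcli Kconfig object and a kubeadm plan
# directory; the exact import path and constructor arguments may differ):
#
#   from kvirt.config import Kconfig
#   config = Kconfig()
#   create(config, plandir, 'mycluster', {'masters': 1, 'workers': 2, 'image': 'centos8stream'})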
| 46.75974 | 115 | 0.618942 |
a52e22cb9387819bd18b5cdd1ecd7083f5633c73 | 8,373 | py | Python | colorply/ui/gui.py | arthurdjn/colorply | 1a4ef819ddee068565664bb08953b474e54e2b0b | ["MIT"] | 4 | 2019-11-01T19:43:54.000Z | 2021-01-27T21:42:20.000Z | colorply/ui/gui.py | arthurdraws/colorply | 1a4ef819ddee068565664bb08953b474e54e2b0b | ["MIT"] | null | null | null | colorply/ui/gui.py | arthurdraws/colorply | 1a4ef819ddee068565664bb08953b474e54e2b0b | ["MIT"] | null | null | null |
"""
This is the python-colorply GUI.
"""
# -*- coding: utf-8 -*-
# Created on Sun Jul 14 10:17:54 2019
# @author: Cédric Perion | Arthur Dujardin
import sys
# PyQt
from PyQt5.QtWidgets import QWidget, QPushButton, QApplication, QFileDialog, QLineEdit, QHBoxLayout, QVBoxLayout, \
QComboBox, QProgressBar, QLabel
from PyQt5.QtCore import pyqtSignal, QThread # Threading
# Colorply modules
from colorply.process.improcess import add_cloud_channel
from colorply.ui.palette import set_dark_theme
class RunThread(QThread):
signal = pyqtSignal('PyQt_PyObject')
def __init__(self):
super(RunThread, self).__init__()
def run(self, window):
"""
Run the process in a different thread.
Parameters
----------
window : PyQT5 window
The main window.
Returns
-------
None.
"""
imDir = window.imageDirLine.text()
oriDir = window.imageOri.text()
imExt = "." + str(window.imageExt.currentText())
cal = window.calibDirLine.text()
inPly = window.inPlyLine.text()
outPly = window.outPlyLine.text()
channel = window.imageChannelLine.text()
modestr = str(window.computeMethod.currentText())
mode = window.modeDict[modestr]
# A sexy way to check if none of the fields are empty
if len(oriDir) * len(imDir) * len(cal) * len(inPly) * len(outPly) * len(channel):
try:
window.warningLabel.setVisible(False)
window.progress.setValue(1.0)
window.progress.setMaximum(1.0)
window.progress.setVisible(True)
var = add_cloud_channel(inPly, outPly, cal, oriDir, imDir, imExt, channel, mode, window.progress)
if var:
window.warningLabel.setText("All done !")
window.warningLabel.setVisible(True)
return
except FileNotFoundError:
window.progress.setVisible(False)
window.warningLabel.setText("One of the files / folders has not been found.")
window.warningLabel.setVisible(True)
return
else:
window.warningLabel.setVisible(True)
window.progress.setVisible(False)
return
class MainWindow(QWidget):
def __init__(self):
super().__init__()
# QThread.__init__(self)
self.initUI()
def initUI(self):
"""
Initialize the window with different buttons and actions.
Returns
-------
None.
"""
self.setWindowTitle('Python-colorply')
hbox1 = QHBoxLayout() # image directory line
hbox2 = QHBoxLayout() # orientation image directory
hbox3 = QHBoxLayout() # calibration directory line
hbox4 = QHBoxLayout() # input ply line
hbox5 = QHBoxLayout() # output ply line
hbox6 = QHBoxLayout() # channel, compute method and run button line
hbox7 = QHBoxLayout()
vbox = QVBoxLayout()
# Image extension
self.imageExt = QComboBox()
self.extList = ["JPG", "jpg", "TIF", "tif", "PNG", "png", "CR2", "DNG"] # list of all extension available
for k in range(len(self.extList)): # adding the possibilities
self.imageExt.addItem(self.extList[k])
self.imageExt.setFixedWidth(50)
# Compute method
self.computeMethod = QComboBox()
# dictionary of all methods available
self.modeDict = {
"Average": "avg",
"Random": "alea",
"Weighted Average": "wavg",
"Distance": "dist",
}
# adding the methods to a drop down menu
for k in self.modeDict:
self.computeMethod.addItem(k)
# Text lines
self.imageDirLine = QLineEdit()
self.imageOri = QLineEdit()
self.imageChannelLabel = QLabel("Channel name :")
self.imageChannelLine = QLineEdit()
self.calibDirLine = QLineEdit()
self.inPlyLine = QLineEdit()
self.outPlyLine = QLineEdit()
self.warningLabel = QLabel("Error: please fill all the fields !")
self.warningLabel.setVisible(False)
# Buttons
imageChooseButton = QPushButton("Choose your image folder")
imageChooseButton.setFixedWidth(194)
imageChooseButton.clicked.connect(self.select_image_dir)
oriChooseButton = QPushButton("Choose orientation folder")
oriChooseButton.setFixedWidth(250)
oriChooseButton.clicked.connect(self.select_ori_dir)
calibChooseButton = QPushButton("Choose your calibration file")
calibChooseButton.setFixedWidth(250)
calibChooseButton.clicked.connect(self.select_calib_dir)
inPlyChooseButton = QPushButton("Choose your input PLY file")
inPlyChooseButton.setFixedWidth(250)
inPlyChooseButton.clicked.connect(self.select_input_ply)
outPlyChooseButton = QPushButton("Choose your output PLY file")
outPlyChooseButton.setFixedWidth(250)
outPlyChooseButton.clicked.connect(self.select_output_ply)
computeButton = QPushButton("RUN")
computeButton.clicked.connect(self.compute)
# Progress bar
self.progress = QProgressBar(self)
self.progress.setVisible(False)
# Boxes
hbox1.addWidget(self.imageDirLine)
hbox1.addWidget(imageChooseButton)
hbox1.addWidget(self.imageExt)
hbox2.addWidget(self.imageOri)
hbox2.addWidget(oriChooseButton)
hbox3.addWidget(self.calibDirLine)
hbox3.addWidget(calibChooseButton)
hbox4.addWidget(self.inPlyLine)
hbox4.addWidget(inPlyChooseButton)
hbox5.addWidget(self.outPlyLine)
hbox5.addWidget(outPlyChooseButton)
hbox6.addWidget(self.computeMethod)
hbox6.addWidget(self.imageChannelLabel)
hbox6.addWidget(self.imageChannelLine)
hbox6.addWidget(computeButton)
hbox7.addWidget(self.progress)
hbox7.addWidget(self.warningLabel)
vbox.addLayout(hbox1)
vbox.addLayout(hbox2)
vbox.addLayout(hbox3)
vbox.addLayout(hbox4)
vbox.addLayout(hbox5)
vbox.addStretch(1)
vbox.addLayout(hbox6)
vbox.addLayout(hbox7)
self.setLayout(vbox)
def select_image_dir(self):
"""
Select the image directory from the window.
Returns
-------
None.
"""
fname = QFileDialog.getExistingDirectory(self, 'Select image directory')
if fname:
self.imageDirLine.setText(fname)
def select_ori_dir(self):
"""
Select the MicMac orientation directory from the window.
Returns
-------
None.
"""
fname = QFileDialog.getExistingDirectory(self, 'Select image orientation directory')
if fname:
self.imageOri.setText(fname)
def select_calib_dir(self):
"""
Select the MicMac calibration directory from the window.
Returns
-------
None.
"""
fname = QFileDialog.getOpenFileName(self, 'Select calibration file')
if fname[0]:
self.calibDirLine.setText(fname[0])
def select_input_ply(self):
"""
Select the input ply file from the window.
Returns
-------
None.
"""
fname = QFileDialog.getOpenFileName(self, 'Select input PLY file')
if fname[0]:
self.inPlyLine.setText(fname[0])
def select_output_ply(self):
"""
Select the output ply file from the window.
Returns
-------
None.
"""
fname = QFileDialog.getSaveFileName(self, 'Select output PLY file name')
if fname[0]:
self.outPlyLine.setText(fname[0])
def compute(self):
"""
Run the process module, with different threads.
Returns
-------
None.
"""
thread = RunThread()
thread.run(self)
def interface():
"""
Create the main window of Colorply.
Returns
-------
None.
"""
app = QApplication(sys.argv)
app = set_dark_theme(app)
window = MainWindow()
window.show()
app.exec_()
| 28.576792
| 115
| 0.604801
|
884b289e1326cedb36024db4fa0744c65bdba646
| 17,689
|
py
|
Python
|
tests/test_crf_cuda.py
|
KohYoungResearchAmerica/MONAI
|
eca3f19182b9fcee0be7123728a9826cd382d152
|
[
"Apache-2.0"
] | null | null | null |
tests/test_crf_cuda.py
|
KohYoungResearchAmerica/MONAI
|
eca3f19182b9fcee0be7123728a9826cd382d152
|
[
"Apache-2.0"
] | null | null | null |
tests/test_crf_cuda.py
|
KohYoungResearchAmerica/MONAI
|
eca3f19182b9fcee0be7123728a9826cd382d152
|
[
"Apache-2.0"
] | 1
|
2021-01-19T19:35:00.000Z
|
2021-01-19T19:35:00.000Z
|
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from parameterized import parameterized
from monai.networks.blocks import CRF
from tests.utils import skip_if_no_cpp_extension, skip_if_no_cuda
TEST_CASES = [
[
# Case Description
"2 batche(s), 1 dimension(s), 2 classe(s), 1 channel(s)",
# Parameters
[
1.0, # bilateral_weight
0.3, # gaussian_weight
5.0, # bilateral_spatial_sigma
0.5, # bilateral_color_sigma
5.0, # gaussian_spatial_sigma
1.0, # update_factor
1, # compatibility_kernel_range
5, # iterations
],
# Input
[
# Batch 0
[
# Class 0
[0.8, 0.9, 0.6, 0.2, 0.3],
# Class 1
[0.1, 0.3, 0.5, 0.8, 0.7],
],
# Batch 1
[
# Class 0
[0.8, 0.9, 0.6, 0.2, 0.3],
# Class 1
[0.1, 0.3, 0.5, 0.8, 0.7],
],
],
# Features
[
# Batch 0
[
# Channel 0
[1, 1, 1, 0.5, 0],
],
# Batch 1
[
# Channel 0
[1, 1, 0.5, 0, 0],
],
],
# Expected
[
# Batch 0
[
# Class 0
[0.965345, 0.961201, 0.920527, 0.772525, 0.711900],
# Class 1
[0.034655, 0.038799, 0.079473, 0.227475, 0.288100],
],
# Batch 1
[
# Class 0
[0.897615, 0.816166, 0.500186, 0.158644, 0.133245],
# Class 1
[0.102385, 0.183834, 0.499814, 0.841356, 0.866755],
],
],
],
[
# Case Description
"1 batche(s), 2 dimension(s), 3 classe(s), 2 channel(s)",
# Parameters
[
1.0, # bilateral_weight
0.3, # gaussian_weight
5.0, # bilateral_spatial_sigma
0.5, # bilateral_color_sigma
5.0, # gaussian_spatial_sigma
1.0, # update_factor
1, # compatibility_kernel_range
5, # iterations
],
# Input
[
# Batch 0
[
# Class 0
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0],
],
# Class 1
[
[1.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
# Class 2
[
[0.0, 0.0, 0.0, 0.5, 1.0],
[0.0, 0.0, 0.5, 1.0, 0.5],
[0.0, 0.5, 1.0, 0.5, 0.0],
[0.5, 1.0, 0.5, 0.0, 0.0],
[1.0, 0.5, 0.0, 0.0, 0.0],
],
],
],
# Features
[
# Batch 0
[
# Channel 0
[
[1.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
# Channel 1
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0],
],
],
],
# Expected
[
# Batch 0
[
# Class 0
[
[0.001529, 0.000798, 0.000323, 0.000093, 0.000053],
[0.001365, 0.000966, 0.000422, 0.000178, 0.000281],
[0.001405, 0.001007, 0.002425, 0.013078, 0.064707],
[0.001239, 0.001263, 0.033857, 0.665830, 0.951172],
[0.001534, 0.004486, 0.263298, 0.973852, 0.999018],
],
# Class 1
[
[0.230989, 0.025518, 0.000764, 0.000057, 0.000029],
[0.037540, 0.008348, 0.000381, 0.000055, 0.000075],
[0.001987, 0.000665, 0.000363, 0.000499, 0.001170],
[0.000187, 0.000143, 0.000805, 0.001361, 0.000533],
[0.000131, 0.000286, 0.002139, 0.000410, 0.000069],
],
# Class 2
[
[0.767482, 0.973685, 0.998913, 0.999850, 0.999919],
[0.961095, 0.990687, 0.999197, 0.999768, 0.999644],
[0.996608, 0.998328, 0.997212, 0.986423, 0.934124],
[0.998574, 0.998594, 0.965337, 0.332809, 0.048295],
[0.998334, 0.995228, 0.734563, 0.025738, 0.000912],
],
],
],
],
[
# Case Description
"1 batche(s), 3 dimension(s), 2 classe(s), 1 channel(s)",
# Parameters
[
1.0, # bilateral_weight
0.3, # gaussian_weight
5.0, # bilateral_spatial_sigma
0.1, # bilateral_color_sigma
5.0, # gaussian_spatial_sigma
1.0, # update_factor
1, # compatibility_kernel_range
2, # iterations
],
# Input
[
# Batch 0
[
# Class 0
[
# Slice 0
[
[1.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
# Slice 1
[
[1.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
# Slice 2
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
# Slice 3
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
# Slice 4
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
],
# Class 1
[
# Slice 0
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
# Slice 1
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
# Slice 2
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
# Slice 3
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0],
],
# Slice 4
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0],
],
],
],
],
# Features
[
# Batch 0
[
# Channel 0
[
# Slice 0
[
[0.5, 0.5, 0.5, 0.0, 0.0],
[0.5, 0.5, 0.5, 0.0, 0.0],
[0.5, 0.5, 0.5, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
# Slice 1
[
[0.5, 0.5, 0.5, 0.0, 0.0],
[0.5, 0.5, 0.5, 0.0, 0.0],
[0.5, 0.5, 0.5, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
# Slice 2
[
[0.5, 0.5, 0.5, 0.0, 0.0],
[0.5, 0.5, 0.5, 0.0, 0.0],
[0.5, 0.5, 0.8, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 1.0],
],
# Slice 3
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 1.0],
],
# Slice 4
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 1.0],
],
],
],
],
# Expected
[
# Batch 0
[
# Class 0
[
# Slice 0
[
[1.000000, 1.000000, 1.000000, 0.999884, 0.769625],
[1.000000, 1.000000, 1.000000, 0.999851, 0.714004],
[1.000000, 1.000000, 0.999988, 0.997150, 0.614165],
[0.999862, 0.999832, 0.996976, 0.945058, 0.497088],
[0.720345, 0.672450, 0.590360, 0.490120, 0.416671],
],
# Slice 1
[
[1.000000, 1.000000, 1.000000, 0.999848, 0.707997],
[1.000000, 1.000000, 1.000000, 0.997064, 0.127893],
[1.000000, 1.000000, 0.999469, 0.591574, 0.007791],
[0.999812, 0.996663, 0.582521, 0.006041, 0.000427],
[0.637809, 0.107586, 0.007432, 0.000437, 0.000333],
],
# Slice 2
[
[1.000000, 1.000000, 0.999987, 0.996994, 0.600095],
[1.000000, 1.000000, 0.999441, 0.575839, 0.007303],
[0.999986, 0.999411, 0.587268, 0.001117, 0.000033],
[0.996210, 0.550023, 0.001114, 0.000001, 0.000000],
[0.520757, 0.006334, 0.000034, 0.000000, 0.000000],
],
# Slice 3
[
[0.999834, 0.999807, 0.996617, 0.940887, 0.482334],
[0.999799, 0.996410, 0.553696, 0.005287, 0.000376],
[0.996193, 0.546801, 0.001047, 0.000001, 0.000000],
[0.930515, 0.005142, 0.000001, 0.000000, 0.000000],
[0.430705, 0.000371, 0.000000, 0.000000, 0.000000],
],
# Slice 4
[
[0.665227, 0.627316, 0.550517, 0.467839, 0.406319],
[0.617408, 0.098325, 0.006247, 0.000359, 0.000278],
[0.524800, 0.006229, 0.000030, 0.000000, 0.000000],
[0.443054, 0.000372, 0.000000, 0.000000, 0.000000],
[0.388126, 0.000305, 0.000000, 0.000000, 0.000000],
],
],
# Class 1
[
# Slice 0
[
[0.000000, 0.000000, 0.000000, 0.000116, 0.230375],
[0.000000, 0.000000, 0.000000, 0.000149, 0.285996],
[0.000000, 0.000000, 0.000012, 0.002850, 0.385835],
[0.000138, 0.000168, 0.003024, 0.054942, 0.502912],
[0.279655, 0.327550, 0.409640, 0.509880, 0.583329],
],
# Slice 1
[
[0.000000, 0.000000, 0.000000, 0.000152, 0.292003],
[0.000000, 0.000000, 0.000000, 0.002936, 0.872107],
[0.000000, 0.000000, 0.000531, 0.408426, 0.992209],
[0.000188, 0.003337, 0.417479, 0.993959, 0.999574],
[0.362191, 0.892414, 0.992568, 0.999564, 0.999667],
],
# Slice 2
[
[0.000000, 0.000000, 0.000013, 0.003006, 0.399905],
[0.000000, 0.000000, 0.000559, 0.424161, 0.992697],
[0.000014, 0.000589, 0.412732, 0.998884, 0.999967],
[0.003790, 0.449977, 0.998886, 0.999999, 1.000000],
[0.479243, 0.993666, 0.999966, 1.000000, 1.000000],
],
# Slice 3
[
[0.000166, 0.000193, 0.003383, 0.059113, 0.517666],
[0.000201, 0.003590, 0.446304, 0.994713, 0.999624],
[0.003807, 0.453199, 0.998953, 0.999999, 1.000000],
[0.069485, 0.994858, 0.999999, 1.000000, 1.000000],
[0.569295, 0.999629, 1.000000, 1.000000, 1.000000],
],
# Slice 4
[
[0.334773, 0.372684, 0.449483, 0.532161, 0.593681],
[0.382592, 0.901675, 0.993753, 0.999641, 0.999722],
[0.475200, 0.993771, 0.999970, 1.000000, 1.000000],
[0.556946, 0.999628, 1.000000, 1.000000, 1.000000],
[0.611874, 0.999695, 1.000000, 1.000000, 1.000000],
],
],
],
],
],
]
@skip_if_no_cpp_extension
@skip_if_no_cuda
class CRFTestCaseCuda(unittest.TestCase):
@parameterized.expand(TEST_CASES)
def test(self, test_case_description, params, input, features, expected):
# Create input tensors
input_tensor = torch.from_numpy(np.array(input)).to(dtype=torch.float, device=torch.device("cuda"))
feature_tensor = torch.from_numpy(np.array(features)).to(dtype=torch.float, device=torch.device("cuda"))
# apply filter
crf = CRF(*params)
output = crf(input_tensor, feature_tensor).cpu().numpy()
        # Ensure results are as expected
        # np.testing.assert_allclose(output, expected, atol=5e-2, rtol=5e-2)
        # Temporarily allowing some (10%) mismatched elements due to non-determinism.
absolute_diff_tolerance = 5e-2
mismatch_ratio_tolerance = 0.1
output = np.array(output).flatten()
expected = np.array(expected).flatten()
abs_diff = abs(output - expected)
mismatch_count = sum(np.where(abs_diff > absolute_diff_tolerance, 1, 0))
self.assertLessEqual(mismatch_count / len(output), mismatch_ratio_tolerance)
if __name__ == "__main__":
unittest.main()
| 38.122845
| 112
| 0.340042
|
edf8a12ed332be381fcf7751aa505e514365bcab
| 85
|
py
|
Python
|
crash_course/ch04/squares2.py
|
dantin/python-by-example
|
5769c7a332ebd60fd54e477b6813f2f2a0f3f37f
|
[
"BSD-3-Clause"
] | null | null | null |
crash_course/ch04/squares2.py
|
dantin/python-by-example
|
5769c7a332ebd60fd54e477b6813f2f2a0f3f37f
|
[
"BSD-3-Clause"
] | null | null | null |
crash_course/ch04/squares2.py
|
dantin/python-by-example
|
5769c7a332ebd60fd54e477b6813f2f2a0f3f37f
|
[
"BSD-3-Clause"
] | null | null | null |
# list comprehension
squares = [value**2 for value in range(1, 11)]
print(squares)
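# For comparison, a plain-loop version of the same computation (a minimal
# illustrative sketch; `squares_loop` is just a name chosen here):
squares_loop = []
for value in range(1, 11):
    squares_loop.append(value**2)
print(squares_loop)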
| 14.166667
| 46
| 0.705882
|
31af1fb951d654f8d4192fdd45fba2772281ee75
| 1,652
|
py
|
Python
|
bsidessf2020ctf/eccmul/solve.py
|
nhtri2003gmail/ctf-write-ups
|
7e969c47027c39b614e10739ae3a953eed17dfa3
|
[
"MIT"
] | 101
|
2020-03-09T17:40:47.000Z
|
2022-03-31T23:26:55.000Z
|
bsidessf2020ctf/eccmul/solve.py
|
nhtri2003gmail/ctf-write-ups
|
7e969c47027c39b614e10739ae3a953eed17dfa3
|
[
"MIT"
] | 1
|
2021-11-09T13:39:40.000Z
|
2021-11-10T19:15:04.000Z
|
bsidessf2020ctf/eccmul/solve.py
|
datajerk/ctf-write-ups
|
1bc4ecc63a59de7d924c7214b1ce467801792da0
|
[
"MIT"
] | 31
|
2020-05-27T12:29:50.000Z
|
2022-03-31T23:23:32.000Z
|
# Extended Euclidean algorithm
def extended_gcd(aa, bb):
lastremainder, remainder = abs(aa), abs(bb)
x, lastx, y, lasty = 0, 1, 1, 0
while remainder:
lastremainder, (quotient, remainder) = remainder, divmod(lastremainder, remainder)
x, lastx = lastx - quotient*x, x
y, lasty = lasty - quotient*y, y
return lastremainder, lastx * (-1 if aa < 0 else 1), lasty * (-1 if bb < 0 else 1)
# calculate `modular inverse`
def modinv(a, m):
g, x, y = extended_gcd(a, m)
if g != 1:
raise ValueError
return x % m
# double function
def ecc_double(x1, y1, p, a):
s = ((3*(x1**2) + a) * modinv(2*y1, p))%p
x3 = (s**2 - x1 - x1)%p
y3 = (s*(x1-x3) - y1)%p
return (x3, y3)
# add function
def ecc_add(x1, y1, x2, y2, p, a):
s = 0
if (x1==x2):
s = ((3*(x1**2) + a) * modinv(2*y1, p))%p
else:
s = ((y2-y1) * modinv(x2-x1, p))%p
x3 = (s**2 - x1 - x2)%p
y3 = (s*(x1 - x3) - y1)%p
return (x3, y3)
def double_and_add(multi, generator, p, a):
(x3, y3)=(0, 0)
(x1, y1) = generator
(x_tmp, y_tmp) = generator
init = 0
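    # Classic double-and-add: scan the multiplier's bits MSB-first; the leading
    # 1 seeds the accumulator with the generator, then every later bit doubles
    # the accumulator and a set bit additionally adds the generator.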
for i in str(bin(multi)[2:]):
if (i=='1') and (init==0):
init = 1
elif (i=='1') and (init==1):
(x3,y3) = ecc_double(x_tmp, y_tmp, p, a)
(x3,y3) = ecc_add(x1, y1, x3, y3, p, a)
(x_tmp, y_tmp) = (x3, y3)
else:
(x3, y3) = ecc_double(x_tmp, y_tmp, p, a)
(x_tmp, y_tmp) = (x3, y3)
return (x3, y3)
p = 14976980263601993881
a = 2557469063
b = 3368387639
generator=(2342304216201758750, 762803873429369431)
print("6921481148*P = ", double_and_add(6921481148, generator, p, a))
| 30.036364
| 89
| 0.550847
|
d3d5b6556e2c5bba7ac9e7e8bcd08c0673d33395
| 2,636
|
py
|
Python
|
scripts/WaitForDoorOpen.py
|
tue-robotics/hsr_demo
|
c3e139e2ffe4baba17f42e0ee1e726ef08ed69dc
|
[
"MIT"
] | null | null | null |
scripts/WaitForDoorOpen.py
|
tue-robotics/hsr_demo
|
c3e139e2ffe4baba17f42e0ee1e726ef08ed69dc
|
[
"MIT"
] | 3
|
2017-06-10T10:17:14.000Z
|
2017-09-28T14:44:42.000Z
|
scripts/WaitForDoorOpen.py
|
tue-robotics/hsr_demo
|
c3e139e2ffe4baba17f42e0ee1e726ef08ed69dc
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
import roslib
import rospy
from sensor_msgs.msg import LaserScan
from threading import Event
class WaitForDoorOpen:
def __init__(self, timeout=None):
self.distances = [] #TODO Loy: Keeping all of these is quite ugly. Would a ring buffer or collections.deque suffice?
self.door_open = Event()
self.timeout = timeout
self.no_door_found = False
def avg(self, lst):
return sum(lst)/max(len(lst), 1)
def process_scan(self, scan_msg):
try:
            middle_index = len(scan_msg.ranges) // 2 # Get the middle point
ranges_at_center = scan_msg.ranges[middle_index-2:middle_index+2] # Get some points around the middle
distance_to_door = self.avg(ranges_at_center) # and the average of the middle range and use it as the distance to the door
self.distances += [distance_to_door] #store all distances
avg_distance_now = self.avg(self.distances[-5:]) #And the latest 5
# print "d_start = {0}, d_now = {1}, curr = {2}".format(avg_distance_at_start, avg_distance_now, distance_to_door)
if self.distances[0] > 1.0:
self.no_door_found = True
rospy.loginfo("No door found")
self.door_open.set() #Then set a threading Event that run is waiting for.
elif avg_distance_now > 1.0:
rospy.loginfo("Distance to door is more than a meter")
self.door_open.set() #Then set a threading Event that run is waiting for.
        except Exception as e:
rospy.logerr("Receiving laser failed so unsubscribing: {0}".format(e))
self.laser_sub.unregister()
def run(self):
rospy.loginfo("Waiting for door...")
self.laser_sub = rospy.Subscriber("/hsrb/base_scan", LaserScan, self.process_scan)
        opened_before_timeout = self.door_open.wait(self.timeout)
rospy.loginfo("Unregistering laser listener and clearing data")
self.laser_sub.unregister()
self.distances = []
self.door_open.clear()
if self.no_door_found:
rospy.loginfo("No door found")
return "no_door"
        if opened_before_timeout:
rospy.loginfo("Door is open")
return "open"
rospy.loginfo("Timed out with door still closed")
return "closed"
############################## initializing program ##############################
if __name__ == '__main__':
rospy.init_node('wait_for_open_door_exec')
timeout = 10
waitForDoorOpen = WaitForDoorOpen(timeout)
result = waitForDoorOpen.run()
| 37.657143
| 135
| 0.627845
|
d6f0c55209ad65dff4d8adf949bbd0c52cbb9a53
| 3,435
|
py
|
Python
|
opencv-3.3.0/modules/dnn/test/imagenet_cls_test_inception.py
|
AmericaGL/TrashTalk_Dapp
|
401f17289261b5f537b239e7759dc039d53211e1
|
[
"MIT"
] | 17
|
2020-03-13T00:10:28.000Z
|
2021-09-06T17:13:17.000Z
|
opencv-3.3.0/modules/dnn/test/imagenet_cls_test_inception.py
|
AmericaGL/TrashTalk_Dapp
|
401f17289261b5f537b239e7759dc039d53211e1
|
[
"MIT"
] | 1
|
2020-03-12T08:10:07.000Z
|
2020-03-12T08:10:07.000Z
|
opencv-3.3.0/modules/dnn/test/imagenet_cls_test_inception.py
|
AmericaGL/TrashTalk_Dapp
|
401f17289261b5f537b239e7759dc039d53211e1
|
[
"MIT"
] | 11
|
2016-03-20T18:32:24.000Z
|
2020-12-31T21:22:22.000Z
|
import numpy as np
import sys
import os
import argparse
import tensorflow as tf
from tensorflow.python.platform import gfile
from imagenet_cls_test_alexnet import MeanValueFetch, DnnCaffeModel, Framework, ClsAccEvaluation
try:
import cv2 as cv
except ImportError:
raise ImportError('Can\'t find OpenCV Python module. If you\'ve built it from sources without installation, '
                      'configure environment variable PYTHONPATH to "opencv_build_dir/lib" directory (with "python3" subdirectory if required)')
# If you've got an exception "Cannot load libmkl_avx.so or libmkl_def.so" or similar, try exporting the following variable
# before running the script:
# LD_PRELOAD=/opt/intel/mkl/lib/intel64/libmkl_core.so:/opt/intel/mkl/lib/intel64/libmkl_sequential.so
class TensorflowModel(Framework):
sess = tf.Session
output = tf.Graph
def __init__(self, model_file, in_blob_name, out_blob_name):
self.in_blob_name = in_blob_name
self.sess = tf.Session()
with gfile.FastGFile(model_file, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
self.sess.graph.as_default()
tf.import_graph_def(graph_def, name='')
self.output = self.sess.graph.get_tensor_by_name(out_blob_name + ":0")
def get_name(self):
return 'Tensorflow'
def get_output(self, input_blob):
assert len(input_blob.shape) == 4
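        # The blob arrives as NCHW (the OpenCV layout); TensorFlow expects NHWC,
        # so the channel axis is moved to the end before running the session.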
batch_tf = input_blob.transpose(0, 2, 3, 1)
out = self.sess.run(self.output,
{self.in_blob_name+':0': batch_tf})
out = out[..., 1:1001]
return out
class DnnTfInceptionModel(DnnCaffeModel):
net = cv.dnn.Net()
def __init__(self, model_file, in_blob_name, out_blob_name):
self.net = cv.dnn.readNetFromTensorflow(model_file)
self.in_blob_name = in_blob_name
self.out_blob_name = out_blob_name
def get_output(self, input_blob):
return super(DnnTfInceptionModel, self).get_output(input_blob)[..., 1:1001]
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--imgs_dir", help="path to ImageNet validation subset images dir, ILSVRC2012_img_val dir")
parser.add_argument("--img_cls_file", help="path to file with classes ids for images, download it here:"
"https://github.com/opencv/opencv_extra/tree/master/testdata/dnn/img_classes_inception.txt")
parser.add_argument("--model", help="path to tensorflow model, download it here:"
"https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip")
parser.add_argument("--log", help="path to logging file")
parser.add_argument("--batch_size", help="size of images in batch", default=1)
parser.add_argument("--frame_size", help="size of input image", default=224)
parser.add_argument("--in_blob", help="name for input blob", default='input')
parser.add_argument("--out_blob", help="name for output blob", default='softmax2')
args = parser.parse_args()
data_fetcher = MeanValueFetch(args.frame_size, args.imgs_dir, True)
frameworks = [TensorflowModel(args.model, args.in_blob, args.out_blob),
DnnTfInceptionModel(args.model, '', args.out_blob)]
acc_eval = ClsAccEvaluation(args.log, args.img_cls_file, args.batch_size)
acc_eval.process(frameworks, data_fetcher)
| 44.038462
| 144
| 0.694905
|
0ab295f9ae586673bd229b43e646d61b56eb1048
| 3,799
|
py
|
Python
|
tests/test_loader.py
|
gordonwatts/func_adl_servicex_type_generator
|
2040b008460f0332a82fdd970fa9e4946990fe48
|
[
"MIT"
] | null | null | null |
tests/test_loader.py
|
gordonwatts/func_adl_servicex_type_generator
|
2040b008460f0332a82fdd970fa9e4946990fe48
|
[
"MIT"
] | 2
|
2022-01-23T20:27:59.000Z
|
2022-01-25T23:34:34.000Z
|
tests/test_loader.py
|
gordonwatts/func_adl_servicex_type_generator
|
2040b008460f0332a82fdd970fa9e4946990fe48
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from func_adl_servicex_type_generator.loader import load_yaml
def test_load_full_file():
data = load_yaml(Path("./tests/xaod_r21_1.yaml"))
collection_dict = {c.name: c for c in data.collections}
classes_dict = {c.name: c for c in data.classes}
assert "DiTauJets" in collection_dict
assert "xAOD.Jet_v1" in classes_dict
di_jets = collection_dict["DiTauJets"]
# jets = collection_dict["Jets"]
jets_class = classes_dict["xAOD.Jet_v1"]
btagging = classes_dict["xAOD.BTagging_v1"]
truth = classes_dict["xAOD.TruthParticle_v1"]
event_info = collection_dict["EventInfo"]
element_link = classes_dict["ElementLink_DataVector_xAOD_BTagging_v1__"]
assert di_jets.name == "DiTauJets"
assert di_jets.collection_item_type == "xAOD.DiTauJet_v1"
assert di_jets.collection_type == "Iterable[xAOD.DiTauJet_v1]"
assert di_jets.collection_item_type_name == "DiTauJet_v1"
assert di_jets.cpp_item_type == "xAOD::DiTauJet_v1"
assert di_jets.cpp_collection_type == "DataVector<xAOD::DiTauJet_v1>"
assert jets_class.name == "xAOD.Jet_v1"
assert len(jets_class.methods) > 0
pt_methods = [m for m in jets_class.methods if m.name == "pt"]
assert len(pt_methods) == 1
assert pt_methods[0].return_type == "double"
assert len(pt_methods[0].arguments) == 0
assert len(pt_methods[0].param_arguments) == 0
attr_methods = [m for m in jets_class.methods if m.name == "getAttribute"]
assert len(attr_methods) == 1
assert attr_methods[0].return_type == "U"
assert len(attr_methods[0].param_arguments) == 1
assert attr_methods[0].param_arguments[0].arg_type == "cpp_type[U]"
assert attr_methods[0].param_helper == "type_support.index_type_forwarder"
calc_llr = [m for m in btagging.methods if m.name == "calcLLR"]
assert len(calc_llr) == 1
assert len(calc_llr[0].arguments) == 2
assert calc_llr[0].arguments[0].arg_type == "float"
assert len(event_info.cpp_include_file) == 1
assert event_info.link_libraries == ["xAODEventInfo"]
decayVtx = [m for m in truth.methods if m.name == "decayVtx"]
assert len(decayVtx) == 1
assert decayVtx[0].return_type == "const xAOD::TruthVertex_v1*"
assert len(element_link.behaviors) == 1
assert element_link.behaviors[0] == "xAOD::BTagging_v1**"
assert "sys_error_tool" in data.metadata
m_sys = data.metadata["sys_error_tool"]
assert isinstance(m_sys.data, list)
# assert len(jets.parameters) == 2
# jets_p = jets.parameters[0]
# assert jets_p.name == "calibration"
# assert jets_p.type == "str"
# assert jets_p.default_value == "'NOSYS'"
# assert len(jets_p.actions) == 2
# jets_a = jets_p.actions[1]
# assert jets_a.value == "'*Any*'"
# assert jets_a.md_names == [
# "sys_error_tool",
# "pileup_tool",
# "jet_corrections",
# "add_calibration_to_job",
# ]
# assert jets_a.bank_rename == "{bank_name}Calib_{calibration}"
assert len(data.files) > 0
trigger_list = [f for f in data.files if f.file_name == "trigger.py"]
assert len(trigger_list) == 1
trigger = trigger_list[0]
assert len(trigger.init_lines) == 2
assert len(trigger.contents) > 0
assert trigger.contents[0].startswith("#")
def test_load_container_types():
data = load_yaml(Path("./tests/xaod_r21_1.yaml"))
classes_dict = {c.name: c for c in data.classes}
non_container = classes_dict["xAOD.Jet_v1"]
container = classes_dict["xAOD.JetConstituentVector"]
assert non_container.cpp_container_type is None
assert non_container.python_container_type is None
assert container.cpp_container_type == "xAOD::JetConstituent*"
assert container.python_container_type == "xAOD.JetConstituent"
| 37.613861
| 78
| 0.697815
|
48b6e760beb3051277b46e80518865e10bf712d7
| 13,938
|
py
|
Python
|
py3.8/multiprocess/util.py
|
geofft/multiprocess
|
d998ffea9e82d17662b12b94a236182e7fde46d5
|
[
"BSD-3-Clause"
] | 356
|
2015-06-21T21:05:10.000Z
|
2022-03-30T11:57:08.000Z
|
py3.8/multiprocess/util.py
|
geofft/multiprocess
|
d998ffea9e82d17662b12b94a236182e7fde46d5
|
[
"BSD-3-Clause"
] | 103
|
2015-06-22T01:44:14.000Z
|
2022-03-01T03:44:25.000Z
|
py3.8/multiprocess/util.py
|
geofft/multiprocess
|
d998ffea9e82d17662b12b94a236182e7fde46d5
|
[
"BSD-3-Clause"
] | 72
|
2015-09-02T14:10:24.000Z
|
2022-03-25T06:49:43.000Z
|
#
# Module providing various facilities to other parts of the package
#
# multiprocessing/util.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
import os
import itertools
import sys
import weakref
import atexit
import threading        # we want threading to install its
# cleanup function before multiprocessing does
from subprocess import _args_from_interpreter_flags
from . import process
__all__ = [
'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger',
'log_to_stderr', 'get_temp_dir', 'register_after_fork',
'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal',
'close_all_fds_except', 'SUBDEBUG', 'SUBWARNING',
]
#
# Logging
#
NOTSET = 0
SUBDEBUG = 5
DEBUG = 10
INFO = 20
SUBWARNING = 25
LOGGER_NAME = 'multiprocess'
DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s'
_logger = None
_log_to_stderr = False
def sub_debug(msg, *args):
if _logger:
_logger.log(SUBDEBUG, msg, *args)
def debug(msg, *args):
if _logger:
_logger.log(DEBUG, msg, *args)
def info(msg, *args):
if _logger:
_logger.log(INFO, msg, *args)
def sub_warning(msg, *args):
if _logger:
_logger.log(SUBWARNING, msg, *args)
def get_logger():
'''
Returns logger used by multiprocess
'''
global _logger
import logging
logging._acquireLock()
try:
if not _logger:
_logger = logging.getLogger(LOGGER_NAME)
_logger.propagate = 0
# XXX multiprocessing should cleanup before logging
if hasattr(atexit, 'unregister'):
atexit.unregister(_exit_function)
atexit.register(_exit_function)
else:
atexit._exithandlers.remove((_exit_function, (), {}))
atexit._exithandlers.append((_exit_function, (), {}))
finally:
logging._releaseLock()
return _logger
def log_to_stderr(level=None):
'''
Turn on logging and add a handler which prints to stderr
'''
global _log_to_stderr
import logging
logger = get_logger()
formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
if level:
logger.setLevel(level)
_log_to_stderr = True
return _logger
# Abstract socket support
def _platform_supports_abstract_sockets():
if sys.platform == "linux":
return True
if hasattr(sys, 'getandroidapilevel'):
return True
return False
def is_abstract_socket_namespace(address):
if not address:
return False
if isinstance(address, bytes):
return address[0] == 0
elif isinstance(address, str):
return address[0] == "\0"
    raise TypeError(f'address type of {address!r} unrecognized')
abstract_sockets_supported = _platform_supports_abstract_sockets()
#
# Function returning a temp directory which will be removed on exit
#
def _remove_temp_dir(rmtree, tempdir):
rmtree(tempdir)
current_process = process.current_process()
# current_process() can be None if the finalizer is called
# late during Python finalization
if current_process is not None:
current_process._config['tempdir'] = None
def get_temp_dir():
# get name of a temp directory which will be automatically cleaned up
tempdir = process.current_process()._config.get('tempdir')
if tempdir is None:
import shutil, tempfile
tempdir = tempfile.mkdtemp(prefix='pymp-')
info('created temp directory %s', tempdir)
# keep a strong reference to shutil.rmtree(), since the finalizer
# can be called late during Python shutdown
Finalize(None, _remove_temp_dir, args=(shutil.rmtree, tempdir),
exitpriority=-100)
process.current_process()._config['tempdir'] = tempdir
return tempdir
#
# Support for reinitialization of objects when bootstrapping a child process
#
_afterfork_registry = weakref.WeakValueDictionary()
_afterfork_counter = itertools.count()
def _run_after_forkers():
items = list(_afterfork_registry.items())
items.sort()
for (index, ident, func), obj in items:
try:
func(obj)
except Exception as e:
info('after forker raised exception %s', e)
def register_after_fork(obj, func):
_afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj
#
# Finalization using weakrefs
#
_finalizer_registry = {}
_finalizer_counter = itertools.count()
class Finalize(object):
'''
Class which supports object finalization using weakrefs
'''
def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None):
if (exitpriority is not None) and not isinstance(exitpriority,int):
raise TypeError(
"Exitpriority ({0!r}) must be None or int, not {1!s}".format(
exitpriority, type(exitpriority)))
if obj is not None:
self._weakref = weakref.ref(obj, self)
elif exitpriority is None:
raise ValueError("Without object, exitpriority cannot be None")
self._callback = callback
self._args = args
self._kwargs = kwargs or {}
self._key = (exitpriority, next(_finalizer_counter))
self._pid = os.getpid()
_finalizer_registry[self._key] = self
def __call__(self, wr=None,
# Need to bind these locally because the globals can have
# been cleared at shutdown
_finalizer_registry=_finalizer_registry,
sub_debug=sub_debug, getpid=os.getpid):
'''
Run the callback unless it has already been called or cancelled
'''
try:
del _finalizer_registry[self._key]
except KeyError:
sub_debug('finalizer no longer registered')
else:
if self._pid != getpid():
sub_debug('finalizer ignored because different process')
res = None
else:
sub_debug('finalizer calling %s with args %s and kwargs %s',
self._callback, self._args, self._kwargs)
res = self._callback(*self._args, **self._kwargs)
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
return res
def cancel(self):
'''
Cancel finalization of the object
'''
try:
del _finalizer_registry[self._key]
except KeyError:
pass
else:
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
def still_active(self):
'''
Return whether this finalizer is still waiting to invoke callback
'''
return self._key in _finalizer_registry
def __repr__(self):
try:
obj = self._weakref()
except (AttributeError, TypeError):
obj = None
if obj is None:
return '<%s object, dead>' % self.__class__.__name__
x = '<%s object, callback=%s' % (
self.__class__.__name__,
getattr(self._callback, '__name__', self._callback))
if self._args:
x += ', args=' + str(self._args)
if self._kwargs:
x += ', kwargs=' + str(self._kwargs)
if self._key[0] is not None:
x += ', exitpriority=' + str(self._key[0])
return x + '>'
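# A minimal usage sketch for Finalize (illustrative only; the path below is an
# arbitrary example):
#
#     lock_path = '/tmp/example.lock'
#     resource = open(lock_path, 'w')
#     Finalize(resource, os.unlink, args=(lock_path,), exitpriority=10)
#
# The callback fires when `resource` is garbage collected or, because an
# exitpriority is given, at interpreter exit via _exit_function().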
def _run_finalizers(minpriority=None):
'''
Run all finalizers whose exit priority is not None and at least minpriority
Finalizers with highest priority are called first; finalizers with
the same priority will be called in reverse order of creation.
'''
if _finalizer_registry is None:
# This function may be called after this module's globals are
# destroyed. See the _exit_function function in this module for more
# notes.
return
if minpriority is None:
f = lambda p : p[0] is not None
else:
f = lambda p : p[0] is not None and p[0] >= minpriority
# Careful: _finalizer_registry may be mutated while this function
# is running (either by a GC run or by another thread).
# list(_finalizer_registry) should be atomic, while
# list(_finalizer_registry.items()) is not.
keys = [key for key in list(_finalizer_registry) if f(key)]
keys.sort(reverse=True)
for key in keys:
finalizer = _finalizer_registry.get(key)
# key may have been removed from the registry
if finalizer is not None:
sub_debug('calling %s', finalizer)
try:
finalizer()
except Exception:
import traceback
traceback.print_exc()
if minpriority is None:
_finalizer_registry.clear()
#
# Clean up on exit
#
def is_exiting():
'''
Returns true if the process is shutting down
'''
return _exiting or _exiting is None
_exiting = False
def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers,
active_children=process.active_children,
current_process=process.current_process):
# We hold on to references to functions in the arglist due to the
# situation described below, where this function is called after this
# module's globals are destroyed.
global _exiting
if not _exiting:
_exiting = True
info('process shutting down')
debug('running all "atexit" finalizers with priority >= 0')
_run_finalizers(0)
if current_process() is not None:
# We check if the current process is None here because if
# it's None, any call to ``active_children()`` will raise
# an AttributeError (active_children winds up trying to
# get attributes from util._current_process). One
# situation where this can happen is if someone has
# manipulated sys.modules, causing this module to be
# garbage collected. The destructor for the module type
# then replaces all values in the module dict with None.
# For instance, after setuptools runs a test it replaces
# sys.modules with a copy created earlier. See issues
# #9775 and #15881. Also related: #4106, #9205, and
# #9207.
for p in active_children():
if p.daemon:
info('calling terminate() for daemon %s', p.name)
p._popen.terminate()
for p in active_children():
info('calling join() for process %s', p.name)
p.join()
debug('running the remaining "atexit" finalizers')
_run_finalizers()
atexit.register(_exit_function)
#
# Some fork aware types
#
class ForkAwareThreadLock(object):
def __init__(self):
self._reset()
register_after_fork(self, ForkAwareThreadLock._reset)
def _reset(self):
self._lock = threading.Lock()
self.acquire = self._lock.acquire
self.release = self._lock.release
def __enter__(self):
return self._lock.__enter__()
def __exit__(self, *args):
return self._lock.__exit__(*args)
class ForkAwareLocal(threading.local):
def __init__(self):
register_after_fork(self, lambda obj : obj.__dict__.clear())
def __reduce__(self):
return type(self), ()
#
# Close fds except those specified
#
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except Exception:
MAXFD = 256
def close_all_fds_except(fds):
fds = list(fds) + [-1, MAXFD]
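    # -1 and MAXFD act as sentinels so that every gap between consecutive kept
    # descriptors (including below the first and above the last) is closed.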
fds.sort()
assert fds[-1] == MAXFD, 'fd too large'
for i in range(len(fds) - 1):
os.closerange(fds[i]+1, fds[i+1])
#
# Close sys.stdin and replace stdin with os.devnull
#
def _close_stdin():
if sys.stdin is None:
return
try:
sys.stdin.close()
except (OSError, ValueError):
pass
try:
fd = os.open(os.devnull, os.O_RDONLY)
try:
sys.stdin = open(fd, closefd=False)
except:
os.close(fd)
raise
except (OSError, ValueError):
pass
#
# Flush standard streams, if any
#
def _flush_std_streams():
try:
sys.stdout.flush()
except (AttributeError, ValueError):
pass
try:
sys.stderr.flush()
except (AttributeError, ValueError):
pass
#
# Start a program with only specified fds kept open
#
def spawnv_passfds(path, args, passfds):
import _posixsubprocess
passfds = tuple(sorted(map(int, passfds)))
errpipe_read, errpipe_write = os.pipe()
try:
return _posixsubprocess.fork_exec(
args, [os.fsencode(path)], True, passfds, None, None,
-1, -1, -1, -1, -1, -1, errpipe_read, errpipe_write,
False, False, None)
finally:
os.close(errpipe_read)
os.close(errpipe_write)
def close_fds(*fds):
"""Close each file descriptor given as an argument"""
for fd in fds:
os.close(fd)
def _cleanup_tests():
"""Cleanup multiprocessing resources when multiprocessing tests
completed."""
from test import support
# cleanup multiprocessing
process._cleanup()
# Stop the ForkServer process if it's running
from multiprocess import forkserver
forkserver._forkserver._stop()
# Stop the ResourceTracker process if it's running
from multiprocess import resource_tracker
resource_tracker._resource_tracker._stop()
# bpo-37421: Explicitly call _run_finalizers() to remove immediately
# temporary directories created by multiprocessing.util.get_temp_dir().
_run_finalizers()
support.gc_collect()
support.reap_children()
| 28.444898
| 79
| 0.628856
|
0f731c83ef0bbc42376119971e1ceab1e5253c82
| 3,622
|
py
|
Python
|
cfitall/utils.py
|
wryfi/cfitall
|
f933aae6bd1c42abd6af2bb06656cf37b4ee2888
|
[
"MIT"
] | 1
|
2022-02-24T00:01:06.000Z
|
2022-02-24T00:01:06.000Z
|
cfitall/utils.py
|
wryfi/cfitall
|
f933aae6bd1c42abd6af2bb06656cf37b4ee2888
|
[
"MIT"
] | 8
|
2021-06-02T16:11:34.000Z
|
2022-03-15T23:18:07.000Z
|
cfitall/utils.py
|
wryfi/cfitall
|
f933aae6bd1c42abd6af2bb06656cf37b4ee2888
|
[
"MIT"
] | 1
|
2021-11-06T22:20:23.000Z
|
2021-11-06T22:20:23.000Z
|
"""
This module is full of little recursive functions that help with
converting string-path notations into nested dicts, and vice
versa. A string-path, or "flattened" dict might look like this:
{'foo.bar.bat': 'asdfhjklkjhfdsa'}
As an "expanded" or "nested" dict, the same data would be:
{'foo': {'bar': {'bat': 'asdfhjklkjhfdsa'}}}
"""
from collections.abc import Mapping
def add_keys(destdict, srclist, value=None):
"""
Nests keys from srclist into destdict, with optional value set on the final key.
:param dict destdict: the dict to update with values from srclist
:param list srclist: list of keys to add to destdict
:param value: final value to set
:return: destination dictionary
:rtype: dict
"""
if len(srclist) > 1:
destdict[srclist[0]] = {}
destdict[srclist[0]] = destdict.get(srclist[0], {})
add_keys(destdict[srclist[0]], srclist[1:], value)
else:
destdict[srclist[0]] = value
return destdict
def expand_flattened_path(flattened_path, value=None, separator='.'):
"""
Expands a dotted path into a nested dict; if value is set, the
final key in the path will be set to value.
:param str flattened_path: flattened path to expand
:param str separator: character(s) separating path components
:param value: set final key to this value
:return: nested dictionary
:rtype: dict
"""
split_list = flattened_path.split(separator)
return add_keys({}, split_list, value)
def flatten_dict(nested):
"""
Flattens a deeply nested dictionary into a flattened dictionary.
For example `{'foo': {'bar': 'baz'}}` would be flattened to
`{'foo.bar': 'baz'}`.
:param dict nested: nested dictionary of configuration data
:rtype: dict
:return: dict of key-value pairs
"""
flattened = {}
for key, value in nested.items():
if isinstance(value, Mapping):
for subkey, subval in value.items():
newkey = '.'.join([key, subkey])
flattened[newkey] = subval
flatten_dict(flattened)
else:
flattened[key] = value
mappings = [isinstance(value, Mapping) for key, value in flattened.items()]
if len(set(mappings)) == 1 and set(mappings).pop() is False:
return flattened
else:
return flatten_dict(flattened)
def merge_dicts(source, destination):
"""
Performs a deep merge of two nested dicts by expanding all Mapping objects
until they reach a non-mapping value (e.g. a list, string, int, etc.) and
copying these from the source to the destination.
:param dict source: the source dictionary to copy values from
:param dict destination: the dictionary to update with values from source
:return: destination
:rtype: dict
"""
for key, value in source.items():
key = key.lower() if isinstance(key, str) else key
if isinstance(value, Mapping):
node = destination.setdefault(key, {})
merge_dicts(value, node)
else:
destination[key] = value
return destination
def expand_flattened_dict(flattened, separator='.'):
"""
Expands a flattened dict into a nested dict.
:param dict flattened: the flattened dict to expand
:param str separator: character used for separating paths in flattened dict
:return: nested dict
:rtype: dict
"""
merged = {}
for key, value in flattened.items():
expanded = expand_flattened_path(key, value=value, separator=separator)
merged = merge_dicts(merged, expanded)
return merged
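# A short usage sketch (illustrative): flattening a nested dict with the helpers
# above and expanding it back should round-trip losslessly.
if __name__ == '__main__':
    example = {'foo': {'bar': {'bat': 'value'}}}
    flattened_example = flatten_dict(example)  # {'foo.bar.bat': 'value'}
    assert expand_flattened_dict(flattened_example) == example
    print(flattened_example)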
| 32.630631
| 84
| 0.6582
|
043b47b968d827a80ba23b13bb03e7ef81191cf1
| 13,358
|
py
|
Python
|
official/core/base_trainer_test.py
|
lightbooster/models
|
833900b085a353712010c26c66ae9111246b5ac7
|
[
"Apache-2.0"
] | 2
|
2021-03-30T12:57:54.000Z
|
2021-03-30T13:18:48.000Z
|
official/core/base_trainer_test.py
|
lightbooster/models
|
833900b085a353712010c26c66ae9111246b5ac7
|
[
"Apache-2.0"
] | 2
|
2021-03-24T23:09:46.000Z
|
2021-03-25T18:42:10.000Z
|
official/core/base_trainer_test.py
|
lightbooster/models
|
833900b085a353712010c26c66ae9111246b5ac7
|
[
"Apache-2.0"
] | 1
|
2021-05-08T09:22:42.000Z
|
2021-05-08T09:22:42.000Z
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_models.core.trainers.trainer."""
# pylint: disable=g-direct-tensorflow-import
import multiprocessing
import os
import sys
from absl.testing import parameterized
import numpy as np
import portpicker
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.core import base_trainer as trainer_lib
from official.core import config_definitions as cfg
from official.core import train_lib
from official.utils.testing import mock_task
TPU_TEST = 'test_tpu' in sys.argv[0]
GPU_TEST = 'test_gpu' in sys.argv[0]
def all_strategy_combinations():
return combinations.combine(
distribution=[
strategy_combinations.default_strategy,
strategy_combinations.cloud_tpu_strategy,
strategy_combinations.one_device_strategy_gpu,
],)
def create_in_process_cluster(num_workers, num_ps):
"""Creates and starts local servers and returns the cluster_resolver."""
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {}
cluster_dict['worker'] = ['localhost:%s' % port for port in worker_ports]
if num_ps > 0:
cluster_dict['ps'] = ['localhost:%s' % port for port in ps_ports]
cluster_spec = tf.train.ClusterSpec(cluster_dict)
# Workers need some inter_ops threads to work properly.
worker_config = tf.compat.v1.ConfigProto()
if multiprocessing.cpu_count() < num_workers + 1:
worker_config.inter_op_parallelism_threads = num_workers + 1
for i in range(num_workers):
tf.distribute.Server(
cluster_spec,
job_name='worker',
task_index=i,
config=worker_config,
protocol='grpc')
for i in range(num_ps):
tf.distribute.Server(
cluster_spec, job_name='ps', task_index=i, protocol='grpc')
cluster_resolver = tf.distribute.cluster_resolver.SimpleClusterResolver(
cluster_spec, rpc_layer='grpc')
return cluster_resolver
def dataset_fn(input_context=None):
del input_context
def dummy_data(_):
return tf.zeros((1, 1), dtype=tf.float32)
dataset = tf.data.Dataset.range(1)
dataset = dataset.repeat()
dataset = dataset.map(
dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
class MockAsyncTrainer(trainer_lib._AsyncTrainer):
"""Mock AsyncTrainer to test the _AsyncTrainer class."""
def __init__(self):
self._strategy = tf.distribute.get_strategy()
self.init_async()
self.global_step = tf.Variable(
0,
dtype=tf.int64,
name='global_step',
trainable=False,
aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
self.eval_global_step = tf.Variable(
0,
dtype=tf.int64,
name='eval_global_step',
trainable=False,
aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
train_dataset = self.distribute_dataset(dataset_fn)
trainer_lib.orbit.StandardTrainer.__init__(
self, train_dataset, options=trainer_lib.orbit.StandardTrainerOptions())
eval_dataset = self.distribute_dataset(dataset_fn)
trainer_lib.orbit.StandardEvaluator.__init__(
self,
eval_dataset,
options=trainer_lib.orbit.StandardEvaluatorOptions(
use_tf_while_loop=True))
def train_loop_begin(self):
self.global_step.assign(0)
def train_step(self, iterator):
def replica_step(_):
self.global_step.assign_add(1)
self._strategy.run(replica_step, args=(next(iterator),))
def train_loop_end(self):
self.join()
return self.global_step.numpy()
def eval_begin(self):
self.eval_global_step.assign(0)
def eval_step(self, iterator):
def replica_step(_):
self.eval_global_step.assign_add(1)
self._strategy.run(replica_step, args=(next(iterator),))
def eval_end(self):
self.join()
return self.eval_global_step.numpy()
class TrainerTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super().setUp()
self._config = cfg.ExperimentConfig(
trainer=cfg.TrainerConfig(
optimizer_config=cfg.OptimizationConfig({
'optimizer': {
'type': 'sgd'
},
'learning_rate': {
'type': 'constant'
}
})))
def create_test_trainer(self, config, model_dir=None, task=None):
task = task or mock_task.MockTask(config.task, logging_dir=model_dir)
ckpt_exporter = train_lib.maybe_create_best_ckpt_exporter(config, model_dir)
trainer = trainer_lib.Trainer(
config,
task,
model=task.build_model(),
optimizer=task.create_optimizer(config.trainer.optimizer_config,
config.runtime),
checkpoint_exporter=ckpt_exporter)
return trainer
@combinations.generate(all_strategy_combinations())
def test_trainer_train(self, distribution):
with distribution.scope():
trainer = self.create_test_trainer(self._config)
logs = trainer.train(tf.convert_to_tensor(5, dtype=tf.int32))
self.assertIn('training_loss', logs)
self.assertIn('learning_rate', logs)
def test_base_async_trainer(self):
if TPU_TEST or GPU_TEST:
      self.skipTest('Async training is not available on TPU/GPU.')
num_workers = 3
num_ps = 2
cluster_resolver = create_in_process_cluster(num_workers, num_ps)
distribution = tf.distribute.experimental.ParameterServerStrategy(
cluster_resolver)
with distribution.scope():
trainer = MockAsyncTrainer()
trainer.init_async()
self.assertIsInstance(
trainer._coordinator,
tf.distribute.experimental.coordinator.ClusterCoordinator)
self.assertEqual(trainer.train(tf.constant(10)), 10)
self.assertEqual(trainer.evaluate(tf.constant(11)), 11)
def test_async_trainer_train(self):
if TPU_TEST or GPU_TEST:
      self.skipTest('Async training is not available on TPU/GPU.')
num_workers = 3
num_ps = 2
cluster_resolver = create_in_process_cluster(num_workers, num_ps)
distribution = tf.distribute.experimental.ParameterServerStrategy(
cluster_resolver)
with distribution.scope():
config = cfg.ExperimentConfig(**self._config.as_dict())
config.trainer.eval_tf_while_loop = True
trainer = self.create_test_trainer(config)
logs = trainer.train(tf.convert_to_tensor(5, dtype=tf.int32))
self.assertIn('training_loss', logs)
self.assertIn('learning_rate', logs)
def test_async_trainer_validate(self):
if TPU_TEST or GPU_TEST:
      self.skipTest('Async training is not available on TPU/GPU.')
num_workers = 3
num_ps = 2
cluster_resolver = create_in_process_cluster(num_workers, num_ps)
distribution = tf.distribute.experimental.ParameterServerStrategy(
cluster_resolver)
with distribution.scope():
config = cfg.ExperimentConfig(**self._config.as_dict())
config.trainer.eval_tf_while_loop = True
trainer = self.create_test_trainer(config)
logs = trainer.evaluate(tf.convert_to_tensor(5, dtype=tf.int32))
self.assertIn('acc', logs)
self.assertIn('validation_loss', logs)
@combinations.generate(all_strategy_combinations())
def test_trainer_validate(self, distribution):
with distribution.scope():
trainer = self.create_test_trainer(self._config)
logs = trainer.evaluate(tf.convert_to_tensor(5, dtype=tf.int32))
self.assertEqual(logs['counter'], 5. * distribution.num_replicas_in_sync)
self.assertIn('validation_loss', logs)
@combinations.generate(all_strategy_combinations())
def test_trainer_validate_without_loss(self, distribution):
class MockTaskWithoutValidationLoss(mock_task.MockTask):
def validation_step(self, inputs, model, metrics=None):
# Disable validation loss.
logs = super().validation_step(inputs, model)
del logs[self.loss]
return logs
with distribution.scope():
task = MockTaskWithoutValidationLoss()
trainer = self.create_test_trainer(self._config, task=task)
logs = trainer.evaluate(tf.convert_to_tensor(5, dtype=tf.int32))
self.assertEqual(logs['counter'], 5. * distribution.num_replicas_in_sync)
self.assertNotIn('validation_loss', logs)
@combinations.generate(
combinations.combine(
mixed_precision_dtype=['float32', 'bfloat16', 'float16'],
loss_scale=[None, 'dynamic', 128, 256],
))
def test_configure_optimizer(self, mixed_precision_dtype, loss_scale):
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(
mixed_precision_dtype=mixed_precision_dtype, loss_scale=loss_scale),
trainer=cfg.TrainerConfig(
optimizer_config=cfg.OptimizationConfig({
'optimizer': {
'type': 'sgd'
},
'learning_rate': {
'type': 'constant'
},
'use_experimental_api': {
'type': False
},
})))
trainer = self.create_test_trainer(config)
if mixed_precision_dtype != 'float16':
self.assertIsInstance(trainer.optimizer, tf.keras.optimizers.SGD)
elif mixed_precision_dtype == 'float16' and loss_scale is None:
self.assertIsInstance(trainer.optimizer, tf.keras.optimizers.SGD)
else:
self.assertIsInstance(trainer.optimizer,
tf.keras.mixed_precision.LossScaleOptimizer)
metrics = trainer.train(tf.convert_to_tensor(5, dtype=tf.int32))
self.assertIn('training_loss', metrics)
def test_export_best_ckpt(self):
config = cfg.ExperimentConfig(
trainer=cfg.TrainerConfig(
best_checkpoint_export_subdir='best_ckpt',
best_checkpoint_eval_metric='acc',
optimizer_config=cfg.OptimizationConfig({
'optimizer': {
'type': 'sgd'
},
'learning_rate': {
'type': 'constant'
}
})))
model_dir = self.get_temp_dir()
trainer = self.create_test_trainer(config, model_dir=model_dir)
trainer.train(tf.convert_to_tensor(1, dtype=tf.int32))
trainer.evaluate(tf.convert_to_tensor(1, dtype=tf.int32))
self.assertTrue(
tf.io.gfile.exists(os.path.join(model_dir, 'best_ckpt', 'info.json')))
def test_recovery(self):
config = cfg.ExperimentConfig(
trainer=cfg.TrainerConfig(
loss_upper_bound=0.5,
recovery_max_trials=2,
optimizer_config=cfg.OptimizationConfig({
'optimizer': {
'type': 'sgd'
},
'learning_rate': {
'type': 'constant'
}
})))
model_dir = self.get_temp_dir()
trainer = self.create_test_trainer(config, model_dir=model_dir)
checkpoint_manager = tf.train.CheckpointManager(
trainer.checkpoint, self.get_temp_dir(), max_to_keep=2)
checkpoint_manager.save()
trainer.add_recovery(config.trainer, checkpoint_manager=checkpoint_manager)
before_weights = trainer.model.get_weights()
_ = trainer.train(tf.convert_to_tensor(1, dtype=tf.int32))
    # The training loss is 1.0 and upper_bound is 0.5, so the recovery happens.
after_weights = trainer.model.get_weights()
for left, right in zip(before_weights, after_weights):
self.assertAllEqual(left, right)
    # Let the loss be NaN and max_trials = 0 to see the RuntimeError.
config = cfg.ExperimentConfig(
trainer=cfg.TrainerConfig(
recovery_max_trials=0,
optimizer_config=cfg.OptimizationConfig({
'optimizer': {
'type': 'sgd'
},
'learning_rate': {
'type': 'constant'
}
})))
task = mock_task.MockTask(config.task, logging_dir=model_dir)
def build_losses(labels, model_outputs, aux_losses=None):
del labels, model_outputs
return tf.constant([np.nan], tf.float32) + aux_losses
task.build_losses = build_losses
trainer = trainer_lib.Trainer(
config,
task,
model=task.build_model(),
optimizer=task.create_optimizer(config.trainer.optimizer_config,
config.runtime))
trainer.add_recovery(config.trainer, checkpoint_manager=checkpoint_manager)
with self.assertRaises(RuntimeError):
_ = trainer.train(tf.convert_to_tensor(2, dtype=tf.int32))
if __name__ == '__main__':
tf.test.main()
| 35.716578
| 80
| 0.679593
|
bc5559eb9cc910855c46e491c48065222e2878f3
| 94
|
py
|
Python
|
selfdrive/hardware/__init__.py
|
JoeOIVOV/ArnePilot
|
82c71c6f5af1ba504b748940f22cc0ac98692662
|
[
"MIT"
] | 116
|
2018-03-07T09:00:10.000Z
|
2020-04-06T18:37:45.000Z
|
selfdrive/hardware/__init__.py
|
JoeOIVOV/ArnePilot
|
82c71c6f5af1ba504b748940f22cc0ac98692662
|
[
"MIT"
] | 49
|
2018-09-14T19:05:27.000Z
|
2020-03-30T11:46:54.000Z
|
selfdrive/hardware/__init__.py
|
JoeOIVOV/ArnePilot
|
82c71c6f5af1ba504b748940f22cc0ac98692662
|
[
"MIT"
] | 143
|
2018-10-12T18:21:21.000Z
|
2020-04-06T00:16:36.000Z
|
import os
EON = os.path.isfile('/EON')
TICI = os.path.isfile('/TICI')
PC = not (EON or TICI)
| 15.666667
| 30
| 0.638298
|
1ae97b8d3967e276606fa6fe02c2a9c28d13bc3f
| 30,280
|
py
|
Python
|
test/codes_tests/test_bse.py
|
joshuawall/amuse
|
c2034074ee76c08057c4faa96c32044ab40952e9
|
[
"Apache-2.0"
] | 1
|
2019-12-28T22:47:51.000Z
|
2019-12-28T22:47:51.000Z
|
test/codes_tests/test_bse.py
|
joshuawall/amuse
|
c2034074ee76c08057c4faa96c32044ab40952e9
|
[
"Apache-2.0"
] | null | null | null |
test/codes_tests/test_bse.py
|
joshuawall/amuse
|
c2034074ee76c08057c4faa96c32044ab40952e9
|
[
"Apache-2.0"
] | 2
|
2021-11-19T04:41:37.000Z
|
2021-11-20T02:11:17.000Z
|
from amuse.community.bse.interface import BSE, BSEInterface
from amuse.test.amusetest import TestWithMPI
from amuse.units import units
from amuse.units import constants
from amuse.datamodel import Particles
import numpy
class TestBSEInterface(TestWithMPI):
class state(object):
def __init__(self):
self.type1 = 0.0
self.type2 = 0.0
self.initial_mass1 = 0.0
self.initial_mass2 = 0.0
self.mass1 = 0.0
self.mass2 = 0.0
self.radius1 = 0.0
self.radius2 = 0.0
self.luminosity1 = 0.0
self.luminosity2 = 0.0
self.core_mass1 = 0.0
self.core_mass2 = 0.0
self.core_radius1 = 0.0
self.core_radius2 = 0.0
self.envelope_mass1 = 0.0
self.envelope_mass2 = 0.0
self.envelope_radius1 = 0.0
self.envelope_radius2 = 0.0
self.spin1 = 0.0
self.spin2 = 0.0
self.epoch1 = 0.0
self.epoch2 = 0.0
self.t_ms1 = 0.0
self.t_ms2 = 0.0
self.bse_age = 0.0
self.orbital_period = 0.0
self.eccentricity = 0.0
def test1(self):
print "Test initialization..."
instance = BSEInterface()
metallicity = 0.02
neta = 0.5
bwind = 0.0
hewind = 0.5
alpha1 = 1.0
CElambda = 0.5
ceflag = 0
tflag = 1
ifflag = 0
wdflag = 1
bhflag = 0
nsflag = 1
mxns = 3.0
idum = 29769
pts1 = 0.05
pts2 = 0.01
pts3 = 0.02
sigma = 190.0
beta = 1.0/8.0
xi = 1.0
acc2 = 3.0/2.0
epsnov = 0.001
eddfac = 1.0
gamma = -1.0
status = instance.initialize(metallicity,
neta, bwind, hewind, alpha1, CElambda,
ceflag, tflag, ifflag, wdflag, bhflag,
nsflag, mxns, idum, pts1, pts2, pts3,
sigma,beta,xi,acc2,epsnov,eddfac,gamma)
self.assertEqual(status,0)
instance.stop()
def test2(self):
print "Test basic operations (legacy functions evolve & get_time_step)..."
instance = BSEInterface()
status = instance.initialize(0.02, 0.5, 0.0, 0.5, 1.0, 0.5, 0, 1, 0, 1, 0, 1, 3.0,
29769, 0.05, 0.01, 0.02, 190.0, 1.0/8.0, 1.0, 3.0/2.0, 0.001, 1.0, -1.0)
new_state = self.state()
new_state.mass1 = 3.0
new_state.mass2 = 1.0
new_state.initial_mass1 = 3.0
new_state.initial_mass2 = 1.0
new_state.type1 = 1.0
new_state.type2 = 1.0
new_state.end_time = 1e-06
new_state.orbital_period = 200.0
new_state.eccentricity = 0.5
result = instance.evolve_binary(
new_state.type1,new_state.type2,new_state.initial_mass1,new_state.initial_mass2,
new_state.mass1, new_state.mass2, new_state.radius1, new_state.radius2,
new_state.luminosity1, new_state.luminosity2, new_state.core_mass1,
new_state.core_mass2, new_state.core_radius1, new_state.core_radius2,
new_state.envelope_mass1, new_state.envelope_mass2, new_state.envelope_radius1,
new_state.envelope_radius2, new_state.spin1, new_state.spin2, new_state.epoch1,
new_state.epoch2, new_state.t_ms1, new_state.t_ms2, new_state.bse_age,
new_state.orbital_period, new_state.eccentricity, new_state.end_time
)
updated_state = self.state()
(updated_state.type1,updated_state.type2,updated_state.initial_mass1,updated_state.initial_mass2,
updated_state.mass1, updated_state.mass2, updated_state.radius1, updated_state.radius2,
updated_state.luminosity1, updated_state.luminosity2, updated_state.core_mass1,
updated_state.core_mass2, updated_state.core_radius1, updated_state.core_radius2,
updated_state.envelope_mass1,updated_state.envelope_mass2,updated_state.envelope_radius1,
updated_state.envelope_radius2, updated_state.spin1, updated_state.spin2,
updated_state.epoch1, updated_state.epoch2, updated_state.t_ms1, updated_state.t_ms2,
updated_state.bse_age, updated_state.orbital_period,
updated_state.eccentricity, updated_state.end_time) = result
expected = {
'radius2' : '0x1.c6c8a1c793bcep-1',
'luminosity2' : '0x1.653b1b2d0333bp-1',
'core_mass2' : '0x0.0p+0',
'bse_age' : '0x1.0c6f7a0b5ed8dp-20',
'end_time' : '0x1.0c6f7a0b5ed8dp-20',
'envelope_mass2' : '0x1.0d6fc100ab510p-5',
'mass2' : '0x1.0000000000000p+0',
'initial_mass2' : '0x1.0000000000000p+0',
'envelope_radius2' : '0x1.db27631ba0e5ap-3',
'core_radius2' : '0x0.0p+0',
'epoch2' : '0x0.0p+0',
't_ms2' : '0x1.57d90abe54643p+13',
'spin2' : '0x1.07413b0522aebp+10',
};
for x in expected:
#print "'%s' : '%s'," % (x, getattr(updated_state, x).hex())
self.assertAlmostRelativeEqual(float.fromhex(expected[x]),getattr(updated_state, x))
self.assertEquals(updated_state.end_time, 1e-06)
dt = instance.get_time_step(updated_state.type1, updated_state.type2,
updated_state.initial_mass1, updated_state.initial_mass2, updated_state.mass1,
updated_state.mass2, updated_state.t_ms1, updated_state.t_ms2,
updated_state.epoch1, updated_state.epoch2, updated_state.bse_age)
self.assertAlmostEqual(dt, 18.8768, 3)
instance.stop()
def test3(self):
print "Test whether the interface can handle arrays..."
instance = BSEInterface()
status = instance.initialize(0.02, 0.5, 0.0, 0.5, 1.0, 0.5, 0, 1, 0, 1, 0, 1, 3.0,
29769, 0.05, 0.01, 0.02, 190.0, 1.0/8.0, 1.0, 3.0/2.0, 0.001, 1.0, -1.0)
masses1 = [10.0,5.0,4.0]
masses2 = [1.0,1.0,1.0]
types1 = types2 = [1,1,1]
orbital_periods = [100.0,200.0,300.0]
eccentricities = [0.5,0.6,0.7]
radii1 = luminosity1 = core_mass1 = core_radius1 = envelope_mass1 =\
envelope_radius1 = spin1 = epoch1 = t_ms1 = [0.0,0.0,0.0]
radii2 = luminosity2 = core_mass2 = core_radius2 = envelope_mass2 =\
envelope_radius2 = spin2 = epoch2 = t_ms2 = [0.0,0.0,0.0]
init_mass1 = masses1
init_mass2 = masses2
bse_age = [0.0,0.0,0.0]
end_time = [10.0, 10.0, 10.0]
result = instance.evolve_binary(
types1, types2, init_mass1, init_mass2,
masses1, masses2, radii1, radii2,
luminosity1, luminosity2, core_mass1, core_mass2,
core_radius1, core_radius2, envelope_mass1, envelope_mass2,
envelope_radius1, envelope_radius2, spin1, spin2,
epoch1, epoch2, t_ms1, t_ms2,
bse_age, orbital_periods, eccentricities, end_time
)
self.assertAlmostEqual(result['mass1'][0], 9.977, 2)
self.assertAlmostEqual(result['mass1'][1], 5.0, 2)
self.assertAlmostEqual(result['mass1'][2], 4.0, 2)
instance.stop()
def test4(self):
print "Test large number of particles..."
number_of_particles = 2000
instance = BSEInterface()
status = instance.initialize(0.02, 0.5, 0.0, 0.5, 1.0, 0.5, 0, 1, 0, 1, 0, 1, 3.0,
29769, 0.05, 0.01, 0.02, 190.0, 1.0/8.0, 1.0, 3.0/2.0, 0.001, 1.0, -1.0)
        masses1 = [1.0 + ((x / (1.0 * number_of_particles)) * 10.0) for x in range(1, number_of_particles + 1)]
        masses2 = [2.0 + ((x / (1.0 * number_of_particles)) * 5.0) for x in range(1, number_of_particles + 1)]
        orbital_periods = [100.0 + ((x / (1.0 * number_of_particles)) * 900.0) for x in range(1, number_of_particles + 1)]
        eccentricities = [0.5 + ((x / (1.0 * number_of_particles)) * 0.4) for x in range(1, number_of_particles + 1)]
types1 = types2 = [1 for x in range(1,number_of_particles+1)]
radii1 = luminosity1 = core_mass1 = core_radius1 = envelope_mass1 =\
envelope_radius1 = spin1 = epoch1 = t_ms1 =\
radii2 = luminosity2 = core_mass2 = core_radius2 = envelope_mass2 =\
envelope_radius2 = spin2 = epoch2 = t_ms2 =\
bse_age = [0.0 for x in range(1,number_of_particles+1)]
end_time = [1.0 for x in range(1,number_of_particles+1)]
init_mass1 = masses1
init_mass2 = masses2
result = instance.evolve_binary(
types1, types2, init_mass1, init_mass2,
masses1, masses2, radii1, radii2,
luminosity1, luminosity2, core_mass1, core_mass2,
core_radius1, core_radius2, envelope_mass1, envelope_mass2,
envelope_radius1, envelope_radius2, spin1, spin2,
epoch1, epoch2, t_ms1, t_ms2,
bse_age, orbital_periods, eccentricities, end_time
)
self.assertEquals(len(result['mass1']), number_of_particles)
instance.stop()
class TestBSE(TestWithMPI):
def test1(self):
print "Testing evolution of a close binary system..."
instance = BSE()
instance.initialize_code()
instance.parameters.metallicity = 0.001
instance.parameters.common_envelope_efficiency = 3.0
instance.parameters.Eddington_mass_transfer_limit_factor = 10.0
instance.commit_parameters()
stars = Particles(2)
stars[0].mass = 3.0 | units.MSun
stars[1].mass = 0.3 | units.MSun
orbital_period = 200.0 | units.day
semi_major_axis = instance.orbital_period_to_semi_major_axis(orbital_period, stars[0].mass , stars[1].mass)
instance.particles.add_particles(stars)
binaries = Particles(1)
binary = binaries[0]
binary.semi_major_axis = semi_major_axis
binary.eccentricity = 0.5
binary.child1 = stars[0]
binary.child2 = stars[1]
instance.binaries.add_particles(binaries)
from_bse_to_model = instance.particles.new_channel_to(stars)
from_bse_to_model.copy()
from_bse_to_model_binaries = instance.binaries.new_channel_to(binaries)
from_bse_to_model_binaries.copy()
previous_type = binary.child1.stellar_type
results = []
current_time = 0 | units.Myr
while current_time < (480 | units.Myr):
instance.update_time_steps()
# The next line appears a bit weird, but saves time for this simple test.
current_time = current_time + max(5.0*instance.binaries[0].time_step, 0.3 | units.Myr)
instance.evolve_model(current_time)
from_bse_to_model.copy()
from_bse_to_model_binaries.copy()
if not binary.child1.stellar_type == previous_type:
results.append((binary.age, binary.child1.mass, binary.child1.stellar_type))
previous_type = binary.child1.stellar_type
self.assertEqual(len(results), 6)
types = (
"Hertzsprung Gap",
"First Giant Branch",
"Core Helium Burning",
"First Asymptotic Giant Branch",
"Hertzsprung Gap Naked Helium star",
"Carbon/Oxygen White Dwarf",
)
for result, expected in zip(results, types):
self.assertEquals(str(result[2]), expected)
times = (
284.8516 | units.Myr,
287.0595 | units.Myr,
287.7848 | units.Myr,
331.1454 | units.Myr,
331.3983 | units.Myr,
332.2786 | units.Myr,
)
for result, expected in zip(results, times):
self.assertAlmostEqual(result[0].value_in(units.Myr), expected.value_in(units.Myr), 0)
masses = (
3.000 | units.MSun,
3.000 | units.MSun,
2.999 | units.MSun,
2.956 | units.MSun,
0.888 | units.MSun,
0.707 | units.MSun,
)
for result, expected in zip(results, masses):
self.assertAlmostEqual(result[1].value_in(units.MSun), expected.value_in(units.MSun), 2)
instance.stop()
def test2(self):
print "Testing evolution of a wide binary system."
instance = BSE()
instance.parameters.metallicity = 0.001
instance.parameters.common_envelope_efficiency = 3.0
instance.parameters.Eddington_mass_transfer_limit_factor = 10.0
instance.commit_parameters()
stars = Particles(2)
stars[0].mass = 3.0 | units.MSun
stars[1].mass = 0.3 | units.MSun
orbital_period = 2.0e5 | units.day
semi_major_axis = instance.orbital_period_to_semi_major_axis(orbital_period, stars[0].mass , stars[1].mass)
instance.particles.add_particles(stars)
binaries = Particles(1)
binary = binaries[0]
binary.semi_major_axis = semi_major_axis
binary.eccentricity = 0.5
binary.child1 = stars[0]
binary.child2 = stars[1]
instance.binaries.add_particles(binaries)
from_bse_to_model = instance.particles.new_channel_to(stars)
from_bse_to_model.copy()
from_bse_to_model_binaries = instance.binaries.new_channel_to(binaries)
from_bse_to_model_binaries.copy()
previous_type = binary.child1.stellar_type
results = []
current_time = 0 | units.Myr
while current_time < (335 | units.Myr):
instance.update_time_steps()
# The next line appears a bit weird, but saves time for this simple test.
current_time = current_time + max(2.0*instance.binaries[0].time_step, 0.04 | units.Myr)
instance.evolve_model(current_time)
from_bse_to_model.copy()
from_bse_to_model_binaries.copy()
if not binary.child1.stellar_type == previous_type:
results.append((binary.age, binary.child1.mass, binary.child1.stellar_type))
previous_type = binary.child1.stellar_type
        print(results)
self.assertEqual(len(results), 6)
times = (
284.8516 | units.Myr,
287.0595 | units.Myr,
287.7848 | units.Myr,
331.1454 | units.Myr,
332.7407 | units.Myr,
333.4146 | units.Myr
)
for result, expected in zip(results, times):
self.assertAlmostEqual(result[0].value_in(units.Myr), expected.value_in(units.Myr), 0)
masses = (
3.000 | units.MSun,
3.000 | units.MSun,
2.999 | units.MSun,
2.956 | units.MSun,
2.919 | units.MSun,
0.928 | units.MSun
)
for result, expected in zip(results, masses):
self.assertAlmostEqual(result[1].value_in(units.MSun), expected.value_in(units.MSun), 2)
types = (
"Hertzsprung Gap",
"First Giant Branch",
"Core Helium Burning",
"First Asymptotic Giant Branch",
"Second Asymptotic Giant Branch",
"Carbon/Oxygen White Dwarf",
)
for result, expected in zip(results, types):
self.assertEquals(str(result[2]), expected)
instance.stop()
def test3(self):
print "Testing standard BSE example 2..."
instance = BSE()
instance.parameters.common_envelope_efficiency = 3.0
instance.parameters.Eddington_mass_transfer_limit_factor = 10.0
instance.commit_parameters()
stars = Particles(2)
stars[0].mass = 7.816 | units.MSun
stars[1].mass = 4.387 | units.MSun
orbital_period = 1964.18453 | units.day
semi_major_axis = instance.orbital_period_to_semi_major_axis(orbital_period, stars[0].mass , stars[1].mass)
instance.particles.add_particles(stars)
binaries = Particles(1)
binary = binaries[0]
binary.semi_major_axis = semi_major_axis
binary.eccentricity = 0.0
binary.child1 = stars[0]
binary.child2 = stars[1]
instance.binaries.add_particles(binaries)
from_bse_to_model = instance.particles.new_channel_to(stars)
from_bse_to_model.copy()
from_bse_to_model_binaries = instance.binaries.new_channel_to(binaries)
from_bse_to_model_binaries.copy()
previous_type1 = binary.child1.stellar_type
previous_type2 = binary.child2.stellar_type
results = []
current_time = 0 | units.Myr
while current_time < (170 | units.Myr):
instance.update_time_steps()
# The next line appears a bit weird, but saves time for this simple test.
current_time = current_time + max(2.0*instance.binaries[0].time_step, 0.04 | units.Myr)
instance.evolve_model(current_time)
from_bse_to_model.copy()
from_bse_to_model_binaries.copy()
if not (binary.child1.stellar_type == previous_type1 and binary.child2.stellar_type == previous_type2):
results.append((binary.age, str(binary.child1.stellar_type)+" and "+str(binary.child2.stellar_type)))
previous_type1 = binary.child1.stellar_type
previous_type2 = binary.child2.stellar_type
        print('\n'.join(map(str, results)))
self.assertEqual(len(results), 13)
times = (
38.9708 | units.Myr,
39.0897 | units.Myr,
39.1213 | units.Myr,
43.8025 | units.Myr,
43.9923 | units.Myr,
44.0686 | units.Myr,
141.7077 | units.Myr,
142.3448 | units.Myr,
142.7827 | units.Myr,
166.1043 | units.Myr,
166.5795 | units.Myr,
166.9627 | units.Myr,
166.9863 | units.Myr
)
for result, expected in zip(results, times):
self.assertAlmostEqual(result[0].value_in(units.Myr), expected.value_in(units.Myr), 0)
types = (
"Hertzsprung Gap and Main Sequence star",
"First Giant Branch and Main Sequence star",
"Core Helium Burning and Main Sequence star",
"First Asymptotic Giant Branch and Main Sequence star",
"Second Asymptotic Giant Branch and Main Sequence star",
"Oxygen/Neon White Dwarf and Main Sequence star",
"Oxygen/Neon White Dwarf and Hertzsprung Gap",
"Oxygen/Neon White Dwarf and First Giant Branch",
"Oxygen/Neon White Dwarf and Core Helium Burning",
"Oxygen/Neon White Dwarf and First Asymptotic Giant Branch",
"Oxygen/Neon White Dwarf and Hertzsprung Gap Naked Helium star",
"Neutron Star and Hertzsprung Gap Naked Helium star",
"Neutron Star and Carbon/Oxygen White Dwarf",
)
for result, expected in zip(results, types):
self.assertEquals(result[1], expected)
self.assertAlmostEqual(binary.child1.mass.value_in(units.MSun), 1.304, 3)
self.assertAlmostEqual(binary.child2.mass.value_in(units.MSun), 0.800, 3)
instance.stop()
def test4(self):
print "Quick testing standard BSE example 2..."
instance = BSE()
instance.parameters.common_envelope_efficiency = 3.0
instance.parameters.Eddington_mass_transfer_limit_factor = 10.0
instance.commit_parameters()
stars = Particles(2)
stars[0].mass = 7.816 | units.MSun
stars[1].mass = 4.387 | units.MSun
instance.particles.add_particles(stars)
binaries = Particles(1)
binary = binaries[0]
orbital_period = 1964.18453 | units.day
semi_major_axis = instance.orbital_period_to_semi_major_axis(orbital_period, stars[0].mass , stars[1].mass)
binary.semi_major_axis = semi_major_axis
binary.eccentricity = 0.0
binary.child1 = stars[0]
binary.child2 = stars[1]
instance.binaries.add_particles(binaries)
from_bse_to_model = instance.particles.new_channel_to(stars)
from_bse_to_model.copy()
from_bse_to_model_binaries = instance.binaries.new_channel_to(binaries)
from_bse_to_model_binaries.copy()
instance.evolve_model(170 | units.Myr)
from_bse_to_model.copy()
from_bse_to_model_binaries.copy()
self.assertAlmostEqual(binary.child1.mass.value_in(units.MSun), 1.304, 3)
self.assertAlmostEqual(binary.child2.mass.value_in(units.MSun), 0.800, 3)
self.assertEquals(str(binary.child1.stellar_type), "Neutron Star")
self.assertEquals(str(binary.child2.stellar_type), "Carbon/Oxygen White Dwarf")
instance.stop()
def test5(self):
print "Testing stellar collision..."
instance = BSE()
instance.parameters.common_envelope_efficiency = 3.0
instance.parameters.Eddington_mass_transfer_limit_factor = 10.0
instance.commit_parameters()
stars = Particles(2)
stars[0].mass = 3.0 | units.MSun
stars[1].mass = 0.3 | units.MSun
instance.particles.add_particles(stars)
binaries = Particles(1)
binary = binaries[0]
orbital_period = 200.0 | units.day
semi_major_axis = instance.orbital_period_to_semi_major_axis(orbital_period, stars[0].mass , stars[1].mass)
binary.semi_major_axis = semi_major_axis
binary.eccentricity = 0.99
binary.child1 = stars[0]
binary.child2 = stars[1]
instance.binaries.add_particles(binaries)
from_bse_to_model = instance.particles.new_channel_to(stars)
from_bse_to_model.copy()
from_bse_to_model_binaries = instance.binaries.new_channel_to(binaries)
from_bse_to_model_binaries.copy()
instance.evolve_model(170 | units.Myr)
from_bse_to_model.copy()
from_bse_to_model_binaries.copy()
        print(binaries)
self.assertAlmostEqual(binary.child1.mass.value_in(units.MSun), 3.300, 3)
self.assertAlmostEqual(binary.child2.mass.value_in(units.MSun), 0.000, 3)
self.assertEquals(str(binary.child1.stellar_type), "Main Sequence star")
self.assertEquals(str(binary.child2.stellar_type), "Massless Supernova")
instance.stop()
def test6(self):
print "Testing additional parameters for initialization..."
instance = BSE()
instance.initialize_code()
self.assertEqual(instance.parameters.reimers_mass_loss_coefficient, 0.5)
myvalue = 0.7
instance.parameters.reimers_mass_loss_coefficient = myvalue
self.assertEqual(instance.parameters.reimers_mass_loss_coefficient, myvalue)
instance.commit_parameters()
self.assertEqual(instance.parameters.reimers_mass_loss_coefficient, myvalue)
instance.stop()
instance = BSE()
self.assertEqual(instance.parameters.reimers_mass_loss_coefficient, 0.5)
myvalue = 0.7
instance.parameters.reimers_mass_loss_coefficient = myvalue
instance.parameters.set_defaults()
instance.commit_parameters()
self.assertEqual(instance.parameters.reimers_mass_loss_coefficient, 0.5)
instance.stop()
def test7(self):
print "Test evolve_model optional arguments: end_time and keep_synchronous"
instance = BSE()
instance.commit_parameters()
stars = Particles(6)
stars.mass = [1.0,2.0,3.0, 0.1, 0.2, 0.3] | units.MSun
binaries = Particles(3)
binaries.eccentricity = 0.0
for i in range(3):
binaries[i].child1 = stars[i]
binaries[i].child2 = stars[i+3]
orbital_period = 200.0 | units.day
semi_major_axis = instance.orbital_period_to_semi_major_axis(
orbital_period,
binaries.child1.as_set().mass ,
binaries.child2.as_set().mass
)
binaries.semi_major_axis = semi_major_axis
instance.particles.add_particles(stars)
instance.binaries.add_particles(binaries)
self.assertAlmostEqual(instance.binaries.age, [0.0, 0.0, 0.0] | units.yr)
self.assertAlmostEqual(instance.binaries.time_step, [550.1565, 58.2081, 18.8768] | units.Myr, 3)
print "evolve_model without arguments: use shared timestep = min(particles.time_step)"
instance.evolve_model()
self.assertAlmostEqual(instance.binaries.age, [18.8768, 18.8768, 18.8768] | units.Myr, 3)
self.assertAlmostEqual(instance.binaries.time_step, [550.1565, 58.2081, 18.8768] | units.Myr, 3)
self.assertAlmostEqual(instance.model_time, 18.8768 | units.Myr, 3)
print "evolve_model with end_time: take timesteps, until end_time is reached exactly"
instance.evolve_model(100 | units.Myr)
self.assertAlmostEqual(instance.binaries.age, [100.0, 100.0, 100.0] | units.Myr, 3)
self.assertAlmostEqual(instance.binaries.time_step, [550.1565, 58.2081, 18.8768] | units.Myr, 3)
self.assertAlmostEqual(instance.model_time, 100.0 | units.Myr, 3)
print "evolve_model with keep_synchronous: use non-shared timestep, particle ages will typically diverge"
instance.evolve_model(keep_synchronous = False)
self.assertAlmostEqual(instance.binaries.age, (100 | units.Myr) + ([550.1565, 58.2081, 18.8768] | units.Myr), 3)
self.assertAlmostEqual(instance.binaries.time_step, [550.1565, 58.2081, 18.8768] | units.Myr, 3)
self.assertAlmostEqual(instance.model_time, 100.0 | units.Myr, 3) # Unchanged!
instance.stop()
def test8(self):
print "Testing adding and removing particles from stellar evolution code..."
instance = BSE()
instance.initialize_code()
stars = Particles(6)
stars.mass = [1.0,1.0, 1.0, 0.2, 0.2, 0.2] | units.MSun
binaries = Particles(3)
binaries.eccentricity = 0.0
for i in range(3):
binaries[i].child1 = stars[i]
binaries[i].child2 = stars[i+3]
orbital_period = 200.0 | units.day
semi_major_axis = instance.orbital_period_to_semi_major_axis(
orbital_period,
binaries.child1.as_set().mass ,
binaries.child2.as_set().mass
)
binaries.semi_major_axis = semi_major_axis
instance.commit_parameters()
self.assertEquals(len(instance.particles), 0)
self.assertEquals(len(instance.binaries), 0) # before creation
instance.particles.add_particles(stars)
instance.binaries.add_particles(binaries[:-1])
instance.commit_particles()
instance.evolve_model(1.0 | units.Myr)
self.assertEquals(len(instance.binaries), 2) # before remove
self.assertAlmostEqual(instance.binaries.age, 1.0 | units.Myr)
instance.binaries.remove_particle(binaries[0])
self.assertEquals(len(instance.binaries), 1)
instance.evolve_model(2.0 | units.Myr)
self.assertAlmostEqual(instance.binaries[0].age, 2.0 | units.Myr)
instance.binaries.add_particles(binaries[::2])
self.assertEquals(len(instance.binaries), 3) # it's back...
self.assertAlmostEqual(instance.binaries[0].age, 2.0 | units.Myr)
self.assertAlmostEqual(instance.binaries[1].age, 0.0 | units.Myr)
self.assertAlmostEqual(instance.binaries[2].age, 0.0 | units.Myr) # ... and rejuvenated.
instance.evolve_model(3.0 | units.Myr) # The young stars keep their age offset from the old star
self.assertAlmostEqual(instance.binaries.age, [3.0, 1.0, 1.0] | units.Myr)
instance.evolve_model(4.0 | units.Myr)
self.assertAlmostEqual(instance.binaries.age, [4.0, 2.0, 2.0] | units.Myr)
instance.stop()
def test9(self):
print "Testing BSE states"
instance = BSE()
stars = Particles(2)
stars.mass = [1.0, 0.2] | units.MSun
binaries = Particles(1)
orbital_period = 200.0 | units.day
semi_major_axis = instance.orbital_period_to_semi_major_axis(orbital_period, stars[0].mass , stars[1].mass)
binaries.semi_major_axis = semi_major_axis
binaries.eccentricity = 0.0
binaries[0].child1 = stars[0]
binaries[0].child2 = stars[1]
print "First do everything manually:",
self.assertEquals(instance.get_name_of_current_state(), 'UNINITIALIZED')
instance.initialize_code()
self.assertEquals(instance.get_name_of_current_state(), 'INITIALIZED')
instance.commit_parameters()
self.assertEquals(instance.get_name_of_current_state(), 'RUN')
instance.cleanup_code()
self.assertEquals(instance.get_name_of_current_state(), 'END')
instance.stop()
print "ok"
print "initialize_code(), commit_parameters(), " \
"and cleanup_code() should be called automatically:",
instance = BSE()
self.assertEquals(instance.get_name_of_current_state(), 'UNINITIALIZED')
instance.parameters.reimers_mass_loss_coefficient = 0.5
self.assertEquals(instance.get_name_of_current_state(), 'INITIALIZED')
instance.particles.add_particles(stars)
instance.binaries.add_particles(binaries)
self.assertEquals(instance.get_name_of_current_state(), 'RUN')
instance.stop()
self.assertEquals(instance.get_name_of_current_state(), 'STOPPED')
print "ok"
| 41.881051
| 120
| 0.61037
|
dbf78b560ead9079dca603035c4f5538eb5e6c2c
| 272
|
py
|
Python
|
Week10/week 10 programming assignment 1.py
|
Slow-Rain/NPTEL-The-Joy-of-Computing-using-Python
|
e4bd830ef7a3f171a14a88f94df626c766a7649b
|
[
"MIT"
] | 29
|
2021-01-25T16:13:17.000Z
|
2022-03-23T16:34:06.000Z
|
Week10/week 10 programming assignment 1.py
|
Slow-Rain/NPTEL-The-Joy-of-Computing-using-Python
|
e4bd830ef7a3f171a14a88f94df626c766a7649b
|
[
"MIT"
] | 12
|
2021-02-11T13:47:07.000Z
|
2021-10-10T04:26:45.000Z
|
Week10/week 10 programming assignment 1.py
|
Slow-Rain/NPTEL-The-Joy-of-Computing-using-Python
|
e4bd830ef7a3f171a14a88f94df626c766a7649b
|
[
"MIT"
] | 30
|
2021-02-10T05:54:31.000Z
|
2022-02-25T11:24:36.000Z
|
# Read the chunk size a and a space-separated sequence b; c tracks the 1-based
# index of the last chunk that contains exactly two '1's (0 means no such chunk).
a, b, c = int(input()), input().split(), 0
# Split b into consecutive chunks of length a.
key = [b[i - a:i] for i in range(1, len(b) + 1) if i % a == 0]
for i in range(len(key)):
    if key[i].count('1') == 2:
        c = i + 1
if c != 0:
    print("yes", c, end="")
else:
    print("no", end="")
| 15.111111
| 58
| 0.419118
|
f145132fcd12abefb449481e71727fcd8224485f
| 8,610
|
py
|
Python
|
main.py
|
moh7/CarND-Semantic-Segmentation
|
64400fedd6002a42bec923f3b03ec4f9b6d26ff0
|
[
"MIT"
] | null | null | null |
main.py
|
moh7/CarND-Semantic-Segmentation
|
64400fedd6002a42bec923f3b03ec4f9b6d26ff0
|
[
"MIT"
] | null | null | null |
main.py
|
moh7/CarND-Semantic-Segmentation
|
64400fedd6002a42bec923f3b03ec4f9b6d26ff0
|
[
"MIT"
] | null | null | null |
import os.path
import warnings
import scipy.misc
import tensorflow as tf
from distutils.version import LooseVersion
import project_tests as tests
import helper
## set parameters
L2_REG = 1e-5
STDEV = 1e-3
KEEP_PROB = 0.5
LEARNING_RATE = 1e-4
EPOCHS = 15
BATCH_SIZE = 5
IMAGE_SHAPE = (160, 576)
NUM_CLASSES = 2
DATA_DIR = './data'
RUNS_DIR = './runs'
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def load_vgg(sess, vgg_path):
"""
Load Pretrained VGG Model into TensorFlow.
:param sess: TensorFlow Session
:param vgg_path: Path to vgg folder, containing "variables/" and "saved_model.pb"
:return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out)
"""
vgg_tag = 'vgg16'
vgg_input_tensor_name = 'image_input:0'
vgg_keep_prob_tensor_name = 'keep_prob:0'
vgg_layer3_out_tensor_name = 'layer3_out:0'
vgg_layer4_out_tensor_name = 'layer4_out:0'
vgg_layer7_out_tensor_name = 'layer7_out:0'
graph = tf.get_default_graph()
tf.saved_model.loader.load(sess, [vgg_tag], vgg_path)
input_image = graph.get_tensor_by_name(vgg_input_tensor_name)
keep_prob = graph.get_tensor_by_name(vgg_keep_prob_tensor_name)
layer3 = graph.get_tensor_by_name(vgg_layer3_out_tensor_name)
layer4 = graph.get_tensor_by_name(vgg_layer4_out_tensor_name)
layer7 = graph.get_tensor_by_name(vgg_layer7_out_tensor_name)
return input_image, keep_prob, layer3, layer4, layer7
print("Load VGG Model:")
tests.test_load_vgg(load_vgg, tf)
def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):
"""
Create the layers for a fully convolutional network. Build skip-layers using the vgg layers.
    :param vgg_layer3_out: TF Tensor for VGG Layer 3 output
    :param vgg_layer4_out: TF Tensor for VGG Layer 4 output
    :param vgg_layer7_out: TF Tensor for VGG Layer 7 output
:param num_classes: Number of classes to classify
:return: The Tensor for the last layer of output
"""
layer7_conv_1x1 = tf.layers.conv2d(vgg_layer7_out, num_classes, 1, 1,
padding='same', kernel_initializer=tf.random_normal_initializer(stddev=STDEV),
kernel_regularizer=tf.contrib.layers.l2_regularizer(L2_REG))
layer4_conv_1x1 = tf.layers.conv2d(vgg_layer4_out, num_classes, 1, 1,
padding='same', kernel_initializer=tf.random_normal_initializer(stddev=STDEV),
kernel_regularizer=tf.contrib.layers.l2_regularizer(L2_REG))
layer3_conv_1x1 = tf.layers.conv2d(vgg_layer3_out, num_classes, 1, 1,
padding='same', kernel_initializer=tf.random_normal_initializer(stddev=STDEV),
kernel_regularizer=tf.contrib.layers.l2_regularizer(L2_REG))
# upsample by 2
output = tf.layers.conv2d_transpose(layer7_conv_1x1, num_classes, 4, 2,
padding='same', kernel_initializer=tf.random_normal_initializer(stddev=STDEV),
kernel_regularizer=tf.contrib.layers.l2_regularizer(L2_REG))
# add skip connection
output = tf.add(output, layer4_conv_1x1)
# upsample by 2
output = tf.layers.conv2d_transpose(output, num_classes, 4, 2,
padding='same', kernel_initializer=tf.random_normal_initializer(stddev=STDEV),
kernel_regularizer=tf.contrib.layers.l2_regularizer(L2_REG))
# add skip connection
output = tf.add(output, layer3_conv_1x1)
# upsample by 8
output = tf.layers.conv2d_transpose(output, num_classes, 16, 8,
padding='same', kernel_initializer=tf.random_normal_initializer(stddev=STDEV),
kernel_regularizer=tf.contrib.layers.l2_regularizer(L2_REG))
return output
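# Shape sketch (assuming 'same' padding, where conv2d_transpose multiplies each
# spatial dimension by its stride): with IMAGE_SHAPE = (160, 576), the VGG
# layer7 feature map is 5x18; kernel 4 / stride 2 upsamples it to 10x36 (added
# to layer4), a second 4/2 step gives 20x72 (added to layer3), and the final
# kernel 16 / stride 8 step restores the full 160x576 resolution.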
print("Layers Test:")
tests.test_layers(layers)
def optimize(nn_last_layer, correct_label, learning_rate, num_classes):
"""
Build the TensorFLow loss and optimizer operations.
:param nn_last_layer: TF Tensor of the last layer in the neural network
:param correct_label: TF Placeholder for the correct label image
:param learning_rate: TF Placeholder for the learning rate
:param num_classes: Number of classes to classify
:return: Tuple of (logits, train_op, cross_entropy_loss)
"""
logits = tf.reshape(nn_last_layer, (-1, num_classes))
labels = tf.reshape(correct_label, (-1, num_classes))
cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
train_op = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy_loss)
return logits, train_op, cross_entropy_loss
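# Note: reshaping to (-1, num_classes) flattens every pixel of every image in
# the batch into its own row, so the softmax cross-entropy above is effectively
# computed per pixel and then averaged by reduce_mean.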
print("Optimize Test:")
tests.test_optimize(optimize)
def train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image,
correct_label, keep_prob, learning_rate):
"""
Train neural network and print out the loss during training.
:param sess: TF Session
:param epochs: Number of epochs
:param batch_size: Batch size
:param get_batches_fn: Function to get batches of training data. Call using get_batches_fn(batch_size)
:param train_op: TF Operation to train the neural network
:param cross_entropy_loss: TF Tensor for the amount of loss
:param input_image: TF Placeholder for input images
:param correct_label: TF Placeholder for label images
:param keep_prob: TF Placeholder for dropout keep probability
:param learning_rate: TF Placeholder for learning rate
"""
for epoch in range(epochs):
for image, targets in get_batches_fn(batch_size):
_, loss = sess.run([train_op, cross_entropy_loss],
feed_dict = {input_image: image, correct_label: targets, keep_prob: KEEP_PROB ,
learning_rate: LEARNING_RATE })
# Print info during the learning process
print("Epoch: {}".format(epoch + 1), "/ {}".format(epochs), " Loss: {:.3f}".format(loss))
#tests.test_train_nn(train_nn)
def run():
tests.test_for_kitti_dataset(DATA_DIR)
# Download pretrained vgg model
helper.maybe_download_pretrained_vgg(DATA_DIR)
# OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
# You'll need a GPU with at least 10 teraFLOPS to train on.
# https://www.cityscapes-dataset.com/
print("Start training...")
with tf.Session() as sess:
# Path to vgg model
vgg_path = os.path.join(DATA_DIR, 'vgg')
# Create function to get batches
get_batches_fn = helper.gen_batch_function(os.path.join(DATA_DIR, 'data_road/training'), IMAGE_SHAPE)
# OPTIONAL: Augment Images for better results
# https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network
# Add some augmentations, see helper.py
        # Build the FCN model and optimization function
input_image, keep_prob, layer3, layer4, layer7 = load_vgg(sess, vgg_path)
last_layer = layers(layer3, layer4, layer7, NUM_CLASSES)
correct_label = tf.placeholder(dtype = tf.float32, shape = (None, None, None, NUM_CLASSES))
learning_rate = tf.placeholder(dtype = tf.float32)
logits, train_op, cross_entropy_loss = optimize(last_layer, correct_label, learning_rate, NUM_CLASSES)
# preparation to save the trained model
saver = tf.train.Saver()
# train the neural net
sess.run(tf.global_variables_initializer())
train_nn(sess, EPOCHS, BATCH_SIZE, get_batches_fn, train_op, cross_entropy_loss, input_image, correct_label,
keep_prob, learning_rate)
# Save inference data using helper.save_inference_samples
helper.save_inference_samples(RUNS_DIR, DATA_DIR, sess, IMAGE_SHAPE, logits, keep_prob, input_image)
# OPTIONAL: Apply the trained model to a video
# save model
saver.save(sess, './runs/sem_seg_model.ckpt')
if __name__ == '__main__':
# pass
run()
| 40.422535
| 146
| 0.694077
|
d4d5c5911d9de56f0dc69f3d97472514e60c3032
| 7,415
|
py
|
Python
|
src/uefi/BaseTools/Source/Python/UPT/Object/Parser/InfPackagesObject.py
|
kkennett/oscore.dev
|
59e786f12f9af969211c95a9d2863b1767528341
|
[
"BSD-3-Clause"
] | null | null | null |
src/uefi/BaseTools/Source/Python/UPT/Object/Parser/InfPackagesObject.py
|
kkennett/oscore.dev
|
59e786f12f9af969211c95a9d2863b1767528341
|
[
"BSD-3-Clause"
] | null | null | null |
src/uefi/BaseTools/Source/Python/UPT/Object/Parser/InfPackagesObject.py
|
kkennett/oscore.dev
|
59e786f12f9af969211c95a9d2863b1767528341
|
[
"BSD-3-Clause"
] | null | null | null |
## @file
# This file is used to define class objects of INF file [Packages] section.
# It will be consumed by InfParser.
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
'''
InfPackageObject
'''
from Logger import StringTable as ST
from Logger import ToolError
import Logger.Log as Logger
from Library import GlobalData
from Library.Misc import Sdict
from Library.ParserValidate import IsValidPath
from Library.ExpressionValidate import IsValidFeatureFlagExp
class InfPackageItem():
def __init__(self,
PackageName = '',
FeatureFlagExp = '',
HelpString = ''):
self.PackageName = PackageName
self.FeatureFlagExp = FeatureFlagExp
self.HelpString = HelpString
self.SupArchList = []
def SetPackageName(self, PackageName):
self.PackageName = PackageName
def GetPackageName(self):
return self.PackageName
def SetFeatureFlagExp(self, FeatureFlagExp):
self.FeatureFlagExp = FeatureFlagExp
def GetFeatureFlagExp(self):
return self.FeatureFlagExp
def SetHelpString(self, HelpString):
self.HelpString = HelpString
def GetHelpString(self):
return self.HelpString
def SetSupArchList(self, SupArchList):
self.SupArchList = SupArchList
def GetSupArchList(self):
return self.SupArchList
## INF package section
#
#
#
class InfPackageObject():
def __init__(self):
self.Packages = Sdict()
#
# Macro defined in this section should be only used in this section.
#
self.Macros = {}
def SetPackages(self, PackageData, Arch = None):
IsValidFileFlag = False
SupArchList = []
for ArchItem in Arch:
#
# Validate Arch
#
if (ArchItem == '' or ArchItem is None):
ArchItem = 'COMMON'
SupArchList.append(ArchItem)
for PackageItem in PackageData:
PackageItemObj = InfPackageItem()
HelpStringObj = PackageItem[1]
CurrentLineOfPackItem = PackageItem[2]
PackageItem = PackageItem[0]
if HelpStringObj is not None:
HelpString = HelpStringObj.HeaderComments + HelpStringObj.TailComments
PackageItemObj.SetHelpString(HelpString)
if len(PackageItem) >= 1:
#
# Validate file exist/format.
#
if IsValidPath(PackageItem[0], ''):
IsValidFileFlag = True
elif IsValidPath(PackageItem[0], GlobalData.gINF_MODULE_DIR):
IsValidFileFlag = True
elif IsValidPath(PackageItem[0], GlobalData.gWORKSPACE):
IsValidFileFlag = True
else:
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_FILE_NOT_EXIST_OR_NAME_INVALID%(PackageItem[0]),
File=CurrentLineOfPackItem[2],
Line=CurrentLineOfPackItem[1],
ExtraData=CurrentLineOfPackItem[0])
return False
if IsValidFileFlag:
PackageItemObj.SetPackageName(PackageItem[0])
if len(PackageItem) == 2:
#
# Validate Feature Flag Express
#
if PackageItem[1].strip() == '':
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_FEATURE_FLAG_EXP_MISSING,
File=CurrentLineOfPackItem[2],
Line=CurrentLineOfPackItem[1],
ExtraData=CurrentLineOfPackItem[0])
#
# Validate FFE
#
FeatureFlagRtv = IsValidFeatureFlagExp(PackageItem[1].strip())
if not FeatureFlagRtv[0]:
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_FEATURE_FLAG_EXP_SYNTAX_INVLID%(FeatureFlagRtv[1]),
File=CurrentLineOfPackItem[2],
Line=CurrentLineOfPackItem[1],
ExtraData=CurrentLineOfPackItem[0])
PackageItemObj.SetFeatureFlagExp(PackageItem[1].strip())
if len(PackageItem) > 2:
#
# Invalid format of Package statement
#
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_PACKAGE_SECTION_CONTENT_ERROR,
File=CurrentLineOfPackItem[2],
Line=CurrentLineOfPackItem[1],
ExtraData=CurrentLineOfPackItem[0])
PackageItemObj.SetSupArchList(SupArchList)
#
# Determine package file name duplicate. Follow below rule:
#
# A package filename must not be duplicated within a [Packages]
# section. Package filenames may appear in multiple architectural
# [Packages] sections. A package filename listed in an
# architectural [Packages] section must not be listed in the common
# architectural [Packages] section.
#
            # NOTE: This check does not report an error for now.
#
for Item in self.Packages:
if Item.GetPackageName() == PackageItemObj.GetPackageName():
ItemSupArchList = Item.GetSupArchList()
for ItemArch in ItemSupArchList:
for PackageItemObjArch in SupArchList:
if ItemArch == PackageItemObjArch:
#
# ST.ERR_INF_PARSER_ITEM_DUPLICATE
#
pass
if ItemArch.upper() == 'COMMON' or PackageItemObjArch.upper() == 'COMMON':
#
# ST.ERR_INF_PARSER_ITEM_DUPLICATE_COMMON
#
pass
if (PackageItemObj) in self.Packages:
PackageList = self.Packages[PackageItemObj]
PackageList.append(PackageItemObj)
self.Packages[PackageItemObj] = PackageList
else:
PackageList = []
PackageList.append(PackageItemObj)
self.Packages[PackageItemObj] = PackageList
return True
def GetPackages(self, Arch = None):
if Arch is None:
return self.Packages
| 39.441489
| 102
| 0.54619
|
d89a431c436fb50e22cfba03e91fc295bd90c666
| 7,874
|
py
|
Python
|
daiquiri/oai/renderers.py
|
agy-why/daiquiri
|
4d3e2ce51e202d5a8f1df404a0094a4e018dcb4d
|
[
"Apache-2.0"
] | 14
|
2018-12-23T18:35:02.000Z
|
2021-12-15T04:55:12.000Z
|
daiquiri/oai/renderers.py
|
agy-why/daiquiri
|
4d3e2ce51e202d5a8f1df404a0094a4e018dcb4d
|
[
"Apache-2.0"
] | 40
|
2018-12-20T12:44:05.000Z
|
2022-03-21T11:35:20.000Z
|
daiquiri/oai/renderers.py
|
agy-why/daiquiri
|
4d3e2ce51e202d5a8f1df404a0094a4e018dcb4d
|
[
"Apache-2.0"
] | 5
|
2019-05-16T08:03:35.000Z
|
2021-08-23T20:03:11.000Z
|
from daiquiri.core.renderers import XMLRenderer
from daiquiri.core.renderers.datacite import DataciteRendererMixin
from daiquiri.core.renderers.dublincore import DublincoreRendererMixin
from daiquiri.core.renderers.voresource import VoresourceRendererMixin
class OaiRenderer(DublincoreRendererMixin, DataciteRendererMixin, VoresourceRendererMixin, XMLRenderer):
def render_document(self, data, accepted_media_type=None, renderer_context=None):
self.start('oai:OAI-PMH', {
'xmlns:oai': 'http://www.openarchives.org/OAI/2.0/',
'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance',
'xsi:schemaLocation': 'http://www.openarchives.org/OAI/2.0/ http://www.openarchives.org/OAI/2.0/OAI-PMH.xsd'
})
self.node('oai:responseDate', {}, data['responseDate'])
request_arguments = data['arguments']
for error_code, _ in data['errors']:
if error_code in ['badVerb', 'badArgument']:
request_arguments = {}
self.node('oai:request', request_arguments, data['baseUrl'])
if data['errors']:
self.render_errors(data['errors'])
elif data['verb'] == 'GetRecord':
self.render_get_record(data['response'])
elif data['verb'] == 'Identify':
self.render_identify(data['response'], data['baseUrl'])
elif data['verb'] == 'ListIdentifiers':
self.render_list_identifiers(data['response']['items'], data['response']['resumptionToken'])
elif data['verb'] == 'ListMetadataFormats':
self.render_list_metadata_formats(data['response'])
elif data['verb'] == 'ListRecords':
self.render_list_records(data['response']['items'], data['response']['resumptionToken'])
elif data['verb'] == 'ListSets':
self.render_list_sets(data['response'])
self.end('oai:OAI-PMH')
def render_errors(self, errors):
for error_code, error_message in errors:
self.node('error', {'code': error_code}, error_message)
def render_get_record(self, item):
self.start('oai:GetRecord')
self.render_record(item)
self.end('oai:GetRecord')
def render_identify(self, repository_metadata, base_url):
self.start('oai:Identify')
self.node('oai:repositoryName', {}, repository_metadata.get('repository_name'))
self.node('oai:baseURL', {}, base_url)
self.node('oai:protocolVersion', {}, '2.0')
self.node('oai:adminEmail', {}, repository_metadata['admin_email'])
self.node('oai:earliestDatestamp', {}, repository_metadata.get('earliest_datestamp').strftime('%Y-%m-%dT%H:%M:%SZ'))
self.node('oai:deletedRecord', {}, repository_metadata.get('deleted_record'))
self.node('oai:granularity', {}, 'YYYY-MM-DDThh:mm:ssZ')
self.render_identify_description(repository_metadata)
self.end('oai:Identify')
def render_identify_description(self, repository_metadata):
self.start('oai:description')
if repository_metadata['identifier'] is not None:
self.render_oai_identifier(repository_metadata.get('identifier'))
self.end('oai:description')
self.start('oai:description')
if repository_metadata['registry'] is not None:
self.render_voresource(repository_metadata.get('registry'))
self.end('oai:description')
def render_oai_identifier(self, identifier_metadata):
self.start('oai-identifier', {
'xmlns': 'http://www.openarchives.org/OAI/2.0/oai-identifier',
'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance',
'xsi:schemaLocation': 'http://www.openarchives.org/OAI/2.0/oai-identifier http://www.openarchives.org/OAI/2.0/oai-identifier.xsd'
})
self.node('scheme', {}, identifier_metadata.get('scheme'))
self.node('repositoryIdentifier', {}, identifier_metadata.get('repository_identifier'))
self.node('delimiter', {}, identifier_metadata.get('delimiter'))
self.node('sampleIdentifier', {}, identifier_metadata.get('sample_identifier'))
self.end('oai-identifier')
def render_list_identifiers(self, items, resumption_token):
self.start('oai:ListIdentifiers')
for item in items:
self.render_header(item['header'])
if resumption_token:
self.node('oai:resumptionToken', {
'oai:expirationDate': resumption_token.get('expirationDate'),
'oai:completeListSize': resumption_token.get('completeListSize'),
'oai:cursor': resumption_token.get('cursor')
}, resumption_token['token'])
self.end('oai:ListIdentifiers')
def render_list_metadata_formats(self, metadata_formats):
self.start('oai:ListMetadataFormats')
for metadata_format in metadata_formats:
self.start('oai:metadataFormat')
self.node('oai:metadataPrefix', {}, metadata_format['prefix'])
self.node('oai:schema', {}, metadata_format.get('schema'))
self.node('oai:metadataNamespace', {}, metadata_format.get('namespace'))
self.end('oai:metadataFormat')
self.end('oai:ListMetadataFormats')
def render_list_records(self, items, resumption_token):
self.start('oai:ListRecords')
for item in items:
self.render_record(item)
if resumption_token:
self.node('oai:resumptionToken', {
'oai:expirationDate': resumption_token.get('expirationDate'),
'oai:completeListSize': resumption_token.get('completeListSize'),
'oai:cursor': resumption_token.get('cursor')
}, resumption_token['token'])
self.end('oai:ListRecords')
def render_list_sets(self, data):
self.start('oai:ListSets')
for oai_set in data['oai_sets']:
self.start('oai:set')
self.node('oai:setSpec', {}, oai_set['setSpec'])
self.node('oai:setName', {}, oai_set['setName'])
if oai_set['setDescription'] is not None:
self.node('oai:setDescription', {}, oai_set['setDescription'])
self.end('oai:set')
self.end('oai:ListSets')
def render_record(self, record):
self.start('oai:record')
self.render_header(record['header'])
if record['metadata'] is not None:
self.start('oai:metadata')
self.render_metadata(record['metadata'])
self.end('oai:metadata')
self.end('oai:record')
def render_header(self, header):
self.start('oai:header', {'status': 'deleted'} if header['deleted'] else {})
self.node('oai:identifier', {}, header['identifier'])
self.node('oai:datestamp', {}, header['datestamp'])
for spec in header.get('setSpec', []):
self.node('oai:setSpec', {}, spec)
self.end('oai:header')
def render_metadata(self, metadata):
raise NotImplementedError()
class DublincoreRenderer(OaiRenderer):
def render_metadata(self, metadata):
self.render_dublincore(metadata)
class OaiDataciteRenderer(OaiRenderer):
def render_metadata(self, metadata):
self.start('oai_datacite', {
'xmlns': 'http://schema.datacite.org/oai/oai-1.0/',
'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance',
'xsi:schemaLocation': 'http://schema.datacite.org/oai/oai-1.0/ oai_datacite.xsd'
})
self.start('payload')
self.render_datacite(metadata)
self.end('payload')
self.end('oai_datacite')
class DataciteRenderer(OaiRenderer):
def render_metadata(self, metadata):
self.render_datacite(metadata)
class VoresourceRenderer(OaiRenderer):
def render_metadata(self, metadata):
self.render_voresource(metadata)
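# Usage sketch (the dict shape below is inferred from the keys read in
# render_document above; it is an illustration, not the documented daiquiri API):
#
#   renderer = DublincoreRenderer()
#   renderer.render_document({
#       'responseDate': '2021-01-01T00:00:00Z',
#       'baseUrl': 'https://example.org/oai/',
#       'arguments': {'verb': 'Identify'},
#       'errors': [],
#       'verb': 'Identify',
#       'response': {},  # repository metadata consumed by render_identify
#   })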
| 42.562162
| 141
| 0.640208
|
c1cbf518ba97c0e9f12951b8713bd9789ee2e457
| 2,131
|
py
|
Python
|
src/libs/BLAKE3/.github/workflows/upload_github_release_asset.py
|
CrackerCat/wtf
|
4714b0c9b85a20ad10886041a8fe908194e42630
|
[
"MIT"
] | 3,384
|
2020-01-09T16:48:28.000Z
|
2022-03-31T18:17:12.000Z
|
src/libs/BLAKE3/.github/workflows/upload_github_release_asset.py
|
CrackerCat/wtf
|
4714b0c9b85a20ad10886041a8fe908194e42630
|
[
"MIT"
] | 214
|
2020-01-09T21:15:36.000Z
|
2022-03-27T20:50:12.000Z
|
src/libs/BLAKE3/.github/workflows/upload_github_release_asset.py
|
CrackerCat/wtf
|
4714b0c9b85a20ad10886041a8fe908194e42630
|
[
"MIT"
] | 216
|
2020-01-09T18:15:27.000Z
|
2022-03-26T01:31:37.000Z
|
#! /usr/bin/env python3
import github
import os
import sys
RETRIES = 10
g = github.Github(os.environ["GITHUB_TOKEN"])
tag_name = os.environ["GITHUB_TAG"]
tag_prefix = "refs/tags/"
if tag_name.startswith(tag_prefix):
tag_name = tag_name[len(tag_prefix):]
assert len(sys.argv) == 2
asset_path = sys.argv[1]
asset_name = os.path.basename(asset_path)
repo = g.get_repo(os.environ["GITHUB_REPOSITORY"])
tags = list(repo.get_tags())
for tag in tags:
if tag.name == tag_name:
break
else:
raise RuntimeError("no tag named " + repr(tag_name))
try:
print("Creating GitHub release for tag " + repr(tag_name) + "...")
repo.create_git_release(tag_name, tag_name, tag.commit.commit.message)
except github.GithubException as github_error:
if github_error.data["errors"][0]["code"] == "already_exists":
print("Release for tag " + repr(tag_name) + " already exists.")
else:
raise
releases = list(repo.get_releases())
for release in releases:
if release.tag_name == tag_name:
break
else:
raise RuntimeError("no release for tag " + repr(tag_name))
print("Uploading " + repr(asset_path) + "...")
for i in range(RETRIES):
try:
print("Upload attempt #{} of {}...".format(i + 1, RETRIES))
release.upload_asset(asset_path)
break
except github.GithubException as github_error:
# Unfortunately the asset upload API is flaky. Even worse, it often
# partially succeeds, returning an error to the caller but leaving the
# release in a state where subsequent uploads of the same asset will
# fail with an "already_exists" error. (Though the asset is not visible
# on github.com, so we can't just declare victory and move on.) If we
# detect this case, explicitly delete the asset and continue retrying.
print(github_error)
for asset in release.get_assets():
if asset.name == asset_name:
print("Found uploaded asset after failure. Deleting...")
asset.delete_asset()
else:
raise RuntimeError("All upload attempts failed.")
print("Success!")
| 32.287879
| 79
| 0.672454
|
2ced73ff66673cdb3aa65c18207ccde56a675bf5
| 273
|
py
|
Python
|
Topicos Avanzados de Programacion/clienteFoto.py
|
NoelCordova/universidad
|
444fce93eebedf5cbb5f49c29f94030bbe900015
|
[
"MIT"
] | null | null | null |
Topicos Avanzados de Programacion/clienteFoto.py
|
NoelCordova/universidad
|
444fce93eebedf5cbb5f49c29f94030bbe900015
|
[
"MIT"
] | null | null | null |
Topicos Avanzados de Programacion/clienteFoto.py
|
NoelCordova/universidad
|
444fce93eebedf5cbb5f49c29f94030bbe900015
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Client: sends a photo to the server over a TCP socket.
import socket
arch = open('/home/slingerriperxd/Desktop/foto.jpg', 'rb')
foto = arch.read()
HOST = 'localhost'
PUERTO = 2015
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PUERTO))
s.sendall(foto)
arch.close()
s.close()
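# Matching server sketch (an assumption for illustration; the real server is
# not part of this file):
#
#   srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   srv.bind(('localhost', 2015))
#   srv.listen(1)
#   conn, addr = srv.accept()
#   with open('foto_recibida.jpg', 'wb') as out:
#       while True:
#           chunk = conn.recv(4096)
#           if not chunk:
#               break
#           out.write(chunk)
#   conn.close()
#   srv.close()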
| 17.0625
| 56
| 0.732601
|
342914cff2a0486689ccd5d05feae87726525f66
| 5,282
|
py
|
Python
|
cltk/corpus/aramaic/transliterate.py
|
almostearthling/cltk
|
048fc2a24e7f26d0f28ebbf64a6a605ab8387d0b
|
[
"MIT"
] | null | null | null |
cltk/corpus/aramaic/transliterate.py
|
almostearthling/cltk
|
048fc2a24e7f26d0f28ebbf64a6a605ab8387d0b
|
[
"MIT"
] | null | null | null |
cltk/corpus/aramaic/transliterate.py
|
almostearthling/cltk
|
048fc2a24e7f26d0f28ebbf64a6a605ab8387d0b
|
[
"MIT"
] | null | null | null |
# simple script to transform a hebrew transcription of an imperial aramaic
# text to its own unicode block
IMPERIAL_ARAMAIC_BLOCK = [
# Imperial Aramaic block as it is provided
# by https://www.unicode.org/charts/PDF/U10840.pdf
# The Unicode Standard, Version 13.0
("10840", "IMPERIAL ARAMAIC LETTER ALEPH", "\N{IMPERIAL ARAMAIC LETTER ALEPH}"),
("10841", "IMPERIAL ARAMAIC LETTER BETH", "\N{IMPERIAL ARAMAIC LETTER BETH}"),
("10842", "IMPERIAL ARAMAIC LETTER GIMEL", "\N{IMPERIAL ARAMAIC LETTER GIMEL}"),
("10843", "IMPERIAL ARAMAIC LETTER DALETH", "\N{IMPERIAL ARAMAIC LETTER DALETH}"),
("10844", "IMPERIAL ARAMAIC LETTER HE", "\N{IMPERIAL ARAMAIC LETTER HE}"),
("10845", "IMPERIAL ARAMAIC LETTER WAW", "\N{IMPERIAL ARAMAIC LETTER WAW}"),
("10846", "IMPERIAL ARAMAIC LETTER ZAYIN", "\N{IMPERIAL ARAMAIC LETTER ZAYIN}"),
("10847", "IMPERIAL ARAMAIC LETTER HETH", "\N{IMPERIAL ARAMAIC LETTER HETH}"),
("10848", "IMPERIAL ARAMAIC LETTER TETH", "\N{IMPERIAL ARAMAIC LETTER TETH}"),
("10849", "IMPERIAL ARAMAIC LETTER YODH", "\N{IMPERIAL ARAMAIC LETTER YODH}"),
("1084A", "IMPERIAL ARAMAIC LETTER KAPH", "\N{IMPERIAL ARAMAIC LETTER KAPH}"),
("1084B", "IMPERIAL ARAMAIC LETTER LAMEDH", "\N{IMPERIAL ARAMAIC LETTER LAMEDH}"),
("1084C", "IMPERIAL ARAMAIC LETTER MEM", "\N{IMPERIAL ARAMAIC LETTER MEM}"),
("1084D", "IMPERIAL ARAMAIC LETTER NUN", "\N{IMPERIAL ARAMAIC LETTER NUN}"),
("1084E", "IMPERIAL ARAMAIC LETTER SAMEKH", "\N{IMPERIAL ARAMAIC LETTER SAMEKH}"),
("1084F", "IMPERIAL ARAMAIC LETTER AYIN", "\N{IMPERIAL ARAMAIC LETTER AYIN}"),
("10850", "IMPERIAL ARAMAIC LETTER PE", "\N{IMPERIAL ARAMAIC LETTER PE}"),
("10851", "IMPERIAL ARAMAIC LETTER SADHE", "\N{IMPERIAL ARAMAIC LETTER SADHE}"),
("10852", "IMPERIAL ARAMAIC LETTER QOPH", "\N{IMPERIAL ARAMAIC LETTER QOPH}"),
("10853", "IMPERIAL ARAMAIC LETTER RESH", "\N{IMPERIAL ARAMAIC LETTER RESH}"),
("10854", "IMPERIAL ARAMAIC LETTER SHIN", "\N{IMPERIAL ARAMAIC LETTER SHIN}"),
("10855", "IMPERIAL ARAMAIC LETTER TAW", "\N{IMPERIAL ARAMAIC LETTER TAW}"),
("10857", "IMPERIAL ARAMAIC SECTION SIGN", "\N{IMPERIAL ARAMAIC SECTION SIGN}"),
("10858", "IMPERIAL ARAMAIC NUMBER ONE", "\N{IMPERIAL ARAMAIC NUMBER ONE}"),
("10859", "IMPERIAL ARAMAIC NUMBER TWO", "\N{IMPERIAL ARAMAIC NUMBER TWO}"),
("1085A", "IMPERIAL ARAMAIC NUMBER THREE", "\N{IMPERIAL ARAMAIC NUMBER THREE}"),
("1085B", "IMPERIAL ARAMAIC NUMBER TEN", "\N{IMPERIAL ARAMAIC NUMBER TEN}"),
("1085C", "IMPERIAL ARAMAIC NUMBER TWENTY", "\N{IMPERIAL ARAMAIC NUMBER TWENTY}"),
(
"1085D",
"IMPERIAL ARAMAIC NUMBER ONE HUNDRED",
"\N{IMPERIAL ARAMAIC NUMBER ONE HUNDRED}",
),
(
"1085E",
"IMPERIAL ARAMAIC NUMBER ONE THOUSAND",
"\N{IMPERIAL ARAMAIC NUMBER ONE THOUSAND}",
),
(
"1085F",
"IMPERIAL ARAMAIC NUMBER TEN THOUSAND",
"\N{IMPERIAL ARAMAIC NUMBER TEN THOUSAND}",
),
]
TABLE = [
# Equivalencies are provided based on
# Skeleton Achaemenid Aramaic Grammar
# http://arshama.classics.ox.ac.uk/aramaic/
("\N{IMPERIAL ARAMAIC LETTER ALEPH}", "א"),
("\N{IMPERIAL ARAMAIC LETTER BETH}", "ב"),
("\N{IMPERIAL ARAMAIC LETTER GIMEL}", "ג"),
("\N{IMPERIAL ARAMAIC LETTER DALETH}", "ד"),
("\N{IMPERIAL ARAMAIC LETTER HE}", "ה"),
("\N{IMPERIAL ARAMAIC LETTER WAW}", "ו"),
("\N{IMPERIAL ARAMAIC LETTER ZAYIN}", "ז"),
("\N{IMPERIAL ARAMAIC LETTER HETH}", "ח"),
("\N{IMPERIAL ARAMAIC LETTER TETH}", "ט"),
("\N{IMPERIAL ARAMAIC LETTER YODH}", "י"),
("\N{IMPERIAL ARAMAIC LETTER KAPH}", "כ", "ך"),
("\N{IMPERIAL ARAMAIC LETTER LAMEDH}", "ל"),
("\N{IMPERIAL ARAMAIC LETTER MEM}", "מ", "ם"),
("\N{IMPERIAL ARAMAIC LETTER NUN}", "נ", "ן"),
("\N{IMPERIAL ARAMAIC LETTER SAMEKH}", "ס"),
("\N{IMPERIAL ARAMAIC LETTER AYIN}", "ע"),
("\N{IMPERIAL ARAMAIC LETTER PE}", "פ", "ף"),
("\N{IMPERIAL ARAMAIC LETTER SADHE}", "צ", "ץ"),
("\N{IMPERIAL ARAMAIC LETTER QOPH}", "ק"),
("\N{IMPERIAL ARAMAIC LETTER RESH}", "ר"),
("\N{IMPERIAL ARAMAIC LETTER SHIN}", "שׁ"),
("\N{IMPERIAL ARAMAIC LETTER TAW}", "ת"),
("\N{IMPERIAL ARAMAIC SECTION SIGN}", "§"),
("\N{IMPERIAL ARAMAIC NUMBER ONE}", "1"),
("\N{IMPERIAL ARAMAIC NUMBER TWO}", "2"),
("\N{IMPERIAL ARAMAIC NUMBER THREE}", "3"),
("\N{IMPERIAL ARAMAIC NUMBER TEN}", "10"),
("\N{IMPERIAL ARAMAIC NUMBER TWENTY}", "20"),
("\N{IMPERIAL ARAMAIC NUMBER ONE HUNDRED}", "100"),
("\N{IMPERIAL ARAMAIC NUMBER ONE THOUSAND}", "1000"),
("\N{IMPERIAL ARAMAIC NUMBER TEN THOUSAND}", "10000"),
]
def imperial_to_square_table():
new_table = []
for el in TABLE:
if len(el) > 2:
new_table.append((el[1], el[0]))
new_table.append((el[2], el[0]))
else:
new_table.append((el[1], el[0]))
return new_table
SQUARE_TO_IMPERIAL_TABLE = imperial_to_square_table()
SQUARE_TO_IMPERIAL = {k: v for k, v in SQUARE_TO_IMPERIAL_TABLE}
def square_to_imperial_char(s: str) -> str:
return SQUARE_TO_IMPERIAL[s] if s in SQUARE_TO_IMPERIAL else s
def square_to_imperial(square_script: str) -> str:
return "".join(map(square_to_imperial_char, square_script))
| 47.160714
| 86
| 0.639341
|
c7431baf9ba94633f3ae3f42dd9e3c6a3e4f69e6
| 7,994
|
py
|
Python
|
msoffcrypto/method/ecma376_agile.py
|
nolze/msoffcrypto-tool
|
01c5b23ee81bd39a37ea090af2656e6891767be7
|
[
"MIT"
] | 336
|
2018-04-23T16:07:18.000Z
|
2022-03-24T08:23:38.000Z
|
msoffcrypto/method/ecma376_agile.py
|
FDlucifer/msoffcrypto-tool
|
01c5b23ee81bd39a37ea090af2656e6891767be7
|
[
"MIT"
] | 58
|
2018-05-07T03:50:42.000Z
|
2022-02-06T15:08:40.000Z
|
msoffcrypto/method/ecma376_agile.py
|
FDlucifer/msoffcrypto-tool
|
01c5b23ee81bd39a37ea090af2656e6891767be7
|
[
"MIT"
] | 75
|
2018-05-06T11:02:10.000Z
|
2022-03-05T15:28:09.000Z
|
import functools
import hmac
import io
import logging
from hashlib import sha1, sha256, sha384, sha512
from struct import pack, unpack
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
ALGORITHM_HASH = {
"SHA1": sha1,
"SHA256": sha256,
"SHA384": sha384,
"SHA512": sha512,
}
def _get_hash_func(algorithm):
return ALGORITHM_HASH.get(algorithm, sha1)
def _decrypt_aes_cbc(data, key, iv):
aes = Cipher(algorithms.AES(key), modes.CBC(iv), backend=default_backend())
decryptor = aes.decryptor()
decrypted = decryptor.update(data) + decryptor.finalize()
return decrypted
class ECMA376Agile:
def __init__(self):
pass
@staticmethod
def _derive_iterated_hash_from_password(password, saltValue, hashAlgorithm, spinValue):
r"""
Do a partial password-based hash derivation.
Note the block key is not taken into consideration in this function.
"""
# TODO: This function is quite expensive and it should only be called once.
# We need to save the result for later use.
# This is not covered by the specification, but MS Word does so.
hashCalc = _get_hash_func(hashAlgorithm)
        # NOTE: Initial round: hash = H(salt + password), where H is hashAlgorithm
        h = hashCalc(saltValue + password.encode("UTF-16LE"))
        # NOTE: Iterations 0 -> spinValue-1: hash = H(iterator + hash)
for i in range(0, spinValue, 1):
h = hashCalc(pack("<I", i) + h.digest())
return h
@staticmethod
def _derive_encryption_key(h, blockKey, hashAlgorithm, keyBits):
r"""
Finish the password-based key derivation by hashing last hash + blockKey.
"""
hashCalc = _get_hash_func(hashAlgorithm)
h_final = hashCalc(h + blockKey)
# NOTE: Needed to truncate encryption key to bitsize
encryption_key = h_final.digest()[: keyBits // 8]
return encryption_key
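    # Putting the two helpers above together, the agile key derivation is:
    #
    #   h = H(saltValue + UTF-16LE(password))
    #   for i in 0 .. spinValue-1:  h = H(LE32(i) + h)
    #   key = H(h + blockKey)[: keyBits // 8]
    #
    # where H is the hash named by hashAlgorithm (SHA-1/256/384/512) and LE32
    # is the 32-bit little-endian encoding of the iteration counter.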
@staticmethod
def decrypt(key, keyDataSalt, hashAlgorithm, ibuf):
r"""
Return decrypted data.
>>> key = b'@ f\t\xd9\xfa\xad\xf2K\x07j\xeb\xf2\xc45\xb7B\x92\xc8\xb8\xa7\xaa\x81\xbcg\x9b\xe8\x97\x11\xb0*\xc2'
>>> keyDataSalt = b'\x8f\xc7x"+P\x8d\xdcL\xe6\x8c\xdd\x15<\x16\xb4'
>>> hashAlgorithm = 'SHA512'
"""
SEGMENT_LENGTH = 4096
hashCalc = _get_hash_func(hashAlgorithm)
obuf = io.BytesIO()
totalSize = unpack("<I", ibuf.read(4))[0]
logger.debug("totalSize: {}".format(totalSize))
remaining = totalSize
        ibuf.seek(8)  # NOTE: the stream starts with an 8-byte size field; only its low 4 bytes were read above
for i, buf in enumerate(iter(functools.partial(ibuf.read, SEGMENT_LENGTH), b"")):
saltWithBlockKey = keyDataSalt + pack("<I", i)
iv = hashCalc(saltWithBlockKey).digest()
iv = iv[:16]
aes = Cipher(algorithms.AES(key), modes.CBC(iv), backend=default_backend())
decryptor = aes.decryptor()
dec = decryptor.update(buf) + decryptor.finalize()
if remaining < len(buf):
dec = dec[:remaining]
obuf.write(dec)
remaining -= len(buf)
return obuf.getvalue() # return obuf.getbuffer()
@staticmethod
def verify_password(password, saltValue, hashAlgorithm, encryptedVerifierHashInput, encryptedVerifierHashValue, spinValue, keyBits):
r"""
Return True if the given password is valid.
>>> password = 'Password1234_'
>>> saltValue = b'\xcb\xca\x1c\x99\x93C\xfb\xad\x92\x07V4\x15\x004\xb0'
>>> hashAlgorithm = 'SHA512'
>>> encryptedVerifierHashInput = b'9\xee\xa5N&\xe5\x14y\x8c(K\xc7qM8\xac'
>>> encryptedVerifierHashValue = b'\x147mm\x81s4\xe6\xb0\xffO\xd8"\x1a|g\x8e]\x8axN\x8f\x99\x9fL\x18\x890\xc3jK)\xc5\xb33`' + \
... b'[\\\xd4\x03\xb0P\x03\xad\xcf\x18\xcc\xa8\xcb\xab\x8d\xeb\xe3s\xc6V\x04\xa0\xbe\xcf\xae\\\n\xd0'
>>> spinValue = 100000
>>> keyBits = 256
>>> ECMA376Agile.verify_password(password, saltValue, hashAlgorithm, encryptedVerifierHashInput, encryptedVerifierHashValue, spinValue, keyBits)
True
"""
# NOTE: See https://docs.microsoft.com/en-us/openspecs/office_file_formats/ms-offcrypto/a57cb947-554f-4e5e-b150-3f2978225e92
block1 = bytearray([0xFE, 0xA7, 0xD2, 0x76, 0x3B, 0x4B, 0x9E, 0x79])
block2 = bytearray([0xD7, 0xAA, 0x0F, 0x6D, 0x30, 0x61, 0x34, 0x4E])
h = ECMA376Agile._derive_iterated_hash_from_password(password, saltValue, hashAlgorithm, spinValue)
key1 = ECMA376Agile._derive_encryption_key(h.digest(), block1, hashAlgorithm, keyBits)
key2 = ECMA376Agile._derive_encryption_key(h.digest(), block2, hashAlgorithm, keyBits)
hash_input = _decrypt_aes_cbc(encryptedVerifierHashInput, key1, saltValue)
hashCalc = _get_hash_func(hashAlgorithm)
        actual_hash = hashCalc(hash_input)
        actual_hash = actual_hash.digest()
        expected_hash = _decrypt_aes_cbc(encryptedVerifierHashValue, key2, saltValue)
        return actual_hash == expected_hash
@staticmethod
def verify_integrity(secretKey, keyDataSalt, keyDataHashAlgorithm, keyDataBlockSize, encryptedHmacKey, encryptedHmacValue, stream):
r"""
Return True if the HMAC of the data payload is valid.
"""
# NOTE: See https://docs.microsoft.com/en-us/openspecs/office_file_formats/ms-offcrypto/63d9c262-82b9-4fa3-a06d-d087b93e3b00
block4 = bytearray([0x5F, 0xB2, 0xAD, 0x01, 0x0C, 0xB9, 0xE1, 0xF6])
block5 = bytearray([0xA0, 0x67, 0x7F, 0x02, 0xB2, 0x2C, 0x84, 0x33])
hashCalc = _get_hash_func(keyDataHashAlgorithm)
iv1 = hashCalc(keyDataSalt + block4).digest()
iv1 = iv1[:keyDataBlockSize]
iv2 = hashCalc(keyDataSalt + block5).digest()
iv2 = iv2[:keyDataBlockSize]
hmacKey = _decrypt_aes_cbc(encryptedHmacKey, secretKey, iv1)
hmacValue = _decrypt_aes_cbc(encryptedHmacValue, secretKey, iv2)
msg_hmac = hmac.new(hmacKey, stream.read(), hashCalc)
actualHmac = msg_hmac.digest()
stream.seek(0)
return hmacValue == actualHmac
@staticmethod
def makekey_from_privkey(privkey, encryptedKeyValue):
privkey = serialization.load_pem_private_key(privkey.read(), password=None, backend=default_backend())
skey = privkey.decrypt(encryptedKeyValue, padding.PKCS1v15())
return skey
@staticmethod
def makekey_from_password(password, saltValue, hashAlgorithm, encryptedKeyValue, spinValue, keyBits):
r"""
Generate intermediate key from given password.
>>> password = 'Password1234_'
>>> saltValue = b'Lr]E\xdca\x0f\x93\x94\x12\xa0M\xa7\x91\x04f'
>>> hashAlgorithm = 'SHA512'
>>> encryptedKeyValue = b"\xa1l\xd5\x16Zz\xb9\xd2q\x11>\xd3\x86\xa7\x8c\xf4\x96\x92\xe8\xe5'\xb0\xc5\xfc\x00U\xed\x08\x0b|\xb9K"
>>> spinValue = 100000
>>> keyBits = 256
>>> expected = b'@ f\t\xd9\xfa\xad\xf2K\x07j\xeb\xf2\xc45\xb7B\x92\xc8\xb8\xa7\xaa\x81\xbcg\x9b\xe8\x97\x11\xb0*\xc2'
>>> ECMA376Agile.makekey_from_password(password, saltValue, hashAlgorithm, encryptedKeyValue, spinValue, keyBits) == expected
True
"""
block3 = bytearray([0x14, 0x6E, 0x0B, 0xE7, 0xAB, 0xAC, 0xD0, 0xD6])
h = ECMA376Agile._derive_iterated_hash_from_password(password, saltValue, hashAlgorithm, spinValue)
encryption_key = ECMA376Agile._derive_encryption_key(h.digest(), block3, hashAlgorithm, keyBits)
skey = _decrypt_aes_cbc(encryptedKeyValue, encryption_key, saltValue)
return skey
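# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of how the static helpers above are usually chained to
# decrypt an agile-encrypted package: verify the password, derive the secret key,
# then decrypt the package stream. Every value below (salts, encrypted blobs,
# encrypted_package bytes) is a placeholder the caller would read from the file's
# EncryptionInfo/EncryptedPackage streams; a single hashAlgorithm is assumed for
# both the password encryptor and the keyData section.
def _example_decrypt_flow(password, passwordSalt, keyDataSalt, hashAlgorithm,
                          encryptedVerifierHashInput, encryptedVerifierHashValue,
                          encryptedKeyValue, spinValue, keyBits, encrypted_package):
    # 1. Reject wrong passwords early using the verifier blobs.
    if not ECMA376Agile.verify_password(password, passwordSalt, hashAlgorithm,
                                        encryptedVerifierHashInput, encryptedVerifierHashValue,
                                        spinValue, keyBits):
        raise ValueError("invalid password")
    # 2. Derive the intermediate secret key from the password.
    secret_key = ECMA376Agile.makekey_from_password(password, passwordSalt, hashAlgorithm,
                                                    encryptedKeyValue, spinValue, keyBits)
    # 3. Decrypt the payload; decrypt() expects a file-like object positioned at its start.
    return ECMA376Agile.decrypt(secret_key, keyDataSalt, hashAlgorithm, io.BytesIO(encrypted_package))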
| 41.635417
| 156
| 0.662497
|
6414b9784cd4e7d660bea40b247cd33e8ffbdbd0
| 715
|
py
|
Python
|
var/spack/repos/builtin/packages/xprop/package.py
|
HaochengLIU/spack
|
26e51ff1705a4d6234e2a0cf734f93f7f95df5cb
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2
|
2018-11-27T03:39:44.000Z
|
2021-09-06T15:50:35.000Z
|
var/spack/repos/builtin/packages/xprop/package.py
|
HaochengLIU/spack
|
26e51ff1705a4d6234e2a0cf734f93f7f95df5cb
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2019-01-11T20:11:52.000Z
|
2019-01-11T20:11:52.000Z
|
var/spack/repos/builtin/packages/xprop/package.py
|
HaochengLIU/spack
|
26e51ff1705a4d6234e2a0cf734f93f7f95df5cb
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2020-10-14T14:20:17.000Z
|
2020-10-14T14:20:17.000Z
|
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Xprop(AutotoolsPackage):
"""xprop is a command line tool to display and/or set window and font
properties of an X server."""
homepage = "http://cgit.freedesktop.org/xorg/app/xprop"
url = "https://www.x.org/archive/individual/app/xprop-1.2.2.tar.gz"
version('1.2.2', 'db03a6bcf7b0d0c2e691ea3083277cbc')
depends_on('libx11')
depends_on('xproto@7.0.17:', type='build')
depends_on('pkgconfig', type='build')
depends_on('util-macros', type='build')
| 31.086957
| 76
| 0.703497
|
b2cbe8b7f0e18ce727d40f3e76f3ee3006eebf33
| 358
|
py
|
Python
|
Walkthru_14/walkthru14.py
|
Witziger/Walkthru-Python
|
5a06c1f7730df07849c19c3a118a270f309aaf56
|
[
"MIT"
] | null | null | null |
Walkthru_14/walkthru14.py
|
Witziger/Walkthru-Python
|
5a06c1f7730df07849c19c3a118a270f309aaf56
|
[
"MIT"
] | null | null | null |
Walkthru_14/walkthru14.py
|
Witziger/Walkthru-Python
|
5a06c1f7730df07849c19c3a118a270f309aaf56
|
[
"MIT"
] | null | null | null |
stuff = list()
stuff.append('python')
stuff.append('chuck')
stuff.sort()
print (stuff[0])
print (stuff.__getitem__(0))
print (list.__getitem__(stuff,0))
print(dir(stuff))
class PartyAnimal:
x = 0
def party(self) :
self.x = self.x + 1
print("So far", self.x)
an = PartyAnimal()
an.party()
an.party()
an.party()
PartyAnimal.party(an)
| 17.047619
| 33
| 0.645251
|
faf3f4b2ea0756400fa4813e66287ccc06e299ac
| 2,456
|
py
|
Python
|
setup_support.py
|
litepresence/secp256k1prp-py
|
499cdedc9687392761124a506d47c53658c2ca1b
|
[
"MIT"
] | 2
|
2018-05-12T07:53:10.000Z
|
2018-06-04T14:39:35.000Z
|
setup_support.py
|
litepresence/secp256k1prp-py
|
499cdedc9687392761124a506d47c53658c2ca1b
|
[
"MIT"
] | null | null | null |
setup_support.py
|
litepresence/secp256k1prp-py
|
499cdedc9687392761124a506d47c53658c2ca1b
|
[
"MIT"
] | null | null | null |
import glob
import os
import shutil
from contextlib import contextmanager
from tempfile import mkdtemp
import subprocess
@contextmanager
def workdir():
cwd = os.getcwd()
tmpdir = mkdtemp()
os.chdir(tmpdir)
try:
yield
finally:
os.chdir(cwd)
shutil.rmtree(tmpdir)
@contextmanager
def redirect(stdchannel, dest_filename):
oldstdchannel = os.dup(stdchannel.fileno())
dest_file = open(dest_filename, 'w')
os.dup2(dest_file.fileno(), stdchannel.fileno())
try:
yield
finally:
if oldstdchannel is not None:
os.dup2(oldstdchannel, stdchannel.fileno())
if dest_file is not None:
dest_file.close()
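# Hedged usage sketch (not part of the original file): the two context managers
# above are typically combined to run a noisy build step inside a throwaway
# directory while capturing its stdout; the command and log name are placeholders.
def _example_quiet_build(cmd=("make",), log_name="build.log"):
    import sys  # local import; the original module does not import sys
    with workdir():
        with redirect(sys.stdout, log_name):
            subprocess.check_call(list(cmd))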
def absolute(*paths):
op = os.path
return op.realpath(op.abspath(op.join(op.dirname(__file__), *paths)))
def build_flags(library, type_, path):
"""Return separated build flags from pkg-config output"""
pkg_config_path = [path]
if "PKG_CONFIG_PATH" in os.environ:
pkg_config_path.append(os.environ['PKG_CONFIG_PATH'])
if "LIB_DIR" in os.environ:
pkg_config_path.append(os.environ['LIB_DIR'])
pkg_config_path.append(os.path.join(os.environ['LIB_DIR'], "pkgconfig"))
options = [
"--static",
{
'I': "--cflags-only-I",
'L': "--libs-only-L",
'l': "--libs-only-l"
}[type_]
]
return [
flag.strip("-{}".format(type_))
for flag
in subprocess.check_output(
["pkg-config"] + options + [library],
env=dict(os.environ, PKG_CONFIG_PATH=":".join(pkg_config_path))
).decode("UTF-8").split()
]
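# Hedged usage sketch (not from the original file): build_flags() is usually called
# once per flag type to assemble compiler/linker settings for a pkg-config package;
# the package name and pkg-config path below are placeholders.
def _example_collect_flags(library="libsecp256k1", pkgconfig_dir="/usr/lib/pkgconfig"):
    include_dirs = build_flags(library, 'I', pkgconfig_dir)
    library_dirs = build_flags(library, 'L', pkgconfig_dir)
    libraries = build_flags(library, 'l', pkgconfig_dir)
    return include_dirs, library_dirs, libraries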
def _find_lib():
from cffi import FFI
ffi = FFI()
try:
ffi.dlopen("secp256k1")
except OSError:
if 'LIB_DIR' in os.environ:
for path in glob.glob(os.path.join(os.environ['LIB_DIR'], "*secp256k1*")):
try:
FFI().dlopen(path)
return True
except OSError:
pass
# We couldn't locate libsecp256k1 so we'll use the bundled one
return False
else:
# If we got this far then the system library should be good enough
return True
_has_system_lib = None
def has_system_lib():
    return False  # NOTE: forced to False so the bundled libsecp256k1 is always built; the detection below is never reached
global _has_system_lib
if _has_system_lib is None:
_has_system_lib = _find_lib()
return _has_system_lib
| 24.808081
| 86
| 0.595277
|
fa34fcf7a2b8c8267d88c2f928576c31eca9d268
| 4,715
|
py
|
Python
|
docs/source/conf.py
|
asdfgallardo/horoscopejs
|
37090c165239f94b3ee95cc46a1d65af76265888
|
[
"Apache-2.0"
] | 153
|
2017-06-24T20:31:00.000Z
|
2022-03-31T13:25:20.000Z
|
docs/source/conf.py
|
asdfgallardo/horoscopejs
|
37090c165239f94b3ee95cc46a1d65af76265888
|
[
"Apache-2.0"
] | 13
|
2017-10-01T12:39:22.000Z
|
2022-01-09T02:59:09.000Z
|
docs/source/conf.py
|
asdfgallardo/horoscopejs
|
37090c165239f94b3ee95cc46a1d65af76265888
|
[
"Apache-2.0"
] | 54
|
2017-07-03T07:16:03.000Z
|
2022-02-11T01:54:31.000Z
|
# -*- coding: utf-8 -*-
#
# aztro documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 23 22:49:00 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['ntemplates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'aztro'
copyright = u'2021, Sameer Kumar'
author = u'Sameer Kumar'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['nstatic']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'aztrodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'aztro.tex', u'aztro Documentation',
u'Sameer Kumar', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'aztro', u'aztro Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'aztro', u'aztro Documentation',
author, 'aztro', 'One line description of project.',
'Miscellaneous'),
]
suppress_warnings = ['image.nonlocal_uri']
| 29.841772
| 79
| 0.679745
|
c355f278844f58a7f1ad1f6b1c0b3a758e3a231c
| 1,099
|
py
|
Python
|
screamshot/browser_manager_script.py
|
makinacorpus/screamshot
|
ad49ad1d6ce389db0f89ca933f7144e33c80cd43
|
[
"BSD-2-Clause"
] | 2
|
2019-04-15T13:22:16.000Z
|
2019-04-17T23:51:16.000Z
|
screamshot/browser_manager_script.py
|
makinacorpus/screamshot
|
ad49ad1d6ce389db0f89ca933f7144e33c80cd43
|
[
"BSD-2-Clause"
] | 19
|
2019-04-16T11:20:28.000Z
|
2022-03-11T23:45:33.000Z
|
screamshot/browser_manager_script.py
|
makinacorpus/screamshot
|
ad49ad1d6ce389db0f89ca933f7144e33c80cd43
|
[
"BSD-2-Clause"
] | 7
|
2019-04-17T11:39:42.000Z
|
2019-04-29T15:20:22.000Z
|
"""
Create or close a browser
"""
from argparse import ArgumentParser
from screamshot.utils import close_browser, open_browser, to_sync
def main():
parser = ArgumentParser(description=__doc__)
# Mandatory arguments
group = parser.add_mutually_exclusive_group()
group.add_argument("-c", "--close", action="store_true", help="Close the browsers in the \
endpointlist.txt file")
group.add_argument("-o", "--open", action="store_true", help="Open a browser and store its \
websocket endpoint in endpointlist.txt")
    # Optional arguments
parser.add_argument("-g", "--graphic", dest="headless", action="store_false", help="Open the \
browser in graphic mode")
parser.add_argument("-ns", "--no-sandbox", action="store_const", const=["--no-sandbox"],
default="[]", help="Open the browser without sandbox")
args = parser.parse_args()
if args.close:
to_sync(close_browser())
if args.open:
to_sync(open_browser(args.headless, launch_args=args.no_sandbox))
if __name__ == '__main__':
main()
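# Hedged usage examples (not part of the original script); the module path is an
# assumption about how the package is laid out:
#   python -m screamshot.browser_manager_script -o         # open a headless browser
#   python -m screamshot.browser_manager_script -o -g -ns  # open a visible browser without the sandbox
#   python -m screamshot.browser_manager_script -c         # close the browsers listed in endpointlist.txt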
| 30.527778
| 98
| 0.66879
|
693972491ff9268c7ad6d4c6a074762fcf9dc13c
| 56,391
|
py
|
Python
|
test_syncobj.py
|
Troyhy/PySyncObj
|
33a70bd237efae0713992c6c9db87ce128286a4f
|
[
"MIT"
] | null | null | null |
test_syncobj.py
|
Troyhy/PySyncObj
|
33a70bd237efae0713992c6c9db87ce128286a4f
|
[
"MIT"
] | null | null | null |
test_syncobj.py
|
Troyhy/PySyncObj
|
33a70bd237efae0713992c6c9db87ce128286a4f
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import os
import time
import pytest
import random
import threading
import sys
import pysyncobj.pickle as pickle
if sys.version_info >= (3, 0):
xrange = range
from functools import partial
import functools
import struct
import logging
from pysyncobj import SyncObj, SyncObjConf, replicated, FAIL_REASON, _COMMAND_TYPE, \
createJournal, HAS_CRYPTO, replicated_sync, Utility, SyncObjException, SyncObjConsumer, _RAFT_STATE
from pysyncobj.batteries import ReplCounter, ReplList, ReplDict, ReplSet, ReplLockManager, ReplQueue, ReplPriorityQueue
from pysyncobj.node import TCPNode
from collections import defaultdict
logging.basicConfig(format=u'[%(asctime)s %(filename)s:%(lineno)d %(levelname)s] %(message)s', level=logging.DEBUG)
_bchr = functools.partial(struct.pack, 'B')
class TEST_TYPE:
DEFAULT = 0
COMPACTION_1 = 1
COMPACTION_2 = 2
RAND_1 = 3
JOURNAL_1 = 4
AUTO_TICK_1 = 5
WAIT_BIND = 6
LARGE_COMMAND = 7
class TestObj(SyncObj):
def __init__(self, selfNodeAddr, otherNodeAddrs,
testType=TEST_TYPE.DEFAULT,
compactionMinEntries=0,
dumpFile=None,
journalFile=None,
password=None,
dynamicMembershipChange=False,
useFork=True,
testBindAddr=False,
consumers=None,
onStateChanged=None,
leaderFallbackTimeout=None):
cfg = SyncObjConf(autoTick=False, appendEntriesUseBatch=False)
cfg.appendEntriesPeriod = 0.1
cfg.raftMinTimeout = 0.5
cfg.raftMaxTimeout = 1.0
cfg.dynamicMembershipChange = dynamicMembershipChange
cfg.onStateChanged = onStateChanged
if leaderFallbackTimeout is not None:
cfg.leaderFallbackTimeout = leaderFallbackTimeout
if testBindAddr:
cfg.bindAddress = selfNodeAddr
if dumpFile is not None:
cfg.fullDumpFile = dumpFile
if password is not None:
cfg.password = password
cfg.useFork = useFork
if testType == TEST_TYPE.COMPACTION_1:
cfg.logCompactionMinEntries = compactionMinEntries
cfg.logCompactionMinTime = 0.1
cfg.appendEntriesUseBatch = True
if testType == TEST_TYPE.COMPACTION_2:
cfg.logCompactionMinEntries = 99999
cfg.logCompactionMinTime = 99999
cfg.fullDumpFile = dumpFile
if testType == TEST_TYPE.LARGE_COMMAND:
cfg.connectionTimeout = 15.0
cfg.logCompactionMinEntries = 99999
cfg.logCompactionMinTime = 99999
cfg.fullDumpFile = dumpFile
cfg.raftMinTimeout = 1.5
cfg.raftMaxTimeout = 2.5
# cfg.appendEntriesBatchSizeBytes = 2 ** 13
if testType == TEST_TYPE.RAND_1:
cfg.autoTickPeriod = 0.05
cfg.appendEntriesPeriod = 0.02
cfg.raftMinTimeout = 0.1
cfg.raftMaxTimeout = 0.2
cfg.logCompactionMinTime = 9999999
cfg.logCompactionMinEntries = 9999999
cfg.journalFile = journalFile
if testType == TEST_TYPE.JOURNAL_1:
cfg.logCompactionMinTime = 999999
cfg.logCompactionMinEntries = 999999
cfg.fullDumpFile = dumpFile
cfg.journalFile = journalFile
if testType == TEST_TYPE.AUTO_TICK_1:
cfg.autoTick = True
cfg.pollerType = 'select'
if testType == TEST_TYPE.WAIT_BIND:
cfg.maxBindRetries = 1
cfg.autoTick = True
super(TestObj, self).__init__(selfNodeAddr, otherNodeAddrs, cfg, consumers)
self.__counter = 0
self.__data = {}
@replicated
def addValue(self, value):
self.__counter += value
return self.__counter
@replicated
def addKeyValue(self, key, value):
self.__data[key] = value
@replicated_sync
def addValueSync(self, value):
self.__counter += value
return self.__counter
@replicated
def testMethod(self):
self.__data['testKey'] = 'valueVer1'
@replicated(ver=1)
def testMethod(self):
self.__data['testKey'] = 'valueVer2'
def getCounter(self):
return self.__counter
def getValue(self, key):
return self.__data.get(key, None)
def dumpKeys(self):
print('keys:', sorted(self.__data.keys()))
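# Hedged sketch (not part of the original tests): the bare pysyncobj pattern that
# TestObj wraps above -- a SyncObj subclass whose mutations go through @replicated
# methods while reads stay local; the node addresses are placeholders supplied by
# the caller.
class _ExampleCounter(SyncObj):
    def __init__(self, selfAddr, otherAddrs):
        super(_ExampleCounter, self).__init__(selfAddr, otherAddrs)
        self.__value = 0
    @replicated
    def add(self, delta):
        self.__value += delta
    def get(self):
        return self.__value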
def singleTickFunc(o, timeToTick, interval, stopFunc):
currTime = time.time()
finishTime = currTime + timeToTick
while time.time() < finishTime:
o._onTick(interval)
if stopFunc is not None:
if stopFunc():
break
def utilityTickFunc(args, currRes, key, timeToTick):
u = Utility(args)
currTime = time.time()
finishTime = currTime + timeToTick
while time.time() < finishTime:
if u.getResult() is not None:
currRes[key] = u.getResult()
break
def doSyncObjAdminTicks(objects, arguments, timeToTick, currRes, interval=0.05, stopFunc=None):
objThreads = []
utilityThreads = []
for o in objects:
t1 = threading.Thread(target=singleTickFunc, args=(o, timeToTick, interval, stopFunc))
t1.start()
objThreads.append(t1)
if arguments.get(o) is not None:
t2 = threading.Thread(target=utilityTickFunc, args=(arguments[o], currRes, o, timeToTick))
t2.start()
utilityThreads.append(t2)
for t in objThreads:
t.join()
for t in utilityThreads:
t.join()
def doTicks(objects, timeToTick, interval=0.05, stopFunc=None):
threads = []
for o in objects:
t = threading.Thread(target=singleTickFunc, args=(o, timeToTick, interval, stopFunc))
t.start()
threads.append(t)
for t in threads:
t.join()
def doAutoTicks(interval=0.05, stopFunc=None):
deadline = time.time() + interval
while not stopFunc():
time.sleep(0.02)
t2 = time.time()
if t2 >= deadline:
break
_g_nextAddress = 6000 + 60 * (int(time.time()) % 600)
def getNextAddr(ipv6=False, isLocalhost=False):
global _g_nextAddress
_g_nextAddress += 1
if ipv6:
return '::1:%d' % _g_nextAddress
if isLocalhost:
return 'localhost:%d' % _g_nextAddress
return '127.0.0.1:%d' % _g_nextAddress
_g_nextDumpFile = 1
_g_nextJournalFile = 1
def getNextDumpFile():
global _g_nextDumpFile
fname = 'dump%d.bin' % _g_nextDumpFile
_g_nextDumpFile += 1
return fname
def getNextJournalFile():
global _g_nextJournalFile
fname = 'journal%d.bin' % _g_nextJournalFile
_g_nextJournalFile += 1
return fname
def test_syncTwoObjects():
random.seed(42)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]])
o2 = TestObj(a[1], [a[0]])
objs = [o1, o2]
assert not o1._isReady()
assert not o2._isReady()
doTicks(objs, 10.0, stopFunc=lambda: o1._isReady() and o2._isReady())
o1.waitBinded()
o2.waitBinded()
o1._printStatus()
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1._isReady()
assert o2._isReady()
o1.addValue(150)
o2.addValue(200)
doTicks(objs, 10.0, stopFunc=lambda: o1.getCounter() == 350 and o2.getCounter() == 350)
assert o1._isReady()
assert o2._isReady()
assert o1.getCounter() == 350
assert o2.getCounter() == 350
o1._destroy()
o2._destroy()
def test_singleObject():
random.seed(42)
a = [getNextAddr(), ]
o1 = TestObj(a[0], [])
objs = [o1, ]
assert not o1._isReady()
doTicks(objs, 3.0, stopFunc=lambda: o1._isReady())
o1._printStatus()
assert o1._getLeader().address in a
assert o1._isReady()
o1.addValue(150)
o1.addValue(200)
doTicks(objs, 3.0, stopFunc=lambda: o1.getCounter() == 350)
assert o1._isReady()
assert o1.getCounter() == 350
o1._destroy()
def test_syncThreeObjectsLeaderFail():
random.seed(12)
a = [getNextAddr(), getNextAddr(), getNextAddr()]
states = defaultdict(list)
o1 = TestObj(a[0], [a[1], a[2]], testBindAddr=True, onStateChanged=lambda old, new: states[a[0]].append(new))
o2 = TestObj(a[1], [a[2], a[0]], testBindAddr=True, onStateChanged=lambda old, new: states[a[1]].append(new))
o3 = TestObj(a[2], [a[0], a[1]], testBindAddr=True, onStateChanged=lambda old, new: states[a[2]].append(new))
objs = [o1, o2, o3]
assert not o1._isReady()
assert not o2._isReady()
assert not o3._isReady()
doTicks(objs, 10.0, stopFunc=lambda: o1._isReady() and o2._isReady() and o3._isReady())
assert o1._isReady()
assert o2._isReady()
assert o3._isReady()
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1._getLeader() == o3._getLeader()
assert _RAFT_STATE.LEADER in states[o1._getLeader().address]
o1.addValue(150)
o2.addValue(200)
doTicks(objs, 10.0, stopFunc=lambda: o3.getCounter() == 350)
assert o3.getCounter() == 350
prevLeader = o1._getLeader()
newObjs = [o for o in objs if o._SyncObj__selfNode != prevLeader]
assert len(newObjs) == 2
doTicks(newObjs, 10.0, stopFunc=lambda: newObjs[0]._getLeader() != prevLeader and \
newObjs[0]._getLeader() is not None and \
newObjs[0]._getLeader().address in a and \
newObjs[0]._getLeader() == newObjs[1]._getLeader())
assert newObjs[0]._getLeader() != prevLeader
assert newObjs[0]._getLeader().address in a
assert newObjs[0]._getLeader() == newObjs[1]._getLeader()
assert _RAFT_STATE.LEADER in states[newObjs[0]._getLeader().address]
newObjs[1].addValue(50)
doTicks(newObjs, 10, stopFunc=lambda: newObjs[0].getCounter() == 400)
assert newObjs[0].getCounter() == 400
doTicks(objs, 10.0, stopFunc=lambda: sum([int(o.getCounter() == 400) for o in objs]) == len(objs))
for o in objs:
assert o.getCounter() == 400
o1._destroy()
o2._destroy()
o3._destroy()
def test_manyActionsLogCompaction():
random.seed(42)
a = [getNextAddr(), getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1], a[2]], TEST_TYPE.COMPACTION_1, compactionMinEntries=100)
o2 = TestObj(a[1], [a[2], a[0]], TEST_TYPE.COMPACTION_1, compactionMinEntries=100)
o3 = TestObj(a[2], [a[0], a[1]], TEST_TYPE.COMPACTION_1, compactionMinEntries=100)
objs = [o1, o2, o3]
assert not o1._isReady()
assert not o2._isReady()
assert not o3._isReady()
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady() and o3._isReady())
assert o1._isReady()
assert o2._isReady()
assert o3._isReady()
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1._getLeader() == o3._getLeader()
for i in xrange(0, 500):
o1.addValue(1)
o2.addValue(1)
doTicks(objs, 10, stopFunc=lambda: o1.getCounter() == 1000 and \
o2.getCounter() == 1000 and \
o3.getCounter() == 1000)
assert o1.getCounter() == 1000
assert o2.getCounter() == 1000
assert o3.getCounter() == 1000
assert o1._getRaftLogSize() <= 100
assert o2._getRaftLogSize() <= 100
assert o3._getRaftLogSize() <= 100
newObjs = [o1, o2]
doTicks(newObjs, 10, stopFunc=lambda: o3._getLeader() is None)
for i in xrange(0, 500):
o1.addValue(1)
o2.addValue(1)
doTicks(newObjs, 10, stopFunc=lambda: o1.getCounter() == 2000 and \
o2.getCounter() == 2000)
assert o1.getCounter() == 2000
assert o2.getCounter() == 2000
assert o3.getCounter() != 2000
doTicks(objs, 10, stopFunc=lambda: o3.getCounter() == 2000)
assert o3.getCounter() == 2000
assert o1._getRaftLogSize() <= 100
assert o2._getRaftLogSize() <= 100
assert o3._getRaftLogSize() <= 100
o1._destroy()
o2._destroy()
o3._destroy()
def onAddValue(res, err, info):
assert res == 3
assert err == FAIL_REASON.SUCCESS
info['callback'] = True
def test_checkCallbacksSimple():
random.seed(42)
a = [getNextAddr(), getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1], a[2]])
o2 = TestObj(a[1], [a[2], a[0]])
o3 = TestObj(a[2], [a[0], a[1]])
objs = [o1, o2, o3]
assert not o1._isReady()
assert not o2._isReady()
assert not o3._isReady()
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady() and o3._isReady())
assert o1._isReady()
assert o2._isReady()
assert o3._isReady()
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1._getLeader() == o3._getLeader()
callbackInfo = {
'callback': False
}
o1.addValue(3, callback=partial(onAddValue, info=callbackInfo))
doTicks(objs, 10, stopFunc=lambda: o2.getCounter() == 3 and callbackInfo['callback'] == True)
assert o2.getCounter() == 3
assert callbackInfo['callback'] == True
o1._destroy()
o2._destroy()
o3._destroy()
def removeFiles(files):
for f in (files):
if os.path.isfile(f):
for i in xrange(0, 15):
try:
if os.path.isfile(f):
os.remove(f)
break
else:
break
except:
time.sleep(1.0)
def checkDumpToFile(useFork):
dumpFiles = [getNextDumpFile(), getNextDumpFile()]
removeFiles(dumpFiles)
random.seed(42)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], TEST_TYPE.COMPACTION_2, dumpFile=dumpFiles[0], useFork=useFork)
o2 = TestObj(a[1], [a[0]], TEST_TYPE.COMPACTION_2, dumpFile=dumpFiles[1], useFork=useFork)
objs = [o1, o2]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
o1.addValue(150)
o2.addValue(200)
doTicks(objs, 10, stopFunc=lambda: o1.getCounter() == 350 and o2.getCounter() == 350)
assert o1.getCounter() == 350
assert o2.getCounter() == 350
o1._forceLogCompaction()
o2._forceLogCompaction()
doTicks(objs, 1.5)
o1._destroy()
o2._destroy()
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], TEST_TYPE.COMPACTION_2, dumpFile=dumpFiles[0], useFork=useFork)
o2 = TestObj(a[1], [a[0]], TEST_TYPE.COMPACTION_2, dumpFile=dumpFiles[1], useFork=useFork)
objs = [o1, o2]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._isReady()
assert o2._isReady()
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1.getCounter() == 350
assert o2.getCounter() == 350
o1._destroy()
o2._destroy()
removeFiles(dumpFiles)
def test_checkDumpToFile():
if hasattr(os, 'fork'):
checkDumpToFile(True)
checkDumpToFile(False)
def getRandStr():
return '%0100000x' % random.randrange(16 ** 100000)
def test_checkBigStorage():
dumpFiles = [getNextDumpFile(), getNextDumpFile()]
removeFiles(dumpFiles)
random.seed(42)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], TEST_TYPE.COMPACTION_2, dumpFile=dumpFiles[0])
o2 = TestObj(a[1], [a[0]], TEST_TYPE.COMPACTION_2, dumpFile=dumpFiles[1])
objs = [o1, o2]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
# Store ~50Mb data.
testRandStr = getRandStr()
for i in xrange(0, 500):
o1.addKeyValue(i, getRandStr())
o1.addKeyValue('test', testRandStr)
# Wait for replication.
doTicks(objs, 60, stopFunc=lambda: o1.getValue('test') == testRandStr and \
o2.getValue('test') == testRandStr)
assert o1.getValue('test') == testRandStr
o1._forceLogCompaction()
o2._forceLogCompaction()
# Wait for disk dump
doTicks(objs, 8.0)
o1._destroy()
o2._destroy()
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], TEST_TYPE.COMPACTION_2, dumpFile=dumpFiles[0])
o2 = TestObj(a[1], [a[0]], TEST_TYPE.COMPACTION_2, dumpFile=dumpFiles[1])
objs = [o1, o2]
# Wait for disk load, election and replication
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1.getValue('test') == testRandStr
assert o2.getValue('test') == testRandStr
o1._destroy()
o2._destroy()
removeFiles(dumpFiles)
def test_encryptionCorrectPassword():
assert HAS_CRYPTO
random.seed(42)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], password='asd')
o2 = TestObj(a[1], [a[0]], password='asd')
objs = [o1, o2]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
o1.addValue(150)
o2.addValue(200)
doTicks(objs, 10, stopFunc=lambda: o1.getCounter() == 350 and o2.getCounter() == 350)
assert o1.getCounter() == 350
assert o2.getCounter() == 350
o1._destroy()
o2._destroy()
def test_encryptionWrongPassword():
assert HAS_CRYPTO
random.seed(12)
a = [getNextAddr(), getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1], a[2]], password='asd')
o2 = TestObj(a[1], [a[2], a[0]], password='asd')
o3 = TestObj(a[2], [a[0], a[1]], password='qwe')
objs = [o1, o2, o3]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
doTicks(objs, 1.0)
assert o3._getLeader() is None
o1._destroy()
o2._destroy()
o3._destroy()
def _checkSameLeader(objs):
for obj1 in objs:
l1 = obj1._getLeader()
if l1 != obj1._SyncObj__selfNode:
continue
t1 = obj1._getTerm()
for obj2 in objs:
l2 = obj2._getLeader()
if l2 != obj2._SyncObj__selfNode:
continue
if obj2._getTerm() != t1:
continue
if l2 != l1:
obj1._printStatus()
obj2._printStatus()
return False
return True
def _checkSameLeader2(objs):
for obj1 in objs:
l1 = obj1._getLeader()
if l1 is None:
continue
t1 = obj1._getTerm()
for obj2 in objs:
l2 = obj2._getLeader()
if l2 is None:
continue
if obj2._getTerm() != t1:
continue
if l2 != l1:
obj1._printStatus()
obj2._printStatus()
return False
return True
def test_randomTest1():
journalFiles = [getNextJournalFile(), getNextJournalFile(), getNextJournalFile()]
removeFiles(journalFiles)
random.seed(12)
a = [getNextAddr(), getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1], a[2]], TEST_TYPE.RAND_1, journalFile=journalFiles[0])
o2 = TestObj(a[1], [a[2], a[0]], TEST_TYPE.RAND_1, journalFile=journalFiles[1])
o3 = TestObj(a[2], [a[0], a[1]], TEST_TYPE.RAND_1, journalFile=journalFiles[2])
objs = [o1, o2, o3]
st = time.time()
while time.time() - st < 120.0:
doTicks(objs, random.random() * 0.3, interval=0.05)
assert _checkSameLeader(objs)
assert _checkSameLeader2(objs)
for i in xrange(0, random.randint(0, 2)):
random.choice(objs).addValue(random.randint(0, 10))
newObjs = list(objs)
newObjs.pop(random.randint(0, len(newObjs) - 1))
doTicks(newObjs, random.random() * 0.3, interval=0.05)
assert _checkSameLeader(objs)
assert _checkSameLeader2(objs)
for i in xrange(0, random.randint(0, 2)):
random.choice(objs).addValue(random.randint(0, 10))
if not (o1.getCounter() == o2.getCounter() == o3.getCounter()):
print(time.time(), 'counters:', o1.getCounter(), o2.getCounter(), o3.getCounter())
st = time.time()
while not (o1.getCounter() == o2.getCounter() == o3.getCounter()):
doTicks(objs, 2.0, interval=0.05)
if time.time() - st > 30:
break
if not (o1.getCounter() == o2.getCounter() == o3.getCounter()):
o1._printStatus()
o2._printStatus()
o3._printStatus()
print('Logs same:', o1._SyncObj__raftLog == o2._SyncObj__raftLog == o3._SyncObj__raftLog)
print(time.time(), 'counters:', o1.getCounter(), o2.getCounter(), o3.getCounter())
raise AssertionError('Values not equal')
counter = o1.getCounter()
o1._destroy()
o2._destroy()
o3._destroy()
del o1
del o2
del o3
time.sleep(0.1)
o1 = TestObj(a[0], [a[1], a[2]], TEST_TYPE.RAND_1, journalFile=journalFiles[0])
o2 = TestObj(a[1], [a[2], a[0]], TEST_TYPE.RAND_1, journalFile=journalFiles[1])
o3 = TestObj(a[2], [a[0], a[1]], TEST_TYPE.RAND_1, journalFile=journalFiles[2])
objs = [o1, o2, o3]
st = time.time()
while not (o1.getCounter() == o2.getCounter() == o3.getCounter() == counter):
doTicks(objs, 2.0, interval=0.05)
if time.time() - st > 30:
break
if not (o1.getCounter() == o2.getCounter() == o3.getCounter() >= counter):
o1._printStatus()
o2._printStatus()
o3._printStatus()
print('Logs same:', o1._SyncObj__raftLog == o2._SyncObj__raftLog == o3._SyncObj__raftLog)
print(time.time(), 'counters:', o1.getCounter(), o2.getCounter(), o3.getCounter(), counter)
raise AssertionError('Values not equal')
removeFiles(journalFiles)
# Ensure that raftLog after serialization is the same as in serialized data
def test_logCompactionRegressionTest1():
random.seed(42)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]])
o2 = TestObj(a[1], [a[0]])
objs = [o1, o2]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
o1._forceLogCompaction()
doTicks(objs, 0.5)
assert o1._SyncObj__forceLogCompaction == False
logAfterCompaction = o1._SyncObj__raftLog
o1._SyncObj__loadDumpFile(True)
logAfterDeserialize = o1._SyncObj__raftLog
assert logAfterCompaction == logAfterDeserialize
o1._destroy()
o2._destroy()
def test_logCompactionRegressionTest2():
dumpFiles = [getNextDumpFile(), getNextDumpFile(), getNextDumpFile()]
removeFiles(dumpFiles)
random.seed(12)
a = [getNextAddr(), getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1], a[2]], dumpFile=dumpFiles[0])
o2 = TestObj(a[1], [a[2], a[0]], dumpFile=dumpFiles[1])
o3 = TestObj(a[2], [a[0], a[1]], dumpFile=dumpFiles[2])
objs = [o1, o2]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady())
objs = [o1, o2, o3]
o1.addValue(2)
o1.addValue(3)
doTicks(objs, 10, stopFunc=lambda: o3.getCounter() == 5)
o3._forceLogCompaction()
doTicks(objs, 0.5)
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader() == o3._getLeader()
o3._destroy()
objs = [o1, o2]
o1.addValue(2)
o1.addValue(3)
doTicks(objs, 0.5)
o1._forceLogCompaction()
o2._forceLogCompaction()
doTicks(objs, 0.5)
o3 = TestObj(a[2], [a[0], a[1]], dumpFile=dumpFiles[2])
objs = [o1, o2, o3]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady() and o3._isReady())
assert o1._isReady()
assert o2._isReady()
assert o3._isReady()
o1._destroy()
o2._destroy()
o3._destroy()
removeFiles(dumpFiles)
def __checkParnerNodeExists(obj, nodeAddr, shouldExist=True):
nodeAddrSet = {node.address for node in obj._SyncObj__otherNodes}
return (
nodeAddr in nodeAddrSet) == shouldExist # either nodeAddr is in nodeAddrSet and shouldExist is True, or nodeAddr isn't in the set and shouldExist is False
def test_doChangeClusterUT1():
dumpFiles = [getNextDumpFile()]
removeFiles(dumpFiles)
baseAddr = getNextAddr()
oterAddr = getNextAddr()
o1 = TestObj(baseAddr, ['localhost:1235', oterAddr], dumpFile=dumpFiles[0], dynamicMembershipChange=True)
__checkParnerNodeExists(o1, 'localhost:1238', False)
__checkParnerNodeExists(o1, 'localhost:1239', False)
__checkParnerNodeExists(o1, 'localhost:1235', True)
noop = _bchr(_COMMAND_TYPE.NO_OP)
member = _bchr(_COMMAND_TYPE.MEMBERSHIP)
# Check regular configuration change - adding
o1._SyncObj__onMessageReceived(TCPNode('localhost:12345'), {
'type': 'append_entries',
'term': 1,
'prevLogIdx': 1,
'prevLogTerm': 0,
'commit_index': 2,
'entries': [(noop, 2, 1), (noop, 3, 1), (member + pickle.dumps(['add', 'localhost:1238']), 4, 1)]
})
__checkParnerNodeExists(o1, 'localhost:1238', True)
__checkParnerNodeExists(o1, 'localhost:1239', False)
# Check rollback adding
o1._SyncObj__onMessageReceived(TCPNode('localhost:1236'), {
'type': 'append_entries',
'term': 2,
'prevLogIdx': 2,
'prevLogTerm': 1,
'commit_index': 3,
'entries': [(noop, 3, 2), (member + pickle.dumps(['add', 'localhost:1239']), 4, 2)]
})
__checkParnerNodeExists(o1, 'localhost:1238', False)
__checkParnerNodeExists(o1, 'localhost:1239', True)
__checkParnerNodeExists(o1, oterAddr, True)
# Check regular configuration change - removing
o1._SyncObj__onMessageReceived(TCPNode('localhost:1236'), {
'type': 'append_entries',
'term': 2,
'prevLogIdx': 4,
'prevLogTerm': 2,
'commit_index': 4,
'entries': [(member + pickle.dumps(['rem', 'localhost:1235']), 5, 2)]
})
__checkParnerNodeExists(o1, 'localhost:1238', False)
__checkParnerNodeExists(o1, 'localhost:1239', True)
__checkParnerNodeExists(o1, 'localhost:1235', False)
# Check log compaction
o1._forceLogCompaction()
doTicks([o1], 0.5)
o1._destroy()
o2 = TestObj(oterAddr, [baseAddr, 'localhost:1236'], dumpFile='dump1.bin', dynamicMembershipChange=True)
doTicks([o2], 0.5)
__checkParnerNodeExists(o2, oterAddr, False)
__checkParnerNodeExists(o2, baseAddr, True)
__checkParnerNodeExists(o2, 'localhost:1238', False)
__checkParnerNodeExists(o2, 'localhost:1239', True)
__checkParnerNodeExists(o2, 'localhost:1235', False)
o2._destroy()
removeFiles(dumpFiles)
def test_doChangeClusterUT2():
a = [getNextAddr(), getNextAddr(), getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1], a[2]], dynamicMembershipChange=True)
o2 = TestObj(a[1], [a[2], a[0]], dynamicMembershipChange=True)
o3 = TestObj(a[2], [a[0], a[1]], dynamicMembershipChange=True)
doTicks([o1, o2, o3], 10, stopFunc=lambda: o1._isReady() and o2._isReady() and o3._isReady())
assert o1._isReady() == o2._isReady() == o3._isReady() == True
o3.addValue(50)
o2._addNodeToCluster(a[3])
success = False
for i in xrange(10):
doTicks([o1, o2, o3], 0.5)
res = True
res &= __checkParnerNodeExists(o1, a[3], True)
res &= __checkParnerNodeExists(o2, a[3], True)
res &= __checkParnerNodeExists(o3, a[3], True)
if res:
success = True
break
o2._addNodeToCluster(a[3])
assert success
o4 = TestObj(a[3], [a[0], a[1], a[2]], dynamicMembershipChange=True)
doTicks([o1, o2, o3, o4], 10, stopFunc=lambda: o4._isReady())
o1.addValue(450)
doTicks([o1, o2, o3, o4], 10, stopFunc=lambda: o4.getCounter() == 500)
assert o4.getCounter() == 500
o1._destroy()
o2._destroy()
o3._destroy()
o4._destroy()
def test_journalTest1():
dumpFiles = [getNextDumpFile(), getNextDumpFile()]
journalFiles = [getNextJournalFile(), getNextJournalFile()]
removeFiles(dumpFiles)
removeFiles(journalFiles)
random.seed(42)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], TEST_TYPE.JOURNAL_1, dumpFile=dumpFiles[0], journalFile=journalFiles[0])
o2 = TestObj(a[1], [a[0]], TEST_TYPE.JOURNAL_1, dumpFile=dumpFiles[1], journalFile=journalFiles[1])
objs = [o1, o2]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
o1.addValue(150)
o2.addValue(200)
doTicks(objs, 10, stopFunc=lambda: o1.getCounter() == 350 and o2.getCounter() == 350)
assert o1.getCounter() == 350
assert o2.getCounter() == 350
o1._destroy()
o2._destroy()
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], TEST_TYPE.JOURNAL_1, dumpFile=dumpFiles[0], journalFile=journalFiles[0])
o2 = TestObj(a[1], [a[0]], TEST_TYPE.JOURNAL_1, dumpFile=dumpFiles[1], journalFile=journalFiles[1])
objs = [o1, o2]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady() and \
o1.getCounter() == 350 and o2.getCounter() == 350)
assert o1._isReady()
assert o2._isReady()
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1.getCounter() == 350
assert o2.getCounter() == 350
o1.addValue(100)
o2.addValue(150)
doTicks(objs, 10, stopFunc=lambda: o1.getCounter() == 600 and o2.getCounter() == 600)
assert o1.getCounter() == 600
assert o2.getCounter() == 600
o1._forceLogCompaction()
o2._forceLogCompaction()
doTicks(objs, 0.5)
o1.addValue(150)
o2.addValue(150)
doTicks(objs, 10, stopFunc=lambda: o1.getCounter() == 900 and o2.getCounter() == 900)
assert o1.getCounter() == 900
assert o2.getCounter() == 900
o1._destroy()
o2._destroy()
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], TEST_TYPE.JOURNAL_1, dumpFile=dumpFiles[0], journalFile=journalFiles[0])
o2 = TestObj(a[1], [a[0]], TEST_TYPE.JOURNAL_1, dumpFile=dumpFiles[1], journalFile=journalFiles[1])
objs = [o1, o2]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady() and \
o1.getCounter() == 900 and o2.getCounter() == 900)
assert o1._isReady()
assert o2._isReady()
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1.getCounter() == 900
assert o2.getCounter() == 900
o1._destroy()
o2._destroy()
removeFiles(dumpFiles)
removeFiles(journalFiles)
def test_journalTest2():
journalFiles = [getNextJournalFile()]
removeFiles(journalFiles)
removeFiles(journalFiles)
journal = createJournal(journalFiles[0])
journal.add(b'cmd1', 1, 0)
journal.add(b'cmd2', 2, 0)
journal.add(b'cmd3', 3, 0)
journal._destroy()
journal = createJournal(journalFiles[0])
assert len(journal) == 3
assert journal[0] == (b'cmd1', 1, 0)
assert journal[-1] == (b'cmd3', 3, 0)
journal.deleteEntriesFrom(2)
journal._destroy()
journal = createJournal(journalFiles[0])
assert len(journal) == 2
assert journal[0] == (b'cmd1', 1, 0)
assert journal[-1] == (b'cmd2', 2, 0)
journal.deleteEntriesTo(1)
journal._destroy()
journal = createJournal(journalFiles[0])
assert len(journal) == 1
assert journal[0] == (b'cmd2', 2, 0)
journal._destroy()
removeFiles(journalFiles)
def test_autoTick1():
random.seed(42)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], TEST_TYPE.AUTO_TICK_1)
o2 = TestObj(a[1], [a[0]], TEST_TYPE.AUTO_TICK_1)
assert not o1._isReady()
assert not o2._isReady()
time.sleep(4.5)
assert o1._isReady()
assert o2._isReady()
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1._isReady()
assert o2._isReady()
o1.addValue(150)
o2.addValue(200)
time.sleep(1.5)
assert o1._isReady()
assert o2._isReady()
assert o1.getCounter() == 350
assert o2.getCounter() == 350
assert o2.addValueSync(10) == 360
assert o1.addValueSync(20) == 380
o1._destroy()
o2._destroy()
time.sleep(0.5)
def test_largeCommands():
dumpFiles = [getNextDumpFile(), getNextDumpFile()]
removeFiles(dumpFiles)
random.seed(42)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], TEST_TYPE.LARGE_COMMAND, dumpFile=dumpFiles[0], leaderFallbackTimeout=60.0)
o2 = TestObj(a[1], [a[0]], TEST_TYPE.LARGE_COMMAND, dumpFile=dumpFiles[1], leaderFallbackTimeout=60.0)
objs = [o1, o2]
doTicks(objs, 10, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
# Generate ~20Mb data.
testRandStr = getRandStr()
bigStr = ''
for i in xrange(0, 200):
bigStr += getRandStr()
o1.addKeyValue('big', bigStr)
o1.addKeyValue('test', testRandStr)
# Wait for replication.
doTicks(objs, 60, stopFunc=lambda: o1.getValue('test') == testRandStr and \
o2.getValue('test') == testRandStr and \
o1.getValue('big') == bigStr and \
o2.getValue('big') == bigStr)
assert o1.getValue('test') == testRandStr
assert o2.getValue('big') == bigStr
o1._forceLogCompaction()
o2._forceLogCompaction()
# Wait for disk dump
doTicks(objs, 8.0)
o1._destroy()
o2._destroy()
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], TEST_TYPE.LARGE_COMMAND, dumpFile=dumpFiles[0], leaderFallbackTimeout=60.0)
o2 = TestObj(a[1], [a[0]], TEST_TYPE.LARGE_COMMAND, dumpFile=dumpFiles[1], leaderFallbackTimeout=60.0)
objs = [o1, o2]
# Wait for disk load, election and replication
doTicks(objs, 60, stopFunc=lambda: o1.getValue('test') == testRandStr and \
o2.getValue('test') == testRandStr and \
o1.getValue('big') == bigStr and \
o2.getValue('big') == bigStr and \
o1._isReady() and o2._isReady())
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1.getValue('test') == testRandStr
assert o2.getValue('big') == bigStr
assert o1.getValue('test') == testRandStr
assert o2.getValue('big') == bigStr
o1._destroy()
o2._destroy()
removeFiles(dumpFiles)
def test_readOnlyNodes():
random.seed(12)
a = [getNextAddr(), getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1], a[2]])
o2 = TestObj(a[1], [a[2], a[0]])
o3 = TestObj(a[2], [a[0], a[1]])
objs = [o1, o2, o3]
b1 = TestObj(None, [a[0], a[1], a[2]])
b2 = TestObj(None, [a[0], a[1], a[2]])
roObjs = [b1, b2]
doTicks(objs, 10.0, stopFunc=lambda: o1._isReady() and o2._isReady() and o3._isReady())
assert o1._isReady()
assert o2._isReady()
assert o3._isReady()
o1.addValue(150)
o2.addValue(200)
doTicks(objs, 10.0, stopFunc=lambda: o3.getCounter() == 350)
doTicks(objs + roObjs, 4.0, stopFunc=lambda: b1.getCounter() == 350 and b2.getCounter() == 350)
assert b1.getCounter() == b2.getCounter() == 350
assert o1._getLeader() == b1._getLeader() == o2._getLeader() == b2._getLeader()
assert b1._getLeader().address in a
prevLeader = o1._getLeader()
newObjs = [o for o in objs if o._SyncObj__selfNode != prevLeader]
assert len(newObjs) == 2
doTicks(newObjs + roObjs, 10.0, stopFunc=lambda: newObjs[0]._getLeader() != prevLeader and \
newObjs[0]._getLeader() is not None and \
newObjs[0]._getLeader().address in a and \
newObjs[0]._getLeader() == newObjs[1]._getLeader())
assert newObjs[0]._getLeader() != prevLeader
assert newObjs[0]._getLeader().address in a
assert newObjs[0]._getLeader() == newObjs[1]._getLeader()
newObjs[1].addValue(50)
doTicks(newObjs + roObjs, 10.0, stopFunc=lambda: newObjs[0].getCounter() == 400 and b1.getCounter() == 400)
o1._printStatus()
o2._printStatus()
o3._printStatus()
b1._printStatus()
assert newObjs[0].getCounter() == 400
assert b1.getCounter() == 400
doTicks(objs + roObjs, 10.0,
stopFunc=lambda: sum([int(o.getCounter() == 400) for o in objs + roObjs]) == len(objs + roObjs))
for o in objs + roObjs:
assert o.getCounter() == 400
currRes = {}
def onAdd(res, err):
currRes[0] = err
b1.addValue(50, callback=onAdd)
doTicks(objs + roObjs, 5.0, stopFunc=lambda: o1.getCounter() == 450 and \
b1.getCounter() == 450 and \
b2.getCounter() == 450 and
currRes.get(0) == FAIL_REASON.SUCCESS)
assert o1.getCounter() == 450
assert b1.getCounter() == 450
assert b2.getCounter() == 450
assert currRes.get(0) == FAIL_REASON.SUCCESS
# check that all objects have 2 readonly nodes
assert all(map(lambda o: o.getStatus()['readonly_nodes_count'] == 2, objs))
# disconnect readonly node
b1._destroy()
doTicks(objs, 2.0)
assert all(map(lambda o: o.getStatus()['readonly_nodes_count'] == 1, objs))
o1._destroy()
o2._destroy()
o3._destroy()
b1._destroy()
b2._destroy()
def test_syncobjAdminStatus():
assert HAS_CRYPTO
random.seed(42)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], password='123')
o2 = TestObj(a[1], [a[0]], password='123')
assert not o1._isReady()
assert not o2._isReady()
doTicks([o1, o2], 10.0, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._isReady()
assert o2._isReady()
status1 = o1._getStatus()
status2 = o2._getStatus()
assert 'version' in status1
assert 'log_len' in status2
trueRes = {
o1: '\n'.join('%s: %s' % (k, v) for k, v in sorted(status1.items())),
o2: '\n'.join('%s: %s' % (k, v) for k, v in sorted(status2.items())),
}
currRes = {
}
args = {
o1: ['-conn', a[0], '-pass', '123', '-status'],
o2: ['-conn', a[1], '-pass', '123', '-status'],
}
doSyncObjAdminTicks([o1, o2], args, 10.0, currRes,
stopFunc=lambda: currRes.get(o1) is not None and currRes.get(o2) is not None)
assert len(currRes[o1]) == len(trueRes[o1])
assert len(currRes[o2]) == len(trueRes[o2])
o1._destroy()
o2._destroy()
def test_syncobjAdminAddRemove():
random.seed(42)
a = [getNextAddr(), getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], dynamicMembershipChange=True)
o2 = TestObj(a[1], [a[0]], dynamicMembershipChange=True)
assert not o1._isReady()
assert not o2._isReady()
doTicks([o1, o2], 10.0, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._isReady()
assert o2._isReady()
trueRes = 'SUCCESS ADD ' + a[2]
currRes = {}
args = {
o1: ['-conn', a[0], '-add', a[2]],
}
doSyncObjAdminTicks([o1, o2], args, 10.0, currRes, stopFunc=lambda: currRes.get(o1) is not None)
assert currRes[o1] == trueRes
o3 = TestObj(a[2], [a[1], a[0]], dynamicMembershipChange=True)
doTicks([o1, o2, o3], 10.0, stopFunc=lambda: o1._isReady() and o2._isReady() and o3._isReady())
assert o1._isReady()
assert o2._isReady()
assert o3._isReady()
trueRes = 'SUCCESS REMOVE ' + a[2]
args[o1] = None
args[o2] = ['-conn', a[1], '-remove', a[2]]
doSyncObjAdminTicks([o1, o2, o3], args, 10.0, currRes, stopFunc=lambda: currRes.get(o2) is not None)
assert currRes[o2] == trueRes
o3._destroy()
doTicks([o1, o2], 10.0, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._isReady()
assert o2._isReady()
o1._destroy()
o2._destroy()
def test_syncobjAdminSetVersion():
random.seed(42)
a = [getNextAddr(), getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], dynamicMembershipChange=True)
o2 = TestObj(a[1], [a[0]], dynamicMembershipChange=True)
assert not o1._isReady()
assert not o2._isReady()
doTicks([o1, o2], 10.0, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._isReady()
assert o2._isReady()
assert o1.getCodeVersion() == 0
assert o2.getCodeVersion() == 0
o2.testMethod()
doTicks([o1, o2], 10.0, stopFunc=lambda: o1.getValue('testKey') == 'valueVer1' and \
o2.getValue('testKey') == 'valueVer1')
assert o1.getValue('testKey') == 'valueVer1'
assert o2.getValue('testKey') == 'valueVer1'
trueRes = 'SUCCESS SET_VERSION 1'
currRes = {}
args = {
o1: ['-conn', a[0], '-set_version', '1'],
}
doSyncObjAdminTicks([o1, o2], args, 10.0, currRes, stopFunc=lambda: currRes.get(o1) is not None)
assert currRes[o1] == trueRes
doTicks([o1, o2], 10.0, stopFunc=lambda: o1.getCodeVersion() == 1 and o2.getCodeVersion() == 1)
assert o1.getCodeVersion() == 1
assert o2.getCodeVersion() == 1
o2.testMethod()
doTicks([o1, o2], 10.0, stopFunc=lambda: o1.getValue('testKey') == 'valueVer2' and \
o2.getValue('testKey') == 'valueVer2')
assert o1.getValue('testKey') == 'valueVer2'
assert o2.getValue('testKey') == 'valueVer2'
o1._destroy()
o2._destroy()
@pytest.mark.skipif(os.name == 'nt', reason='temporary disabled for windows')
def test_syncobjWaitBinded():
random.seed(42)
a = [getNextAddr(), getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], testType=TEST_TYPE.WAIT_BIND)
o2 = TestObj(a[1], [a[0]], testType=TEST_TYPE.WAIT_BIND)
o1.waitBinded()
o2.waitBinded()
o3 = TestObj(a[1], [a[0]], testType=TEST_TYPE.WAIT_BIND)
with pytest.raises(SyncObjException):
o3.waitBinded()
o1.destroy()
o2.destroy()
o3.destroy()
@pytest.mark.skipif(os.name == 'nt', reason='temporary disabled for windows')
def test_unpickle():
data = {'foo': 'bar', 'command': b'\xfa', 'entries': [b'\xfb', b'\xfc']}
python2_cpickle = b'\x80\x02}q\x01(U\x03fooq\x02U\x03barq\x03U\x07commandq\x04U\x01\xfaU\x07entriesq\x05]q\x06(U\x01\xfbU\x01\xfceu.'
python2_pickle = b'\x80\x02}q\x00(U\x03fooq\x01U\x03barq\x02U\x07commandq\x03U\x01\xfaq\x04U\x07entriesq\x05]q\x06(U\x01\xfbq\x07U\x01\xfcq\x08eu.'
python3_pickle = b'\x80\x02}q\x00(X\x03\x00\x00\x00fooq\x01X\x03\x00\x00\x00barq\x02X\x07\x00\x00\x00commandq\x03c_codecs\nencode\nq\x04X\x02\x00\x00\x00\xc3\xbaq\x05X\x06\x00\x00\x00latin1q\x06\x86q\x07Rq\x08X\x07\x00\x00\x00entriesq\t]q\n(h\x04X\x02\x00\x00\x00\xc3\xbbq\x0bh\x06\x86q\x0cRq\rh\x04X\x02\x00\x00\x00\xc3\xbcq\x0eh\x06\x86q\x0fRq\x10eu.'
python2_cpickle_data = pickle.loads(python2_cpickle)
assert data == python2_cpickle_data, 'Failed to unpickle data pickled by python2 cPickle'
python2_pickle_data = pickle.loads(python2_pickle)
assert data == python2_pickle_data, 'Failed to unpickle data pickled by python2 pickle'
python3_pickle_data = pickle.loads(python3_pickle)
assert data == python3_pickle_data, 'Failed to unpickle data pickled by python3 pickle'
class TestConsumer1(SyncObjConsumer):
def __init__(self):
super(TestConsumer1, self).__init__()
self.__counter = 0
@replicated
def add(self, value):
self.__counter += value
@replicated
def set(self, value):
self.__counter = value
def get(self):
return self.__counter
class TestConsumer2(SyncObjConsumer):
def __init__(self):
super(TestConsumer2, self).__init__()
self.__values = {}
@replicated
def set(self, key, value):
self.__values[key] = value
def get(self, key):
return self.__values.get(key)
def test_consumers():
random.seed(42)
a = [getNextAddr(), getNextAddr(), getNextAddr()]
c11 = TestConsumer1()
c12 = TestConsumer1()
c13 = TestConsumer2()
c21 = TestConsumer1()
c22 = TestConsumer1()
c23 = TestConsumer2()
c31 = TestConsumer1()
c32 = TestConsumer1()
c33 = TestConsumer2()
o1 = TestObj(a[0], [a[1], a[2]], consumers=[c11, c12, c13])
o2 = TestObj(a[1], [a[0], a[2]], consumers=[c21, c22, c23])
o3 = TestObj(a[2], [a[0], a[1]], consumers=[c31, c32, c33])
objs = [o1, o2]
assert not o1._isReady()
assert not o2._isReady()
doTicks(objs, 10.0, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1._isReady()
assert o2._isReady()
c11.set(42)
c11.add(10)
c12.add(15)
c13.set('testKey', 'testValue')
doTicks(objs, 10.0, stopFunc=lambda: c21.get() == 52 and c22.get() == 15 and c23.get('testKey') == 'testValue')
assert c21.get() == 52
assert c22.get() == 15
assert c23.get('testKey') == 'testValue'
o1.forceLogCompaction()
o2.forceLogCompaction()
doTicks(objs, 0.5)
objs = [o1, o2, o3]
doTicks(objs, 10.0, stopFunc=lambda: c31.get() == 52 and c32.get() == 15 and c33.get('testKey') == 'testValue')
assert c31.get() == 52
assert c32.get() == 15
assert c33.get('testKey') == 'testValue'
o1.destroy()
o2.destroy()
o3.destroy()
def test_batteriesCommon():
d1 = ReplDict()
l1 = ReplLockManager(autoUnlockTime=30.0)
d2 = ReplDict()
l2 = ReplLockManager(autoUnlockTime=30.0)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], TEST_TYPE.AUTO_TICK_1, consumers=[d1, l1])
o2 = TestObj(a[1], [a[0]], TEST_TYPE.AUTO_TICK_1, consumers=[d2, l2])
doAutoTicks(10.0, stopFunc=lambda: o1.isReady() and o2.isReady())
assert o1.isReady() and o2.isReady()
d1.set('testKey', 'testValue', sync=True)
doAutoTicks(3.0, stopFunc=lambda: d2.get('testKey') == 'testValue')
assert d2['testKey'] == 'testValue'
d2.pop('testKey', sync=True)
doAutoTicks(3.0, stopFunc=lambda: d1.get('testKey') == None)
assert d1.get('testKey') == None
assert l1.tryAcquire('test.lock1', sync=True) == True
assert l2.tryAcquire('test.lock1', sync=True) == False
assert l2.isAcquired('test.lock1') == False
l1id = l1._ReplLockManager__selfID
l1._ReplLockManager__lockImpl.prolongate(l1id, 0, _doApply=True)
l1.release('test.lock1', sync=True)
assert l2.tryAcquire('test.lock1', sync=True) == True
assert d1.setdefault('keyA', 'valueA', sync=True) == 'valueA'
assert d2.setdefault('keyA', 'valueB', sync=True) == 'valueA'
d2.pop('keyA', sync=True)
assert d2.setdefault('keyA', 'valueB', sync=True) == 'valueB'
o1.destroy()
o2.destroy()
l1.destroy()
l2.destroy()
def test_ReplCounter():
c = ReplCounter()
c.set(42, _doApply=True)
assert c.get() == 42
c.add(10, _doApply=True)
assert c.get() == 52
c.sub(20, _doApply=True)
assert c.get() == 32
c.inc(_doApply=True)
assert c.get() == 33
def test_ReplList():
l = ReplList()
l.reset([1, 2, 3], _doApply=True)
assert l.rawData() == [1, 2, 3]
l.set(1, 10, _doApply=True)
assert l.rawData() == [1, 10, 3]
l.append(42, _doApply=True)
assert l.rawData() == [1, 10, 3, 42]
l.extend([5, 6], _doApply=True)
assert l.rawData() == [1, 10, 3, 42, 5, 6]
l.insert(2, 66, _doApply=True)
assert l.rawData() == [1, 10, 66, 3, 42, 5, 6]
l.remove(66, _doApply=True)
assert l.rawData() == [1, 10, 3, 42, 5, 6]
l.pop(1, _doApply=True)
assert l.rawData() == [1, 3, 42, 5, 6]
l.sort(reverse=True, _doApply=True)
assert l.rawData() == [42, 6, 5, 3, 1]
assert l.index(6) == 1
assert l.count(42) == 1
assert l.get(2) == 5
assert l[4] == 1
assert len(l) == 5
l.__setitem__(0, 43, _doApply=True)
assert l[0] == 43
def test_ReplDict():
d = ReplDict()
d.reset({
1: 1,
2: 22,
}, _doApply=True)
assert d.rawData() == {
1: 1,
2: 22,
}
d.__setitem__(1, 10, _doApply=True)
assert d.rawData() == {
1: 10,
2: 22,
}
d.set(1, 20, _doApply=True)
assert d.rawData() == {
1: 20,
2: 22,
}
assert d.setdefault(1, 50, _doApply=True) == 20
assert d.setdefault(3, 50, _doApply=True) == 50
d.update({
5: 5,
6: 7,
}, _doApply=True)
assert d.rawData() == {
1: 20,
2: 22,
3: 50,
5: 5,
6: 7,
}
assert d.pop(3, _doApply=True) == 50
assert d.pop(6, _doApply=True) == 7
assert d.pop(6, _doApply=True) == None
assert d.pop(6, 0, _doApply=True) == 0
assert d.rawData() == {
1: 20,
2: 22,
5: 5,
}
assert d[1] == 20
assert d.get(2) == 22
assert d.get(22) == None
assert d.get(22, 10) == 10
assert len(d) == 3
assert 2 in d
assert 22 not in d
assert sorted(d.keys()) == [1, 2, 5]
assert sorted(d.values()) == [5, 20, 22]
assert d.items() == d.rawData().items()
d.clear(_doApply=True)
assert len(d) == 0
def test_ReplSet():
s = ReplSet()
s.reset({1, 4}, _doApply=True)
assert s.rawData() == {1, 4}
s.add(10, _doApply=True)
assert s.rawData() == {1, 4, 10}
s.remove(1, _doApply=True)
s.discard(10, _doApply=True)
assert s.rawData() == {4}
assert s.pop(_doApply=True) == 4
s.add(48, _doApply=True)
s.update({9, 2, 3}, _doApply=True)
assert s.rawData() == {9, 2, 3, 48}
assert len(s) == 4
assert 9 in s
assert 42 not in s
s.clear(_doApply=True)
assert len(s) == 0
assert 9 not in s
def test_ReplQueue():
q = ReplQueue()
q.put(42, _doApply=True)
q.put(33, _doApply=True)
q.put(14, _doApply=True)
assert q.get(_doApply=True) == 42
assert q.qsize() == 2
assert len(q) == 2
assert q.empty() == False
assert q.get(_doApply=True) == 33
assert q.get(-1, _doApply=True) == 14
assert q.get(_doApply=True) == None
assert q.get(-1, _doApply=True) == -1
assert q.empty()
q = ReplQueue(3)
q.put(42, _doApply=True)
q.put(33, _doApply=True)
assert q.full() == False
assert q.put(14, _doApply=True) == True
assert q.full() == True
assert q.put(19, _doApply=True) == False
assert q.get(_doApply=True) == 42
def test_ReplPriorityQueue():
q = ReplPriorityQueue()
q.put(42, _doApply=True)
q.put(14, _doApply=True)
q.put(33, _doApply=True)
assert q.get(_doApply=True) == 14
assert q.qsize() == 2
assert len(q) == 2
assert q.empty() == False
assert q.get(_doApply=True) == 33
assert q.get(-1, _doApply=True) == 42
assert q.get(_doApply=True) == None
assert q.get(-1, _doApply=True) == -1
assert q.empty()
q = ReplPriorityQueue(3)
q.put(42, _doApply=True)
q.put(33, _doApply=True)
assert q.full() == False
assert q.put(14, _doApply=True) == True
assert q.full() == True
assert q.put(19, _doApply=True) == False
assert q.get(_doApply=True) == 14
# https://github.com/travis-ci/travis-ci/issues/8695
@pytest.mark.skipif(os.name == 'nt' or os.environ.get('TRAVIS') == 'true', reason='temporarily disabled on Windows and Travis')
def test_ipv6():
random.seed(42)
a = [getNextAddr(ipv6=True), getNextAddr(ipv6=True)]
o1 = TestObj(a[0], [a[1]])
o2 = TestObj(a[1], [a[0]])
objs = [o1, o2]
assert not o1._isReady()
assert not o2._isReady()
doTicks(objs, 10.0, stopFunc=lambda: o1._isReady() and o2._isReady())
assert o1._isReady()
assert o2._isReady()
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1._isReady()
assert o2._isReady()
o1.addValue(150)
o2.addValue(200)
doTicks(objs, 10.0, stopFunc=lambda: o1.getCounter() == 350 and o2.getCounter() == 350)
assert o1._isReady()
assert o2._isReady()
assert o1.getCounter() == 350
assert o2.getCounter() == 350
o1._destroy()
o2._destroy()
def test_localhost():
random.seed(42)
a = [getNextAddr(isLocalhost=True), getNextAddr(isLocalhost=True)]
o1 = TestObj(a[0], [a[1]])
o2 = TestObj(a[1], [a[0]])
objs = [o1, o2]
assert not o1._isReady()
assert not o2._isReady()
doTicks(objs, 3.0, stopFunc=lambda: o1._isReady() and o2._isReady())
o1.waitBinded()
o2.waitBinded()
o1._printStatus()
assert o1._getLeader().address in a
assert o1._getLeader() == o2._getLeader()
assert o1._isReady()
assert o2._isReady()
o1.addValue(150)
o2.addValue(200)
doTicks(objs, 10.0, stopFunc=lambda: o1.getCounter() == 350 and o2.getCounter() == 350)
assert o1._isReady()
assert o2._isReady()
assert o1.getCounter() == 350
assert o2.getCounter() == 350
o1._destroy()
o2._destroy()
def test_leaderFallback():
random.seed(42)
a = [getNextAddr(), getNextAddr()]
o1 = TestObj(a[0], [a[1]], leaderFallbackTimeout=30.0)
o2 = TestObj(a[1], [a[0]], leaderFallbackTimeout=30.0)
objs = [o1, o2]
assert not o1._isReady()
assert not o2._isReady()
doTicks(objs, 5.0, stopFunc=lambda: o1._isReady() and o2._isReady())
o1._SyncObj__conf.leaderFallbackTimeout = 3.0
o2._SyncObj__conf.leaderFallbackTimeout = 3.0
doTicks([o for o in objs if o._isLeader()], 2.0)
assert o1._isLeader() or o2._isLeader()
doTicks([o for o in objs if o._isLeader()], 2.0)
assert not o1._isLeader() and not o2._isLeader()
class ZeroDeployConsumerAlpha(SyncObjConsumer):
@replicated(ver=1)
def someMethod(self):
pass
@replicated
def methodTwo(self):
pass
class ZeroDeployConsumerBravo(SyncObjConsumer):
@replicated
def alphaMethod(self):
pass
@replicated(ver=3)
def methodTwo(self):
pass
class ZeroDeployTestObj(SyncObj):
def __init__(self, selfAddr, otherAddrs, consumers):
cfg = SyncObjConf(autoTick=False)
super(ZeroDeployTestObj, self).__init__(selfAddr, otherAddrs, cfg, consumers=consumers)
@replicated
def someMethod(self):
pass
@replicated
def otherMethod(self):
pass
@replicated(ver=1)
def thirdMethod(self):
pass
@replicated(ver=2)
def lastMethod(self):
pass
@replicated(ver=3)
def lastMethod(self):
pass
def test_zeroDeployVersions():
random.seed(42)
a = [getNextAddr()]
cAlpha = ZeroDeployConsumerAlpha()
cBravo = ZeroDeployConsumerBravo()
o1 = ZeroDeployTestObj(a[0], [], [cAlpha, cBravo])
assert hasattr(o1, 'otherMethod_v0') == True
assert hasattr(o1, 'lastMethod_v2') == True
assert hasattr(o1, 'lastMethod_v3') == True
assert hasattr(o1, 'lastMethod_v4') == False
assert hasattr(cAlpha, 'methodTwo_v0') == True
assert hasattr(cBravo, 'methodTwo_v3') == True
assert o1._methodToID['lastMethod_v2'] > o1._methodToID['otherMethod_v0']
assert o1._methodToID['lastMethod_v3'] > o1._methodToID['lastMethod_v2']
assert o1._methodToID['lastMethod_v3'] > o1._methodToID['someMethod_v0']
assert o1._methodToID['thirdMethod_v1'] > o1._methodToID['someMethod_v0']
assert o1._methodToID['lastMethod_v2'] > o1._methodToID[(id(cAlpha), 'methodTwo_v0')]
assert o1._methodToID[id(cBravo), 'methodTwo_v3'] > o1._methodToID['lastMethod_v2']
assert 'someMethod' not in o1._methodToID
assert 'thirdMethod' not in o1._methodToID
assert 'lastMethod' not in o1._methodToID
| 28.069189
| 357
| 0.613591
|
3c82cf8e95b54c931d152fb731a2647a6c30df66
| 7,956
|
py
|
Python
|
data_acquisition_framework/tests/test_utilities.py
|
Open-Speech-EkStep/data-acquisition-pipeline
|
b28df36d417010d85d3e5c5f6882eb8fe89ce5ae
|
[
"MIT"
] | 14
|
2021-06-18T17:02:31.000Z
|
2022-01-23T16:04:34.000Z
|
data_acquisition_framework/tests/test_utilities.py
|
susmitabhatt/data-acquisition-pipeline
|
b28df36d417010d85d3e5c5f6882eb8fe89ce5ae
|
[
"MIT"
] | 2
|
2021-06-19T09:46:08.000Z
|
2021-09-10T13:57:57.000Z
|
data_acquisition_framework/tests/test_utilities.py
|
susmitabhatt/data-acquisition-pipeline
|
b28df36d417010d85d3e5c5f6882eb8fe89ce5ae
|
[
"MIT"
] | 6
|
2021-04-12T05:03:52.000Z
|
2021-09-11T13:54:45.000Z
|
import os
import random
import struct
import unittest
import wave
from unittest import TestCase
from unittest.mock import patch
from data_acquisition_framework.utilities import get_mp3_duration_in_seconds, get_license_info, get_file_format, \
get_media_info, is_unwanted_words_present, is_unwanted_extension_present, \
is_extension_present, sanitize, is_unwanted_wiki, write, get_meta_filename, is_url_start_with_cc, \
is_license_terms_in_text
def create_dummy_file(file_name):
noise_output = wave.open(file_name, 'w')
noise_output.setparams((2, 2, 44100, 0, 'NONE', 'not compressed'))
for _ in range(0, 88200):
value = random.randint(-32767, 32767)
packed_value = struct.pack('h', value)
noise_output.writeframes(packed_value)
noise_output.writeframes(packed_value)
noise_output.close()
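# Note (illustrative, not part of the original file): with 2 channels x 2 bytes per
# sample, the loop above writes 88200 frames at 44100 Hz, i.e. a 2.0 second noise
# clip, which is the duration asserted in test_get_mp3_duration_in_seconds below.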
class TestUtilities(TestCase):
def test_get_mp3_duration_in_seconds(self):
wav = 'noise.wav'
create_dummy_file(wav)
result = get_mp3_duration_in_seconds(wav)
self.assertEqual(2.0, result)
os.remove(wav)
def test_get_file_format(self):
test_file_name = 'test.mp4'
actual_format = get_file_format(test_file_name)
self.assertEqual('mp4', actual_format)
def test_sanitize(self):
test_word = ' test word \n '
self.assertEqual('test word', sanitize(test_word))
def test_write(self):
test_file_name = 'test.txt'
test_content = 'sample content'
write(test_file_name, test_content)
with open(test_file_name, 'r') as f:
self.assertEqual(test_content + '\n', f.read())
os.remove(test_file_name)
def test_get_meta_filename(self):
test_file_name = 'test.txt'
expected_meta_filename = 'test.csv'
actual_meta_filename = get_meta_filename(test_file_name)
self.assertEqual(expected_meta_filename, actual_meta_filename)
class TestGetLicenseInfo(TestCase):
def test_get_license_info_if_cc_not_present(self):
test_license_urls = ['http://www.abcd.com', 'http://www.efgh.com']
actual_license_info = get_license_info(test_license_urls)
self.assertEqual(", ".join(test_license_urls), actual_license_info)
def test_get_license_info_if_cc_present(self):
test_license_urls = ['http://www.abcd.creativecommons', 'http://www.efgh.com']
actual_license_info = get_license_info(test_license_urls)
self.assertEqual('Creative Commons', actual_license_info)
class TestGetMediaInfo(TestCase):
@patch('data_acquisition_framework.utilities.editor')
def test_get_media_info_for_mp4(self, mock_editor):
test_file_name = 'test.mp4'
source = "test"
language = "test_language"
source_url = 'test_url'
license_urls = ['test_license_url']
media_url = 'test_media_url'
test_duration = 120.0
mock_editor.VideoFileClip.return_value.duration = test_duration
expected_media_info = {'duration': 2,
'raw_file_name': test_file_name,
'name': None,
'gender': None,
'source_url': media_url,
'license': get_license_info(license_urls),
"source": source,
"language": language,
'source_website': source_url}
expected_result = (expected_media_info, 120)
actual_media_info = get_media_info(test_file_name, source, language, source_url, license_urls, media_url)
self.assertEqual(expected_result, actual_media_info)
def test_get_media_info_for_wav(self):
test_file_name = 'test.wav'
create_dummy_file(test_file_name)
source = "test"
language = "test_language"
source_url = 'test_url'
license_urls = ['test_license_url']
media_url = 'test_media_url'
expected_media_info = {'duration': 0.033,
'raw_file_name': test_file_name,
'name': None,
'gender': None,
'source_url': media_url,
'license': get_license_info(license_urls),
"source": source,
"language": language,
'source_website': source_url}
expected_result = (expected_media_info, 2)
actual_media_info = get_media_info(test_file_name, source, language, source_url, license_urls, media_url)
self.assertEqual(expected_result, actual_media_info)
os.remove(test_file_name)
class TestUrlStartWithCC(TestCase):
def test_is_url_start_with_cc_true(self):
test_url = "https://creativecommons.org/publicdomain/mark/abcd"
self.assertTrue(is_url_start_with_cc(test_url))
def test_is_url_start_with_cc_false(self):
test_url = "http://test_website/abcd/something"
self.assertFalse(is_url_start_with_cc(test_url))
class TestLicenseTermsInText(TestCase):
def test_is_license_terms_in_text_true(self):
test_text = 'sample something license testing'
self.assertTrue(is_license_terms_in_text(test_text))
def test_is_license_terms_in_text_false(self):
test_text = 'sample something testing nothing'
self.assertFalse(is_license_terms_in_text(test_text))
class TestUnwantedWordsPresent(TestCase):
def test_is_unwanted_words_present_if_true(self):
test_words_ignore = ['word_one', 'word_two', 'word_three']
test_url = 'http://test_website/word_one/something'
self.assertTrue(is_unwanted_words_present(test_words_ignore, test_url))
def test_is_unwanted_words_present_if_false(self):
test_words_ignore = ['word_one', 'word_two', 'word_three']
test_url = 'http://test_website/word_four/something'
self.assertFalse(is_unwanted_words_present(test_words_ignore, test_url))
class TestUnwantedExtensionPresent(TestCase):
def test_is_unwanted_extension_present_if_true(self):
test_extensions_ignore = ['ext_one', 'ext_two', 'ext_three']
test_url = 'http://test_website.ext_two'
self.assertTrue(is_unwanted_extension_present(test_extensions_ignore, test_url))
def test_is_unwanted_extension_present_if_false(self):
test_extensions_ignore = ['ext_one', 'ext_two', 'ext_three']
test_url = 'http://test_website.ext_four'
self.assertFalse(is_unwanted_extension_present(test_extensions_ignore, test_url))
class TestExtensionPresent(TestCase):
def test_is_extension_present_if_true(self):
test_extensions_to_include = ['ext_one', 'ext_two', 'ext_three']
test_url = 'http://test_website.ext_three'
self.assertTrue(is_extension_present(test_extensions_to_include, test_url))
def test_is_extension_present_if_false(self):
test_extensions_to_include = ['ext_one', 'ext_two', 'ext_three']
test_url = 'http://test_website.ext_four'
self.assertFalse(is_extension_present(test_extensions_to_include, test_url))
class TestUnwantedWiki(TestCase):
def test_is_unwanted_wiki_if_url_has_related_wiki(self):
test_language_code = 'ab'
test_url = 'http://wikipedia.org'
self.assertFalse(is_unwanted_wiki(test_language_code, test_url))
def test_is_unwanted_wiki_if_url_has_unrelated_wiki(self):
test_language_code = 'ab'
test_url = 'http://be.wikipedia.org'
self.assertTrue(is_unwanted_wiki(test_language_code, test_url))
def test_is_unwanted_wiki_if_url_has_no_wiki(self):
test_language_code = 'ab'
test_url = 'http://test_url.com'
self.assertFalse(is_unwanted_wiki(test_language_code, test_url))
if __name__ == "__main__":
unittest.main()
| 37.17757
| 114
| 0.679236
|
73f56f931710984717c7d7bf0019849798955845
| 1,507
|
py
|
Python
|
scripts/tabspaces.py
|
Paktosan/final-countdown
|
562692acc1b5283a9df4d4c77d5f2b055fcbb009
|
[
"CC0-1.0"
] | null | null | null |
scripts/tabspaces.py
|
Paktosan/final-countdown
|
562692acc1b5283a9df4d4c77d5f2b055fcbb009
|
[
"CC0-1.0"
] | null | null | null |
scripts/tabspaces.py
|
Paktosan/final-countdown
|
562692acc1b5283a9df4d4c77d5f2b055fcbb009
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python3
#
# Copyright (c) 2018 Christoph Kepler <development@kepler.international>
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import re
def only_spaces(file):
    """Return 13 if any line in the file starts with a tab character, else 0."""
    tabs = re.compile("\t")
    for line in file:
        if tabs.match(line):
            return 13
    return 0
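# Illustrative usage (not part of the original script):
#   with open("some_file.py") as source:
#       status = only_spaces(source)  # 13 if any line starts with a tab, else 0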
| 36.756098
| 73
| 0.749834
|
3eaf043c8e44ff76097a64b1374253c1126b178b
| 4,128
|
py
|
Python
|
lg_common/src/lg_common/managed_window.py
|
constantegonzalez/lg_ros_nodes
|
1c7b08c42e90205922602c86805285508d1b7971
|
[
"Apache-2.0"
] | 16
|
2015-10-10T11:55:37.000Z
|
2022-02-24T22:47:48.000Z
|
lg_common/src/lg_common/managed_window.py
|
EndPointCorp/lg_ros_nodes
|
37ef895cead3b7257a5d5593ab886e74ff157b4b
|
[
"Apache-2.0"
] | 292
|
2015-09-29T21:59:53.000Z
|
2022-03-31T15:59:31.000Z
|
lg_common/src/lg_common/managed_window.py
|
constantegonzalez/lg_ros_nodes
|
1c7b08c42e90205922602c86805285508d1b7971
|
[
"Apache-2.0"
] | 5
|
2017-05-03T06:22:43.000Z
|
2021-08-19T16:54:14.000Z
|
import json
import rospy
import subprocess
import threading
import re
from lg_msg_defs.msg import WindowGeometry
class ManagedWindow(object):
LAYER_BELOW = 'below'
LAYER_NORMAL = 'normal'
LAYER_ABOVE = 'above'
def __init__(self, w_name=None, w_class=None, w_instance=None,
geometry=None, visible=True, chrome_kiosk_workaround=False,
layer=LAYER_NORMAL):
self.w_name = w_name
self.w_class = w_class
self.w_instance = w_instance
self.geometry = geometry
self.is_visible = visible
self.layer = layer
self.lock = threading.Lock()
def __str__(self):
return 'name={name}, class={cls}, instance={inst}, {w}x{h} {x},{y}'.format(
name=self.w_name,
cls=self.w_class,
inst=self.w_instance,
w=self.geometry.width if self.geometry is not None else None,
h=self.geometry.height if self.geometry is not None else None,
x=self.geometry.x if self.geometry is not None else None,
y=self.geometry.y if self.geometry is not None else None,
)
@staticmethod
def parse_geometry(geometry):
"""
Parses Xorg window geometry in the form WxH[+-]X[+-]Y
Raises ValueError if the geometry string is invalid.
"""
m = re.match(r'^(\d+)x(\d+)([+-]\d+)([+-]\d+)$', geometry)
if m is None:
raise ValueError(
'Invalid window geometry: {}'.format(geometry))
dims = list(map(int, m.groups()))
return WindowGeometry(width=dims[0], height=dims[1],
x=dims[2], y=dims[3])
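    # Illustrative example (values are examples only): parse_geometry('1920x1080+0-50')
    # is expected to return WindowGeometry(width=1920, height=1080, x=0, y=-50), while
    # a string without offsets such as '1920x1080' raises ValueError.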
@staticmethod
def format_geometry(geometry):
"""
Formats WindowGeometry as a string.
"""
return "{}x{}{:+}{:+}".format(geometry.width, geometry.height,
geometry.x, geometry.y)
@staticmethod
def lookup_viewport_geometry(viewport_key):
"""
Looks up geometry for the given viewport name.
Raises KeyError if the viewport is not configured.
"""
param_name = '/viewport/{}'.format(viewport_key)
if not rospy.has_param(param_name):
raise KeyError(
'Viewport parameter not set: {}'.format(param_name))
viewport_value = rospy.get_param(param_name)
return ManagedWindow.parse_geometry(viewport_value)
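    # Illustrative example (viewport name and geometry are assumptions): with the ROS
    # parameter /viewport/center set to '1920x1080+0+0', lookup_viewport_geometry('center')
    # returns WindowGeometry(width=1920, height=1080, x=0, y=0).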
@staticmethod
def get_viewport_geometry():
"""
Returns WindowGeometry if the private '~viewport' param is set.
Returns None if the private '~viewport' param is not set.
"""
if rospy.has_param('~viewport'):
viewport = rospy.get_param('~viewport')
geometry = ManagedWindow.lookup_viewport_geometry(viewport)
else:
geometry = None
return geometry
def _get_command(self):
msg = {
'op': 'converge',
'data': {}
}
if self.w_name:
msg['data']['wm_name'] = self.w_name
if self.w_instance:
msg['data']['wm_instance'] = self.w_instance
if self.w_class:
msg['data']['wm_class'] = self.w_class
if self.geometry:
msg['data']['rectangle'] = ManagedWindow.format_geometry(self.geometry)
if self.layer:
msg['data']['layer'] = self.layer
msg['data']['hidden'] = not self.is_visible
return ['lg_wm_send', json.dumps(msg, ensure_ascii=False)]
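    # Illustrative _get_command() output (field values are examples only): for a window
    # named "browser" on a 1920x1080+0+0 geometry with the default layer, the returned
    # command looks something like
    #   ['lg_wm_send', '{"op": "converge", "data": {"wm_name": "browser",
    #     "rectangle": "1920x1080+0+0", "layer": "normal", "hidden": false}}']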
def set_visibility(self, visible):
with self.lock:
self.is_visible = visible
def set_geometry(self, geometry):
with self.lock:
self.geometry = geometry
def converge(self):
with self.lock:
cmd = self._get_command()
rospy.logwarn('running: {}'.format(cmd))
try:
subprocess.check_call(cmd, close_fds=True)
except Exception as e:
rospy.logerr('failed to run {} : {}'.format(cmd, str(e)))
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| 30.352941
| 83
| 0.577519
|
a248f5a373557c39e435e6877c23dd2c8fab24ad
| 7,783
|
py
|
Python
|
Anchors/add-anchors.py
|
arialcrime/Robofont-scripts
|
34dd92ec3ef67c7e0f1405767a5c648b1c2051ba
|
[
"MIT"
] | 46
|
2015-01-25T18:12:26.000Z
|
2021-12-13T22:54:29.000Z
|
Anchors/add-anchors.py
|
arialcrime/Robofont-scripts
|
34dd92ec3ef67c7e0f1405767a5c648b1c2051ba
|
[
"MIT"
] | 24
|
2015-02-17T10:43:08.000Z
|
2019-05-02T18:23:39.000Z
|
Anchors/add-anchors.py
|
arialcrime/Robofont-scripts
|
34dd92ec3ef67c7e0f1405767a5c648b1c2051ba
|
[
"MIT"
] | 28
|
2015-01-16T19:35:41.000Z
|
2020-02-25T09:58:02.000Z
|
"""
june 2014 — Loïc Sander
– Adds anchors to any glyph in the baseGlyphList, if present in font.keys() and no specific glyph is selected,
otherwise, adds anchors only to the selected glyphs
– If a glyph already has the named anchor, the script skips this anchor
– Any glyph that received new anchors is colored in a weird blue
– Takes italic angle into consideration when positioning anchors
– baseGlyphList structure:
{
"glyphName": [
["anchorName", "yPos", "xPos"]
]
}
keywords for positions:
	xPos: left, center, right (based on the glyph's bounding box)
yPos: baseline, xHeight(+offset), xHeightCenter, ascender(+offset), capHeight(+offset), belowBaseline, belowGlyph, glyphCenter, glyphBase, glyphTop
to add yours, see xPos and yPos dicts below.
"""
baseGlyphList = {
### UPPERCASE GLYPHS ############################
"A": [
["top", "capHeight", "center"],
["right", "baseline", "right"] ],
"AE": [
["top", "capHeight", "center"] ],
"C": [
["top", "capHeight", "center"],
["bottom", "baseline", "center"] ],
"D": [
["top", "capHeight", "center"] ],
"E": [
["top", "capHeight", "center"],
["right", "baseline", "right"],
["bottom", "baseline", "center"] ],
"G": [
["top", "capHeight", "center"],
["bottom", "baseline", "center"] ],
"H": [
["top", "capHeight", "center"] ],
"I": [
["top", "capHeight", "center"],
["right", "baseline", "center"],
["bottom", "belowBaseline", "center"] ],
"J": [
["top", "capHeight", "center"] ],
"K": [
["top", "capHeight", "center"],
["bottom", "baseline", "center"] ],
"L": [
["top", "capHeight", "left"],
["right", "capHeight", "center"],
["mid", "center", "center"],
["bottom", "baseline", "center"] ],
"N": [
["top", "capHeight", "center"],
["bottom", "baseline", "center"] ],
"O": [
["top", "capHeight", "center"],
["bottom", "baseline", "center"] ],
"R": [
["top", "capHeight", "center"],
["bottom", "baseline", "center"] ],
"S": [
["top", "capHeight", "center"],
["bottom", "baseline", "center"] ],
"T": [
["top", "capHeight", "center"],
["bottom", "baseline", "center"] ],
"U": [
["top", "capHeight", "center"],
["right", "baseline", "center"],
["bottom", "belowBaseline", "center"] ],
"W": [
["top", "capHeight", "center"] ],
"Y": [
["top", "capHeight", "center"] ],
"Z": [
["top", "capHeight", "center"] ],
### LOWERCASE ############################
"a": [
["top", "xHeight", "center"],
["bottom", "baseline", "center"],
["right", "baseline", "right"] ],
"ae": [
["top", "xHeight", "center"] ],
"c": [
["top", "xHeight", "center"],
["bottom", "baseline", "center"] ],
"d": [
["right", "ascender", "right"] ],
"e": [
["top", "xHeight", "center"],
["bottom", "baseline", "center"],
["right", "baseline", "right"] ],
"g": [
["top", "xHeight", "center"],
["bottom", "belowGlyph", "center"] ],
"h": [
["top", "ascender", "left"],
["bottom", "baseline", "center"] ],
"i": [
["right", "baseline", "right"] ],
"dotlessi": [
["top", "xHeight", "center"],
["bottom", "baseline", "center"],
["right", "baseline", "right"] ],
"dotlessj": [
["top", "xHeight", "center"] ],
"k": [
["top", "ascender", "left"],
["bottom", "baseline", "center"] ],
"l": [
["top", "ascender", "center"],
["bottom", "baseline", "center"],
["mid", "xHeightCenter", "center"],
["right", "ascender", "right"] ],
"n": [
["top", "xHeight", "center"],
["bottom", "baseline", "center"] ],
"o": [
["top", "xHeight", "center"],
["bottom", "baseline", "center"] ],
"r": [
["top", "xHeight", "center"],
["bottom", "baseline", "left"] ],
"s": [
["top", "xHeight", "center"],
["bottom", "baseline", "center"] ],
"t": [
["right", "glyphTop", "right"],
["bottom", "baseline", "center"] ],
"u": [
["top", "xHeight", "center"],
["bottom", "baseline", "center"],
["right", "baseline", "right"] ],
"w": [
["top", "xHeight", "center"], ],
"y": [
["top", "xHeight", "center"], ],
"z": [
["top", "xHeight", "center"], ],
### DIACRITICS ############################
"acute": [
["_top", "glyphBase", "center"] ],
"acute.cap": [
["_top", "glyphBase", "center"] ],
"grave": [
["_top", "glyphBase", "center"] ],
"grave.cap": [
["_top", "glyphBase", "center"] ],
"circumflex": [
["_top", "glyphBase", "center"] ],
"circumflex.cap": [
["_top", "glyphBase", "center"] ],
"dieresis": [
["_top", "glyphBase", "center"] ],
"dieresis.cap": [
["_top", "glyphBase", "center"] ],
"ring": [
["_top", "glyphBase", "center"] ],
"ring.cap": [
["_top", "glyphBase", "center"] ],
"tilde": [
["_top", "glyphBase", "center"] ],
"tilde.cap": [
["_top", "glyphBase", "center"] ],
"cedilla": [
["_bottom", "baseline", "center"] ],
"caron": [
["_top", "glyphBase", "center"] ],
"caron.cap": [
["_top", "glyphBase", "center"] ],
"breve": [
["_top", "glyphBase", "center"] ],
"breve.cap": [
["_top", "glyphBase", "center"] ],
"ogonek": [
["_bottom", "belowBaseline", "center"],
["_right", "baseline", "right"] ],
"macron": [
["_top", "glyphBase", "center"] ],
"macron.cap": [
["_top", "glyphBase", "center"] ],
"dotaccent": [
["_top", "glyphBase", "center"],
["_right", "center", "left"] ],
"dotaccent.cap": [
["_top", "glyphBase", "center"],
["_right", "center", "left"] ],
"commaaccent.cap": [
["_top", "glyphTop", "center"] ],
"commaaccent.alt": [
["_top", "glyphBase", "center"],
["_right", "glyphBase", "right"] ],
"hungarumlaut": [
["_top", "glyphBase", "center"] ],
"hungarumlaut.cap": [
["_top", "glyphBase", "center"] ],
}
def addAnchors(font, glyphName, anchors, offset=None):
if font is not None:
selectedGlyphs = font.selection
if (glyphName in font.keys()) and ((len(selectedGlyphs) == 0) or ((len(selectedGlyphs) > 0) and (glyphName in selectedGlyphs))):
glyph = font[glyphName]
anchorList = [ glyph.anchors[anchor].name for anchor in range(len(glyph.anchors))]
from math import radians, tan
italicAngle = radians(font.info.italicAngle)
if offset == None:
offset = round(font.info.unitsPerEm * 0.03)
xPos = {
"center": ((glyph.width - (glyph.angledLeftMargin + glyph.angledRightMargin)) / 2) + glyph.angledLeftMargin,
"right": glyph.width - glyph.angledRightMargin,
"left": glyph.angledLeftMargin
}
yPos = {
"baseline": 0,
"ascender": font.info.ascender + offset,
"capHeight": font.info.capHeight + offset,
"xHeight": font.info.xHeight + (offset*2),
"xHeightCenter": font.info.xHeight / 2,
"belowBaseline": -offset,
"belowGlyph": glyph.box[1] - offset,
"center": (glyph.box[3] - glyph.box[1]) / 2,
"glyphBase": glyph.box[1],
"glyphTop": glyph.box[3],
}
for anchor in anchors:
italicOffset = -round(yPos[anchor[1]]) * tan(italicAngle)
anchorName = anchor[0]
anchorPos = (round(xPos[anchor[2]]) + italicOffset, round(yPos[anchor[1]]))
if anchorName not in anchorList:
glyph.prepareUndo("add-anchors")
glyph.appendAnchor(anchorName, anchorPos)
setattr(glyph, "mark", (0.5, 0.8, 0.8, 0.5))
glyph.performUndo()
else:
print "No current font."
font = CurrentFont()
# value defining the offset of anchors from their reference point (as in baseGlyphList supra)
# defaults to 3% of UPM value (~30 units for 1000 UPM, ~60 units for 2048)
offset = None
for glyphName, anchors in baseGlyphList.items():
addAnchors(font, glyphName, anchors, offset)
| 23.372372
| 149
| 0.536939
|
dc802eb52a00f7ab625b43775f8322bc59fbf7bd
| 20,139
|
py
|
Python
|
SciDataTool/Classes/DataPattern.py
|
enjoyneer87/SciDataTool
|
37ddc4071f1edb1270ee03e43595c3f943fb9bd8
|
[
"Apache-2.0"
] | null | null | null |
SciDataTool/Classes/DataPattern.py
|
enjoyneer87/SciDataTool
|
37ddc4071f1edb1270ee03e43595c3f943fb9bd8
|
[
"Apache-2.0"
] | null | null | null |
SciDataTool/Classes/DataPattern.py
|
enjoyneer87/SciDataTool
|
37ddc4071f1edb1270ee03e43595c3f943fb9bd8
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# File generated according to Generator/ClassesRef/DataPattern.csv
# WARNING! All changes made in this file will be lost!
"""Method code available at https://github.com/Eomys/SciDataTool/tree/master/SciDataTool/Methods//DataPattern
"""
from os import linesep
from sys import getsizeof
from ._check import set_array, check_var, raise_
from ..Functions.save import save
from ..Functions.copy import copy
from ..Functions.load import load_init_dict
from ..Functions.Load.import_class import import_class
from .Data import Data
# Import all class method
# Try/catch to remove unnecessary dependencies in unused method
try:
from ..Methods.DataPattern.get_length import get_length
except ImportError as error:
get_length = error
try:
from ..Methods.DataPattern.get_values import get_values
except ImportError as error:
get_values = error
try:
from ..Methods.DataPattern.has_period import has_period
except ImportError as error:
has_period = error
from numpy import array, array_equal
from numpy import isnan
from ._check import InitUnKnowClassError
class DataPattern(Data):
"""Class for axes defined as vectors"""
VERSION = 1
# Check ImportError to remove unnecessary dependencies in unused method
# cf Methods.DataPattern.get_length
if isinstance(get_length, ImportError):
get_length = property(
fget=lambda x: raise_(
ImportError(
"Can't use DataPattern method get_length: " + str(get_length)
)
)
)
else:
get_length = get_length
# cf Methods.DataPattern.get_values
if isinstance(get_values, ImportError):
get_values = property(
fget=lambda x: raise_(
ImportError(
"Can't use DataPattern method get_values: " + str(get_values)
)
)
)
else:
get_values = get_values
# cf Methods.DataPattern.has_period
if isinstance(has_period, ImportError):
has_period = property(
fget=lambda x: raise_(
ImportError(
"Can't use DataPattern method has_period: " + str(has_period)
)
)
)
else:
has_period = has_period
# save and copy methods are available in all object
save = save
copy = copy
def __init__(
self,
rebuild_indices=None,
unique_indices=None,
is_step=True,
values=None,
is_components=False,
symmetries=-1,
values_whole=None,
is_overlay=False,
symbol="",
name="",
unit="",
normalizations=-1,
init_dict=None,
init_str=None,
):
"""Constructor of the class. Can be use in three ways :
- __init__ (arg1 = 1, arg3 = 5) every parameters have name and default values
for SciDataTool type, -1 will call the default constructor
- __init__ (init_dict = d) d must be a dictionary with property names as keys
- __init__ (init_str = s) s must be a string
s is the file path to load
ndarray or list can be given for Vector and Matrix
object or dict can be given for SciDataTool Object"""
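        # Illustrative construction patterns (names and values are placeholders):
        #   DataPattern(name="time", unit="s", values=array([0.0, 0.1]))
        #   DataPattern(init_dict={"name": "time", "unit": "s"})
        #   DataPattern(init_str="path/to/saved_axis.json")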
if init_str is not None: # Load from a file
init_dict = load_init_dict(init_str)[1]
if init_dict is not None: # Initialisation by dict
assert type(init_dict) is dict
# Overwrite default value with init_dict content
if "rebuild_indices" in list(init_dict.keys()):
rebuild_indices = init_dict["rebuild_indices"]
if "unique_indices" in list(init_dict.keys()):
unique_indices = init_dict["unique_indices"]
if "is_step" in list(init_dict.keys()):
is_step = init_dict["is_step"]
if "values" in list(init_dict.keys()):
values = init_dict["values"]
if "is_components" in list(init_dict.keys()):
is_components = init_dict["is_components"]
if "symmetries" in list(init_dict.keys()):
symmetries = init_dict["symmetries"]
if "values_whole" in list(init_dict.keys()):
values_whole = init_dict["values_whole"]
if "is_overlay" in list(init_dict.keys()):
is_overlay = init_dict["is_overlay"]
if "symbol" in list(init_dict.keys()):
symbol = init_dict["symbol"]
if "name" in list(init_dict.keys()):
name = init_dict["name"]
if "unit" in list(init_dict.keys()):
unit = init_dict["unit"]
if "normalizations" in list(init_dict.keys()):
normalizations = init_dict["normalizations"]
        # Set the properties (value check and conversion are done in setter)
self.rebuild_indices = rebuild_indices
self.unique_indices = unique_indices
self.is_step = is_step
self.values = values
self.is_components = is_components
self.symmetries = symmetries
self.values_whole = values_whole
self.is_overlay = is_overlay
# Call Data init
super(DataPattern, self).__init__(
symbol=symbol, name=name, unit=unit, normalizations=normalizations
)
# The class is frozen (in Data init), for now it's impossible to
# add new properties
def __str__(self):
"""Convert this object in a readeable string (for print)"""
DataPattern_str = ""
# Get the properties inherited from Data
DataPattern_str += super(DataPattern, self).__str__()
DataPattern_str += (
"rebuild_indices = "
+ linesep
+ str(self.rebuild_indices).replace(linesep, linesep + "\t")
+ linesep
)
DataPattern_str += (
"unique_indices = "
+ linesep
+ str(self.unique_indices).replace(linesep, linesep + "\t")
+ linesep
)
DataPattern_str += "is_step = " + str(self.is_step) + linesep
DataPattern_str += (
"values = "
+ linesep
+ str(self.values).replace(linesep, linesep + "\t")
+ linesep
+ linesep
)
DataPattern_str += "is_components = " + str(self.is_components) + linesep
DataPattern_str += "symmetries = " + str(self.symmetries) + linesep
DataPattern_str += (
"values_whole = "
+ linesep
+ str(self.values_whole).replace(linesep, linesep + "\t")
+ linesep
+ linesep
)
DataPattern_str += "is_overlay = " + str(self.is_overlay) + linesep
return DataPattern_str
def __eq__(self, other):
"""Compare two objects (skip parent)"""
if type(other) != type(self):
return False
# Check the properties inherited from Data
if not super(DataPattern, self).__eq__(other):
return False
if other.rebuild_indices != self.rebuild_indices:
return False
if other.unique_indices != self.unique_indices:
return False
if other.is_step != self.is_step:
return False
if not array_equal(other.values, self.values):
return False
if other.is_components != self.is_components:
return False
if other.symmetries != self.symmetries:
return False
if not array_equal(other.values_whole, self.values_whole):
return False
if other.is_overlay != self.is_overlay:
return False
return True
def compare(self, other, name="self", ignore_list=None, is_add_value=False):
"""Compare two objects and return list of differences"""
if ignore_list is None:
ignore_list = list()
if type(other) != type(self):
return ["type(" + name + ")"]
diff_list = list()
# Check the properties inherited from Data
diff_list.extend(
super(DataPattern, self).compare(
other, name=name, ignore_list=ignore_list, is_add_value=is_add_value
)
)
if other._rebuild_indices != self._rebuild_indices:
if is_add_value:
val_str = (
" (self="
+ str(self._rebuild_indices)
+ ", other="
+ str(other._rebuild_indices)
+ ")"
)
diff_list.append(name + ".rebuild_indices" + val_str)
else:
diff_list.append(name + ".rebuild_indices")
if other._unique_indices != self._unique_indices:
if is_add_value:
val_str = (
" (self="
+ str(self._unique_indices)
+ ", other="
+ str(other._unique_indices)
+ ")"
)
diff_list.append(name + ".unique_indices" + val_str)
else:
diff_list.append(name + ".unique_indices")
if other._is_step != self._is_step:
if is_add_value:
val_str = (
" (self="
+ str(self._is_step)
+ ", other="
+ str(other._is_step)
+ ")"
)
diff_list.append(name + ".is_step" + val_str)
else:
diff_list.append(name + ".is_step")
if not array_equal(other.values, self.values):
diff_list.append(name + ".values")
if other._is_components != self._is_components:
if is_add_value:
val_str = (
" (self="
+ str(self._is_components)
+ ", other="
+ str(other._is_components)
+ ")"
)
diff_list.append(name + ".is_components" + val_str)
else:
diff_list.append(name + ".is_components")
if other._symmetries != self._symmetries:
if is_add_value:
val_str = (
" (self="
+ str(self._symmetries)
+ ", other="
+ str(other._symmetries)
+ ")"
)
diff_list.append(name + ".symmetries" + val_str)
else:
diff_list.append(name + ".symmetries")
if not array_equal(other.values_whole, self.values_whole):
diff_list.append(name + ".values_whole")
if other._is_overlay != self._is_overlay:
if is_add_value:
val_str = (
" (self="
+ str(self._is_overlay)
+ ", other="
+ str(other._is_overlay)
+ ")"
)
diff_list.append(name + ".is_overlay" + val_str)
else:
diff_list.append(name + ".is_overlay")
# Filter ignore differences
diff_list = list(filter(lambda x: x not in ignore_list, diff_list))
return diff_list
def __sizeof__(self):
"""Return the size in memory of the object (including all subobject)"""
S = 0 # Full size of the object
# Get size of the properties inherited from Data
S += super(DataPattern, self).__sizeof__()
if self.rebuild_indices is not None:
for value in self.rebuild_indices:
S += getsizeof(value)
if self.unique_indices is not None:
for value in self.unique_indices:
S += getsizeof(value)
S += getsizeof(self.is_step)
S += getsizeof(self.values)
S += getsizeof(self.is_components)
if self.symmetries is not None:
for key, value in self.symmetries.items():
S += getsizeof(value) + getsizeof(key)
S += getsizeof(self.values_whole)
S += getsizeof(self.is_overlay)
return S
def as_dict(self, type_handle_ndarray=0, keep_function=False, **kwargs):
"""
        Convert this object into a json serializable dict (can be used in __init__).
type_handle_ndarray: int
How to handle ndarray (0: tolist, 1: copy, 2: nothing)
keep_function : bool
True to keep the function object, else return str
Optional keyword input parameter is for internal use only
and may prevent json serializability.
"""
# Get the properties inherited from Data
DataPattern_dict = super(DataPattern, self).as_dict(
type_handle_ndarray=type_handle_ndarray,
keep_function=keep_function,
**kwargs
)
DataPattern_dict["rebuild_indices"] = (
self.rebuild_indices.copy() if self.rebuild_indices is not None else None
)
DataPattern_dict["unique_indices"] = (
self.unique_indices.copy() if self.unique_indices is not None else None
)
DataPattern_dict["is_step"] = self.is_step
if self.values is None:
DataPattern_dict["values"] = None
else:
if type_handle_ndarray == 0:
DataPattern_dict["values"] = self.values.tolist()
elif type_handle_ndarray == 1:
DataPattern_dict["values"] = self.values.copy()
elif type_handle_ndarray == 2:
DataPattern_dict["values"] = self.values
else:
raise Exception(
"Unknown type_handle_ndarray: " + str(type_handle_ndarray)
)
DataPattern_dict["is_components"] = self.is_components
DataPattern_dict["symmetries"] = (
self.symmetries.copy() if self.symmetries is not None else None
)
if self.values_whole is None:
DataPattern_dict["values_whole"] = None
else:
if type_handle_ndarray == 0:
DataPattern_dict["values_whole"] = self.values_whole.tolist()
elif type_handle_ndarray == 1:
DataPattern_dict["values_whole"] = self.values_whole.copy()
elif type_handle_ndarray == 2:
DataPattern_dict["values_whole"] = self.values_whole
else:
raise Exception(
"Unknown type_handle_ndarray: " + str(type_handle_ndarray)
)
DataPattern_dict["is_overlay"] = self.is_overlay
# The class name is added to the dict for deserialisation purpose
# Overwrite the mother class name
DataPattern_dict["__class__"] = "DataPattern"
return DataPattern_dict
def _set_None(self):
"""Set all the properties to None (except SciDataTool object)"""
self.rebuild_indices = None
self.unique_indices = None
self.is_step = None
self.values = None
self.is_components = None
self.symmetries = None
self.values_whole = None
self.is_overlay = None
# Set to None the properties inherited from Data
super(DataPattern, self)._set_None()
def _get_rebuild_indices(self):
"""getter of rebuild_indices"""
return self._rebuild_indices
def _set_rebuild_indices(self, value):
"""setter of rebuild_indices"""
if type(value) is int and value == -1:
value = list()
check_var("rebuild_indices", value, "list")
self._rebuild_indices = value
rebuild_indices = property(
fget=_get_rebuild_indices,
fset=_set_rebuild_indices,
doc=u"""Indices to rebuild complete axis
:Type: list
""",
)
def _get_unique_indices(self):
"""getter of unique_indices"""
return self._unique_indices
def _set_unique_indices(self, value):
"""setter of unique_indices"""
if type(value) is int and value == -1:
value = list()
check_var("unique_indices", value, "list")
self._unique_indices = value
unique_indices = property(
fget=_get_unique_indices,
fset=_set_unique_indices,
doc=u"""Indices which were taken from complete axis
:Type: list
""",
)
def _get_is_step(self):
"""getter of is_step"""
return self._is_step
def _set_is_step(self, value):
"""setter of is_step"""
check_var("is_step", value, "bool")
self._is_step = value
is_step = property(
fget=_get_is_step,
fset=_set_is_step,
doc=u"""True if the axis is defined by step
:Type: bool
""",
)
def _get_values(self):
"""getter of values"""
return self._values
def _set_values(self, value):
"""setter of values"""
if type(value) is int and value == -1:
value = array([])
elif type(value) is list:
try:
value = array(value)
except:
pass
check_var("values", value, "ndarray")
self._values = value
values = property(
fget=_get_values,
fset=_set_values,
doc=u"""List or ndarray of the axis values
:Type: ndarray
""",
)
def _get_is_components(self):
"""getter of is_components"""
return self._is_components
def _set_is_components(self, value):
"""setter of is_components"""
check_var("is_components", value, "bool")
self._is_components = value
is_components = property(
fget=_get_is_components,
fset=_set_is_components,
doc=u"""True if the axis values are strings
:Type: bool
""",
)
def _get_symmetries(self):
"""getter of symmetries"""
return self._symmetries
def _set_symmetries(self, value):
"""setter of symmetries"""
if type(value) is int and value == -1:
value = dict()
check_var("symmetries", value, "dict")
self._symmetries = value
symmetries = property(
fget=_get_symmetries,
fset=_set_symmetries,
doc=u"""Dictionary of the symmetries along each axis, used to reduce storage
:Type: dict
""",
)
def _get_values_whole(self):
"""getter of values_whole"""
return self._values_whole
def _set_values_whole(self, value):
"""setter of values_whole"""
if type(value) is int and value == -1:
value = array([])
elif type(value) is list:
try:
value = array(value)
except:
pass
check_var("values_whole", value, "ndarray")
self._values_whole = value
values_whole = property(
fget=_get_values_whole,
fset=_set_values_whole,
doc=u"""Complete axis
:Type: ndarray
""",
)
def _get_is_overlay(self):
"""getter of is_overlay"""
return self._is_overlay
def _set_is_overlay(self, value):
"""setter of is_overlay"""
check_var("is_overlay", value, "bool")
self._is_overlay = value
is_overlay = property(
fget=_get_is_overlay,
fset=_set_is_overlay,
doc=u"""True if axis must be used to overlay curves in plots
:Type: bool
""",
)
| 35.085366
| 110
| 0.548836
|
7a2f658c44c571b27ed348bc2b0f279e426436e4
| 2,809
|
py
|
Python
|
carl-har-pls/data/generate_data.py
|
ppiont/carl-har-pls
|
6d744ea8ca329307b045ec7a532bb8c55b15acda
|
[
"Unlicense"
] | null | null | null |
carl-har-pls/data/generate_data.py
|
ppiont/carl-har-pls
|
6d744ea8ca329307b045ec7a532bb8c55b15acda
|
[
"Unlicense"
] | null | null | null |
carl-har-pls/data/generate_data.py
|
ppiont/carl-har-pls
|
6d744ea8ca329307b045ec7a532bb8c55b15acda
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 13 21:59:41 2021.
@author: peterp
"""
# %%%%%%%%%%%%%%%%%%%%% Anonymous functions %%%%%%%%%%%
# mncn = @(x) (x-mean(x)); % column-wise mean center
# auto = @(x) ((x-mean(x))./std(x)); % column-wise mean center and scale to unit variance
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
import numpy as np
def auto(x):
"""Compute column-wise mean center and scale to unit variance."""
return (x - np.mean(x, axis=0)) / np.std(x, axis=0)
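# Illustrative sanity check (not part of the original script): each column of
# auto(x) has zero mean and unit standard deviation, e.g.
#   x = np.random.rand(100, 3)
#   np.allclose(auto(x).mean(axis=0), 0.0)  # -> True
#   np.allclose(auto(x).std(axis=0), 1.0)   # -> True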
# Constants
I = 100 # number of calibration samples
Itest = 500 # number of test samples
It = 50 # number of iterations
J = 50  # number of variables in X, explanatory variables
K = 3 # rank of system
r = np.array([[0.2, 0.95], [0.3, 0.9]]) # correlation coefficient
# Miscellaneous setup
T1 = auto(np.random.rand(I, K)) # left factor matrix, calibration set 1
# NumPy version of the MATLAB line below: project columns 2..K of T1 onto the
# orthogonal complement of its first column.
# V_orth = (eye(I) - T1(:,1) * inv(T1(:,1)' * T1(:,1)) * T1(:,1)')*T1(:,2:K);
t1 = T1[:, [0]]  # first column kept as an (I, 1) column vector
V_orth = (np.eye(I) - t1 @ np.linalg.inv(t1.T @ t1) @ t1.T) @ T1[:, 1:K]
# ----------------------------------------------------------------------------
# T2 = nan(size(T1));
# T2(:,1) = T1(:,1);
# for k = 2:K
# T1(:,k) = T1(:,1)*r(k-1,1)+Vorth(:,k-1)*(1-r(k-1,1));
# T2(:,k) = T2(:,1)*r(k-1,2)+Vorth(:,k-1)*(1-r(k-1,2));
# end
# clear k
# T1(:,K) = rand(I,1);
# T2(:,K) = rand(I,1);
# T1 = auto(T1);
# T2 = auto(T2);
# Ttest = auto(rand(Itest,K)); % left factor matrix, test set
# % P = rand(J,K); % right factor matrix
# P = nan(J,K); % Signals
# sigma = 5;
# mu = [15;20;30];
# for k = 1:K
# % S(:,i) = normpdf(1:J,mu(1),sigma)+i*normpdf(1:J,mu(2),sigma);
# P(:,k) = normpdf(1:J,mu(k),sigma);
# P(:,k) = P(:,k)/sqrt(P(:,k)'*P(:,k));
# end
# clear k mu sigma
# X1 = T1*P'; % explanatory variables, calibration set 1
# X2 = T2*P'; % explanatory variables, calibration set 2
# Xtest = Ttest*P'; % explanatory variables, test set
# y1 = T1(:,1); % response, calibration set 1
# y2 = T2(:,1); % response, calibration set 2
# ytest = Ttest(:,1); % response, test set
# s = P(:,1); % analyte signal at unit concentration
# clear T1 T2 Ttest P
# % %% Calculate true reg. vec and loadings, NIPALS
# [B,P1true,~,~,~,~] = nipals_pls1(X1,y1,K);
# btrue = B(:,K);
# [~,P2true,~,~,~,~] = nipals_pls1(X2,y2,K);
# Proj = btrue*inv(btrue'*btrue)*btrue'; % projection matrix for Net Analyte Signal, NAS
# clear B
# % %% Add noise to X and y
# sigX = 0.6E-1;
# sigy = 5E-1;
# Ex_test = mncn(randn(Itest,J)*sigX); % Error for test set
# ey_test = mncn(randn(Itest,1)*sigy); % Error for test set
# Xtest = Xtest+Ex_test;
# ytest = ytest+ey_test;
| 30.868132
| 89
| 0.527234
|
8c74dbf61e4eca36a37939edb4aa157553ee4a0e
| 1,212
|
py
|
Python
|
ooobuild/lo/drawing/shapes.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/lo/drawing/shapes.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/lo/drawing/shapes.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Service Class
# this is an auto-generated file, generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.drawing
from .x_shapes import XShapes as XShapes_9a800ab0
class Shapes(XShapes_9a800ab0):
"""
Service Class
This service is for a generic collection of shapes.
See Also:
`API Shapes <https://api.libreoffice.org/docs/idl/ref/servicecom_1_1sun_1_1star_1_1drawing_1_1Shapes.html>`_
"""
__ooo_ns__: str = 'com.sun.star.drawing'
__ooo_full_ns__: str = 'com.sun.star.drawing.Shapes'
__ooo_type_name__: str = 'service'
__all__ = ['Shapes']
| 30.3
| 116
| 0.735974
|
1dd4c0f2074a330079aeb004144d12770c5d304d
| 1,553
|
py
|
Python
|
src/commcare_cloud/fab/const.py
|
AliRizvi1/commcare-cloud
|
312f6c2ea4e97bdda1ae49aec6d114edf0dedb43
|
[
"BSD-3-Clause"
] | null | null | null |
src/commcare_cloud/fab/const.py
|
AliRizvi1/commcare-cloud
|
312f6c2ea4e97bdda1ae49aec6d114edf0dedb43
|
[
"BSD-3-Clause"
] | null | null | null |
src/commcare_cloud/fab/const.py
|
AliRizvi1/commcare-cloud
|
312f6c2ea4e97bdda1ae49aec6d114edf0dedb43
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
import os
PROJECT_ROOT = os.path.dirname(__file__)
REPO_BASE = os.path.realpath(os.path.join(PROJECT_ROOT, '..', '..'))
ROLES_ALL = ['all']
ROLES_ALL_SRC = [
'django_monolith',
'django_app',
'django_celery',
'django_pillowtop',
'formplayer',
'staticfiles',
'airflow',
'django_manage'
]
ROLES_ALL_SERVICES = [
'django_monolith',
'django_app',
'django_celery',
'django_pillowtop',
'formplayer',
'staticfiles',
'airflow'
]
ROLES_CELERY = ['django_monolith', 'django_celery']
ROLES_PILLOWTOP = ['django_monolith', 'django_pillowtop']
ROLES_DJANGO = ['django_monolith', 'django_app']
ROLES_FORMPLAYER = ['django_monolith', 'formplayer']
ROLES_STATIC = ['django_monolith', 'staticfiles']
ROLES_POSTGRESQL = ['pg', 'pgstandby', 'django_monolith']
ROLES_ELASTICSEARCH = ['elasticsearch', 'django_monolith']
ROLES_DEPLOY = ['deploy', 'django_monolith']
ROLES_MANAGE = ['django_manage']
ROLES_CONTROL = ['control']
ROLES_AIRFLOW = ['airflow']
RELEASE_RECORD = 'RELEASES.txt'
KEEP_UNTIL_PREFIX = 'KEEP_UNTIL__'
DATE_FMT = '%Y-%m-%d_%H.%M'
RSYNC_EXCLUDE = (
'.DS_Store',
'.git',
'*.pyc',
'*.example',
'*.db',
)
CACHED_DEPLOY_ENV_FILENAME = 'cached_deploy_env.pickle'
CACHED_DEPLOY_CHECKPOINT_FILENAME = 'cached_deploy_checkpoint.pickle'
FORMPLAYER_BUILD_DIR = 'formplayer_build'
OFFLINE_STAGING_DIR = 'offline-staging'
BOWER_ZIP_NAME = 'bower.tar.gz'
NPM_ZIP_NAME = 'npm.tar.gz'
WHEELS_ZIP_NAME = 'wheels.tar.gz'
YARN_LOCK = 'yarn.lock'
| 24.650794
| 69
| 0.709594
|
65f7cae7b67dc1f34e203038399da7817afb25f1
| 146
|
py
|
Python
|
predicode/hierarchical/test/__init__.py
|
sflippl/predicode
|
f3d0b43a2c05cd6dbdf8656f6759127483f79a58
|
[
"MIT"
] | 2
|
2019-09-24T14:43:17.000Z
|
2021-02-07T08:34:54.000Z
|
predicode/hierarchical/test/.ipynb_checkpoints/__init__-checkpoint.py
|
sflippl/predicode
|
f3d0b43a2c05cd6dbdf8656f6759127483f79a58
|
[
"MIT"
] | 8
|
2019-09-09T16:01:10.000Z
|
2022-02-10T00:20:45.000Z
|
predicode/hierarchical/test/.ipynb_checkpoints/__init__-checkpoint.py
|
sflippl/predicode
|
f3d0b43a2c05cd6dbdf8656f6759127483f79a58
|
[
"MIT"
] | 1
|
2019-09-24T15:00:39.000Z
|
2019-09-24T15:00:39.000Z
|
"""Tests predicode.hierarchical."""
from predicode.hierarchical.test.initializer import *
from predicode.hierarchical.test.hierarchical import *
| 29.2
| 54
| 0.815068
|
b68aec72b6b9c6b10284c2928a52c1afc712de5c
| 5,687
|
py
|
Python
|
WINDOWS/Lambda/FN03_AS2_Windows_Automation_Run_Image_Assistant/lambda_function.py
|
aws-samples/appstream-serverless-image-creation-automation
|
2d58cca89b92c623c7fe22d5956f0e81435d9b8a
|
[
"MIT-0"
] | null | null | null |
WINDOWS/Lambda/FN03_AS2_Windows_Automation_Run_Image_Assistant/lambda_function.py
|
aws-samples/appstream-serverless-image-creation-automation
|
2d58cca89b92c623c7fe22d5956f0e81435d9b8a
|
[
"MIT-0"
] | null | null | null |
WINDOWS/Lambda/FN03_AS2_Windows_Automation_Run_Image_Assistant/lambda_function.py
|
aws-samples/appstream-serverless-image-creation-automation
|
2d58cca89b92c623c7fe22d5956f0e81435d9b8a
|
[
"MIT-0"
] | null | null | null |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import logging
import boto3
import json
import datetime
import winrm
import base64
import sys
from datetime import datetime
logger = logging.getLogger()
logger.setLevel(logging.INFO)
secretsmgr = boto3.client('secretsmanager')
def lambda_handler(event, context):
logger.info("Beginning execution of AS2_Automation_Windows_Run_Image_Assistant function.")
# Retrieve image builder IP address from event data
logger.info("Querying for Image Builder instance IP address.")
try :
host = event['BuilderStatus']['ImageBuilders'][0]['NetworkAccessConfiguration']['EniPrivateIpAddress']
logger.info("IP address found: %s.", host)
except Exception as e :
logger.error(e)
logger.info("Unable to find IP address for Image Builder instance.")
# Read image builder administrator username and password from Secrets Manager
logger.info("Retreiving instance username and password from Secrets Manager.")
secret_name = "as2/builder/pw"
secret_response = secretsmgr.get_secret_value(SecretId=secret_name)
if 'SecretString' in secret_response:
secret = json.loads(secret_response['SecretString'])
else:
secret = base64.b64decode(secret_response['SecretBinary'])
user = secret['as2_builder_admin_user']
password = secret['as2_builder_admin_pw']
logger.info("Remote access credentials obtained: %s", user)
try :
# Retrieve image name from event data
try :
image_name = event['AutomationParameters']['ImageOutputPrefix']
logger.info("Image name prefix found in event data: %s.", image_name)
except Exception as e :
image_name = 'AS2_Automation_Image'
logger.info("No image name prefix found in event data, defaulting to AS2_Automation_Image.")
# Retrieve UseLatestAgent from event data, default to True
try :
UseLatestAgent = event['AutomationParameters']['UseLatestAgent']
logger.info("UseLatestAgent found in event data, setting to %s.", UseLatestAgent)
except Exception as e2 :
UseLatestAgent = True
logger.info("UseLatestAgent not found in event data, defaulting to True")
if UseLatestAgent :
latest_agent = ' --use-latest-agent-version'
else :
latest_agent = ' --no-use-latest-agent-version'
# Retrieve image tags from event data
try :
ImageTags = event['AutomationParameters']['ImageTags']
if ImageTags :
logger.info("Image Tags found in event data: %s.", ImageTags)
else :
logger.info("No Image Tags found in event data, generated image will not be tagged.")
except Exception as e3 :
ImageTags = False
logger.info("No Image Tags found in event data, generated image will not be tagged.")
if ImageTags :
tag_image = ' --tags ' + ImageTags
else :
tag_image = ''
# Base image assistant command
prefix = 'C:/PROGRA~1/Amazon/Photon/ConsoleImageBuilder/image-assistant.exe create-image --name '
# Generate full image name using image name prefix and timestamp
now = datetime.now()
dt_string = now.strftime("-%Y-%m-%d-%H-%M-%S")
full_image_name = image_name + dt_string
# Final image assistant command
command = prefix + full_image_name + latest_agent + tag_image
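        # Illustrative final command (image name, timestamp and tags are examples only):
        #   C:/PROGRA~1/Amazon/Photon/ConsoleImageBuilder/image-assistant.exe create-image
        #   --name AS2_Automation_Image-2024-01-01-12-00-00 --use-latest-agent-version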
# Connect to remote image builder using pywinrm library
logger.info("Connecting to host: %s", host)
session = winrm.Session(host, auth=(user, password))
logger.info("Session connection result: %s", session)
# Run image assistant command to create image
logger.info("Executing Image Assistant command: %s", command)
result = session.run_cmd(command)
logger.info("Results from image assistant command: %s", result.std_out)
if b"ERROR" in result.std_out:
logger.info("ERROR running Image Assistant!")
sys.exit(1)
else:
logger.info("Completed execution of Image Assistant command.")
except Exception as e3 :
logger.error(e3)
full_image_name = "Not Found"
logger.info("Completed AS2_Automation_Windows_Run_Image_Assistant function, returning values to Step Function.")
return {
"Images": [
{
"Name": full_image_name
}
]
}
| 42.759398
| 117
| 0.650782
|
ebd9437cb52c13e3e539083a9cbe287b89b862a1
| 4,706
|
py
|
Python
|
Mass_Plotting.py
|
HRG-Lab/HFSS_Python
|
8a80122690110846f7170df5a167cd86748e7b1c
|
[
"Apache-2.0"
] | 8
|
2019-07-25T06:49:33.000Z
|
2021-11-08T12:20:01.000Z
|
Mass_Plotting.py
|
HRG-Lab/HFSS_Python
|
8a80122690110846f7170df5a167cd86748e7b1c
|
[
"Apache-2.0"
] | null | null | null |
Mass_Plotting.py
|
HRG-Lab/HFSS_Python
|
8a80122690110846f7170df5a167cd86748e7b1c
|
[
"Apache-2.0"
] | 6
|
2018-03-10T13:09:03.000Z
|
2020-10-23T08:57:39.000Z
|
import HFSSLibrary as hfss
import pandas as pd
import matplotlib.pyplot as plt
[oAnsys, oDesktop] = hfss.openHFSS()
oProject = oDesktop.SetActiveProject("ECEN641_Homework2")
Segments = [3,8,10,20,60]
for N in Segments:
for problem_num in ["4.18","4.19"]:
# oDesign = oProject.SetActiveDesign("Problem {1} N = {0}".format(N,problem_num))
# oModule = oDesign.GetModule("ReportSetup")
# oModule.ExportToFile("Reflection", "C:/Users/joshruff/PycharmProjects/HFSS_Python/{1}_Reflection_N_{0}.csv".format(N,problem_num))
# oModule.ExportToFile("Transmission","C:/Users/joshruff/PycharmProjects/HFSS_Python/{1}_Transmission_N_{0}.csv".format(N,problem_num))
# oModule.ExportToFile("Zin_Mag","C:/Users/joshruff/PycharmProjects/HFSS_Python/{1}_Input_Impedance_mag_N_{0}.csv".format(N,problem_num))
# oModule.ExportToFile("Zin_Phase","C:/Users/joshruff/PycharmProjects/HFSS_Python/{1}_Input_Impedance_phase_N_{0}.csv".format(N,problem_num))
reflection_df = pd.read_csv("{1}_Reflection_N_{0}.csv".format(N,problem_num))
transmission_df = pd.read_csv("{1}_Transmission_N_{0}.csv".format(N,problem_num))
input_impedance_mag_df = pd.read_csv("{1}_Input_Impedance_mag_N_{0}.csv".format(N,problem_num))
input_impedance_phase_df = pd.read_csv("{1}_Input_Impedance_phase_N_{0}.csv".format(N,problem_num))
if problem_num == '4.18':
variation = ' - C_pul=\'0.4244pF\' L_pul=\'1.06nH\''
print(variation)
impedance_mag_string = 'mag(Z(Port1,Port1)) [ohm] - C_pul=\'0.4244pF\' L_pul=\'1.06nH\''
impedance_phase_string = 'cang_deg(Z(Port1,Port1)) [deg] - C_pul=\'0.4244pF\' L_pul=\'1.06nH\''
else:
variation = ''#'' - C_pul=\'0.21pF\' L_pul=\'0.531nH\''
print(variation)
impedance_mag_string = 'mag(Z(Port1,Port1)) [ohm]'
impedance_phase_string = 'cang_deg(Z(Port1,Port1)) [deg]'
print(N)
linestyle = '-'
plt.figure()
plt.title("Reflection Coefficient (S11)")
test = reflection_df['F [GHz]']
# print(reflection_df.head())
plt.plot(reflection_df['F [GHz]'],reflection_df['dB(S(Port1,Port1)) []{0}'.format(variation)],label="N={0}".format(N),linestyle = linestyle)
plt.xlabel("Frequency [GHz]")
plt.ylabel("S11 Magnitude [dB]")
plt.legend()
plt.savefig("{0}_N_{1}_S11_Magnitude".format(problem_num, N) + ".png", dpi=300)
plt.title("Reflection Coefficient (S11)")
test = reflection_df['F [GHz]']
# print(reflection_df.head())
plt.plot(reflection_df['F [GHz]'], reflection_df['cang_deg(S(Port1,Port1)) [deg]{0}'.format(variation)],
label="N={0}".format(N), linestyle=linestyle)
plt.xlabel("Frequency [GHz]")
plt.ylabel("S11 Phase [deg]")
plt.legend()
plt.savefig("{0}_N_{1}_S11_Phase".format(problem_num, N) + ".png", dpi=300)
plt.figure()
plt.title("Transmission Coefficient (S21)")
plt.plot(transmission_df['F [GHz]'], transmission_df['dB(S(Port2,Port1)) []{0}'.format(variation)], label="N={0}".format(N),
linestyle=linestyle)
plt.xlabel("Frequency [GHz]")
plt.ylabel("S21 [dB]")
plt.legend()
plt.savefig("{0}_N_{1}_S21_Magnitude".format(problem_num, N) + ".png", dpi=300)
plt.figure()
plt.title("Transmission Coefficient (S21)")
plt.plot(transmission_df['F [GHz]'], transmission_df['cang_deg(S(Port2,Port1)) [deg]{0}'.format(variation)],
label="N={0}".format(N),
linestyle=linestyle)
plt.xlabel("Frequency [GHz]")
plt.ylabel("S21 Phase [deg]")
plt.legend()
plt.savefig("{0}_N_{1}_S21_Phase".format(problem_num, N) + ".png", dpi=300)
plt.figure()
plt.title("Zin Magnitude")
plt.plot(input_impedance_mag_df['F [GHz]'],input_impedance_mag_df[impedance_mag_string],label="N={0}".format(N), linestyle = linestyle)
plt.xlabel("Frequency [GHz]")
plt.ylabel("mag(Zin) [kOhm]")
plt.legend()
# plt.ylim([-20, 2
plt.savefig("{0}_N_{1}_Zin_Magnitude".format(problem_num, N) + ".png", dpi=300)
plt.figure()
plt.title("Zin Phase")
plt.plot(input_impedance_mag_df['F [GHz]'], input_impedance_phase_df[impedance_phase_string],
label="N={0}".format(N), linestyle=linestyle)
plt.xlabel("Frequency [GHz]")
plt.ylabel("cang(Zin) [degrees]")
plt.legend()
# plt.ylim([-20, 2
plt.savefig("{0}_N_{1}_Zin_Phase".format(problem_num,N)+ ".png", dpi=300)
plt.show()
| 47.535354
| 149
| 0.622184
|
e5d2c20bdbc6099beeae73d823adb562a91192a6
| 3,922
|
py
|
Python
|
tests/functional/retries/test_bucket.py
|
kellertk/botocore
|
3b8eb4ab832ed1ca1833a6cfce8277ef6d54dc9f
|
[
"Apache-2.0"
] | null | null | null |
tests/functional/retries/test_bucket.py
|
kellertk/botocore
|
3b8eb4ab832ed1ca1833a6cfce8277ef6d54dc9f
|
[
"Apache-2.0"
] | null | null | null |
tests/functional/retries/test_bucket.py
|
kellertk/botocore
|
3b8eb4ab832ed1ca1833a6cfce8277ef6d54dc9f
|
[
"Apache-2.0"
] | null | null | null |
import random
import threading
import time
from botocore.retries import bucket
from tests import unittest
class InstrumentedTokenBucket(bucket.TokenBucket):
def _acquire(self, amount, block):
rval = super(InstrumentedTokenBucket, self)._acquire(amount, block)
assert self._current_capacity >= 0
return rval
class TestTokenBucketThreading(unittest.TestCase):
def setUp(self):
self.shutdown_threads = False
self.caught_exceptions = []
self.acquisitions_by_thread = {}
def run_in_thread(self):
while not self.shutdown_threads:
capacity = random.randint(1, self.max_capacity)
self.retry_quota.acquire(capacity)
self.seen_capacities.append(self.retry_quota.available_capacity)
self.retry_quota.release(capacity)
self.seen_capacities.append(self.retry_quota.available_capacity)
def create_clock(self):
return bucket.Clock()
def test_can_change_max_rate_while_blocking(self):
# This isn't a stress test, we just want to verify we can change
# the rate at which we acquire a token.
min_rate = 0.1
max_rate = 1
token_bucket = bucket.TokenBucket(
min_rate=min_rate, max_rate=max_rate,
clock=self.create_clock(),
)
# First we'll set the max_rate to 0.1 (min_rate). This means that
# it will take 10 seconds to accumulate a single token. We'll start
# a thread and have it acquire() a token.
# Then in the main thread we'll change the max_rate to something
# really quick (e.g 100). We should immediately get a token back.
# This is going to be timing sensitive, but we can verify that
# as long as it doesn't take 10 seconds to get a token, we were
# able to update the rate as needed.
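# Worked arithmetic (added note, not part of the original test): at a refill rate of
# min_rate = 0.1 tokens/second, accumulating a single token takes 1 / 0.1 = 10 seconds;
# the assertion below checks that, once max_rate is bumped to 100, acquire() returns
# within that 10-second bound (1.0 / min_rate).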
thread = threading.Thread(target=token_bucket.acquire)
token_bucket.max_rate = min_rate
start_time = time.time()
thread.start()
# This shouldn't block the main thread.
token_bucket.max_rate = 100
thread.join()
end_time = time.time()
self.assertLessEqual(end_time - start_time, 1.0 / min_rate)
def acquire_in_loop(self, token_bucket):
while not self.shutdown_threads:
try:
self.assertTrue(token_bucket.acquire())
thread_name = threading.current_thread().name
self.acquisitions_by_thread[thread_name] += 1
except Exception as e:
self.caught_exceptions.append(e)
def randomly_set_max_rate(self, token_bucket, min_val, max_val):
while not self.shutdown_threads:
new_rate = random.randint(min_val, max_val)
token_bucket.max_rate = new_rate
time.sleep(0.01)
def test_stress_test_token_bucket(self):
token_bucket = InstrumentedTokenBucket(
max_rate=10,
clock=self.create_clock(),
)
all_threads = []
for _ in range(2):
all_threads.append(
threading.Thread(target=self.randomly_set_max_rate,
args=(token_bucket, 30, 200))
)
for _ in range(10):
t = threading.Thread(target=self.acquire_in_loop,
args=(token_bucket,))
self.acquisitions_by_thread[t.name] = 0
all_threads.append(t)
for thread in all_threads:
thread.start()
try:
# If you're working on this code you can bump this number way
# up to stress test it more locally.
time.sleep(3)
finally:
self.shutdown_threads = True
for thread in all_threads:
thread.join()
# Verify all threads completed successfully
self.assertEqual(self.caught_exceptions, [])
| 38.07767
| 76
| 0.623151
|
1353d9b426cd0fd62adf48f83f08b1cda5fcd4b7
| 460
|
py
|
Python
|
2019/05/part2.py
|
timofurrer/aoc-2020
|
446b688a57601d9891f520e43b7f822c373a6ff4
|
[
"MIT"
] | null | null | null |
2019/05/part2.py
|
timofurrer/aoc-2020
|
446b688a57601d9891f520e43b7f822c373a6ff4
|
[
"MIT"
] | null | null | null |
2019/05/part2.py
|
timofurrer/aoc-2020
|
446b688a57601d9891f520e43b7f822c373a6ff4
|
[
"MIT"
] | null | null | null |
import sys
from pathlib import Path
from queue import Queue
sys.path.insert(0, str(Path(__file__).parent.parent))
from intcode import Intcode
sys.path = sys.path[1:]
puzzle_input_path = Path(__file__).parent / "input.txt"
with open(puzzle_input_path) as puzzle_input_file:
puzzle_input_raw = puzzle_input_file.read()
program = [int(x) for x in puzzle_input_raw.split(",")]
intcode = Intcode(program, inputs=[5])
output = intcode.run()
print(output[-1])
| 27.058824
| 55
| 0.756522
|
6fc6d7947e5d12b05bdf5d6f3aeb5ca8ac806fb2
| 14,070
|
py
|
Python
|
keylime/ima_file_signatures.py
|
THS-on/keylime
|
bb904fc98d9674832e630542d211e71102873b4d
|
[
"Apache-2.0"
] | null | null | null |
keylime/ima_file_signatures.py
|
THS-on/keylime
|
bb904fc98d9674832e630542d211e71102873b4d
|
[
"Apache-2.0"
] | 1
|
2021-09-13T10:54:29.000Z
|
2021-09-13T10:54:29.000Z
|
keylime/ima_file_signatures.py
|
THS-on/keylime
|
bb904fc98d9674832e630542d211e71102873b4d
|
[
"Apache-2.0"
] | 1
|
2020-07-16T00:07:20.000Z
|
2020-07-16T00:07:20.000Z
|
#!/usr/bin/env python3
'''
SPDX-License-Identifier: Apache-2.0
'''
import base64
import enum
import json
import struct
from cryptography import x509
from cryptography import utils
from cryptography.exceptions import UnsupportedAlgorithm
from cryptography.hazmat import backends
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.asymmetric.ec import EllipticCurvePublicKey
from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicKey
from cryptography.hazmat.primitives.asymmetric.utils import Prehashed
from cryptography.exceptions import InvalidSignature
from keylime import keylime_logging
logger = keylime_logging.init_logging('ima_file_signatures')
"""
Tools for IMA file signature verification
"""
class HashAlgo(enum.IntEnum):
""" The hash_algo's as Linux defines them:
https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/hash_info.h#L17
"""
HASH_ALGO_MD4 = 0
HASH_ALGO_MD5 = 1
HASH_ALGO_SHA1 = 2
HASH_ALGO_RIPE_MD_160 = 3
HASH_ALGO_SHA256 = 4
HASH_ALGO_SHA384 = 5
HASH_ALGO_SHA512 = 6
HASH_ALGO_SHA224 = 7
HASH_ALGO_RIPE_MD_128 = 8
HASH_ALGO_RIPE_MD_256 = 9
HASH_ALGO_RIPE_MD_320 = 10
HASH_ALGO_WP_256 = 11
HASH_ALGO_WP_384 = 12
HASH_ALGO_WP_512 = 13
HASH_ALGO_TGR_128 = 14
HASH_ALGO_TGR_160 = 15
HASH_ALGO_TGR_192 = 16
HASH_ALGO_TGR_256 = 17
HASH_ALGO_STREEBOG_256 = 18
HASH_ALGO_STREEBOG_512 = 19
# Streebog is supported by evmctl
@utils.register_interface(hashes.HashAlgorithm)
class MyStreebog256():
""" Basic class for Streebog256 """
name = "streebog256"
digest_size = 32
block_size = 64
@utils.register_interface(hashes.HashAlgorithm)
class MyStreebog512():
""" Basic class for Streebog512 """
name = "streebog512"
digest_size = 64
block_size = 64
HASH_FUNCS = {
# The list of hash functions we need for signature verification.
HashAlgo.HASH_ALGO_MD5: hashes.__dict__.get('MD5'),
HashAlgo.HASH_ALGO_SHA1: hashes.__dict__.get('SHA1'),
HashAlgo.HASH_ALGO_SHA256: hashes.__dict__.get('SHA256'),
HashAlgo.HASH_ALGO_SHA384: hashes.__dict__.get('SHA384'),
HashAlgo.HASH_ALGO_SHA512: hashes.__dict__.get('SHA512'),
HashAlgo.HASH_ALGO_SHA224: hashes.__dict__.get('SHA224'),
HashAlgo.HASH_ALGO_STREEBOG_256: MyStreebog256,
HashAlgo.HASH_ALGO_STREEBOG_512: MyStreebog512,
}
class EvmImaXattrType(enum.IntEnum):
""" https://elixir.bootlin.com/linux/v5.9.8/source/security/integrity/integrity.h#L74
"""
IMA_XATTR_DIGEST = 1
EVM_XATTR_HMAC = 2
EVM_IMA_XATTR_DIGSIG = 3
IMA_XATTR_DIGEST_NG = 4
EVM_XATTR_PORTABLE_DIGSIG = 5
class PubkeyAlgo(enum.IntEnum):
""" https://elixir.bootlin.com/linux/v5.9.8/source/include/linux/digsig.h#L17 """
PUBKEY_ALGO_RSA = 0
class ImaKeyring:
""" ImaKeyring models an IMA keyring where keys are indexed by their keyid """
def __init__(self):
""" Constructor """
self.ringv2 = {}
@staticmethod
def _get_keyidv2(pubkey):
""" Calculate the keyidv2 of a given public key object. The keyidv2
is the lowest 4 bytes of the sha1 hash over the public key bytes
of a DER-encoded key in PKCS1 format.
"""
if isinstance(pubkey, RSAPublicKey):
fmt = serialization.PublicFormat.PKCS1
pubbytes = pubkey.public_bytes(encoding=serialization.Encoding.DER,
format=fmt)
elif isinstance(pubkey, EllipticCurvePublicKey):
fmt = serialization.PublicFormat.UncompressedPoint
pubbytes = pubkey.public_bytes(encoding=serialization.Encoding.X962,
format=fmt)
else:
raise UnsupportedAlgorithm("Unsupported public key type %s" %
type(pubkey))
default_be = backends.default_backend()
digest = hashes.Hash(hashes.SHA1(), backend=default_be)
digest.update(pubbytes)
keydigest = digest.finalize()
return int.from_bytes(keydigest[16:], 'big')
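# Equivalent standalone sketch of the keyidv2 calculation above (illustrative only;
# `rsa_pubkey` is a hypothetical cryptography RSAPublicKey object):
#
#   import hashlib
#   from cryptography.hazmat.primitives import serialization
#   der = rsa_pubkey.public_bytes(serialization.Encoding.DER,
#                                 serialization.PublicFormat.PKCS1)
#   keyidv2 = int.from_bytes(hashlib.sha1(der).digest()[-4:], 'big')
#
# i.e. keyidv2 is the big-endian integer formed by the last 4 bytes of the SHA-1
# digest of the DER/PKCS1-encoded key, which is what keydigest[16:] selects above.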
def add_pubkey(self, pubkey, keyidv2):
""" Add a public key object to the keyring; a keyidv2 may be passed in
and if it is 'None' it will be determined using the commonly used
sha1 hash function for calculating the Subject Key Identifier.
"""
if not keyidv2:
keyidv2 = ImaKeyring._get_keyidv2(pubkey)
# it's unlikely that two different public keys have the same 32 bit keyidv2
self.ringv2[keyidv2] = pubkey
logger.debug("Added key with keyid: 0x%08x" % keyidv2)
def get_pubkey_by_keyidv2(self, keyidv2):
""" Get a public key object given its keyidv2 """
return self.ringv2.get(keyidv2)
def to_json(self):
""" Convert the ImaKeyring into a JSON object """
fmt = serialization.PublicFormat.SubjectPublicKeyInfo
obj = {}
lst = []
for pubkey in self.ringv2.values():
try:
pubbytes = pubkey.public_bytes(encoding=serialization.Encoding.DER,
format=fmt)
except Exception as ex:
logger.error("Could not serialize key: %s" % str(ex))
lst.append(pubbytes)
obj['pubkeys'] = [base64.b64encode(pubkey).decode('ascii') for pubkey in lst]
obj['keyids'] = list(self.ringv2.keys())
return obj
def to_string(self):
""" Generate a string representation """
return json.dumps(self.to_json())
@staticmethod
def _base64_to_der_keylist(base64_keylist, keyidv2_list):
""" Convert a base64-encoded list of public keys to a list of DER-encoded
public keys; a keyidv2_list may also be given that contains
the keyidv2 of each key
"""
res = []
for idx, entry in enumerate(base64_keylist):
keyidv2 = keyidv2_list[idx] if idx < len(keyidv2_list) else None
res.append((base64.b64decode(entry), keyidv2))
return res
@staticmethod
def from_string(stringrepr):
""" Convert a string-encoded ImaKeyring to an ImaKeyring object """
if not stringrepr:
return None
ima_keyring = ImaKeyring()
default_be = backends.default_backend()
# An empty DB entry comes as a string '[]'. A valid DB entry comes as a string
# with escaped quotes and needs to be loaded twice
obj = json.loads(stringrepr)
if isinstance(obj, str):
obj = json.loads(obj)
if not isinstance(obj, dict):
return None
keyids = obj.get('keyids', [])
for (der_key, keyidv2) in ImaKeyring._base64_to_der_keylist(obj['pubkeys'], keyids):
try:
pubkey = serialization.load_der_public_key(der_key, backend=default_be)
ima_keyring.add_pubkey(pubkey, keyidv2)
except Exception as ex:
logger.error("Could not load a base64-decoded DER key: %s" % str(ex))
return ima_keyring
@staticmethod
def _verify(pubkey, sig, filehash, hashfunc):
""" Do signature verification with the given public key """
if isinstance(pubkey, RSAPublicKey):
pubkey.verify(sig, filehash,
padding.PKCS1v15(), Prehashed(hashfunc))
elif isinstance(pubkey, EllipticCurvePublicKey):
pubkey.verify(sig, filehash,
ec.ECDSA(Prehashed(hashfunc)))
def _asymmetric_verify(self, signature, filehash, filehash_type):
""" Do an IMA signature verification given the signature data from
the log, which is formatted as 'struct signature_v2_hdr'.
This function resembles the kernel code:
https://elixir.bootlin.com/linux/v5.9/source/security/integrity/digsig_asymmetric.c#L76
https://elixir.bootlin.com/linux/v5.9/source/security/integrity/integrity.h#L116
"""
siglen = len(signature)
# The data are in big endian
fmt = '>BBBIH'
hdrlen = struct.calcsize(fmt)
if len(signature) < hdrlen:
logger.warning("Signature header is too short")
return False
_, _, hash_algo, keyidv2, sig_size = struct.unpack(fmt, signature[:hdrlen])
siglen -= hdrlen
if siglen != sig_size:
logger.warning("Malformed signature")
return False
hashfunc = HASH_FUNCS.get(hash_algo)
if not hashfunc:
logger.warning("Unsupported hash algo with id '%d'" % hash_algo)
return False
if filehash_type != hashfunc().name:
logger.warning("Mismatching filehash type %s and ima signature hash used %s" %
(filehash_type, hashfunc().name))
return False
pubkey = self.get_pubkey_by_keyidv2(keyidv2)
if not pubkey:
logger.warning("No key with id 0x%08x available" % keyidv2)
return False
try:
ImaKeyring._verify(pubkey, signature[hdrlen:], filehash, hashfunc())
except InvalidSignature:
return False
return True
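# Illustrative note (not part of the original module): the '>BBBIH' header unpacked
# above is 9 bytes (struct.calcsize('>BBBIH') == 9) and covers, in order:
#   type (1 byte), version (1 byte), hash_algo (1 byte),
#   keyidv2 (4 bytes, big endian), sig_size (2 bytes, big endian)
# so signature[hdrlen:] is exactly the sig_size-byte signature blob that gets handed
# to ImaKeyring._verify().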
def integrity_digsig_verify(self, signature, filehash, filehash_type):
""" Given a system-specific keyring validate the signature against the
given hash. This function resembles the kernel code at:
https://elixir.bootlin.com/linux/v5.9/source/security/integrity/digsig.c#L59
"""
fmt = '>BB'
if len(signature) < struct.calcsize(fmt):
logger.warning("Malformed signature: not enough bytes")
return False
typ, version = struct.unpack(fmt, signature[:struct.calcsize(fmt)])
if typ not in [EvmImaXattrType.EVM_IMA_XATTR_DIGSIG,
EvmImaXattrType.EVM_XATTR_PORTABLE_DIGSIG]:
logger.warning("Malformed signature: wrong type")
return False
if version == 2:
return self._asymmetric_verify(signature, filehash, filehash_type)
logger.warning("Malformed signature: wrong version (%d)" % version)
return False
def _get_pubkey_from_der_public_key(filedata, backend):
""" Load the filedata as a DER public key """
try:
return serialization.load_der_public_key(filedata, backend=backend), None
except Exception:
return None, None
def _get_pubkey_from_pem_public_key(filedata, backend):
""" Load the filedata as a PEM public key """
try:
return serialization.load_pem_public_key(filedata, backend=backend), None
except Exception:
return None, None
def _get_pubkey_from_der_private_key(filedata, backend):
""" Load the filedata as a DER private key """
try:
privkey = serialization.load_der_private_key(filedata, None,
backend=backend)
return privkey.public_key(), None
except Exception:
return None, None
def _get_pubkey_from_pem_private_key(filedata, backend):
""" Load the filedata as a PEM private key """
try:
privkey = serialization.load_pem_private_key(filedata, None,
backend=backend)
return privkey.public_key(), None
except Exception:
return None, None
def _get_keyidv2_from_cert(cert):
""" Get the keyidv2 from the cert's Subject Key Identifier """
if not cert.extensions:
return None
skid = cert.extensions.get_extension_for_oid(x509.oid.ExtensionOID.SUBJECT_KEY_IDENTIFIER)
if skid and skid.value and len(skid.value.digest) >= 4:
keyidv2 = int.from_bytes(skid.value.digest[-4:], 'big')
logger.debug("Extracted keyidv2 from cert: 0x%08x", keyidv2)
return keyidv2
return None
def _get_pubkey_from_der_x509_certificate(filedata, backend):
""" Load the filedata as a DER x509 certificate """
try:
cert = x509.load_der_x509_certificate(filedata, backend=backend)
return cert.public_key(), _get_keyidv2_from_cert(cert)
except Exception:
return None, None
def _get_pubkey_from_pem_x509_certificate(filedata, backend):
""" Load the filedata as a PEM x509 certificate """
try:
cert = x509.load_pem_x509_certificate(filedata, backend=backend)
return cert.public_key(), _get_keyidv2_from_cert(cert)
except Exception:
return None, None
def get_pubkey(filedata):
""" Get the public key from the filedata; if an x509 certificate is
given, also determine the keyidv2 from the Subject Key Identifier,
otherwise return None
To make it easy for the user, we try to parse the filedata as
PEM- or DER-encoded public key, x509 certificate, or even private key.
This function then returns the public key object or None if the file
contents could not be interpreted as a key.
"""
default_be = backends.default_backend()
for func in [_get_pubkey_from_der_x509_certificate,
_get_pubkey_from_pem_x509_certificate,
_get_pubkey_from_der_public_key,
_get_pubkey_from_pem_public_key,
_get_pubkey_from_der_private_key,
_get_pubkey_from_pem_private_key]:
pubkey, keyidv2 = func(filedata, default_be)
if pubkey:
return pubkey, keyidv2
return None, None
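# Usage sketch (illustrative, not part of the original module; the path is hypothetical):
#
#   with open('/path/to/cert.pem', 'rb') as f:
#       pubkey, keyidv2 = get_pubkey(f.read())
#
# pubkey is a cryptography public-key object (or None if the bytes could not be parsed),
# and keyidv2 is only populated when an x509 certificate carrying a Subject Key
# Identifier was supplied. get_pubkey_from_file() below wraps exactly this pattern.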
def get_pubkey_from_file(filename):
""" Get the public key object from a file """
try:
with open(filename, "rb") as fobj:
filedata = fobj.read()
pubkey, keyidv2 = get_pubkey(filedata)
if pubkey:
return pubkey, keyidv2
except Exception:
pass
return None, None
| 35.71066
| 99
| 0.65366
|
b80b4cc0df674c2c1adbc9b8de73c1f0a088d9ae
| 17,457
|
py
|
Python
|
sscanss/core/instrument/simulation.py
|
StephenNneji/SScanSS-2
|
b0a3697d803439840a8f29c4486428f1b7a010f3
|
[
"BSD-3-Clause"
] | null | null | null |
sscanss/core/instrument/simulation.py
|
StephenNneji/SScanSS-2
|
b0a3697d803439840a8f29c4486428f1b7a010f3
|
[
"BSD-3-Clause"
] | null | null | null |
sscanss/core/instrument/simulation.py
|
StephenNneji/SScanSS-2
|
b0a3697d803439840a8f29c4486428f1b7a010f3
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
import time
import numpy as np
from multiprocessing import Event, Process, Queue, sharedctypes
from PyQt5 import QtCore
from .collision import CollisionManager
from .robotics import IKSolver
from ..geometry.intersection import path_length_calculation
from ..math import VECTOR_EPS
from ..scene.node import create_instrument_node
from ..util.misc import Attributes
from ...config import settings, setup_logging
def update_colliders(manager, sample_pose, sample_ids, positioner_nodes, positioner_ids):
"""Updates the sample and positioner colliders
:param manager: collision manager
:type manager: CollisionManager
:param sample_pose: sample transformation matrix
:type sample_pose: Matrix44
:param sample_ids: list of sample collider ids
:type sample_ids: List[int]
:param positioner_nodes: list of positioner nodes
:type positioner_nodes: List[Node]
:param positioner_ids: list of positioner ids
:type positioner_ids: List[int]
"""
for i in sample_ids:
manager.colliders[i].geometry.transform(sample_pose)
for i, node in zip(positioner_ids, positioner_nodes):
manager.colliders[i].geometry.transform(node.transform)
manager.createAABBSets()
def populate_collision_manager(manager, sample, sample_pose, instrument_node):
"""Adds sample and instrument scene colliders to the collision manager and builds
scene bounding boxes
:param manager: collision manager
:type manager: CollisionManager
:param sample: list of sample mesh
:type sample: List[Mesh]
:param sample_pose: sample transformation matrix
:type sample_pose: Matrix44
:param instrument_node: instrument node and ids
:type instrument_node: Tuple[List[Node], Dict[str, int]]
:return: sample and positioner collider ids
:rtype: Tuple[List[int], List[int]]
"""
manager.clear()
transform = [sample_pose for _ in range(len(sample))]
manager.addColliders(sample, transform, manager.Exclude.All, True)
sample_ids = list(range(len(sample)))
positioner_ids = []
node, indices = instrument_node
start_index = 0
for name, end_index in indices.items():
attribute_node = node.children[start_index:end_index]
transform = [n.transform for n in attribute_node]
if name == Attributes.Positioner.value:
start_id = manager.colliders[-1].id + 1
manager.addColliders(attribute_node, transform, exclude=manager.Exclude.Consecutive, movable=True)
last_link_collider = manager.colliders[-1]
for index, obj in enumerate(manager.colliders[0:len(sample)]):
obj.excludes[last_link_collider.id] = True
last_link_collider.excludes[index] = True
positioner_ids.extend(range(start_id, last_link_collider.id + 1))
else:
exclude = manager.Exclude.Nothing if name == Attributes.Fixture.value else manager.Exclude.Consecutive
manager.addColliders(attribute_node, transform, exclude=exclude, movable=False)
start_index = end_index
manager.createAABBSets()
return sample_ids, positioner_ids
class SimulationResult:
"""Data class for the simulation result
:param result_id: result identifier
:type result_id: str
:param ik: inverse kinematics result
:type ik: Union[IKResult, None]
:param q_formatted: formatted positioner offsets
:type q_formatted: Tuple
:param alignment: alignment index
:type alignment: int
:param path_length: path length result
:type path_length: Union[Tuple[float], None]
:param collision_mask: mask showing which objects collided
:type collision_mask: Union[List[bool], None]
:param skipped: indicates if the result is skipped
:type skipped: bool
:param note: note about result such as reason for skipping
:type note: str
"""
def __init__(self, result_id, ik=None, q_formatted=(None, None),
alignment=0, path_length=None, collision_mask=None, skipped=False, note=''):
self.id = result_id
self.ik = ik
self.alignment = alignment
self.joint_labels, self.formatted = q_formatted
self.path_length = path_length
self.collision_mask = collision_mask
self.skipped = skipped
self.note = note
class Simulation(QtCore.QObject):
"""Simulates the experiment by computing inverse kinematics of positioning system to place measurement
points in the gauge volume with the appropriate orientation. The simulation is performed on a different
process to avoid freezing the main thread and a signal is sent when new results are available.
:param instrument: instrument object
:type instrument: Instrument
:param sample: sample meshes
:type sample: Dict[Mesh]
:param points: measurement points
:type points: numpy.recarray
:param vectors: measurement vectors
:type vectors: numpy.ndarray
:param alignment: alignment matrix
:type alignment: Matrix44
"""
result_updated = QtCore.pyqtSignal(bool)
stopped = QtCore.pyqtSignal()
def __init__(self, instrument, sample, points, vectors, alignment):
super().__init__()
self.timer = QtCore.QTimer()
self.timer.setInterval(20)
self.timer.timeout.connect(self.checkResult)
self.args = {'ikine_kwargs': {'local_max_eval': settings.value(settings.Key.Local_Max_Eval),
'global_max_eval': settings.value(settings.Key.Global_Max_Eval),
'tol': (settings.value(settings.Key.Position_Stop_Val),
settings.value(settings.Key.Angular_Stop_Val)),
'bounded': True},
'skip_zero_vectors': settings.value(settings.Key.Skip_Zero_Vectors),
'align_first_order': settings.value(settings.Key.Align_First)}
self.results = []
self.process = None
self.compute_path_length = False
self.render_graphics = False
self.check_limits = True
self.check_collision = False
self.has_valid_result = False
self.args['positioner'] = instrument.positioning_stack
self.args['points'] = points.points
self.args['vectors'] = vectors
self.args['enabled'] = points.enabled
self.shape = (vectors.shape[0], vectors.shape[1] // 3, vectors.shape[2])
self.count = self.shape[0] * self.shape[2]
self.args['results'] = Queue(self.count + 1)
self.args['exit_event'] = Event()
matrix = alignment.transpose()
self.args['points'] = self.args['points'] @ matrix[0:3, 0:3] + matrix[3, 0:3]
for k in range(self.args['vectors'].shape[2]):
for j in range(0, self.args['vectors'].shape[1], 3):
self.args['vectors'][:, j:j+3, k] = self.args['vectors'][:, j:j+3, k] @ matrix[0:3, 0:3]
self.args['sample'] = []
for key, mesh in sample.items():
self.args['sample'].append(mesh.transformed(alignment))
self.args['beam_axis'] = np.array(instrument.jaws.beam_direction)
self.args['gauge_volume'] = np.array(instrument.gauge_volume)
self.args['q_vectors'] = np.array(instrument.q_vectors)
self.args['diff_axis'] = np.array([d.diffracted_beam for d in instrument.detectors.values()])
self.args['beam_in_gauge'] = instrument.beam_in_gauge_volume
self.detector_names = list(instrument.detectors.keys())
self.params = self.extractInstrumentParameters(instrument)
self.args['instrument_scene'] = create_instrument_node(instrument, True)
def extractInstrumentParameters(self, instrument):
"""Extract detector and jaws state
:param instrument: instrument object
:type instrument: Instrument
:return: dict containing the current detector and jaws state
:rtype: Dict
"""
params = {}
for key, detector in instrument.detectors.items():
if detector.positioner is not None:
params[f'{Attributes.Detector.value}_{key}'] = detector.positioner.configuration
params[f'{Attributes.Detector.value}_{key}_collimator'] = ''
if detector.current_collimator is not None:
params[f'{Attributes.Detector.value}_{key}_collimator'] = detector.current_collimator.name
if instrument.jaws.positioner is not None:
params[Attributes.Jaws.value] = instrument.jaws.positioner.configuration
return params
def validateInstrumentParameters(self, instrument):
"""Validates if the instrument state have been changed since the simulation was last run
:param instrument: instrument object
:type instrument: Instrument
:return: indicates if the instrument state has not changed
:rtype: bool
"""
params = self.extractInstrumentParameters(instrument)
for key, value in self.params.items():
if isinstance(value, str):
if value != params.get(key):
return False
else:
if not np.allclose(value, params.get(key, []), 0, 0.001):
return False
return True
@property
def positioner(self):
return self.args['positioner']
@property
def scene_size(self):
return len(self.args['instrument_scene'][0].children) + len(self.args['sample'])
@property
def compute_path_length(self):
return self.args['compute_path_length']
@compute_path_length.setter
def compute_path_length(self, value):
self.args['compute_path_length'] = value
if value:
self.args['path_lengths'] = sharedctypes.RawArray('f', [0.] * np.prod(self.shape))
@property
def check_collision(self):
return self.args['check_collision']
@check_collision.setter
def check_collision(self, value):
self.args['check_collision'] = value
@property
def render_graphics(self):
return self.args['render_graphics']
@render_graphics.setter
def render_graphics(self, value):
self.args['render_graphics'] = value
@property
def check_limits(self):
return self.args['ikine_kwargs']['bounded']
@check_limits.setter
def check_limits(self, value):
self.args['ikine_kwargs']['bounded'] = value
def start(self):
"""starts the simulation"""
self.process = Process(target=Simulation.execute, args=(self.args,))
self.process.daemon = True
self.process.start()
self.timer.start()
def checkResult(self):
"""checks and notifies if result are available"""
queue = self.args['results']
if self.args['results'].empty():
return
if not self.process.is_alive():
self.timer.stop()
queue.put(None)
error = False
for result in iter(queue.get, None):
if isinstance(result, SimulationResult):
self.results.append(result)
if not result.skipped and result.ik.status != IKSolver.Status.Failed:
self.has_valid_result = True
else:
error = True
self.result_updated.emit(error)
@staticmethod
def execute(args):
"""Computes inverse kinematics, path length, and collisions for each measurement in the
simulation.
:param args: argument required for the simulation
:type args: Dict
"""
setup_logging('simulation.log')
logger = logging.getLogger(__name__)
logger.info('Initializing new simulation...')
q_vec = args['q_vectors']
beam_axis = args['beam_axis']
gauge_volume = args['gauge_volume']
diff_axis = args['diff_axis']
beam_in_gauge = args['beam_in_gauge']
results = args['results']
exit_event = args['exit_event']
ikine_kwargs = args['ikine_kwargs']
positioner = args['positioner']
joint_labels = [positioner.links[order].name for order in positioner.order]
vectors = args['vectors']
shape = (vectors.shape[0], vectors.shape[1]//3, vectors.shape[2])
points = args['points']
enabled = args['enabled']
sample = args['sample']
compute_path_length = args['compute_path_length']
render_graphics = args['render_graphics']
check_collision = args['check_collision']
if compute_path_length and beam_in_gauge:
path_lengths = np.frombuffer(args['path_lengths'], dtype=np.float32, count=np.prod(shape)).reshape(shape)
if check_collision:
instrument_scene = args['instrument_scene']
manager = CollisionManager(len(args['instrument_scene'][0].children) + len(args['sample']))
sample_ids, positioner_ids = populate_collision_manager(manager, sample, np.identity(4), instrument_scene)
skip_zero_vectors = args['skip_zero_vectors']
if args['align_first_order']:
order = [(i, j) for i in range(shape[0]) for j in range(shape[2])]
else:
order = [(i, j) for j in range(shape[2]) for i in range(shape[0])]
logger.info(f'Simulation ({shape[0]} points, {shape[2]} alignments) initialized with '
f'render graphics: {render_graphics}, check_collision: {check_collision}, compute_path_length: '
f'{compute_path_length}, check_limits: {args["ikine_kwargs"]["bounded"]}')
try:
for index, ij in enumerate(order):
i, j = ij
label = f'# {index + 1} - Point {i + 1}, Alignment {j + 1}' if shape[2] > 1 else f'Point {i + 1}'
if not enabled[i]:
results.put(SimulationResult(label, skipped=True, note='The measurement point is disabled'))
logger.info(f'Skipped Point {i}, Alignment {j} (Point Disabled)')
continue
all_mvs = vectors[i, :, j].reshape(-1, 3)
selected = np.where(np.linalg.norm(all_mvs, axis=1) > VECTOR_EPS)[0]
if selected.size == 0:
if skip_zero_vectors:
results.put(SimulationResult(label, skipped=True, note='The measurement vector is unset'))
logger.info(f'Skipped Point {i}, Alignment {j} (Vector Unset)')
continue
q_vectors = np.atleast_2d(q_vec[0])
measurement_vectors = np.atleast_2d(positioner.pose[0:3, 0:3].transpose() @ q_vec[0])
else:
q_vectors = np.atleast_2d(q_vec[selected])
measurement_vectors = np.atleast_2d(all_mvs[selected])
logger.info(f'Started Point {i}, Alignment {j}')
r = positioner.ikine((points[i, :], measurement_vectors), (gauge_volume, q_vectors), **ikine_kwargs)
if exit_event.is_set():
break
result = SimulationResult(label, r, (joint_labels, positioner.toUserFormat(r.q)), j)
if r.status != IKSolver.Status.Failed:
pose = positioner.fkine(r.q) @ positioner.tool_link
if compute_path_length and beam_in_gauge:
transformed_sample = sample[0].transformed(pose)
result.path_length = path_length_calculation(transformed_sample, gauge_volume,
beam_axis, diff_axis)
path_lengths[i, :, j] = result.path_length
if exit_event.is_set():
break
if check_collision:
update_colliders(manager, pose, sample_ids, positioner.model().flatten().children,
positioner_ids)
result.collision_mask = manager.collide()
if exit_event.is_set():
break
results.put(result)
if render_graphics:
# Sleep to allow graphics render
time.sleep(0.2)
logger.info(f'Finished Point {i}, Alignment {j}')
if exit_event.is_set():
break
logger.info('Simulation Finished')
except Exception:
results.put('Error')
logging.exception('An error occurred while running the simulation.')
logging.shutdown()
@property
def path_lengths(self):
if self.compute_path_length:
return np.frombuffer(self.args['path_lengths'], dtype=np.float32,
count=np.prod(self.shape)).reshape(self.shape)
return None
def isRunning(self):
"""Indicates if the simulation is running.
:return: flag indicating the simulation is running
:rtype: bool
"""
if self.process is None:
return False
return self.process.is_alive() and not self.args['exit_event'].is_set()
def abort(self):
"""Aborts the simulation, but not guaranteed to be instantaneous."""
self.args['exit_event'].set()
self.timer.stop()
self.stopped.emit()
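# Illustrative usage sketch (added note; the instrument, sample, points, vectors and
# alignment objects are assumed to come from an SScanSS model and are not built here):
#
#   simulation = Simulation(instrument, sample, points, vectors, alignment)
#   simulation.compute_path_length = True
#   simulation.result_updated.connect(lambda error: print(len(simulation.results)))
#   simulation.start()    # runs Simulation.execute in a separate process
#   ...
#   simulation.abort()    # sets the exit event; not guaranteed to stop instantly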
| 40.038991
| 118
| 0.626224
|
db26abfcf5dce7e8bb826f7d2c63f00c8ac050c8
| 36,593
|
py
|
Python
|
impacket/ese.py
|
Remediant/impacket
|
1429750288e026f196b656e0f4227d69894c5775
|
[
"Apache-1.1"
] | null | null | null |
impacket/ese.py
|
Remediant/impacket
|
1429750288e026f196b656e0f4227d69894c5775
|
[
"Apache-1.1"
] | null | null | null |
impacket/ese.py
|
Remediant/impacket
|
1429750288e026f196b656e0f4227d69894c5775
|
[
"Apache-1.1"
] | null | null | null |
# Copyright (c) 2003-2016 CORE Security Technologies
#
# This software is provided under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Description:
# Microsoft Extensive Storage Engine parser, just focused on trying
# to parse NTDS.dit files (not meant as a full parser, although it might work)
#
# Author:
# Alberto Solino (@agsolino)
#
# Reference for:
# Structure.
#
# Excellent reference done by Joachim Metz
# http://forensic-proof.com/wp-content/uploads/2011/07/Extensible-Storage-Engine-ESE-Database-File-EDB-format.pdf
#
# ToDo:
# [ ] Parse multi-values properly
# [ ] Support long values properly
from impacket import LOG
try:
from collections import OrderedDict
except:
try:
from ordereddict.ordereddict import OrderedDict
except:
from ordereddict import OrderedDict
from impacket.structure import Structure
from struct import unpack
from binascii import hexlify
# Constants
FILE_TYPE_DATABASE = 0
FILE_TYPE_STREAMING_FILE = 1
# Database state
JET_dbstateJustCreated = 1
JET_dbstateDirtyShutdown = 2
JET_dbstateCleanShutdown = 3
JET_dbstateBeingConverted = 4
JET_dbstateForceDetach = 5
# Page Flags
FLAGS_ROOT = 1
FLAGS_LEAF = 2
FLAGS_PARENT = 4
FLAGS_EMPTY = 8
FLAGS_SPACE_TREE = 0x20
FLAGS_INDEX = 0x40
FLAGS_LONG_VALUE = 0x80
FLAGS_NEW_FORMAT = 0x2000
FLAGS_NEW_CHECKSUM = 0x2000
# Tag Flags
TAG_UNKNOWN = 0x1
TAG_DEFUNCT = 0x2
TAG_COMMON = 0x4
# Fixed Page Numbers
DATABASE_PAGE_NUMBER = 1
CATALOG_PAGE_NUMBER = 4
CATALOG_BACKUP_PAGE_NUMBER = 24
# Fixed FatherDataPages
DATABASE_FDP = 1
CATALOG_FDP = 2
CATALOG_BACKUP_FDP = 3
# Catalog Types
CATALOG_TYPE_TABLE = 1
CATALOG_TYPE_COLUMN = 2
CATALOG_TYPE_INDEX = 3
CATALOG_TYPE_LONG_VALUE = 4
CATALOG_TYPE_CALLBACK = 5
# Column Types
JET_coltypNil = 0
JET_coltypBit = 1
JET_coltypUnsignedByte = 2
JET_coltypShort = 3
JET_coltypLong = 4
JET_coltypCurrency = 5
JET_coltypIEEESingle = 6
JET_coltypIEEEDouble = 7
JET_coltypDateTime = 8
JET_coltypBinary = 9
JET_coltypText = 10
JET_coltypLongBinary = 11
JET_coltypLongText = 12
JET_coltypSLV = 13
JET_coltypUnsignedLong = 14
JET_coltypLongLong = 15
JET_coltypGUID = 16
JET_coltypUnsignedShort= 17
JET_coltypMax = 18
ColumnTypeToName = {
JET_coltypNil : 'NULL',
JET_coltypBit : 'Boolean',
JET_coltypUnsignedByte : 'Signed byte',
JET_coltypShort : 'Signed short',
JET_coltypLong : 'Signed long',
JET_coltypCurrency : 'Currency',
JET_coltypIEEESingle : 'Single precision FP',
JET_coltypIEEEDouble : 'Double precision FP',
JET_coltypDateTime : 'DateTime',
JET_coltypBinary : 'Binary',
JET_coltypText : 'Text',
JET_coltypLongBinary : 'Long Binary',
JET_coltypLongText : 'Long Text',
JET_coltypSLV : 'Obsolete',
JET_coltypUnsignedLong : 'Unsigned long',
JET_coltypLongLong : 'Long long',
JET_coltypGUID : 'GUID',
JET_coltypUnsignedShort: 'Unsigned short',
JET_coltypMax : 'Max',
}
ColumnTypeSize = {
JET_coltypNil : None,
JET_coltypBit : (1,'B'),
JET_coltypUnsignedByte : (1,'B'),
JET_coltypShort : (2,'<h'),
JET_coltypLong : (4,'<l'),
JET_coltypCurrency : (8,'<Q'),
JET_coltypIEEESingle : (4,'<f'),
JET_coltypIEEEDouble : (8,'<d'),
JET_coltypDateTime : (8,'<Q'),
JET_coltypBinary : None,
JET_coltypText : None,
JET_coltypLongBinary : None,
JET_coltypLongText : None,
JET_coltypSLV : None,
JET_coltypUnsignedLong : (4,'<L'),
JET_coltypLongLong : (8,'<Q'),
JET_coltypGUID : (16,'16s'),
JET_coltypUnsignedShort: (2,'<H'),
JET_coltypMax : None,
}
# Tagged Data Type Flags
TAGGED_DATA_TYPE_VARIABLE_SIZE = 1
TAGGED_DATA_TYPE_COMPRESSED = 2
TAGGED_DATA_TYPE_STORED = 4
TAGGED_DATA_TYPE_MULTI_VALUE = 8
TAGGED_DATA_TYPE_WHO_KNOWS = 10
# Code pages
CODEPAGE_UNICODE = 1200
CODEPAGE_ASCII = 20127
CODEPAGE_WESTERN = 1252
StringCodePages = {
CODEPAGE_UNICODE : 'utf-16le',
CODEPAGE_ASCII : 'ascii',
CODEPAGE_WESTERN : 'cp1252',
}
# Structures
TABLE_CURSOR = {
'TableData' : '',
'FatherDataPageNumber': 0,
'CurrentPageData' : '',
'CurrentTag' : 0,
}
class ESENT_JET_SIGNATURE(Structure):
structure = (
('Random','<L=0'),
('CreationTime','<Q=0'),
('NetBiosName','16s=""'),
)
class ESENT_DB_HEADER(Structure):
structure = (
('CheckSum','<L=0'),
('Signature','"\xef\xcd\xab\x89'),
('Version','<L=0'),
('FileType','<L=0'),
('DBTime','<Q=0'),
('DBSignature',':',ESENT_JET_SIGNATURE),
('DBState','<L=0'),
('ConsistentPosition','<Q=0'),
('ConsistentTime','<Q=0'),
('AttachTime','<Q=0'),
('AttachPosition','<Q=0'),
('DetachTime','<Q=0'),
('DetachPosition','<Q=0'),
('LogSignature',':',ESENT_JET_SIGNATURE),
('Unknown','<L=0'),
('PreviousBackup','24s=""'),
('PreviousIncBackup','24s=""'),
('CurrentFullBackup','24s=""'),
('ShadowingDisables','<L=0'),
('LastObjectID','<L=0'),
('WindowsMajorVersion','<L=0'),
('WindowsMinorVersion','<L=0'),
('WindowsBuildNumber','<L=0'),
('WindowsServicePackNumber','<L=0'),
('FileFormatRevision','<L=0'),
('PageSize','<L=0'),
('RepairCount','<L=0'),
('RepairTime','<Q=0'),
('Unknown2','28s=""'),
('ScrubTime','<Q=0'),
('RequiredLog','<Q=0'),
('UpgradeExchangeFormat','<L=0'),
('UpgradeFreePages','<L=0'),
('UpgradeSpaceMapPages','<L=0'),
('CurrentShadowBackup','24s=""'),
('CreationFileFormatVersion','<L=0'),
('CreationFileFormatRevision','<L=0'),
('Unknown3','16s=""'),
('OldRepairCount','<L=0'),
('ECCCount','<L=0'),
('LastECCTime','<Q=0'),
('OldECCFixSuccessCount','<L=0'),
('ECCFixErrorCount','<L=0'),
('LastECCFixErrorTime','<Q=0'),
('OldECCFixErrorCount','<L=0'),
('BadCheckSumErrorCount','<L=0'),
('LastBadCheckSumTime','<Q=0'),
('OldCheckSumErrorCount','<L=0'),
('CommittedLog','<L=0'),
('PreviousShadowCopy','24s=""'),
('PreviousDifferentialBackup','24s=""'),
('Unknown4','40s=""'),
('NLSMajorVersion','<L=0'),
('NLSMinorVersion','<L=0'),
('Unknown5','148s=""'),
('UnknownFlags','<L=0'),
)
class ESENT_PAGE_HEADER(Structure):
structure_2003_SP0 = (
('CheckSum','<L=0'),
('PageNumber','<L=0'),
)
structure_0x620_0x0b = (
('CheckSum','<L=0'),
('ECCCheckSum','<L=0'),
)
structure_win7 = (
('CheckSum','<Q=0'),
)
common = (
('LastModificationTime','<Q=0'),
('PreviousPageNumber','<L=0'),
('NextPageNumber','<L=0'),
('FatherDataPage','<L=0'),
('AvailableDataSize','<H=0'),
('AvailableUncommittedDataSize','<H=0'),
('FirstAvailableDataOffset','<H=0'),
('FirstAvailablePageTag','<H=0'),
('PageFlags','<L=0'),
)
extended_win7 = (
('ExtendedCheckSum1','<Q=0'),
('ExtendedCheckSum2','<Q=0'),
('ExtendedCheckSum3','<Q=0'),
('PageNumber','<Q=0'),
('Unknown','<Q=0'),
)
def __init__(self, version, revision, pageSize=8192, data=None):
if (version < 0x620) or (version == 0x620 and revision < 0x0b):
# For sure the old format
self.structure = self.structure_2003_SP0 + self.common
elif version == 0x620 and revision < 0x11:
# Exchange 2003 SP1 and Windows Vista and later
self.structure = self.structure_0x620_0x0b + self.common
else:
# Windows 7 and later
self.structure = self.structure_win7 + self.common
if pageSize > 8192:
self.structure += self.extended_win7
Structure.__init__(self,data)
class ESENT_ROOT_HEADER(Structure):
structure = (
('InitialNumberOfPages','<L=0'),
('ParentFatherDataPage','<L=0'),
('ExtentSpace','<L=0'),
('SpaceTreePageNumber','<L=0'),
)
class ESENT_BRANCH_HEADER(Structure):
structure = (
('CommonPageKey',':'),
)
class ESENT_BRANCH_ENTRY(Structure):
common = (
('CommonPageKeySize','<H=0'),
)
structure = (
('LocalPageKeySize','<H=0'),
('_LocalPageKey','_-LocalPageKey','self["LocalPageKeySize"]'),
('LocalPageKey',':'),
('ChildPageNumber','<L=0'),
)
def __init__(self, flags, data=None):
if flags & TAG_COMMON > 0:
# Include the common header
self.structure = self.common + self.structure
Structure.__init__(self,data)
class ESENT_LEAF_HEADER(Structure):
structure = (
('CommonPageKey',':'),
)
class ESENT_LEAF_ENTRY(Structure):
common = (
('CommonPageKeySize','<H=0'),
)
structure = (
('LocalPageKeySize','<H=0'),
('_LocalPageKey','_-LocalPageKey','self["LocalPageKeySize"]'),
('LocalPageKey',':'),
('EntryData',':'),
)
def __init__(self, flags, data=None):
if flags & TAG_COMMON > 0:
# Include the common header
self.structure = self.common + self.structure
Structure.__init__(self,data)
class ESENT_SPACE_TREE_HEADER(Structure):
structure = (
('Unknown','<Q=0'),
)
class ESENT_SPACE_TREE_ENTRY(Structure):
structure = (
('PageKeySize','<H=0'),
('LastPageNumber','<L=0'),
('NumberOfPages','<L=0'),
)
class ESENT_INDEX_ENTRY(Structure):
structure = (
('RecordPageKey',':'),
)
class ESENT_DATA_DEFINITION_HEADER(Structure):
structure = (
('LastFixedSize','<B=0'),
('LastVariableDataType','<B=0'),
('VariableSizeOffset','<H=0'),
)
class ESENT_CATALOG_DATA_DEFINITION_ENTRY(Structure):
fixed = (
('FatherDataPageID','<L=0'),
('Type','<H=0'),
('Identifier','<L=0'),
)
column_stuff = (
('ColumnType','<L=0'),
('SpaceUsage','<L=0'),
('ColumnFlags','<L=0'),
('CodePage','<L=0'),
)
other = (
('FatherDataPageNumber','<L=0'),
)
table_stuff = (
('SpaceUsage','<L=0'),
# ('TableFlags','<L=0'),
# ('InitialNumberOfPages','<L=0'),
)
index_stuff = (
('SpaceUsage','<L=0'),
('IndexFlags','<L=0'),
('Locale','<L=0'),
)
lv_stuff = (
('SpaceUsage','<L=0'),
# ('LVFlags','<L=0'),
# ('InitialNumberOfPages','<L=0'),
)
common = (
# ('RootFlag','<B=0'),
# ('RecordOffset','<H=0'),
# ('LCMapFlags','<L=0'),
# ('KeyMost','<H=0'),
('Trailing',':'),
)
def __init__(self,data):
# Depending on the type of data we'll end up building a different struct
dataType = unpack('<H', data[4:][:2])[0]
self.structure = self.fixed
if dataType == CATALOG_TYPE_TABLE:
self.structure += self.other + self.table_stuff
elif dataType == CATALOG_TYPE_COLUMN:
self.structure += self.column_stuff
elif dataType == CATALOG_TYPE_INDEX:
self.structure += self.other + self.index_stuff
elif dataType == CATALOG_TYPE_LONG_VALUE:
self.structure += self.other + self.lv_stuff
elif dataType == CATALOG_TYPE_CALLBACK:
raise Exception('CallBack types not supported!')
else:
LOG.error('Unknown catalog type 0x%x' % dataType)
self.structure = ()
Structure.__init__(self,data)
self.structure += self.common
Structure.__init__(self,data)
def pretty_print(x):
if x in '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~ ':
return x
else:
return '.'
def hexdump(data):
x=str(data)
strLen = len(x)
i = 0
while i < strLen:
print "%04x " % i,
for j in range(16):
if i+j < strLen:
print "%02X" % ord(x[i+j]),
else:
print " ",
if j%16 == 7:
print "",
print " ",
print ''.join(pretty_print(x) for x in x[i:i+16] )
i += 16
def getUnixTime(t):
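# Converts a Windows FILETIME value (100-nanosecond intervals since 1601-01-01) to a
# Unix timestamp: 116444736000000000 is the number of 100-ns intervals between
# 1601-01-01 and 1970-01-01, and dividing by 10**7 turns 100-ns units into seconds.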
t -= 116444736000000000
t /= 10000000
return t
class ESENT_PAGE:
def __init__(self, db, data=None):
self.__DBHeader = db
self.data = data
self.record = None
if data is not None:
self.record = ESENT_PAGE_HEADER(self.__DBHeader['Version'], self.__DBHeader['FileFormatRevision'], self.__DBHeader['PageSize'], data)
def printFlags(self):
flags = self.record['PageFlags']
if flags & FLAGS_EMPTY:
print "\tEmpty"
if flags & FLAGS_INDEX:
print "\tIndex"
if flags & FLAGS_LEAF:
print "\tLeaf"
else:
print "\tBranch"
if flags & FLAGS_LONG_VALUE:
print "\tLong Value"
if flags & FLAGS_NEW_CHECKSUM:
print "\tNew Checksum"
if flags & FLAGS_NEW_FORMAT:
print "\tNew Format"
if flags & FLAGS_PARENT:
print "\tParent"
if flags & FLAGS_ROOT:
print "\tRoot"
if flags & FLAGS_SPACE_TREE:
print "\tSpace Tree"
def dump(self):
baseOffset = len(self.record)
self.record.dump()
tags = self.data[-4*self.record['FirstAvailablePageTag']:]
print "FLAGS: "
self.printFlags()
print
for i in range(self.record['FirstAvailablePageTag']):
tag = tags[-4:]
if self.__DBHeader['Version'] == 0x620 and self.__DBHeader['FileFormatRevision'] > 11 and self.__DBHeader['PageSize'] > 8192:
valueSize = unpack('<H', tag[:2])[0] & 0x7fff
valueOffset = unpack('<H',tag[2:])[0] & 0x7fff
hexdump((self.data[baseOffset+valueOffset:][:6]))
pageFlags = ord(self.data[baseOffset+valueOffset:][1]) >> 5
#print "TAG FLAG: 0x%x " % (unpack('<L', self.data[baseOffset+valueOffset:][:4]) ) >> 5
#print "TAG FLAG: 0x " , ord(self.data[baseOffset+valueOffset:][0])
else:
valueSize = unpack('<H', tag[:2])[0] & 0x1fff
pageFlags = (unpack('<H', tag[2:])[0] & 0xe000) >> 13
valueOffset = unpack('<H',tag[2:])[0] & 0x1fff
print "TAG %-8d offset:0x%-6x flags:0x%-4x valueSize:0x%x" % (i,valueOffset,pageFlags,valueSize)
#hexdump(self.getTag(i)[1])
tags = tags[:-4]
if self.record['PageFlags'] & FLAGS_ROOT > 0:
rootHeader = ESENT_ROOT_HEADER(self.getTag(0)[1])
rootHeader.dump()
elif self.record['PageFlags'] & FLAGS_LEAF == 0:
# Branch Header
flags, data = self.getTag(0)
branchHeader = ESENT_BRANCH_HEADER(data)
branchHeader.dump()
else:
# Leaf Header
flags, data = self.getTag(0)
if self.record['PageFlags'] & FLAGS_SPACE_TREE > 0:
# Space Tree
spaceTreeHeader = ESENT_SPACE_TREE_HEADER(data)
spaceTreeHeader.dump()
else:
leafHeader = ESENT_LEAF_HEADER(data)
leafHeader.dump()
# Print the leaf/branch tags
for tagNum in range(1,self.record['FirstAvailablePageTag']):
flags, data = self.getTag(tagNum)
if self.record['PageFlags'] & FLAGS_LEAF == 0:
# Branch page
branchEntry = ESENT_BRANCH_ENTRY(flags, data)
branchEntry.dump()
elif self.record['PageFlags'] & FLAGS_LEAF > 0:
# Leaf page
if self.record['PageFlags'] & FLAGS_SPACE_TREE > 0:
# Space Tree
spaceTreeEntry = ESENT_SPACE_TREE_ENTRY(data)
#spaceTreeEntry.dump()
elif self.record['PageFlags'] & FLAGS_INDEX > 0:
# Index Entry
indexEntry = ESENT_INDEX_ENTRY(data)
#indexEntry.dump()
elif self.record['PageFlags'] & FLAGS_LONG_VALUE > 0:
# Long Page Value
raise Exception('Long value still not supported')
else:
# Table Value
leafEntry = ESENT_LEAF_ENTRY(flags, data)
dataDefinitionHeader = ESENT_DATA_DEFINITION_HEADER(leafEntry['EntryData'])
dataDefinitionHeader.dump()
catalogEntry = ESENT_CATALOG_DATA_DEFINITION_ENTRY(leafEntry['EntryData'][len(dataDefinitionHeader):])
catalogEntry.dump()
hexdump(leafEntry['EntryData'])
def getTag(self, tagNum):
if self.record['FirstAvailablePageTag'] < tagNum:
raise Exception('Trying to grab an unknown tag 0x%x' % tagNum)
tags = self.data[-4*self.record['FirstAvailablePageTag']:]
baseOffset = len(self.record)
for i in range(tagNum):
tags = tags[:-4]
tag = tags[-4:]
if self.__DBHeader['Version'] == 0x620 and self.__DBHeader['FileFormatRevision'] >= 17 and self.__DBHeader['PageSize'] > 8192:
valueSize = unpack('<H', tag[:2])[0] & 0x7fff
valueOffset = unpack('<H',tag[2:])[0] & 0x7fff
tmpData = list(self.data[baseOffset+valueOffset:][:valueSize])
pageFlags = ord(tmpData[1]) >> 5
tmpData[1] = chr(ord(tmpData[1]) & 0x1f)
tagData = "".join(tmpData)
else:
valueSize = unpack('<H', tag[:2])[0] & 0x1fff
pageFlags = (unpack('<H', tag[2:])[0] & 0xe000) >> 13
valueOffset = unpack('<H',tag[2:])[0] & 0x1fff
tagData = self.data[baseOffset+valueOffset:][:valueSize]
#return pageFlags, self.data[baseOffset+valueOffset:][:valueSize]
return pageFlags, tagData
class ESENT_DB:
def __init__(self, fileName, pageSize = 8192, isRemote = False):
self.__fileName = fileName
self.__pageSize = pageSize
self.__DB = None
self.__DBHeader = None
self.__totalPages = None
self.__tables = OrderedDict()
self.__currentTable = None
self.__isRemote = isRemote
self.mountDB()
def mountDB(self):
LOG.debug("Mounting DB...")
if self.__isRemote is True:
self.__DB = self.__fileName
self.__DB.open()
else:
self.__DB = open(self.__fileName,"rb")
mainHeader = self.getPage(-1)
self.__DBHeader = ESENT_DB_HEADER(mainHeader)
self.__pageSize = self.__DBHeader['PageSize']
self.__DB.seek(0,2)
self.__totalPages = (self.__DB.tell() / self.__pageSize) -2
LOG.debug("Database Version:0x%x, Revision:0x%x"% (self.__DBHeader['Version'], self.__DBHeader['FileFormatRevision']))
LOG.debug("Page Size: %d" % self.__pageSize)
LOG.debug("Total Pages in file: %d" % self.__totalPages)
self.parseCatalog(CATALOG_PAGE_NUMBER)
def printCatalog(self):
indent = ' '
print "Database version: 0x%x, 0x%x" % (self.__DBHeader['Version'], self.__DBHeader['FileFormatRevision'] )
print "Page size: %d " % self.__pageSize
print "Number of pages: %d" % self.__totalPages
print
print "Catalog for %s" % self.__fileName
for table in self.__tables.keys():
print "[%s]" % table
print "%sColumns " % indent
for column in self.__tables[table]['Columns'].keys():
record = self.__tables[table]['Columns'][column]['Record']
print "%s%-5d%-30s%s" % (indent*2, record['Identifier'], column,ColumnTypeToName[record['ColumnType']])
print "%sIndexes"% indent
for index in self.__tables[table]['Indexes'].keys():
print "%s%s" % (indent*2, index)
print ""
def __addItem(self, entry):
dataDefinitionHeader = ESENT_DATA_DEFINITION_HEADER(entry['EntryData'])
catalogEntry = ESENT_CATALOG_DATA_DEFINITION_ENTRY(entry['EntryData'][len(dataDefinitionHeader):])
itemName = self.__parseItemName(entry)
if catalogEntry['Type'] == CATALOG_TYPE_TABLE:
self.__tables[itemName] = OrderedDict()
self.__tables[itemName]['TableEntry'] = entry
self.__tables[itemName]['Columns'] = OrderedDict()
self.__tables[itemName]['Indexes'] = OrderedDict()
self.__tables[itemName]['LongValues'] = OrderedDict()
self.__currentTable = itemName
elif catalogEntry['Type'] == CATALOG_TYPE_COLUMN:
self.__tables[self.__currentTable]['Columns'][itemName] = entry
self.__tables[self.__currentTable]['Columns'][itemName]['Header'] = dataDefinitionHeader
self.__tables[self.__currentTable]['Columns'][itemName]['Record'] = catalogEntry
elif catalogEntry['Type'] == CATALOG_TYPE_INDEX:
self.__tables[self.__currentTable]['Indexes'][itemName] = entry
elif catalogEntry['Type'] == CATALOG_TYPE_LONG_VALUE:
self.__addLongValue(entry)
else:
raise Exception('Unknown type 0x%x' % catalogEntry['Type'])
def __parseItemName(self,entry):
dataDefinitionHeader = ESENT_DATA_DEFINITION_HEADER(entry['EntryData'])
if dataDefinitionHeader['LastVariableDataType'] > 127:
numEntries = dataDefinitionHeader['LastVariableDataType'] - 127
else:
numEntries = dataDefinitionHeader['LastVariableDataType']
itemLen = unpack('<H',entry['EntryData'][dataDefinitionHeader['VariableSizeOffset']:][:2])[0]
itemName = entry['EntryData'][dataDefinitionHeader['VariableSizeOffset']:][2*numEntries:][:itemLen]
return itemName
def __addLongValue(self, entry):
dataDefinitionHeader = ESENT_DATA_DEFINITION_HEADER(entry['EntryData'])
catalogEntry = ESENT_CATALOG_DATA_DEFINITION_ENTRY(entry['EntryData'][len(dataDefinitionHeader):])
lvLen = unpack('<H',entry['EntryData'][dataDefinitionHeader['VariableSizeOffset']:][:2])[0]
lvName = entry['EntryData'][dataDefinitionHeader['VariableSizeOffset']:][7:][:lvLen]
self.__tables[self.__currentTable]['LongValues'][lvName] = entry
def parsePage(self, page):
baseOffset = len(page.record)
# Print the leaf/branch tags
for tagNum in range(1,page.record['FirstAvailablePageTag']):
flags, data = page.getTag(tagNum)
if page.record['PageFlags'] & FLAGS_LEAF > 0:
# Leaf page
if page.record['PageFlags'] & FLAGS_SPACE_TREE > 0:
pass
elif page.record['PageFlags'] & FLAGS_INDEX > 0:
pass
elif page.record['PageFlags'] & FLAGS_LONG_VALUE > 0:
pass
else:
# Table Value
leafEntry = ESENT_LEAF_ENTRY(flags, data)
self.__addItem(leafEntry)
def parseCatalog(self, pageNum):
# Parse all the pages starting at pageNum and commit table data
page = self.getPage(pageNum)
self.parsePage(page)
for i in range(1, page.record['FirstAvailablePageTag']):
flags, data = page.getTag(i)
if page.record['PageFlags'] & FLAGS_LEAF == 0:
# Branch page
branchEntry = ESENT_BRANCH_ENTRY(flags, data)
self.parseCatalog(branchEntry['ChildPageNumber'])
def readHeader(self):
LOG.debug("Reading Boot Sector for %s" % self.__volumeName)
def getPage(self, pageNum):
LOG.debug("Trying to fetch page %d (0x%x)" % (pageNum, (pageNum+1)*self.__pageSize))
self.__DB.seek((pageNum+1)*self.__pageSize, 0)
data = self.__DB.read(self.__pageSize)
while len(data) < self.__pageSize:
remaining = self.__pageSize - len(data)
data += self.__DB.read(remaining)
# Special case for the first page
if pageNum <= 0:
return data
else:
return ESENT_PAGE(self.__DBHeader, data)
def close(self):
self.__DB.close()
def openTable(self, tableName):
# Returns a cursor for later use
if tableName in self.__tables:
entry = self.__tables[tableName]['TableEntry']
dataDefinitionHeader = ESENT_DATA_DEFINITION_HEADER(entry['EntryData'])
catalogEntry = ESENT_CATALOG_DATA_DEFINITION_ENTRY(entry['EntryData'][len(dataDefinitionHeader):])
# Let's position the cursor at the leaf levels for fast reading
pageNum = catalogEntry['FatherDataPageNumber']
done = False
while done is False:
page = self.getPage(pageNum)
if page.record['FirstAvailablePageTag'] <= 1:
# There are no records
done = True
for i in range(1, page.record['FirstAvailablePageTag']):
flags, data = page.getTag(i)
if page.record['PageFlags'] & FLAGS_LEAF == 0:
# Branch page, move on to the next page
branchEntry = ESENT_BRANCH_ENTRY(flags, data)
pageNum = branchEntry['ChildPageNumber']
break
else:
done = True
break
cursor = TABLE_CURSOR
cursor['TableData'] = self.__tables[tableName]
cursor['FatherDataPageNumber'] = catalogEntry['FatherDataPageNumber']
cursor['CurrentPageData'] = page
cursor['CurrentTag'] = 0
return cursor
else:
return None
def __getNextTag(self, cursor):
page = cursor['CurrentPageData']
if cursor['CurrentTag'] >= page.record['FirstAvailablePageTag']:
# No more data in this page, we're done
return None
flags, data = page.getTag(cursor['CurrentTag'])
if page.record['PageFlags'] & FLAGS_LEAF > 0:
# Leaf page
if page.record['PageFlags'] & FLAGS_SPACE_TREE > 0:
raise Exception('FLAGS_SPACE_TREE > 0')
elif page.record['PageFlags'] & FLAGS_INDEX > 0:
raise Exception('FLAGS_INDEX > 0')
elif page.record['PageFlags'] & FLAGS_LONG_VALUE > 0:
raise Exception('FLAGS_LONG_VALUE > 0')
else:
# Table Value
leafEntry = ESENT_LEAF_ENTRY(flags, data)
return leafEntry
return None
def getNextRow(self, cursor):
cursor['CurrentTag'] += 1
tag = self.__getNextTag(cursor)
#hexdump(tag)
if tag is None:
# No more tags in this page, search for the next one on the right
page = cursor['CurrentPageData']
if page.record['NextPageNumber'] == 0:
                # No more pages, we are done
return None
else:
cursor['CurrentPageData'] = self.getPage(page.record['NextPageNumber'])
cursor['CurrentTag'] = 0
return self.getNextRow(cursor)
else:
return self.__tagToRecord(cursor, tag['EntryData'])
def __tagToRecord(self, cursor, tag):
# So my brain doesn't forget, the data record is composed of:
# Header
# Fixed Size Data (ID < 127)
        #       These are the easiest to parse. Their size is fixed in the record; you can
        #       get it from the Column Record, field SpaceUsage.
# Variable Size Data (127 < ID < 255)
# At VariableSizeOffset you get an array of two bytes per variable entry, pointing
# to the length of the value. Values start at:
# numEntries = LastVariableDataType - 127
# VariableSizeOffset + numEntries * 2 (bytes)
# Tagged Data ( > 255 )
# After the Variable Size Value, there's more data for the tagged values.
# Right at the beginning there's another array (taggedItems), pointing to the
# values, size.
#
        # The interesting thing about these DB records is that not all the columns need to be
        # present, which saves space. That's why I go over all the columns, and if I find data
        # (of any type), I assign it. If not, the column is left empty.
#
# There are a lot of caveats in the code, so take your time to explore it.
#
# ToDo: Better complete this description
#
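        # Worked example of the offset arithmetic above (hypothetical numbers, added for
        # illustration): if LastVariableDataType were 130 and VariableSizeOffset were 40,
        # there would be 130 - 127 = 3 variable-size entries, their two-byte size array
        # would span offsets 40..45, and the first variable-size value would start at
        # offset 40 + 3 * 2 = 46 within the tag.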
record = OrderedDict()
taggedItems = OrderedDict()
taggedItemsParsed = False
dataDefinitionHeader = ESENT_DATA_DEFINITION_HEADER(tag)
#dataDefinitionHeader.dump()
variableDataBytesProcessed = (dataDefinitionHeader['LastVariableDataType'] - 127) * 2
prevItemLen = 0
tagLen = len(tag)
fixedSizeOffset = len(dataDefinitionHeader)
variableSizeOffset = dataDefinitionHeader['VariableSizeOffset']
columns = cursor['TableData']['Columns']
for column in columns.keys():
columnRecord = columns[column]['Record']
#columnRecord.dump()
if columnRecord['Identifier'] <= dataDefinitionHeader['LastFixedSize']:
# Fixed Size column data type, still available data
record[column] = tag[fixedSizeOffset:][:columnRecord['SpaceUsage']]
fixedSizeOffset += columnRecord['SpaceUsage']
elif 127 < columnRecord['Identifier'] <= dataDefinitionHeader['LastVariableDataType']:
# Variable data type
index = columnRecord['Identifier'] - 127 - 1
itemLen = unpack('<H',tag[variableSizeOffset+index*2:][:2])[0]
if itemLen & 0x8000:
# Empty item
itemLen = prevItemLen
record[column] = None
else:
itemValue = tag[variableSizeOffset+variableDataBytesProcessed:][:itemLen-prevItemLen]
record[column] = itemValue
#if columnRecord['Identifier'] <= dataDefinitionHeader['LastVariableDataType']:
variableDataBytesProcessed +=itemLen-prevItemLen
prevItemLen = itemLen
elif columnRecord['Identifier'] > 255:
# Have we parsed the tagged items already?
if taggedItemsParsed is False and (variableDataBytesProcessed+variableSizeOffset) < tagLen:
index = variableDataBytesProcessed+variableSizeOffset
#hexdump(tag[index:])
endOfVS = self.__pageSize
firstOffsetTag = (unpack('<H', tag[index+2:][:2])[0] & 0x3fff) + variableDataBytesProcessed+variableSizeOffset
while True:
taggedIdentifier = unpack('<H', tag[index:][:2])[0]
index += 2
taggedOffset = (unpack('<H', tag[index:][:2])[0] & 0x3fff)
# As of Windows 7 and later ( version 0x620 revision 0x11) the
# tagged data type flags are always present
if self.__DBHeader['Version'] == 0x620 and self.__DBHeader['FileFormatRevision'] >= 17 and self.__DBHeader['PageSize'] > 8192:
flagsPresent = 1
else:
flagsPresent = (unpack('<H', tag[index:][:2])[0] & 0x4000)
index += 2
if taggedOffset < endOfVS:
endOfVS = taggedOffset
taggedItems[taggedIdentifier] = (taggedOffset, tagLen, flagsPresent)
#print "ID: %d, Offset:%d, firstOffset:%d, index:%d, flag: 0x%x" % (taggedIdentifier, taggedOffset,firstOffsetTag,index, flagsPresent)
if index >= firstOffsetTag:
# We reached the end of the variable size array
break
# Calculate length of variable items
# Ugly.. should be redone
prevKey = taggedItems.keys()[0]
for i in range(1,len(taggedItems)):
offset0, length, flags = taggedItems[prevKey]
offset, _, _ = taggedItems.items()[i][1]
taggedItems[prevKey] = (offset0, offset-offset0, flags)
#print "ID: %d, Offset: %d, Len: %d, flags: %d" % (prevKey, offset0, offset-offset0, flags)
prevKey = taggedItems.keys()[i]
taggedItemsParsed = True
# Tagged data type
if taggedItems.has_key(columnRecord['Identifier']):
offsetItem = variableDataBytesProcessed + variableSizeOffset + taggedItems[columnRecord['Identifier']][0]
itemSize = taggedItems[columnRecord['Identifier']][1]
                    # If the item has flags, we should skip them
if taggedItems[columnRecord['Identifier']][2] > 0:
itemFlag = ord(tag[offsetItem:offsetItem+1])
offsetItem += 1
itemSize -= 1
else:
itemFlag = 0
#print "ID: %d, itemFlag: 0x%x" %( columnRecord['Identifier'], itemFlag)
if itemFlag & (TAGGED_DATA_TYPE_COMPRESSED ):
LOG.error('Unsupported tag column: %s, flag:0x%x' % (column, itemFlag))
record[column] = None
elif itemFlag & TAGGED_DATA_TYPE_MULTI_VALUE:
# ToDo: Parse multi-values properly
LOG.debug('Multivalue detected in column %s, returning raw results' % (column))
record[column] = (hexlify(tag[offsetItem:][:itemSize]),)
else:
record[column] = tag[offsetItem:][:itemSize]
else:
record[column] = None
else:
record[column] = None
# If we understand the data type, we unpack it and cast it accordingly
# otherwise, we just encode it in hex
if type(record[column]) is tuple:
# A multi value data, we won't decode it, just leave it this way
record[column] = record[column][0]
elif columnRecord['ColumnType'] == JET_coltypText or columnRecord['ColumnType'] == JET_coltypLongText:
# Let's handle strings
if record[column] is not None:
if columnRecord['CodePage'] not in StringCodePages:
raise Exception('Unknown codepage 0x%x'% columnRecord['CodePage'])
stringDecoder = StringCodePages[columnRecord['CodePage']]
try:
record[column] = record[column].decode(stringDecoder)
except Exception as e:
LOG.debug('Record[column] %r' % repr(record[column]))
raise
else:
unpackData = ColumnTypeSize[columnRecord['ColumnType']]
if record[column] is not None:
if unpackData is None:
record[column] = hexlify(record[column])
else:
unpackStr = unpackData[1]
unpackSize = unpackData[0]
record[column] = unpack(unpackStr, record[column])[0]
return record
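# Minimal usage sketch (illustrative only; `db` is assumed to be an instance of the
# enclosing ESE database class opened on an .edb/.dat file):
#
#   cursor = db.openTable('MSysObjects')
#   if cursor is not None:
#       row = db.getNextRow(cursor)
#       while row is not None:
#           # row is an OrderedDict mapping column names to decoded values
#           row = db.getNextRow(cursor)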
| 37.685891
| 158
| 0.561255
|
e890d8079ce2c3972fbd258938b99638ca4c0f47
| 2,216
|
py
|
Python
|
x.py
|
rlowrance/re-local-linear
|
ef1400102d4410ec4a2c52e62c32a433efc5ff01
|
[
"MIT"
] | null | null | null |
x.py
|
rlowrance/re-local-linear
|
ef1400102d4410ec4a2c52e62c32a433efc5ff01
|
[
"MIT"
] | null | null | null |
x.py
|
rlowrance/re-local-linear
|
ef1400102d4410ec4a2c52e62c32a433efc5ff01
|
[
"MIT"
] | null | null | null |
'''examples for numpy and pandas'''
import numpy as np
import pandas as pd
# 1D numpy arrays
v = np.array([1, 2, 3], dtype=np.float64) # also: np.int64
v.shape # tuple of array dimensions
v.ndim # number of dimensions
v.size # number of elements
for elem in np.nditer(v): # read-only iteration
pass
for elem in np.nditer(v, op_flags=['readwrite']):  # mutating iteration (op_flags takes a list)
    elem[...] = abs(elem)  # ellipsis is required to write back into the array
for elems in np.nditer(v, flags=['external_loop']):  # iterate in chunks
    print(elems)  # elems is a 1D vector
# basic indexing (using a slice or integer) ALWAYS generates a view
v[0:v.size:1] # start:stop:step
v[10]
v[...]
# advanced indexing (using an ndarray) ALWAYS generates a copy
# advanced indexes are always broadcast
v[np.array([1, 2])] # return new 1D with 2 elements
v[~np.isnan(v)] # return new 1D with v.size elements
# pd.Index
# data: array-like 1D of hashable items
# dtype: np.dtype
# copy: bool default ?
# name: obj, documentation
# tupleize_cols: bool, default True; if True, attempt to create MultiIndex
i = pd.Index(data, dtype, copy, name, tupleize_cols)
i.shape
i.ndim
i.size
i.values # underlying data as ndarray
# generally don't apply methods directly to Index objects
# pd.Series
# data: array-like, dict, scalar
# index: array-like, index
# dtype: numpy.dtype
# copy: default False (True forces a copy of data)
s = pd.Series(data, index, dtype, name, copy)
s.values # return ndarray
s.shape
s.ndim
s.size
# indexing and iteration
s.get(key[,default]) # key: label
s.loc[key] # key: single label, list or array of labels, slice with labels, bool array
s.iloc[key] # key: int, list or array of int, slice with ints, boolean array
s.iteritems() # iterate over (index, value) pairs
# pd.DataFrame:
# data: numpy ndarray, dict, DataFrame
# index: index or array-like
# columns: index or array-like
# dtype: nparray dtype
# copy: boolean default False
df = pd.DataFrame(data, index, columns, dtype, copy)
df.shape
df.ndim
df.size
df.as_matrix([columns]) # convert to numpy array
df.loc[key] # key: single label, list or array of labels, slice of labels, bool array
df.iloc[key] # key: int, list or array of int, slice of int, bool array
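# Short illustration of label- vs. position-based selection; the frame below is
# made up for demonstration and is not part of the notes above.
df2 = pd.DataFrame({'a': [1, 2, 3], 'b': [4.0, 5.0, 6.0]}, index=['x', 'y', 'z'])
df2.loc['x':'y', ['a']]   # label-based: rows 'x' through 'y' inclusive, column 'a'
df2.iloc[0:2, 0:1]        # position-based: rows 0 and 1, column 0 (end-exclusive)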
| 32.588235
| 89
| 0.71074
|
86fe553fa2d5f86b0d249bee1645c0071acef340
| 2,263
|
py
|
Python
|
GetDCCACoefficients.py
|
HenryDuquemin/Detrended-cross-correlation-analysis-DCCA-
|
00e75a04fceb0ae7154327a6ddd547b5f8eb69f1
|
[
"MIT"
] | 5
|
2020-06-25T11:24:02.000Z
|
2021-08-20T03:46:59.000Z
|
GetDCCACoefficients.py
|
HenryDuquemin/Detrended-cross-correlation-analysis-DCCA-
|
00e75a04fceb0ae7154327a6ddd547b5f8eb69f1
|
[
"MIT"
] | null | null | null |
GetDCCACoefficients.py
|
HenryDuquemin/Detrended-cross-correlation-analysis-DCCA-
|
00e75a04fceb0ae7154327a6ddd547b5f8eb69f1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 21 16:19:06 2019
@author: duqueh
"""
"""DCCA function modified from https://gist.github.com/jaimeide/a9cba18192ee904307298bd110c28b14"""
import numpy as np
from numpy.matlib import repmat
import pandas as pd
def sliding_window(xx,k):
# Function to generate boxes given dataset(xx) and box size (k)
import numpy as np
# generate indexes. O(1) way of doing it :)
idx = np.arange(k)[None, :]+np.arange(len(xx)-k+1)[:, None]
return xx[idx],idx
def GetDCCACoefficients (series1, series2, minimumTimeScale, maximumTimeScale):
# Plot
cdata = np.array([series1,series2]).T
# plt.plot(cdata)
# plt.title('Sample time series')
# plt.legend(['$x_1$','$x_2$'])
# plt.show()
# plt.clf()
# Define
nsamples,nvars = cdata.shape
    # Cumulative sum after removing mean
cdata = cdata-cdata.mean(axis=0)
xx = np.cumsum(cdata,axis=0)
kList = []
DCCAList = []
for k in range(minimumTimeScale, maximumTimeScale):
F2_dfa_x = np.zeros(nvars)
allxdif = []
for ivar in range(nvars): # do for all vars
xx_swin , idx = sliding_window(xx[:,ivar],k)
nwin = xx_swin.shape[0]
b1, b0 = np.polyfit(np.arange(k),xx_swin.T,deg=1) # linear fit
#x_hat = [[b1[i]*j+b0[i] for j in range(k)] for i in range(nwin)] # slow version
x_hatx = repmat(b1,k,1).T*repmat(range(k),nwin,1) + repmat(b0,k,1).T
# Store differences to the linear fit
xdif = xx_swin-x_hatx
allxdif.append(xdif)
# Eq.4
F2_dfa_x[ivar] = (xdif**2).mean()
# Get the DCCA matrix
dcca = np.zeros([nvars,nvars])
for i in range(nvars): # do for all vars
for j in range(nvars): # do for all vars
# Eq.5 and 6
F2_dcca = (allxdif[i]*allxdif[j]).mean()
# Eq.1: DCCA
dcca[i,j] = F2_dcca / np.sqrt(F2_dfa_x[i] * F2_dfa_x[j])
kList.append(k)
print(kList)
DCCAList.append(dcca[0,1])
print(dict(zip(kList, DCCAList)))
return dict(zip(kList, DCCAList))
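if __name__ == "__main__":
    # Example run on synthetic, partially correlated series (a sketch added for
    # illustration; the original script only defines the functions above).
    rng = np.random.default_rng(0)
    s1 = rng.standard_normal(500)
    s2 = 0.5 * s1 + 0.5 * rng.standard_normal(500)
    coefficients = GetDCCACoefficients(s1, s2, minimumTimeScale=4, maximumTimeScale=10)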
| 29.012821
| 100
| 0.56076
|
8e95cd3caa199b18deeac006f5dfa7925fb9e694
| 57
|
py
|
Python
|
login.py
|
banyiluoyun/luckyman
|
a89e98fa089f4a23d035b0b5aa5f9b746906f4fe
|
[
"MIT"
] | null | null | null |
login.py
|
banyiluoyun/luckyman
|
a89e98fa089f4a23d035b0b5aa5f9b746906f4fe
|
[
"MIT"
] | null | null | null |
login.py
|
banyiluoyun/luckyman
|
a89e98fa089f4a23d035b0b5aa5f9b746906f4fe
|
[
"MIT"
] | null | null | null |
num = 1111
num2 = 22222
num3 = 33230
num4 = "lishuai"
| 5.7
| 14
| 0.631579
|
e2f4bdc60bb1872e063c1829eabca8819652c1ee
| 3,272
|
py
|
Python
|
features/steps/xsens_mt_sample_time_fine_msg.py
|
PolySync/core-python-api
|
a753863eca820954f5b8f7502c38c5a7d8db5a15
|
[
"MIT"
] | null | null | null |
features/steps/xsens_mt_sample_time_fine_msg.py
|
PolySync/core-python-api
|
a753863eca820954f5b8f7502c38c5a7d8db5a15
|
[
"MIT"
] | null | null | null |
features/steps/xsens_mt_sample_time_fine_msg.py
|
PolySync/core-python-api
|
a753863eca820954f5b8f7502c38c5a7d8db5a15
|
[
"MIT"
] | 2
|
2018-07-22T21:07:23.000Z
|
2019-03-09T14:31:09.000Z
|
# WARNING: Auto-generated file. Any changes are subject to being overwritten
# by setup.py build script.
#!/usr/bin/python
import time
from behave import given
from behave import when
from behave import then
from hamcrest import assert_that, equal_to
try:
import polysync.node as ps_node
from polysync.data_model.types import Py_xsens_mt_sample_time_fine_msg
from polysync.data_model._internal.compare import xsens_mt_sample_time_fine_msg_type_convert_testable, Py_xsens_mt_sample_time_fine_msg_initialize_random
from polysync.data_model.message_support.xsens_mt_sample_time_fine_msg import publish, subscribe
except ImportError:
raise ImportError(
'Py_xsens_mt_sample_time_fine_msg module dependencies \
missing for tests, is the project built?')
@given('I have a Py_xsens_mt_sample_time_fine_msg object')
def step_impl(context):
pass
@when('I convert it to its C API equivalent a xsens_mt_sample_time_fine_msg')
def step_impl(context):
pass
@when('I convert the xsens_mt_sample_time_fine_msg back to a Py_xsens_mt_sample_time_fine_msg')
def step_impl(context):
pass
@then('the xsens_mt_sample_time_fine_msg values are equivalent to each Py_xsens_mt_sample_time_fine_msg value')
def step_impl(context):
msg = Py_xsens_mt_sample_time_fine_msg_initialize_random()
result = xsens_mt_sample_time_fine_msg_type_convert_testable(msg)
assert not result, result
@given('a xsens_mt_sample_time_fine_msg.publish function exists')
def step_impl(context):
assert callable(publish)
@when('I try to publish something that is not of type Py_xsens_mt_sample_time_fine_msg')
def step_impl(context):
bad_obj = "not the right type of object!"
context.exception = None
try:
publish(bad_obj)
except Exception as e:
context.exception = e
@then('a {exeption} indicates the type was not Py_xsens_mt_sample_time_fine_msg')
def step_impl(context, exeption):
assert isinstance(context.exception, eval(exeption)), \
"Invalid exception %s - expected %s" \
% (type(context.exception).__name__, exeption)
GLOBAL_TIMESTAMP = None
GLOBAL_GUID = None
def Py_xsens_mt_sample_time_fine_msg_handler(msg):
if msg.header.src_guid == GLOBAL_GUID:
global GLOBAL_TIMESTAMP
GLOBAL_TIMESTAMP = msg.header.timestamp
@given(u'I have a licensed PsNode for publishing Py_xsens_mt_sample_time_fine_msg')
def step_impl(context):
assert context.node_ref
global GLOBAL_GUID
GLOBAL_GUID = context.my_guid
@given(u'I have a Py_xsens_mt_sample_time_fine_msg')
def step_impl(context):
context.msg = Py_xsens_mt_sample_time_fine_msg()
context.msg.header.timestamp = 0xFFFF
@given(u'I have a handler for Py_xsens_mt_sample_time_fine_msg subscription')
def step_impl(context):
assert Py_xsens_mt_sample_time_fine_msg_handler
subscribe(handler=Py_xsens_mt_sample_time_fine_msg_handler)
@when(u'I publish my Py_xsens_mt_sample_time_fine_msg')
def step_impl(context):
publish(context.msg)
@then(u'I receive the corresponding Py_xsens_mt_sample_time_fine_msg in my handler')
def step_impl(context):
global GLOBAL_TIMESTAMP
while not GLOBAL_TIMESTAMP:
time.sleep(1)
assert_that(context.msg.header.timestamp, equal_to(GLOBAL_TIMESTAMP))
| 34.442105
| 157
| 0.789425
|
f779b516915e7cb1bb47a9be61e8da4c90bd2d7b
| 1,620
|
py
|
Python
|
decrypt_oracle/test/unit/key_providers/test_u_counting.py
|
seebees/aws-encryption-sdk-python
|
7498bafab9c231abe0694f856f299af59d0e7826
|
[
"Apache-2.0"
] | null | null | null |
decrypt_oracle/test/unit/key_providers/test_u_counting.py
|
seebees/aws-encryption-sdk-python
|
7498bafab9c231abe0694f856f299af59d0e7826
|
[
"Apache-2.0"
] | null | null | null |
decrypt_oracle/test/unit/key_providers/test_u_counting.py
|
seebees/aws-encryption-sdk-python
|
7498bafab9c231abe0694f856f299af59d0e7826
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Unit test for ``aws_encryption_sdk_decrypt_oracle.key_providers.counting``."""
import aws_encryption_sdk
import pytest
from aws_encryption_sdk_decrypt_oracle.key_providers.counting import CountingMasterKey
from ...integration.integration_test_utils import filtered_test_vectors
pytestmark = [pytest.mark.unit, pytest.mark.local]
@pytest.mark.parametrize("vector", filtered_test_vectors(lambda x: x.key_type == "test_counting"))
def test_counting_master_key_decrypt_vectors(vector):
master_key = CountingMasterKey()
plaintext, _header = aws_encryption_sdk.decrypt(source=vector.ciphertext, key_provider=master_key)
assert plaintext == vector.plaintext
def test_counting_master_key_cycle():
plaintext = b"some super secret plaintext"
master_key = CountingMasterKey()
ciphertext, _header = aws_encryption_sdk.encrypt(source=plaintext, key_provider=master_key)
decrypted, _header = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=master_key)
assert plaintext != ciphertext
assert plaintext == decrypted
| 39.512195
| 102
| 0.789506
|
5b7c52c118974d7930ff5eacf389768a16a514e5
| 16,832
|
py
|
Python
|
veloproj/model.py
|
qiaochen/VeloAE
|
908fa01f36e3fc1c0c619ce7a68d536d0fb5cf9b
|
[
"MIT"
] | 13
|
2021-12-08T19:09:40.000Z
|
2022-03-03T06:45:21.000Z
|
veloproj/model.py
|
qiaochen/VeloAE
|
908fa01f36e3fc1c0c619ce7a68d536d0fb5cf9b
|
[
"MIT"
] | 2
|
2022-02-20T10:57:36.000Z
|
2022-03-30T07:14:02.000Z
|
veloproj/model.py
|
qiaochen/VeloAE
|
908fa01f36e3fc1c0c619ce7a68d536d0fb5cf9b
|
[
"MIT"
] | 3
|
2021-11-05T13:23:36.000Z
|
2022-01-10T12:00:20.000Z
|
# -*- coding: utf-8 -*-
"""VeloAutoencoder module.
This module contains the veloAutoencoder and its ablation configurations.
"""
import torch
import numpy as np
from torch import nn
from torch_geometric.nn import GCNConv, Sequential, GATv2Conv, GATConv
from torch.nn import functional as F
CANO_NAME_GAT='GAT'
CANO_NAME_GCN='GCN'
class Encoder(nn.Module):
"""Encoder
"""
def __init__(self,
in_dim,
z_dim,
edge_index,
edge_weight,
h_dim=256,
gnn_layer=CANO_NAME_GAT,
):
"""
Args:
in_dim (int): dimensionality of the input
z_dim (int): dimensionality of the low-dimensional space
edge_index (LongTensor): shape (2, ?), edge indices
edge_weight (FloatTensor): shape (?), edge weights.
h_dim (int): dimensionality of intermediate layers in MLP
gnn_layer (str): GNN layers in {'GAT', 'GCN'}
"""
super(Encoder, self).__init__()
self.edge_index = edge_index
self.edge_weight = edge_weight
self.fn = nn.Sequential(
nn.Linear(in_dim, h_dim, bias=True),
nn.GELU(),
nn.Linear(h_dim, h_dim, bias=True),
nn.GELU(),
)
if not gnn_layer.upper() in {CANO_NAME_GCN, CANO_NAME_GAT}:
raise Exception(f"GNN layer {gnn_layer} is not supported!")
if gnn_layer.upper() == CANO_NAME_GCN:
self.gc = Sequential( "x, edge_index, edge_weight",
[(GCNConv(h_dim, z_dim, cached=False, add_self_loops=True), "x, edge_index, edge_weight -> x"),
nn.GELU(),
(GCNConv(z_dim, z_dim, cached=False, add_self_loops=True), "x, edge_index, edge_weight -> x"),
nn.GELU(),
nn.Linear(z_dim, z_dim)]
)
# GATConv
if gnn_layer.upper() == CANO_NAME_GAT:
self.gc = Sequential( "x, edge_index, edge_weight",
[ (GATConv(h_dim, z_dim, add_self_loops=True), "x, edge_index -> x"),
nn.GELU(),
(GATConv(z_dim, z_dim, add_self_loops=True), "x, edge_index -> x"),
nn.GELU(),
nn.Linear(z_dim, z_dim)]
)
del self.edge_weight
self.edge_weight = None
self.gen = nn.Sequential(
nn.Linear(z_dim, z_dim, bias=True)
)
def forward(self, x, return_raw=False):
z = self.fn(x)
z = self.gc(z, self.edge_index, self.edge_weight)
if return_raw:
return self.gen(z), z
return self.gen(z)
class Decoder(nn.Module):
"""Decoder
"""
def __init__(self,
z_col_dim,
G_rep=None,
n_genes=None,
g_rep_dim=None,
k_dim=32,
h_dim=256,
gb_tau=1.0,
device=None
):
"""
Args:
z_col_dim (int): size of column vectors in Z.
            G_rep (np.ndarray): representation for genes, e.g. PCA over gene profiles.
n_genes (int): number of genes.
g_rep_dim (int): dimensionality of gene representations.
# Either G_rep or (n_genes, g_rep_dim) should be provided.
# priority is given to G_rep.
k_dim (int): dimensionality of keys for attention computation.
h_dim (int): dimensionality of intermediate layers of MLP.
gb_tau (float): temperature param of gumbel softmax
device (torch.device): torch device object.
"""
super(Decoder, self).__init__()
self.device = device
if not G_rep is None:
g_rep_dim = G_rep.shape[-1]
self.key_Z = nn.Sequential(
nn.Linear(z_col_dim, h_dim),
nn.GELU(),
nn.Linear(h_dim, k_dim)
)
self.key_G = nn.Sequential(
nn.Linear(g_rep_dim, k_dim),
nn.GELU(),
nn.Linear(k_dim, k_dim)
)
self.G_rep = self._init_G_emb(n_genes, g_rep_dim) if G_rep is None else torch.FloatTensor(G_rep).to(device)
self.attn = Attention(gb_tau)
def _init_G_emb(self, n_genes, rep_dim):
embedder = torch.empty(n_genes, rep_dim)
nn.init.xavier_normal_(embedder)
return nn.Parameter(embedder).to(self.device)
def forward(self, raw_Z, gen_Z, return_attn=False):
Z = raw_Z.T
key = self.key_Z(Z)
query = self.key_G(self.G_rep)
X_hat_means, p_attn = self.attn(query, key, gen_Z.T, device=self.device)
if return_attn:
return X_hat_means.T, p_attn.T
return X_hat_means.T
class Attention(nn.Module):
"""Compute 'Scaled Dot Product Attention'.
"""
def __init__(self, gb_tau=1.0):
super(Attention, self).__init__()
self.gb_tau = gb_tau
def forward(self, query, key, value, device=None):
"""
Args:
query (torch.FloatTensor): query vectors identifying the gene profiles to be reconstructed.
key (torch.FloatTensor): key vectors identifying the latent profiles to be attended to.
value (torch.FloatTensor): Z.
device (torch.device): torch device object.
Returns:
FloatTensor: shape (n_genes, n_cells), reconstructed input
FloatTensor: shape (n_genes, z_dim), gene by attention distribution matrix
"""
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / np.sqrt(d_k)
p_attn = F.gumbel_softmax(scores, tau=self.gb_tau, hard=False, dim=-1)
return torch.matmul(p_attn, value), p_attn
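# Shape sketch for Attention (illustrative sizes, not taken from the module): with
# query of shape (n_genes=5, k_dim=8), key of shape (z_dim=7, k_dim=8) and value of
# shape (z_dim=7, n_cells=3), forward() returns a (5, 3) reconstruction together
# with a (5, 7) gene-by-latent-dimension attention matrix.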
class AblationEncoder(nn.Module):
"""Encoder for Ablation Study
"""
def __init__(self,
in_dim,
z_dim,
h_dim=256,
batchnorm=False
):
super(AblationEncoder, self).__init__()
if batchnorm:
self.fn = nn.Sequential(
nn.Linear(in_dim, h_dim, bias=True),
nn.LayerNorm(h_dim),
nn.GELU(),
nn.Linear(h_dim, z_dim, bias=True),
nn.LayerNorm(z_dim),
nn.GELU(),
)
else:
self.fn = nn.Sequential(
nn.Linear(in_dim, h_dim, bias=True),
nn.GELU(),
nn.Linear(h_dim, z_dim, bias=True),
nn.GELU(),
)
def forward(self, x):
z = self.fn(x)
return z
class AblationDecoder(nn.Module):
"""Decoder for Ablation Study.
"""
def __init__(self,
z_dim,
out_dim,
h_dim=256
):
super(AblationDecoder, self).__init__()
"""
"""
self.fc = nn.Sequential(
nn.Linear(z_dim, out_dim),
)
def forward(self, Z):
return self.fc(Z)
class AblationCohAgg(nn.Module):
"""Ablation with only Cohort Aggregation.
"""
def __init__(self,
edge_index,
edge_weight,
in_dim,
z_dim,
h_dim=256,
gnn_layer=CANO_NAME_GAT,
device=None
):
"""
Args:
edge_index (LongTensor): shape (2, ?), edge indices
edge_weight (FloatTensor): shape (?), edge weights.
in_dim (int): dimensionality of the input
z_dim (int): dimensionality of the low-dimensional space
h_dim (int): dimensionality of intermediate layers in MLP
gnn_layer (str): GNN layers in {'GAT', 'GCN'}
device (torch.device): torch device object.
"""
super(AblationCohAgg, self).__init__()
self.device = device
self.encoder = Encoder(in_dim, z_dim, edge_index, edge_weight, h_dim=h_dim, gnn_layer=gnn_layer)
self.decoder = AblationDecoder(z_dim, in_dim, h_dim)
self.criterion = nn.MSELoss(reduction='mean')
def forward(self, X):
z = self.encoder(X)
X_hat = self.decoder(z)
return self.criterion(X_hat, X)
class AblationAttComb(nn.Module):
"""Ablation with only Attentive Combination.
"""
def __init__(self,
z_dim,
n_genes,
n_cells,
h_dim=256,
k_dim=100,
G_rep=None,
g_rep_dim=None,
gb_tau=1.0,
batchnorm=False,
device=None
):
"""
Args:
in_dim (int): dimensionality of the input
z_dim (int): dimensionality of the low-dimensional space
n_genes (int): number of genes
n_cells (int): number of cells
h_dim (int): dimensionality of intermediate layers in MLP
k_dim (int): dimensionality of keys for attention computation
            G_rep (np.ndarray): representation for genes, e.g. PCA over gene profiles.
g_rep_dim (int): dimensionality of gene representations.
# Either G_rep or (n_genes, g_rep_dim) should be provided.
# priority is given to G_rep.
gb_tau (float): temperature parameter for gumbel softmax,
device (torch.device): torch device object.
"""
super(AblationAttComb, self).__init__()
self.device = device
self.encoder = AblationEncoder(n_genes, z_dim, h_dim=h_dim, batchnorm=batchnorm)
self.trans_z = nn.Linear(z_dim, z_dim, bias=True)
self.decoder = Decoder(n_cells, G_rep, n_genes, g_rep_dim, k_dim, h_dim, gb_tau, device)
self.criterion = nn.MSELoss(reduction='mean')
def forward(self, X):
z = self.encoder(X)
gen_z = self.trans_z(z)
X_hat = self.decoder(z, gen_z, False)
return self.criterion(X_hat, X)
class VeloAutoencoder(nn.Module):
"""Proposed VeloAutoencoder with both mechanisms.
"""
def __init__(self,
edge_index,
edge_weight,
in_dim,
z_dim,
n_genes,
n_cells,
h_dim=256,
k_dim=32,
G_rep=None,
g_rep_dim=None,
gb_tau=1.0,
gnn_layer=CANO_NAME_GAT,
device=None
):
"""
Args:
edge_index (LongTensor): shape (2, ?), edge indices
edge_weight (FloatTensor): shape (?), edge weights.
in_dim (int): dimensionality of the input
z_dim (int): dimensionality of the low-dimensional space
n_genes (int): number of genes
n_cells (int): number of cells
h_dim (int): dimensionality of intermediate layers in MLP
k_dim (int): dimensionality of keys for attention computation
            G_rep (np.ndarray): representation for genes, e.g. PCA over gene profiles.
g_rep_dim (int): dimensionality of gene representations.
# Either G_rep or (n_genes, g_rep_dim) should be provided.
# priority is given to G_rep.
gb_tau (float): temperature parameter for gumbel softmax,
gnn_layer (str): GNN layers in {'GAT', 'GCN'}
device (torch.device): torch device object.
"""
super(VeloAutoencoder, self).__init__()
self.device = device
self.encoder = Encoder(in_dim, z_dim, edge_index, edge_weight, h_dim=h_dim, gnn_layer=gnn_layer)
self.decoder = Decoder(n_cells, G_rep, n_genes, g_rep_dim, k_dim, h_dim, gb_tau, device)
self.criterion = nn.MSELoss(reduction='mean')
def forward(self, X):
gen_z, raw_z = self.encoder(X, True)
X_hat = self.decoder(raw_z, gen_z, False)
return self.criterion(X_hat, X)
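# Construction sketch for VeloAutoencoder (hypothetical sizes; in practice
# edge_index/edge_weight come from a neighborhood graph over cells):
#
#   n_cells, n_genes, z_dim = 100, 2000, 64
#   edge_index = torch.randint(0, n_cells, (2, 500))
#   edge_weight = torch.ones(edge_index.shape[1])
#   model = VeloAutoencoder(edge_index, edge_weight, in_dim=n_genes, z_dim=z_dim,
#                           n_genes=n_genes, n_cells=n_cells, g_rep_dim=100,
#                           device=torch.device('cpu'))
#   loss = model(torch.randn(n_cells, n_genes))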
def get_mask_pt(x, y=None, perc=[5, 95], device=None):
"""Mask for matrix elements selected for regression
    (adapted from scVelo)
Args:
x (Tensor): Splicing counts projection
y (Tensor): Unsplicing counts projection
perc (int): percentile
device (torch.device): GPU/CPU device object
return:
mask (Tensor): bool matrix
"""
with torch.no_grad():
xy_norm = torch.clone(x).float()
if y is not None:
y = torch.clone(y).float()
xy_norm = xy_norm / torch.clip(torch.max(xy_norm, axis=0).values - torch.min(xy_norm, axis=0).values, 1e-3, None)
xy_norm += y / torch.clip(torch.max(y, axis=0).values - torch.min(y, axis=0).values, 1e-3, None)
if isinstance(perc, int):
mask = xy_norm >= torch.nanquantile(xy_norm, perc/100, dim=0)
else:
lb, ub = torch.nanquantile(xy_norm, torch.Tensor(perc).to(device)/100, dim=0, keepdim=True)
mask = (xy_norm <= lb) | (xy_norm >= ub)
return mask
def prod_sum_obs_pt(A, B):
"""dot product and sum over axis 0 (obs) equivalent to np.sum(A * B, 0)"""
return torch.einsum("ij, ij -> j", A, B) if A.ndim > 1 else (A * B).sum()
def sum_obs_pt(A):
"""summation over axis 0 (obs) equivalent to np.sum(A, 0)"""
return torch.einsum("ij -> j", A) if A.ndim > 1 else torch.sum(A)
def leastsq_pt(x, y, fit_offset=True, constraint_positive_offset=False,
perc=None, device=None, norm=False):
"""Solves least squares X*b=Y for b. (adatpt from scVelo)
Args:
x (Tensor): low-dim splicing projection
y (Tensor): low-dim unsplicing projection
fit_offset (bool): whether fit offset
constraint_positive_offset (bool): whether to make non-negative offset
perc (int or list of int): percentile threshold for points in regression
device (torch.device): GPU/CPU device object
norm (bool): whether or not to normalize x, y
returns:
fitted offset, gamma and MSE losses
"""
"""Solves least squares X*b=Y for b."""
if norm:
x = (x - torch.mean(x, dim=0, keepdim=True)) / torch.std(x, dim=0, keepdim=True)
y = (y - torch.mean(y, dim=0, keepdim=True)) / torch.std(y, dim=0, keepdim=True)
x = torch.clamp(x, -1.96, 1.96)
y = torch.clamp(y, -1.96, 1.96)
if perc is not None:
if not fit_offset:
perc = perc[1]
weights = get_mask_pt(x, y, perc=perc, device=device)
x, y = x * weights, y * weights
else:
weights = None
xx_ = prod_sum_obs_pt(x, x)
xy_ = prod_sum_obs_pt(x, y)
n_obs = x.shape[0] if weights is None else sum_obs_pt(weights)
if fit_offset:
x_ = sum_obs_pt(x) / n_obs
y_ = sum_obs_pt(y) / n_obs
gamma = (xy_ / n_obs - x_ * y_) / (xx_ / n_obs - x_ ** 2)
offset = y_ - gamma * x_
# fix negative offsets:
if constraint_positive_offset:
idx = offset < 0
if gamma.ndim > 0:
gamma = (xy_ / xx_) * idx + gamma * ~idx
else:
gamma = xy_ / xx_
offset = torch.clip(offset, 0, None)
else:
gamma = xy_ / xx_
offset = torch.zeros(x.shape[1]).to(device) if x.ndim > 1 else 0
nans_offset, nans_gamma = torch.isnan(offset), torch.isnan(gamma)
if torch.any(nans_offset) or torch.any(nans_gamma):
version_1_8 = sum([int(this) >= that for this,that in zip(torch.__version__.split('.')[:2], [1, 8])]) == 2
if version_1_8:
offset = torch.nan_to_num(offset)
gamma = torch.nan_to_num(gamma)
else:
offset = torch.where(nans_offset, torch.zeros_like(offset), offset)
gamma = torch.where(nans_gamma, torch.zeros_like(gamma), gamma)
loss = torch.square(y - x * gamma.view(1,-1) - offset)
if perc is not None:
loss = loss * weights
loss = sum_obs_pt(loss) / n_obs
return offset, gamma, loss
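if __name__ == "__main__":
    # Quick sanity check for leastsq_pt on the known line y = 2x + 0.5 (added as an
    # illustration; not part of the original module). offset should come out close
    # to 0.5 and gamma close to 2.0, with a loss near zero.
    x = torch.linspace(0.0, 1.0, steps=100).view(-1, 1)
    y = 2.0 * x + 0.5
    offset, gamma, loss = leastsq_pt(x, y, fit_offset=True)
    print(offset, gamma, loss)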
| 36.197849
| 126
| 0.534042
|
33c9f50c08aa6893a0b704b491b689c60fe7a66b
| 683
|
py
|
Python
|
app/core/migrations/0002_tag.py
|
jcarmonas/recipe-app-api
|
bab09e1931ec48bf3b648ac3ab27a1b4d2f37560
|
[
"MIT"
] | null | null | null |
app/core/migrations/0002_tag.py
|
jcarmonas/recipe-app-api
|
bab09e1931ec48bf3b648ac3ab27a1b4d2f37560
|
[
"MIT"
] | null | null | null |
app/core/migrations/0002_tag.py
|
jcarmonas/recipe-app-api
|
bab09e1931ec48bf3b648ac3ab27a1b4d2f37560
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.15 on 2021-05-08 21:48
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 28.458333
| 118
| 0.616398
|
7d78e9c826958b82e51ec09c9be5e2edea09c25e
| 3,339
|
py
|
Python
|
recovery_data_extraction_5pfp.py
|
pdn4kd/isochoric-expander
|
56bfdc3c7efb3a242ff4ae4c556d70bb7f171e5f
|
[
"MIT"
] | null | null | null |
recovery_data_extraction_5pfp.py
|
pdn4kd/isochoric-expander
|
56bfdc3c7efb3a242ff4ae4c556d70bb7f171e5f
|
[
"MIT"
] | null | null | null |
recovery_data_extraction_5pfp.py
|
pdn4kd/isochoric-expander
|
56bfdc3c7efb3a242ff4ae4c556d70bb7f171e5f
|
[
"MIT"
] | null | null | null |
'''Converting all those output CSV files into a single large CSV in an easier to deal with format'''
import numpy as np
import pandas as pd
import os.path
stars = np.genfromtxt("planets.csv", delimiter=',', names=True, dtype=("U23", "U9", int, float, float, float, float, float, float, float, float, float, float))
star_name = stars[0]["HIPnumber"]
file_postfix = "5p" # note: columns = 2+(planets*parameters). 5p and fp have 4 parameters, while ep and np have 3! ([5,e,f,n]n may have more like 5+(remaining_planets*parameters), and given the way planets are truncated can not be easily matched up)
maxplanets = 10
planetfits_results = open("planetfits_results"+file_postfix+".csv", 'w')
planetfits_results.write("Star,num,PlanetNumber,per_min_"+file_postfix+",per_mid_"+file_postfix+",per_max_"+file_postfix+",per_err_minus_"+file_postfix+",per_err_plus_"+file_postfix+",tc_min_"+file_postfix+",tc_mid_"+file_postfix+",tc_max_"+file_postfix+",tc_err_minus_"+file_postfix+",tc_err_plus_"+file_postfix+",e_min_"+file_postfix+",e_mid_"+file_postfix+",e_max_"+file_postfix+",e_err_minus_"+file_postfix+",e_err_plus_"+file_postfix+",K_min_"+file_postfix+",K_mid_"+file_postfix+",K_max_"+file_postfix+",K_err_minus_"+file_postfix+",K_err_plus\n")
for i in np.arange(1,len(stars)):
if (star_name != stars[i]["HIPnumber"]):
if (stars[i-1]["PlanetNumber"] <= maxplanets):
if (os.path.isfile(star_name+file_postfix+"/"+star_name+file_postfix+"_post_summary.csv")):
planets_pd = pd.read_csv(star_name+file_postfix+"/"+star_name+file_postfix+"_post_summary.csv")
planets_columns = planets_pd.columns
#planetfits_results.write(star_name)
for x in np.arange(1, len(planets_columns)-2, 4):
planetfits_results.write(star_name+","+str(stars[i-1]["PlanetNumber"])+","+str(int((x+3)/4)))
planetfits_results.write(","+str(planets_pd[planets_columns[x]][0])+","+str(planets_pd[planets_columns[x]][1])+","+str(planets_pd[planets_columns[x]][2])) # per
planetfits_results.write(","+str(planets_pd[planets_columns[x]][1]-planets_pd[planets_columns[x]][0])+","+str(planets_pd[planets_columns[x]][2]-planets_pd[planets_columns[x]][1]))
planetfits_results.write(","+str(planets_pd[planets_columns[x+1]][0])+","+str(planets_pd[planets_columns[x+1]][1])+","+str(planets_pd[planets_columns[x+1]][2])) # tc
planetfits_results.write(","+str(planets_pd[planets_columns[x+1]][1]-planets_pd[planets_columns[x+1]][0])+","+str(planets_pd[planets_columns[x+1]][2]-planets_pd[planets_columns[x+1]][1]))
planetfits_results.write(","+str(planets_pd[planets_columns[x+2]][0])+","+str(planets_pd[planets_columns[x+2]][1])+","+str(planets_pd[planets_columns[x+2]][2])) # e
planetfits_results.write(","+str(planets_pd[planets_columns[x+2]][1]-planets_pd[planets_columns[x+2]][0])+","+str(planets_pd[planets_columns[x+2]][2]-planets_pd[planets_columns[x+2]][1]))
planetfits_results.write(","+str(planets_pd[planets_columns[x+3]][0])+","+str(planets_pd[planets_columns[x+3]][1])+","+str(planets_pd[planets_columns[x+3]][2])) # k
planetfits_results.write(","+str(planets_pd[planets_columns[x+3]][1]-planets_pd[planets_columns[x+3]][0])+","+str(planets_pd[planets_columns[x+3]][2]-planets_pd[planets_columns[x+3]][1]))
planetfits_results.write("\n")
star_name = stars[i]["HIPnumber"]
planetfits_results.close()
| 101.181818
| 553
| 0.732555
|
4da21fca75ac54cfc0339435584462722fe9aad7
| 7,896
|
py
|
Python
|
servicecatalog_puppet/config.py
|
fuellbie/aws-service-catalog-puppet
|
8a57d0f83623295214b7b6a498534c80de084c29
|
[
"Apache-2.0"
] | null | null | null |
servicecatalog_puppet/config.py
|
fuellbie/aws-service-catalog-puppet
|
8a57d0f83623295214b7b6a498534c80de084c29
|
[
"Apache-2.0"
] | null | null | null |
servicecatalog_puppet/config.py
|
fuellbie/aws-service-catalog-puppet
|
8a57d0f83623295214b7b6a498534c80de084c29
|
[
"Apache-2.0"
] | null | null | null |
import functools
import os
import pkg_resources
import yaml
from betterboto import client as betterboto_client
from jinja2 import Environment, FileSystemLoader
from servicecatalog_puppet import asset_helpers
from servicecatalog_puppet import constants
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
@functools.lru_cache(maxsize=32)
def get_config(puppet_account_id, default_region=None):
if os.path.exists("config.yaml"):
with open("config.yaml", "r") as f:
conf = yaml.safe_load(f.read())
return conf
logger.info("getting config, default_region: {}".format(default_region))
region = default_region if default_region else get_home_region(puppet_account_id)
with betterboto_client.CrossAccountClientContextManager(
"ssm",
get_puppet_role_arn(puppet_account_id),
f"{puppet_account_id}-{region}-{get_puppet_role_name()}",
region_name=region,
) as ssm:
response = ssm.get_parameter(Name=constants.CONFIG_PARAM_NAME)
return yaml.safe_load(response.get("Parameter").get("Value"))
@functools.lru_cache(maxsize=32)
def get_regions(puppet_account_id, default_region=None):
logger.info(f"getting {constants.CONFIG_REGIONS}, default_region: {default_region}")
return get_config(puppet_account_id, default_region).get(constants.CONFIG_REGIONS)
@functools.lru_cache(maxsize=32)
def get_should_use_sns(puppet_account_id, default_region=None):
logger.info(f"getting {constants.CONFIG_SHOULD_COLLECT_CLOUDFORMATION_EVENTS}, default_region: {default_region}")
return get_config(puppet_account_id, default_region).get(
constants.CONFIG_SHOULD_COLLECT_CLOUDFORMATION_EVENTS, True
)
@functools.lru_cache(maxsize=32)
def is_caching_enabled(puppet_account_id, default_region=None):
logger.info(
"getting is_caching_enabled, default_region: {}".format(default_region)
)
if os.getenv(constants.CONFIG_IS_CACHING_ENABLED) is None:
caching_enabled = get_config(puppet_account_id, default_region).get(
"is_caching_enabled", False
)
os.putenv(constants.CONFIG_IS_CACHING_ENABLED, f"{caching_enabled}".lower())
else:
caching_enabled = (
os.getenv(constants.CONFIG_IS_CACHING_ENABLED).lower() == "true"
)
return caching_enabled
@functools.lru_cache(maxsize=32)
def get_should_use_eventbridge(puppet_account_id, default_region=None):
logger.info(
"getting should_use_eventbridge, default_region: {}".format(default_region)
)
return get_config(puppet_account_id, default_region).get(
"should_forward_events_to_eventbridge", False
)
@functools.lru_cache(maxsize=32)
def get_should_forward_failures_to_opscenter(puppet_account_id, default_region=None):
logger.info(
"getting should_forward_failures_to_opscenter, default_region: {}".format(
default_region
)
)
return get_config(puppet_account_id, default_region).get(
"should_forward_failures_to_opscenter", False
)
@functools.lru_cache(maxsize=32)
def get_should_use_product_plans(puppet_account_id, default_region=None):
logger.info(
"getting should_use_product_plans, default_region: {}".format(default_region)
)
return get_config(puppet_account_id, default_region).get(
"should_use_product_plans", True
)
@functools.lru_cache(maxsize=32)
def get_should_use_shared_scheduler(puppet_account_id, default_region=None):
logger.info(
f"getting {constants.CONFIG_SHOULD_USE_SHARED_SCHEDULER}, default_region: {default_region}"
)
return get_config(puppet_account_id, default_region).get(
constants.CONFIG_SHOULD_USE_SHARED_SCHEDULER, False
)
@functools.lru_cache(maxsize=32)
def get_should_explode_manifest(puppet_account_id, default_region=None):
logger.info(
f"getting {constants.CONFIG_SHOULD_EXPLODE_MANIFEST}, default_region: {default_region}"
)
return get_config(puppet_account_id, default_region).get(
constants.CONFIG_SHOULD_EXPLODE_MANIFEST, False
)
@functools.lru_cache(maxsize=32)
def get_global_sharing_mode_default(puppet_account_id, default_region=None):
logger.info(
"getting global_sharing_mode_default, default_region: {}".format(
default_region
)
)
return get_config(puppet_account_id, default_region).get(
"global_sharing_mode_default", constants.SHARING_MODE_DEFAULT
)
@functools.lru_cache()
def get_partition():
logger.info(f"getting partition")
return os.getenv(
constants.PARTITION_ENVIRONMENTAL_VARIABLE_NAME, constants.PARTITION_DEFAULT
)
@functools.lru_cache()
def get_puppet_role_name():
logger.info("getting puppet_role_name")
return os.getenv(
constants.PUPPET_ROLE_NAME_ENVIRONMENTAL_VARIABLE_NAME,
constants.PUPPET_ROLE_NAME_DEFAULT,
)
@functools.lru_cache()
def get_puppet_role_path():
logger.info("getting puppet_role_path")
return os.getenv(
constants.PUPPET_ROLE_PATH_ENVIRONMENTAL_VARIABLE_NAME,
constants.PUPPET_ROLE_PATH_DEFAULT,
)
@functools.lru_cache()
def get_puppet_role_arn(puppet_account_id):
logger.info("getting puppet_role_arn")
return f"arn:{get_partition()}:iam::{puppet_account_id}:role{get_puppet_role_path()}{get_puppet_role_name()}"
@functools.lru_cache(maxsize=32)
def get_local_config(what):
if os.path.exists("config.yaml"):
with open("config.yaml", "r") as f:
conf = yaml.safe_load(f.read())
return conf.get(what, None)
else:
return None
@functools.lru_cache()
def get_home_region(puppet_account_id):
if get_local_config("home_region"):
return get_local_config("home_region")
with betterboto_client.CrossAccountClientContextManager(
"ssm",
get_puppet_role_arn(puppet_account_id),
f"{puppet_account_id}-{get_puppet_role_name()}",
) as ssm:
response = ssm.get_parameter(Name=constants.HOME_REGION_PARAM_NAME)
return response.get("Parameter").get("Value")
@functools.lru_cache(maxsize=32)
def get_org_iam_role_arn(puppet_account_id):
with betterboto_client.CrossAccountClientContextManager(
"ssm",
get_puppet_role_arn(puppet_account_id),
f"{puppet_account_id}-{get_puppet_role_name()}",
region_name=get_home_region(puppet_account_id),
) as ssm:
try:
response = ssm.get_parameter(
Name=constants.CONFIG_PARAM_NAME_ORG_IAM_ROLE_ARN
)
return response.get("Parameter").get("Value")
except ssm.exceptions.ParameterNotFound:
logger.info("No org role set")
return None
template_dir = asset_helpers.resolve_from_site_packages("templates")
env = Environment(loader=FileSystemLoader(template_dir), extensions=["jinja2.ext.do"],)
@functools.lru_cache(maxsize=32)
def get_puppet_account_id():
with betterboto_client.ClientContextManager("sts") as sts:
return sts.get_caller_identity().get("Account")
@functools.lru_cache(maxsize=32)
def get_current_account_id():
with betterboto_client.ClientContextManager("sts") as sts:
return sts.get_caller_identity().get("Account")
# TODO - not used?
def get_ssm_config_for_parameter(account_ssm_param, required_parameter_name):
if account_ssm_param.get("region") is not None:
return {
"name": account_ssm_param.get("name"),
"region": account_ssm_param.get("region"),
"parameter_name": required_parameter_name,
}
else:
return {
"name": account_ssm_param.get("name"),
"parameter_name": required_parameter_name,
}
def get_puppet_version():
return pkg_resources.get_distribution("aws-service-catalog-puppet").version
| 33.176471
| 118
| 0.729863
|
80b5a0e8684bc24e88f8da84e34e7cea082f65b2
| 1,741
|
py
|
Python
|
Scripts/pcap_parse.py
|
Team-Zed-cf/Team-Zed
|
662eee2948502fca0bdc477955db17e2d32f92aa
|
[
"MIT"
] | null | null | null |
Scripts/pcap_parse.py
|
Team-Zed-cf/Team-Zed
|
662eee2948502fca0bdc477955db17e2d32f92aa
|
[
"MIT"
] | null | null | null |
Scripts/pcap_parse.py
|
Team-Zed-cf/Team-Zed
|
662eee2948502fca0bdc477955db17e2d32f92aa
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# Script: 401 Final Project Script
# Author: Courtney Hans
# Date of latest revision: 12/13/2020
# Purpose: pcap parse
# defines parameters for what information is deemed
# "interesting" to aid efficient pcap analysis
import argparse, os, sys
from scapy.utils import RawPcapReader
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, TCP
# parameters can be altered depending upon what you're looking for
def process_pcap(filename):
print('Opening {}...'.format(filename))
count = 0
interesting_pkt_count = 0
for (pkt_data, pkt_metadata,) in RawPcapReader(filename):
count += 1
ether_pkt = Ether(pkt_data)
if 'type' not in ether_pkt.fields:
continue
if ether_pkt.type != 0x0800: #ignore non-IPv4 packets
continue
ip_pckt = ether_pkt[IP]
if ip_pckt.proto != 6: # ignore non-TCP packets
continue
interesting_pkt_count += 1
print('{} contains {} packets ({} interesting)'.format(filename,count, interesting_pkt_count))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='PCAP reader')
parser.add_argument('--pcap', metavar='<pcap file name>', help='pcap file to parse', required=True)
args = parser.parse_args()
filename = args.pcap
if not os.path.isfile(filename):
print('"{}" does not exist'.format(filename), file=sys.stderr)
sys.exit(-1)
process_pcap(filename)
sys.exit(0)
#resource: https://vnetman.github.io/pcap/python/pyshark/scapy/libpcap/2018/10/25/analyzing-packet-captures-with-python-part-1.html
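# Example invocation (illustrative): python3 pcap_parse.py --pcap capture.pcap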
| 31.654545
| 135
| 0.643308
|
1525b4f496b12718b43def195216766392a238e0
| 36,300
|
py
|
Python
|
tst/daemon/server.py
|
TST-Group-BE/flax-blockchain
|
ed850df4f28ef4b6f71c175c8b6d07d27f7b3cd5
|
[
"Apache-2.0"
] | null | null | null |
tst/daemon/server.py
|
TST-Group-BE/flax-blockchain
|
ed850df4f28ef4b6f71c175c8b6d07d27f7b3cd5
|
[
"Apache-2.0"
] | null | null | null |
tst/daemon/server.py
|
TST-Group-BE/flax-blockchain
|
ed850df4f28ef4b6f71c175c8b6d07d27f7b3cd5
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import json
import logging
import os
import signal
import subprocess
import sys
import time
import traceback
import uuid
from concurrent.futures import ThreadPoolExecutor
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, Optional, TextIO, Tuple, cast
from websockets import ConnectionClosedOK, WebSocketException, WebSocketServerProtocol, serve
from tst.cmds.init_funcs import tst_init
from tst.daemon.windows_signal import kill
from tst.server.server import ssl_context_for_root, ssl_context_for_server
from tst.ssl.create_ssl import get_mozzila_ca_crt
from tst.util.tst_logging import initialize_logging
from tst.util.config import load_config
from tst.util.json_util import dict_to_json_str
from tst.util.path import mkdir
from tst.util.service_groups import validate_service
from tst.util.setproctitle import setproctitle
from tst.util.ws_message import WsRpcMessage, create_payload, format_response
io_pool_exc = ThreadPoolExecutor()
try:
from aiohttp import ClientSession, web
except ModuleNotFoundError:
print("Error: Make sure to run . ./activate from the project folder before starting Tst.")
quit()
try:
import fcntl
has_fcntl = True
except ImportError:
has_fcntl = False
log = logging.getLogger(__name__)
service_plotter = "tst plots create"
async def fetch(url: str):
async with ClientSession() as session:
try:
mozzila_root = get_mozzila_ca_crt()
ssl_context = ssl_context_for_root(mozzila_root)
response = await session.get(url, ssl=ssl_context)
if not response.ok:
log.warning("Response not OK.")
return None
return await response.text()
except Exception as e:
log.error(f"Exception while fetching {url}, exception: {e}")
return None
class PlotState(str, Enum):
SUBMITTED = "SUBMITTED"
RUNNING = "RUNNING"
REMOVING = "REMOVING"
FINISHED = "FINISHED"
class PlotEvent(str, Enum):
LOG_CHANGED = "log_changed"
STATE_CHANGED = "state_changed"
# determine if application is a script file or frozen exe
if getattr(sys, "frozen", False):
name_map = {
"tst": "tst",
"tst_wallet": "start_wallet",
"tst_full_node": "start_full_node",
"tst_harvester": "start_harvester",
"tst_farmer": "start_farmer",
"tst_introducer": "start_introducer",
"tst_timelord": "start_timelord",
"tst_timelord_launcher": "timelord_launcher",
"tst_full_node_simulator": "start_simulator",
}
def executable_for_service(service_name: str) -> str:
application_path = os.path.dirname(sys.executable)
if sys.platform == "win32" or sys.platform == "cygwin":
executable = name_map[service_name]
path = f"{application_path}/{executable}.exe"
return path
else:
path = f"{application_path}/{name_map[service_name]}"
return path
else:
application_path = os.path.dirname(__file__)
def executable_for_service(service_name: str) -> str:
return service_name
async def ping() -> Dict[str, Any]:
response = {"success": True, "value": "pong"}
return response
class WebSocketServer:
def __init__(self, root_path: Path, ca_crt_path: Path, ca_key_path: Path, crt_path: Path, key_path: Path):
self.root_path = root_path
self.log = log
self.services: Dict = dict()
self.plots_queue: List[Dict] = []
self.connections: Dict[str, List[WebSocketServerProtocol]] = dict() # service_name : [WebSocket]
self.remote_address_map: Dict[WebSocketServerProtocol, str] = dict() # socket: service_name
self.ping_job: Optional[asyncio.Task] = None
self.net_config = load_config(root_path, "config.yaml")
self.self_hostname = self.net_config["self_hostname"]
self.daemon_port = self.net_config["daemon_port"]
self.websocket_server = None
self.ssl_context = ssl_context_for_server(ca_crt_path, ca_key_path, crt_path, key_path)
self.shut_down = False
async def start(self):
self.log.info("Starting Daemon Server")
def master_close_cb():
asyncio.create_task(self.stop())
try:
asyncio.get_running_loop().add_signal_handler(signal.SIGINT, master_close_cb)
asyncio.get_running_loop().add_signal_handler(signal.SIGTERM, master_close_cb)
except NotImplementedError:
self.log.info("Not implemented")
self.websocket_server = await serve(
self.safe_handle,
self.self_hostname,
self.daemon_port,
max_size=50 * 1000 * 1000,
ping_interval=500,
ping_timeout=300,
ssl=self.ssl_context,
)
self.log.info("Waiting Daemon WebSocketServer closure")
def cancel_task_safe(self, task: Optional[asyncio.Task]):
if task is not None:
try:
task.cancel()
except Exception as e:
self.log.error(f"Error while canceling task.{e} {task}")
async def stop(self) -> Dict[str, Any]:
self.shut_down = True
self.cancel_task_safe(self.ping_job)
await self.exit()
if self.websocket_server is not None:
self.websocket_server.close()
return {"success": True}
async def safe_handle(self, websocket: WebSocketServerProtocol, path: str):
service_name = ""
try:
async for message in websocket:
try:
decoded = json.loads(message)
if "data" not in decoded:
decoded["data"] = {}
response, sockets_to_use = await self.handle_message(websocket, decoded)
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Error while handling message: {tb}")
error = {"success": False, "error": f"{e}"}
response = format_response(decoded, error)
sockets_to_use = []
if len(sockets_to_use) > 0:
for socket in sockets_to_use:
try:
await socket.send(response)
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Unexpected exception trying to send to websocket: {e} {tb}")
self.remove_connection(socket)
await socket.close()
except Exception as e:
tb = traceback.format_exc()
service_name = "Unknown"
if websocket in self.remote_address_map:
service_name = self.remote_address_map[websocket]
if isinstance(e, ConnectionClosedOK):
self.log.info(f"ConnectionClosedOk. Closing websocket with {service_name} {e}")
elif isinstance(e, WebSocketException):
self.log.info(f"Websocket exception. Closing websocket with {service_name} {e} {tb}")
else:
self.log.error(f"Unexpected exception in websocket: {e} {tb}")
finally:
self.remove_connection(websocket)
await websocket.close()
def remove_connection(self, websocket: WebSocketServerProtocol):
service_name = None
if websocket in self.remote_address_map:
service_name = self.remote_address_map[websocket]
self.remote_address_map.pop(websocket)
if service_name in self.connections:
after_removal = []
for connection in self.connections[service_name]:
if connection == websocket:
continue
else:
after_removal.append(connection)
self.connections[service_name] = after_removal
async def ping_task(self) -> None:
restart = True
await asyncio.sleep(30)
for remote_address, service_name in self.remote_address_map.items():
if service_name in self.connections:
sockets = self.connections[service_name]
for socket in sockets:
if socket.remote_address[1] == remote_address:
try:
self.log.info(f"About to ping: {service_name}")
await socket.ping()
except asyncio.CancelledError:
self.log.info("Ping task received Cancel")
restart = False
break
except Exception as e:
self.log.info(f"Ping error: {e}")
self.log.warning("Ping failed, connection closed.")
self.remove_connection(socket)
await socket.close()
if restart is True:
self.ping_job = asyncio.create_task(self.ping_task())
async def handle_message(
self, websocket: WebSocketServerProtocol, message: WsRpcMessage
) -> Tuple[Optional[str], List[Any]]:
"""
This function gets called when new message is received via websocket.
"""
command = message["command"]
destination = message["destination"]
if destination != "daemon":
destination = message["destination"]
if destination in self.connections:
sockets = self.connections[destination]
return dict_to_json_str(message), sockets
return None, []
data = message["data"]
commands_with_data = [
"start_service",
"start_plotting",
"stop_plotting",
"stop_service",
"is_running",
"register_service",
]
if len(data) == 0 and command in commands_with_data:
response = {"success": False, "error": f'{command} requires "data"'}
elif command == "ping":
response = await ping()
elif command == "start_service":
response = await self.start_service(cast(Dict[str, Any], data))
elif command == "start_plotting":
response = await self.start_plotting(cast(Dict[str, Any], data))
elif command == "stop_plotting":
response = await self.stop_plotting(cast(Dict[str, Any], data))
elif command == "stop_service":
response = await self.stop_service(cast(Dict[str, Any], data))
elif command == "is_running":
response = await self.is_running(cast(Dict[str, Any], data))
elif command == "exit":
response = await self.stop()
elif command == "register_service":
response = await self.register_service(websocket, cast(Dict[str, Any], data))
elif command == "get_status":
response = self.get_status()
else:
self.log.error(f"UK>> {message}")
response = {"success": False, "error": f"unknown_command {command}"}
full_response = format_response(message, response)
return full_response, [websocket]
def get_status(self) -> Dict[str, Any]:
response = {"success": True, "genesis_initialized": True}
return response
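    # Example inbound frame dispatched by handle_message (field names as used in the
    # code above; values are illustrative):
    #   {"command": "ping", "destination": "daemon", "data": {}}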
def plot_queue_to_payload(self, plot_queue_item, send_full_log: bool) -> Dict[str, Any]:
error = plot_queue_item.get("error")
has_error = error is not None
item = {
"id": plot_queue_item["id"],
"queue": plot_queue_item["queue"],
"size": plot_queue_item["size"],
"parallel": plot_queue_item["parallel"],
"delay": plot_queue_item["delay"],
"state": plot_queue_item["state"],
"error": str(error) if has_error else None,
"deleted": plot_queue_item["deleted"],
"log_new": plot_queue_item.get("log_new"),
}
if send_full_log:
item["log"] = plot_queue_item.get("log")
return item
def prepare_plot_state_message(self, state: PlotEvent, id):
message = {
"state": state,
"queue": self.extract_plot_queue(id),
}
return message
def extract_plot_queue(self, id=None) -> List[Dict]:
send_full_log = id is None
data = []
for item in self.plots_queue:
if id is None or item["id"] == id:
data.append(self.plot_queue_to_payload(item, send_full_log))
return data
async def _state_changed(self, service: str, message: Dict[str, Any]):
"""If id is None, send the whole state queue"""
if service not in self.connections:
return None
websockets = self.connections[service]
if message is None:
return None
response = create_payload("state_changed", message, service, "wallet_ui")
for websocket in websockets:
try:
await websocket.send(response)
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Unexpected exception trying to send to websocket: {e} {tb}")
websockets.remove(websocket)
await websocket.close()
def state_changed(self, service: str, message: Dict[str, Any]):
asyncio.create_task(self._state_changed(service, message))
async def _watch_file_changes(self, config, fp: TextIO, loop: asyncio.AbstractEventLoop):
id = config["id"]
final_words = ["Renamed final file"]
while True:
new_data = await loop.run_in_executor(io_pool_exc, fp.readline)
if config["state"] is not PlotState.RUNNING:
return None
if new_data not in (None, ""):
config["log"] = new_data if config["log"] is None else config["log"] + new_data
config["log_new"] = new_data
self.state_changed(service_plotter, self.prepare_plot_state_message(PlotEvent.LOG_CHANGED, id))
if new_data:
for word in final_words:
if word in new_data:
return None
else:
                await asyncio.sleep(0.5)  # yield to the event loop instead of blocking it
async def _track_plotting_progress(self, config, loop: asyncio.AbstractEventLoop):
file_path = config["out_file"]
with open(file_path, "r") as fp:
await self._watch_file_changes(config, fp, loop)
def _build_plotting_command_args(self, request: Any, ignoreCount: bool) -> List[str]:
service_name = request["service"]
k = request["k"]
n = 1 if ignoreCount else request["n"]
t = request["t"]
t2 = request["t2"]
d = request["d"]
b = request["b"]
u = request["u"]
r = request["r"]
a = request.get("a")
e = request["e"]
x = request["x"]
override_k = request["overrideK"]
command_args: List[str] = []
command_args += service_name.split(" ")
command_args.append(f"-k{k}")
command_args.append(f"-n{n}")
command_args.append(f"-t{t}")
command_args.append(f"-2{t2}")
command_args.append(f"-d{d}")
command_args.append(f"-b{b}")
command_args.append(f"-u{u}")
command_args.append(f"-r{r}")
if a is not None:
command_args.append(f"-a{a}")
if e is True:
command_args.append("-e")
if x is True:
command_args.append("-x")
if override_k is True:
command_args.append("--override-k")
self.log.debug(f"command_args are {command_args}")
return command_args
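    # For illustration only (these request values are hypothetical): a request with
    # service="tst plots create", k=32, n=1, t="/tmp", t2="/tmp", d="/plots",
    # b=4608, u=128, r=2 and no optional flags would yield roughly:
    #   ["tst", "plots", "create", "-k32", "-n1", "-t/tmp", "-2/tmp", "-d/plots",
    #    "-b4608", "-u128", "-r2"]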
def _is_serial_plotting_running(self, queue: str = "default") -> bool:
response = False
for item in self.plots_queue:
if item["queue"] == queue and item["parallel"] is False and item["state"] is PlotState.RUNNING:
response = True
return response
def _get_plots_queue_item(self, id: str):
config = next(item for item in self.plots_queue if item["id"] == id)
return config
def _run_next_serial_plotting(self, loop: asyncio.AbstractEventLoop, queue: str = "default"):
next_plot_id = None
if self._is_serial_plotting_running(queue) is True:
return None
for item in self.plots_queue:
if item["queue"] == queue and item["state"] is PlotState.SUBMITTED and item["parallel"] is False:
next_plot_id = item["id"]
if next_plot_id is not None:
loop.create_task(self._start_plotting(next_plot_id, loop, queue))
async def _start_plotting(self, id: str, loop: asyncio.AbstractEventLoop, queue: str = "default"):
current_process = None
try:
log.info(f"Starting plotting with ID {id}")
config = self._get_plots_queue_item(id)
if config is None:
raise Exception(f"Plot queue config with ID {id} does not exist")
state = config["state"]
if state is not PlotState.SUBMITTED:
raise Exception(f"Plot with ID {id} has no state submitted")
id = config["id"]
delay = config["delay"]
await asyncio.sleep(delay)
if config["state"] is not PlotState.SUBMITTED:
return None
service_name = config["service_name"]
command_args = config["command_args"]
self.log.debug(f"command_args before launch_plotter are {command_args}")
self.log.debug(f"self.root_path before launch_plotter is {self.root_path}")
process, pid_path = launch_plotter(self.root_path, service_name, command_args, id)
current_process = process
config["state"] = PlotState.RUNNING
config["out_file"] = plotter_log_path(self.root_path, id).absolute()
config["process"] = process
self.state_changed(service_plotter, self.prepare_plot_state_message(PlotEvent.STATE_CHANGED, id))
if service_name not in self.services:
self.services[service_name] = []
self.services[service_name].append(process)
await self._track_plotting_progress(config, loop)
config["state"] = PlotState.FINISHED
self.state_changed(service_plotter, self.prepare_plot_state_message(PlotEvent.STATE_CHANGED, id))
except (subprocess.SubprocessError, IOError):
log.exception(f"problem starting {service_name}")
error = Exception("Start plotting failed")
config["state"] = PlotState.FINISHED
config["error"] = error
self.state_changed(service_plotter, self.prepare_plot_state_message(PlotEvent.STATE_CHANGED, id))
raise error
finally:
if current_process is not None:
self.services[service_name].remove(current_process)
current_process.wait() # prevent zombies
self._run_next_serial_plotting(loop, queue)
async def start_plotting(self, request: Dict[str, Any]):
service_name = request["service"]
delay = request.get("delay", 0)
parallel = request.get("parallel", False)
size = request.get("k")
count = request.get("n", 1)
queue = request.get("queue", "default")
for k in range(count):
id = str(uuid.uuid4())
config = {
"id": id,
"size": size,
"queue": queue,
"service_name": service_name,
"command_args": self._build_plotting_command_args(request, True),
"parallel": parallel,
"delay": delay * k if parallel is True else delay,
"state": PlotState.SUBMITTED,
"deleted": False,
"error": None,
"log": None,
"process": None,
}
self.plots_queue.append(config)
# notify GUI about new plot queue item
self.state_changed(service_plotter, self.prepare_plot_state_message(PlotEvent.STATE_CHANGED, id))
# only first item can start when user selected serial plotting
can_start_serial_plotting = k == 0 and self._is_serial_plotting_running(queue) is False
if parallel is True or can_start_serial_plotting:
log.info(f"Plotting will start in {config['delay']} seconds")
loop = asyncio.get_event_loop()
loop.create_task(self._start_plotting(id, loop, queue))
else:
log.info("Plotting will start automatically when previous plotting finish")
response = {
"success": True,
"service_name": service_name,
}
return response
async def stop_plotting(self, request: Dict[str, Any]) -> Dict[str, Any]:
id = request["id"]
config = self._get_plots_queue_item(id)
if config is None:
return {"success": False}
id = config["id"]
state = config["state"]
process = config["process"]
queue = config["queue"]
if config["state"] is PlotState.REMOVING:
return {"success": False}
try:
run_next = False
if process is not None and state == PlotState.RUNNING:
run_next = True
config["state"] = PlotState.REMOVING
self.state_changed(service_plotter, self.prepare_plot_state_message(PlotEvent.STATE_CHANGED, id))
await kill_process(process, self.root_path, service_plotter, id)
config["state"] = PlotState.FINISHED
config["deleted"] = True
self.state_changed(service_plotter, self.prepare_plot_state_message(PlotEvent.STATE_CHANGED, id))
self.plots_queue.remove(config)
if run_next:
loop = asyncio.get_event_loop()
self._run_next_serial_plotting(loop, queue)
return {"success": True}
except Exception as e:
log.error(f"Error during killing the plot process: {e}")
config["state"] = PlotState.FINISHED
config["error"] = str(e)
self.state_changed(service_plotter, self.prepare_plot_state_message(PlotEvent.STATE_CHANGED, id))
return {"success": False}
async def start_service(self, request: Dict[str, Any]):
service_command = request["service"]
error = None
success = False
testing = False
if "testing" in request:
testing = request["testing"]
if not validate_service(service_command):
error = "unknown service"
if service_command in self.services:
service = self.services[service_command]
r = service is not None and service.poll() is None
if r is False:
self.services.pop(service_command)
error = None
else:
error = f"Service {service_command} already running"
if error is None:
try:
exe_command = service_command
if testing is True:
exe_command = f"{service_command} --testing=true"
process, pid_path = launch_service(self.root_path, exe_command)
self.services[service_command] = process
success = True
except (subprocess.SubprocessError, IOError):
log.exception(f"problem starting {service_command}")
error = "start failed"
response = {"success": success, "service": service_command, "error": error}
return response
async def stop_service(self, request: Dict[str, Any]) -> Dict[str, Any]:
service_name = request["service"]
result = await kill_service(self.root_path, self.services, service_name)
response = {"success": result, "service_name": service_name}
return response
async def is_running(self, request: Dict[str, Any]) -> Dict[str, Any]:
service_name = request["service"]
if service_name == service_plotter:
processes = self.services.get(service_name)
is_running = processes is not None and len(processes) > 0
response = {
"success": True,
"service_name": service_name,
"is_running": is_running,
}
else:
process = self.services.get(service_name)
is_running = process is not None and process.poll() is None
response = {
"success": True,
"service_name": service_name,
"is_running": is_running,
}
return response
async def exit(self) -> Dict[str, Any]:
jobs = []
for k in self.services.keys():
jobs.append(kill_service(self.root_path, self.services, k))
if jobs:
await asyncio.wait(jobs)
self.services.clear()
# TODO: fix this hack
asyncio.get_event_loop().call_later(5, lambda *args: sys.exit(0))
log.info("tst daemon exiting in 5 seconds")
response = {"success": True}
return response
async def register_service(self, websocket: WebSocketServerProtocol, request: Dict[str, Any]) -> Dict[str, Any]:
self.log.info(f"Register service {request}")
service = request["service"]
if service not in self.connections:
self.connections[service] = []
self.connections[service].append(websocket)
response: Dict[str, Any] = {"success": True}
if service == service_plotter:
response = {
"success": True,
"service": service,
"queue": self.extract_plot_queue(),
}
else:
self.remote_address_map[websocket] = service
if self.ping_job is None:
self.ping_job = asyncio.create_task(self.ping_task())
self.log.info(f"registered for service {service}")
log.info(f"{response}")
return response
def daemon_launch_lock_path(root_path: Path) -> Path:
"""
    A path to a file that is locked while a daemon is launching but not yet started.
This prevents multiple instances from launching.
"""
return root_path / "run" / "start-daemon.launching"
def service_launch_lock_path(root_path: Path, service: str) -> Path:
"""
    A path to a file that is locked while a service is running.
"""
service_name = service.replace(" ", "-").replace("/", "-")
return root_path / "run" / f"{service_name}.lock"
def pid_path_for_service(root_path: Path, service: str, id: str = "") -> Path:
"""
Generate a path for a PID file for the given service name.
"""
pid_name = service.replace(" ", "-").replace("/", "-")
return root_path / "run" / f"{pid_name}{id}.pid"
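# For example (hypothetical inputs): pid_path_for_service(Path("/home/user/.tst"), "tst farmer", "1")
# resolves to /home/user/.tst/run/tst-farmer1.pid, since spaces and slashes in the
# service name are replaced with dashes before the id and ".pid" suffix are appended.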
def plotter_log_path(root_path: Path, id: str):
return root_path / "plotter" / f"plotter_log_{id}.txt"
def launch_plotter(root_path: Path, service_name: str, service_array: List[str], id: str):
# we need to pass on the possibly altered TST_ROOT
os.environ["TST_ROOT"] = str(root_path)
service_executable = executable_for_service(service_array[0])
# Swap service name with name of executable
service_array[0] = service_executable
startupinfo = None
if os.name == "nt":
startupinfo = subprocess.STARTUPINFO() # type: ignore
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW # type: ignore
plotter_path = plotter_log_path(root_path, id)
if plotter_path.parent.exists():
if plotter_path.exists():
plotter_path.unlink()
else:
mkdir(plotter_path.parent)
outfile = open(plotter_path.resolve(), "w")
log.info(f"Service array: {service_array}")
process = subprocess.Popen(service_array, shell=False, stderr=outfile, stdout=outfile, startupinfo=startupinfo)
pid_path = pid_path_for_service(root_path, service_name, id)
try:
mkdir(pid_path.parent)
with open(pid_path, "w") as f:
f.write(f"{process.pid}\n")
except Exception:
pass
return process, pid_path
def launch_service(root_path: Path, service_command) -> Tuple[subprocess.Popen, Path]:
"""
Launch a child process.
"""
# set up TST_ROOT
# invoke correct script
# save away PID
# we need to pass on the possibly altered TST_ROOT
os.environ["TST_ROOT"] = str(root_path)
log.debug(f"Launching service with TST_ROOT: {os.environ['TST_ROOT']}")
lockfile = singleton(service_launch_lock_path(root_path, service_command))
if lockfile is None:
logging.error(f"{service_command}: already running")
raise subprocess.SubprocessError
    # Split the command and swap the service name for the full path of its executable
service_array = service_command.split()
service_executable = executable_for_service(service_array[0])
service_array[0] = service_executable
startupinfo = None
if os.name == "nt":
startupinfo = subprocess.STARTUPINFO() # type: ignore
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW # type: ignore
    # CREATE_NEW_PROCESS_GROUP allows graceful shutdown on Windows via the CTRL_BREAK_EVENT signal
if sys.platform == "win32" or sys.platform == "cygwin":
creationflags = subprocess.CREATE_NEW_PROCESS_GROUP
else:
creationflags = 0
environ_copy = os.environ.copy()
process = subprocess.Popen(
service_array, shell=False, startupinfo=startupinfo, creationflags=creationflags, env=environ_copy
)
pid_path = pid_path_for_service(root_path, service_command)
try:
mkdir(pid_path.parent)
with open(pid_path, "w") as f:
f.write(f"{process.pid}\n")
except Exception:
pass
return process, pid_path
async def kill_process(
process: subprocess.Popen, root_path: Path, service_name: str, id: str, delay_before_kill: int = 15
) -> bool:
pid_path = pid_path_for_service(root_path, service_name, id)
if sys.platform == "win32" or sys.platform == "cygwin":
log.info("sending CTRL_BREAK_EVENT signal to %s", service_name)
# pylint: disable=E1101
kill(process.pid, signal.SIGBREAK) # type: ignore
else:
log.info("sending term signal to %s", service_name)
process.terminate()
count: float = 0
while count < delay_before_kill:
if process.poll() is not None:
break
await asyncio.sleep(0.5)
count += 0.5
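    # Note: this is a while/else — the else branch below runs only if the loop
    # finished without hitting `break`, i.e. the process is still alive after
    # delay_before_kill seconds, so we escalate to a hard kill.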
else:
process.kill()
log.info("sending kill signal to %s", service_name)
r = process.wait()
log.info("process %s returned %d", service_name, r)
try:
pid_path_killed = pid_path.with_suffix(".pid-killed")
if pid_path_killed.exists():
pid_path_killed.unlink()
os.rename(pid_path, pid_path_killed)
except Exception:
pass
return True
async def kill_service(
root_path: Path, services: Dict[str, subprocess.Popen], service_name: str, delay_before_kill: int = 15
) -> bool:
process = services.get(service_name)
if process is None:
return False
del services[service_name]
result = await kill_process(process, root_path, service_name, "", delay_before_kill)
return result
def is_running(services: Dict[str, subprocess.Popen], service_name: str) -> bool:
process = services.get(service_name)
return process is not None and process.poll() is None
def create_server_for_daemon(root_path: Path):
routes = web.RouteTableDef()
services: Dict = dict()
@routes.get("/daemon/ping/")
async def ping(request: web.Request) -> web.Response:
return web.Response(text="pong")
@routes.get("/daemon/service/start/")
async def start_service(request: web.Request) -> web.Response:
service_name = request.query.get("service")
if service_name is None or not validate_service(service_name):
r = f"{service_name} unknown service"
return web.Response(text=str(r))
if is_running(services, service_name):
r = f"{service_name} already running"
return web.Response(text=str(r))
try:
process, pid_path = launch_service(root_path, service_name)
services[service_name] = process
r = f"{service_name} started"
except (subprocess.SubprocessError, IOError):
log.exception(f"problem starting {service_name}")
r = f"{service_name} start failed"
return web.Response(text=str(r))
@routes.get("/daemon/service/stop/")
async def stop_service(request: web.Request) -> web.Response:
service_name = request.query.get("service")
if service_name is None:
r = f"{service_name} unknown service"
return web.Response(text=str(r))
r = str(await kill_service(root_path, services, service_name))
return web.Response(text=str(r))
@routes.get("/daemon/service/is_running/")
async def is_running_handler(request: web.Request) -> web.Response:
service_name = request.query.get("service")
if service_name is None:
r = f"{service_name} unknown service"
return web.Response(text=str(r))
r = str(is_running(services, service_name))
return web.Response(text=str(r))
@routes.get("/daemon/exit/")
async def exit(request: web.Request):
jobs = []
for k in services.keys():
jobs.append(kill_service(root_path, services, k))
if jobs:
await asyncio.wait(jobs)
services.clear()
# we can't await `site.stop()` here because that will cause a deadlock, waiting for this
# request to exit
def singleton(lockfile: Path, text: str = "semaphore") -> Optional[TextIO]:
"""
Open a lockfile exclusively.
"""
if not lockfile.parent.exists():
mkdir(lockfile.parent)
try:
if has_fcntl:
f = open(lockfile, "w")
fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
else:
if lockfile.exists():
lockfile.unlink()
fd = os.open(lockfile, os.O_CREAT | os.O_EXCL | os.O_RDWR)
f = open(fd, "w")
f.write(text)
except IOError:
return None
return f
async def async_run_daemon(root_path: Path) -> int:
tst_init(root_path)
config = load_config(root_path, "config.yaml")
setproctitle("tst_daemon")
initialize_logging("daemon", config["logging"], root_path)
lockfile = singleton(daemon_launch_lock_path(root_path))
crt_path = root_path / config["daemon_ssl"]["private_crt"]
key_path = root_path / config["daemon_ssl"]["private_key"]
ca_crt_path = root_path / config["private_ssl_ca"]["crt"]
ca_key_path = root_path / config["private_ssl_ca"]["key"]
sys.stdout.flush()
json_msg = dict_to_json_str(
{
"message": "cert_path",
"success": True,
"cert": f"{crt_path}",
"key": f"{key_path}",
"ca_crt": f"{ca_crt_path}",
}
)
sys.stdout.write("\n" + json_msg + "\n")
sys.stdout.flush()
if lockfile is None:
print("daemon: already launching")
return 2
# TODO: clean this up, ensuring lockfile isn't removed until the listen port is open
create_server_for_daemon(root_path)
ws_server = WebSocketServer(root_path, ca_crt_path, ca_key_path, crt_path, key_path)
await ws_server.start()
assert ws_server.websocket_server is not None
await ws_server.websocket_server.wait_closed()
log.info("Daemon WebSocketServer closed")
return 0
def run_daemon(root_path: Path) -> int:
return asyncio.get_event_loop().run_until_complete(async_run_daemon(root_path))
def main() -> int:
from tst.util.default_root import DEFAULT_ROOT_PATH
return run_daemon(DEFAULT_ROOT_PATH)
if __name__ == "__main__":
main()
| 36.445783
| 116
| 0.60876
|
f5fc1d22f71abd5e1350b92a722b612ad7250de2
| 19,849
|
py
|
Python
|
salt/client/mixins.py
|
olivier-mauras/salt
|
a6483b1b39705401fc6143a7abab13d046bd55de
|
[
"Apache-2.0"
] | null | null | null |
salt/client/mixins.py
|
olivier-mauras/salt
|
a6483b1b39705401fc6143a7abab13d046bd55de
|
[
"Apache-2.0"
] | null | null | null |
salt/client/mixins.py
|
olivier-mauras/salt
|
a6483b1b39705401fc6143a7abab13d046bd55de
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
'''
A collection of mixins useful for the various *Client interfaces
'''
# Import Python libs
from __future__ import absolute_import, print_function, with_statement
import fnmatch
import signal
import logging
import weakref
import traceback
import collections
import copy as pycopy
# Import Salt libs
import salt.exceptions
import salt.minion
import salt.utils # Can be removed once daemonize, get_specific_user, format_call are moved
import salt.utils.args
import salt.utils.doc
import salt.utils.error
import salt.utils.event
import salt.utils.jid
import salt.utils.job
import salt.utils.lazy
import salt.utils.platform
import salt.utils.process
import salt.utils.state
import salt.utils.versions
import salt.transport
import salt.log.setup
from salt.ext import six
# Import 3rd-party libs
import tornado.stack_context
log = logging.getLogger(__name__)
CLIENT_INTERNAL_KEYWORDS = frozenset([
u'client',
u'cmd',
u'eauth',
u'fun',
u'kwarg',
u'match',
u'token',
u'__jid__',
u'__tag__',
u'__user__',
u'username',
u'password'
])
class ClientFuncsDict(collections.MutableMapping):
'''
Class to make a read-only dict for accessing runner funcs "directly"
'''
def __init__(self, client):
self.client = client
def __getattr__(self, attr):
'''
Provide access eg. to 'pack'
'''
return getattr(self.client.functions, attr)
def __setitem__(self, key, val):
raise NotImplementedError()
def __delitem__(self, key):
raise NotImplementedError()
def __getitem__(self, key):
'''
Return a function that you can call with regular func params, but
will do all the _proc_function magic
'''
if key not in self.client.functions:
raise KeyError
def wrapper(*args, **kwargs):
low = {u'fun': key,
u'args': args,
u'kwargs': kwargs,
}
pub_data = {}
# Copy kwargs keys so we can iterate over and pop the pub data
kwargs_keys = list(kwargs)
# pull out pub_data if you have it
for kwargs_key in kwargs_keys:
if kwargs_key.startswith(u'__pub_'):
pub_data[kwargs_key] = kwargs.pop(kwargs_key)
async_pub = self.client._gen_async_pub(pub_data.get(u'__pub_jid'))
user = salt.utils.get_specific_user()
return self.client._proc_function(
key,
low,
user,
async_pub[u'tag'], # TODO: fix
async_pub[u'jid'], # TODO: fix
False, # Don't daemonize
)
return wrapper
def __len__(self):
return len(self.client.functions)
def __iter__(self):
return iter(self.client.functions)
class SyncClientMixin(object):
'''
A mixin for *Client interfaces to abstract common function execution
'''
functions = ()
def functions_dict(self):
'''
Return a dict that will mimic the "functions" dict used all over salt.
It creates a wrapper around the function allowing **kwargs, and if pub_data
is passed in as kwargs, will re-use the JID passed in
'''
return ClientFuncsDict(self)
def master_call(self, **kwargs):
'''
Execute a function through the master network interface.
'''
load = kwargs
load[u'cmd'] = self.client
channel = salt.transport.Channel.factory(self.opts,
crypt=u'clear',
usage=u'master_call')
ret = channel.send(load)
if isinstance(ret, collections.Mapping):
if u'error' in ret:
salt.utils.error.raise_error(**ret[u'error'])
return ret
def cmd_sync(self, low, timeout=None, full_return=False):
'''
Execute a runner function synchronously; eauth is respected
This function requires that :conf_master:`external_auth` is configured
and the user is authorized to execute runner functions: (``@runner``).
.. code-block:: python
            runner.cmd_sync({
'fun': 'jobs.list_jobs',
'username': 'saltdev',
'password': 'saltdev',
'eauth': 'pam',
})
'''
event = salt.utils.event.get_master_event(self.opts, self.opts[u'sock_dir'], listen=True)
job = self.master_call(**low)
ret_tag = salt.utils.event.tagify(u'ret', base=job[u'tag'])
if timeout is None:
timeout = self.opts.get(u'rest_timeout', 300)
ret = event.get_event(tag=ret_tag, full=True, wait=timeout, auto_reconnect=True)
if ret is None:
raise salt.exceptions.SaltClientTimeout(
u"RunnerClient job '{0}' timed out".format(job[u'jid']),
jid=job[u'jid'])
return ret if full_return else ret[u'data'][u'return']
def cmd(self, fun, arg=None, pub_data=None, kwarg=None, print_event=True, full_return=False):
'''
Execute a function
.. code-block:: python
>>> opts = salt.config.master_config('/etc/salt/master')
>>> runner = salt.runner.RunnerClient(opts)
>>> runner.cmd('jobs.list_jobs', [])
{
'20131219215650131543': {
'Arguments': [300],
'Function': 'test.sleep',
'StartTime': '2013, Dec 19 21:56:50.131543',
'Target': '*',
'Target-type': 'glob',
'User': 'saltdev'
},
'20131219215921857715': {
'Arguments': [300],
'Function': 'test.sleep',
'StartTime': '2013, Dec 19 21:59:21.857715',
'Target': '*',
'Target-type': 'glob',
'User': 'saltdev'
},
}
'''
if arg is None:
arg = tuple()
if not isinstance(arg, list) and not isinstance(arg, tuple):
raise salt.exceptions.SaltInvocationError(
u'arg must be formatted as a list/tuple'
)
if pub_data is None:
pub_data = {}
if not isinstance(pub_data, dict):
raise salt.exceptions.SaltInvocationError(
u'pub_data must be formatted as a dictionary'
)
if kwarg is None:
kwarg = {}
if not isinstance(kwarg, dict):
raise salt.exceptions.SaltInvocationError(
u'kwarg must be formatted as a dictionary'
)
arglist = salt.utils.args.parse_input(
arg,
no_parse=self.opts.get(u'no_parse', []))
# if you were passed kwarg, add it to arglist
if kwarg:
kwarg[u'__kwarg__'] = True
arglist.append(kwarg)
args, kwargs = salt.minion.load_args_and_kwargs(
self.functions[fun], arglist, pub_data
)
low = {u'fun': fun,
u'arg': args,
u'kwarg': kwargs}
return self.low(fun, low, print_event=print_event, full_return=full_return)
@property
def mminion(self):
if not hasattr(self, u'_mminion'):
self._mminion = salt.minion.MasterMinion(self.opts, states=False, rend=False)
return self._mminion
def low(self, fun, low, print_event=True, full_return=False):
'''
Check for deprecated usage and allow until Salt Oxygen.
'''
msg = []
if u'args' in low:
msg.append(u'call with arg instead')
low[u'arg'] = low.pop(u'args')
if u'kwargs' in low:
msg.append(u'call with kwarg instead')
low[u'kwarg'] = low.pop(u'kwargs')
if msg:
salt.utils.versions.warn_until(u'Oxygen', u' '.join(msg))
return self._low(fun, low, print_event=print_event, full_return=full_return)
@property
def store_job(self):
'''
Helper that allows us to turn off storing jobs for different classes
that may incorporate this mixin.
'''
try:
class_name = self.__class__.__name__.lower()
except AttributeError:
log.warning(
u'Unable to determine class name',
exc_info_on_loglevel=logging.DEBUG
)
return True
try:
return self.opts[u'{0}_returns'.format(class_name)]
except KeyError:
# No such option, assume this isn't one we care about gating and
# just return True.
return True
def _low(self, fun, low, print_event=True, full_return=False):
'''
Execute a function from low data
Low data includes:
required:
- fun: the name of the function to run
optional:
- arg: a list of args to pass to fun
- kwarg: kwargs for fun
- __user__: user who is running the command
- __jid__: jid to run under
- __tag__: tag to run under
'''
# fire the mminion loading (if not already done) here
# this is not to clutter the output with the module loading
# if we have a high debug level.
self.mminion # pylint: disable=W0104
jid = low.get(u'__jid__', salt.utils.jid.gen_jid(self.opts))
tag = low.get(u'__tag__', salt.utils.event.tagify(jid, prefix=self.tag_prefix))
data = {u'fun': u'{0}.{1}'.format(self.client, fun),
u'jid': jid,
u'user': low.get(u'__user__', u'UNKNOWN'),
}
event = salt.utils.event.get_event(
u'master',
self.opts[u'sock_dir'],
self.opts[u'transport'],
opts=self.opts,
listen=False)
if print_event:
print_func = self.print_async_event \
if hasattr(self, u'print_async_event') \
else None
else:
# Suppress printing of return event (this keeps us from printing
# runner/wheel output during orchestration).
print_func = None
namespaced_event = salt.utils.event.NamespacedEvent(
event,
tag,
print_func=print_func
)
# TODO: document these, and test that they exist
# TODO: Other things to inject??
func_globals = {u'__jid__': jid,
u'__user__': data[u'user'],
u'__tag__': tag,
# weak ref to avoid the Exception in interpreter
# teardown of event
u'__jid_event__': weakref.proxy(namespaced_event),
}
try:
self_functions = pycopy.copy(self.functions)
salt.utils.lazy.verify_fun(self_functions, fun)
# Inject some useful globals to *all* the function's global
# namespace only once per module-- not per func
completed_funcs = []
for mod_name in six.iterkeys(self_functions):
if u'.' not in mod_name:
continue
mod, _ = mod_name.split(u'.', 1)
if mod in completed_funcs:
continue
completed_funcs.append(mod)
for global_key, value in six.iteritems(func_globals):
self.functions[mod_name].__globals__[global_key] = value
            # There are some discrepancies about what a "low" structure is: in the
            # publisher world it is a dict including stuff such as jid, fun,
# arg (a list of args, with kwargs packed in). Historically this
# particular one has had no "arg" and just has had all the kwargs
# packed into the top level object. The plan is to move away from
# that since the caller knows what is an arg vs a kwarg, but while
# we make the transition we will load "kwargs" using format_call if
# there are no kwargs in the low object passed in.
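            # For illustration (hypothetical values): a publisher-style low already has
            # the args split out, e.g.
            #   {u'fun': u'jobs.list_jobs', u'arg': [], u'kwarg': {u'ext_source': None}}
            # whereas the older style packs everything at the top level, e.g.
            #   {u'fun': u'jobs.list_jobs', u'ext_source': None}
            # and relies on format_call below to recover args/kwargs.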
if u'arg' in low and u'kwarg' in low:
args = low[u'arg']
kwargs = low[u'kwarg']
else:
f_call = salt.utils.format_call(
self.functions[fun],
low,
expected_extra_kws=CLIENT_INTERNAL_KEYWORDS
)
args = f_call.get(u'args', ())
kwargs = f_call.get(u'kwargs', {})
# Update the event data with loaded args and kwargs
data[u'fun_args'] = list(args) + ([kwargs] if kwargs else [])
func_globals[u'__jid_event__'].fire_event(data, u'new')
# Initialize a context for executing the method.
with tornado.stack_context.StackContext(self.functions.context_dict.clone):
data[u'return'] = self.functions[fun](*args, **kwargs)
data[u'success'] = True
if isinstance(data[u'return'], dict) and u'data' in data[u'return']:
# some functions can return boolean values
data[u'success'] = salt.utils.state.check_result(data[u'return'][u'data'])
except (Exception, SystemExit) as ex:
if isinstance(ex, salt.exceptions.NotImplemented):
data[u'return'] = str(ex)
else:
data[u'return'] = u'Exception occurred in {0} {1}: {2}'.format(
self.client,
fun,
traceback.format_exc(),
)
data[u'success'] = False
if self.store_job:
try:
salt.utils.job.store_job(
self.opts,
{
u'id': self.opts[u'id'],
u'tgt': self.opts[u'id'],
u'jid': data[u'jid'],
u'return': data,
},
event=None,
mminion=self.mminion,
)
except salt.exceptions.SaltCacheError:
log.error(u'Could not store job cache info. '
u'Job details for this run may be unavailable.')
# Outputters _can_ mutate data so write to the job cache first!
namespaced_event.fire_event(data, u'ret')
# if we fired an event, make sure to delete the event object.
# This will ensure that we call destroy, which will do the 0MQ linger
log.info(u'Runner completed: %s', data[u'jid'])
del event
del namespaced_event
return data if full_return else data[u'return']
def get_docs(self, arg=None):
'''
Return a dictionary of functions and the inline documentation for each
'''
if arg:
if u'*' in arg:
target_mod = arg
_use_fnmatch = True
else:
target_mod = arg + u'.' if not arg.endswith(u'.') else arg
_use_fnmatch = False
if _use_fnmatch:
docs = [(fun, self.functions[fun].__doc__)
for fun in fnmatch.filter(self.functions, target_mod)]
else:
docs = [(fun, self.functions[fun].__doc__)
for fun in sorted(self.functions)
if fun == arg or fun.startswith(target_mod)]
else:
docs = [(fun, self.functions[fun].__doc__)
for fun in sorted(self.functions)]
docs = dict(docs)
return salt.utils.doc.strip_rst(docs)
class AsyncClientMixin(object):
'''
A mixin for *Client interfaces to enable easy async function execution
'''
client = None
tag_prefix = None
def _proc_function(self, fun, low, user, tag, jid, daemonize=True):
'''
        Run this method as a multiprocessing target to execute the function in a
        separate process and fire the return data on the event bus
'''
if daemonize and not salt.utils.platform.is_windows():
# Shutdown the multiprocessing before daemonizing
salt.log.setup.shutdown_multiprocessing_logging()
salt.utils.daemonize()
# Reconfigure multiprocessing logging after daemonizing
salt.log.setup.setup_multiprocessing_logging()
# pack a few things into low
low[u'__jid__'] = jid
low[u'__user__'] = user
low[u'__tag__'] = tag
return self.low(fun, low, full_return=False)
def cmd_async(self, low):
'''
Execute a function asynchronously; eauth is respected
This function requires that :conf_master:`external_auth` is configured
and the user is authorized
.. code-block:: python
>>> wheel.cmd_async({
'fun': 'key.finger',
'match': 'jerry',
'eauth': 'auto',
'username': 'saltdev',
'password': 'saltdev',
})
{'jid': '20131219224744416681', 'tag': 'salt/wheel/20131219224744416681'}
'''
return self.master_call(**low)
def _gen_async_pub(self, jid=None):
if jid is None:
jid = salt.utils.jid.gen_jid(self.opts)
tag = salt.utils.event.tagify(jid, prefix=self.tag_prefix)
return {u'tag': tag, u'jid': jid}
def async(self, fun, low, user=u'UNKNOWN', pub=None):
'''
        Execute the function in a separate process and return the event tag to use
to watch for the return
'''
async_pub = pub if pub is not None else self._gen_async_pub()
proc = salt.utils.process.SignalHandlingMultiprocessingProcess(
target=self._proc_function,
args=(fun, low, user, async_pub[u'tag'], async_pub[u'jid']))
with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
# Reset current signals before starting the process in
# order not to inherit the current signal handlers
proc.start()
proc.join() # MUST join, otherwise we leave zombies all over
return async_pub
def print_async_event(self, suffix, event):
'''
Print all of the events with the prefix 'tag'
'''
if not isinstance(event, dict):
return
# if we are "quiet", don't print
if self.opts.get(u'quiet', False):
return
# some suffixes we don't want to print
if suffix in (u'new',):
return
try:
outputter = self.opts.get(u'output', event.get(u'outputter', None) or event.get(u'return').get(u'outputter'))
except AttributeError:
outputter = None
# if this is a ret, we have our own set of rules
if suffix == u'ret':
# Check if outputter was passed in the return data. If this is the case,
            # then the return data will be a dict with two keys: 'data' and 'outputter'
if isinstance(event.get(u'return'), dict) \
and set(event[u'return']) == set((u'data', u'outputter')):
event_data = event[u'return'][u'data']
outputter = event[u'return'][u'outputter']
else:
event_data = event[u'return']
else:
event_data = {u'suffix': suffix, u'event': event}
salt.output.display_output(event_data, outputter, self.opts)
| 35.444643
| 121
| 0.550456
|
99c72906d7533e8d340e061a5816f6c581f771b0
| 244
|
py
|
Python
|
birthday/tests/models.py
|
bashu/django-birthday
|
c80737210266e7bab005c4ac91f0e1304a85dd3e
|
[
"BSD-3-Clause"
] | 12
|
2017-02-11T12:08:12.000Z
|
2022-01-06T06:25:38.000Z
|
birthday/tests/models.py
|
bashu/django-birthday
|
c80737210266e7bab005c4ac91f0e1304a85dd3e
|
[
"BSD-3-Clause"
] | 5
|
2017-01-20T06:42:46.000Z
|
2021-10-10T19:23:48.000Z
|
birthday/tests/models.py
|
bashu/django-birthday
|
c80737210266e7bab005c4ac91f0e1304a85dd3e
|
[
"BSD-3-Clause"
] | 6
|
2017-04-28T15:12:28.000Z
|
2021-12-29T18:12:37.000Z
|
from django.db import models
from birthday import BirthdayField, BirthdayManager
class TestModel(models.Model):
__test__ = False
birthday = BirthdayField()
objects = BirthdayManager()
class Meta:
ordering = ("pk",)
| 17.428571
| 51
| 0.696721
|
e2d9ee0dbd0a2896f7a3d5b8331175a000266102
| 1,376
|
py
|
Python
|
fbdplc/apps/parse_s7xml.py
|
Jmeyer1292/block_diagram_z3
|
b7180d2dedc33ccb86aa3c58c898dd7adb9653fe
|
[
"Apache-2.0"
] | 4
|
2021-09-18T13:32:57.000Z
|
2022-03-15T22:13:56.000Z
|
fbdplc/apps/parse_s7xml.py
|
Jmeyer1292/block_diagram_z3
|
b7180d2dedc33ccb86aa3c58c898dd7adb9653fe
|
[
"Apache-2.0"
] | null | null | null |
fbdplc/apps/parse_s7xml.py
|
Jmeyer1292/block_diagram_z3
|
b7180d2dedc33ccb86aa3c58c898dd7adb9653fe
|
[
"Apache-2.0"
] | 2
|
2021-12-06T20:19:04.000Z
|
2022-03-15T22:13:58.000Z
|
from fbdplc.functions import Program
from fbdplc.s7xml import parse_function_from_file, parse_tags_from_file
from fbdplc.modeling import program_model
import fbdplc.apps.loggers as loggers
import argparse
import sys
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('paths', nargs='*')
parser.add_argument('--show_model', action='store_true')
parser.add_argument('--tags', action='store_true')
loggers.add_log_arguments(parser)
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
loggers.configure_logger(args)
if args.tags:
for p in args.paths:
tags = parse_tags_from_file(p)
print(p)
print(tags)
sys.exit(0)
elif not args.paths:
print('No inputs specified. Run with --help.')
sys.exit(0)
program = Program('main')
main = None
for path in args.paths:
print(f'Attempting to load s7xml file "{path}"')
block = parse_function_from_file(path)
program.blocks[block.name] = block
if main is None:
print(f'Setting entry point to {block.name}')
main = block
program.entry = main.name
model = program_model(program)
if args.show_model:
print('Program Model\n----------')
for a in model.assertions:
print(f'{a},')
| 26.980392
| 71
| 0.640988
|
39dc15d4bc58318f5bcbc3ebf21255f8a795b656
| 22,249
|
py
|
Python
|
loggo/loggo.py
|
interrogator/loggo
|
2f9f41d4527e65604b292a65323899e43a6a0d3b
|
[
"MIT"
] | null | null | null |
loggo/loggo.py
|
interrogator/loggo
|
2f9f41d4527e65604b292a65323899e43a6a0d3b
|
[
"MIT"
] | null | null | null |
loggo/loggo.py
|
interrogator/loggo
|
2f9f41d4527e65604b292a65323899e43a6a0d3b
|
[
"MIT"
] | 1
|
2018-05-09T13:16:31.000Z
|
2018-05-09T13:16:31.000Z
|
"""
Loggo: safe and automatable logging
"""
import inspect
import logging
import os
import traceback
import uuid
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
from typing import Optional, Set, Dict, Union, Callable, Generator, Any, cast, Mapping
# you don't need graylog installed
try:
import graypy # type: ignore
except ImportError:
graypy = None
# Strings to be formatted for pre function, post function and error during function
DEFAULT_FORMS = dict(called='*Called {call_signature}',
returned='*Returned from {call_signature} with {return_type} {return_value}',
returned_none='*Returned None from {call_signature}',
errored='*Errored during {call_signature} with {exception_type} "{exception_msg}"')
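# For illustration, with the default templates above a decorated top-level function
# called as my_func(number=1) would produce a 'called' log message roughly like:
#   *Called my_func(number=1)
# (the exact parameter formatting depends on sanitisation and truncation settings)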
DEFAULT_LOG_LEVEL = logging.INFO
class Loggo:
"""
A class for logging
"""
# Callables with an attribute of this name set to True will not be logged by Loggo
no_logs_attribute_name = '_do_not_log_this_callable'
# Only log when log level is this or higher
log_threshold = logging.DEBUG
def __init__(self,
called: Optional[str] = DEFAULT_FORMS['called'],
returned: Optional[str] = DEFAULT_FORMS['returned'],
returned_none: Optional[str] = DEFAULT_FORMS['returned_none'],
errored: Optional[str] = DEFAULT_FORMS['errored'],
error_level: int = DEFAULT_LOG_LEVEL,
facility: str = 'loggo',
ip: Optional[str] = None,
port: Optional[int] = None,
do_print: bool = False,
do_write: bool = False,
truncation: int = 7500,
raise_logging_errors: bool = False,
logfile: str = './logs/logs.txt',
obscured: str = '********',
private_data: Optional[Set[str]] = None,
max_dict_depth: int = 5,
log_if_graylog_disabled: bool = True) -> None:
"""
On instantiation, pass in a dictionary containing the config. Currently
accepted config values are:
- facility: name of the app the log is coming from
- ip: ip address for graylog
- port: port for graylog
- logfile: path to a file to which logs will be written
- do_print: print logs to console
- do_write: write logs to file
- truncation: truncate value of log data fields to this length
        - private_data: key names that should be filtered out of logging. when not
          provided, nothing is censored
        - max_dict_depth: how deep into log data loggo will look for private data
- raise_logging_errors: should Loggo errors be allowed to happen?
- obscure: a string to use instead of any private data
- log_if_graylog_disabled: boolean value, should a warning log be made when failing to
connect to graylog
"""
self.stopped = False
self.allow_errors = True
self.called = called
self.returned = returned
self.returned_none = self._best_returned_none(returned, returned_none)
self.errored = errored
self.error_level = error_level
self.facility = facility
self.ip = ip
self.port = port
self.do_print = do_print
self.do_write = do_write
self.truncation = truncation
self.raise_logging_errors = raise_logging_errors
self.logfile = logfile
self.obscured = obscured
self.private_data = private_data or set()
self.max_dict_depth = max_dict_depth
self.log_if_graylog_disabled = log_if_graylog_disabled
self.logger = logging.getLogger(self.facility)
self.logger.setLevel(Loggo.log_threshold)
self._add_graylog_handler()
@staticmethod
def _best_returned_none(returned: Optional[str], returned_none: Optional[str]) -> Optional[str]:
"""
If the user has their own msg format for 'returned' logs, but not one
for 'returned_none', we should use theirs over loggo's default
"""
# if the user explicitly doesn't want logs for returns, set to none
if not returned_none or not returned:
return None
# if they provided their own, use that
if returned_none != DEFAULT_FORMS['returned_none']:
return returned_none
# if the user just used the defaults, use those
if returned == DEFAULT_FORMS['returned']:
return returned_none
# the switch: use the user provided returned for returned_none
return returned
def _can_decorate(self, candidate: Callable, name: Optional[str] = None) -> bool:
"""
Decide if we can decorate a given object
Must have non private name and be callable
"""
name = name or getattr(candidate, '__name__', None)
if not name:
return False
if name.startswith('__') and name.endswith('__'):
return False
if not callable(candidate):
return False
return True
def _decorate_all_methods(self, cls: type, just_errors: bool = False) -> type:
"""
Decorate all viable methods in a class
"""
assert inspect.isclass(cls)
members = inspect.getmembers(cls)
members = [(k, v) for k, v in members if self._can_decorate(v, name=k)]
for name, candidate in members:
deco = self.logme(candidate, just_errors=just_errors)
# somehow, decorating classmethods as staticmethods is the only way
# to make everything work properly. we should find out why, some day
if isinstance(cls.__dict__[name], (staticmethod, classmethod)):
# Make mypy ignore due to an open issue: https://github.com/python/mypy/issues/5530
deco = staticmethod(deco) # type: ignore
try:
setattr(cls, name, deco)
# AttributeError happens if we can't write, as with __dict__
except AttributeError:
pass
return cls
def __call__(self, class_or_func: Union[Callable, type]) -> Union[Callable, type]:
"""
Make Loggo itself a decorator of either a class or a method/function, so
you can just use @Loggo on both classes and functions
"""
if inspect.isclass(class_or_func):
return self._decorate_all_methods(cast(type, class_or_func))
if self._can_decorate(class_or_func):
return self.logme(class_or_func)
return class_or_func
@contextmanager
def pause(self, allow_errors: bool = True) -> Generator[None, None, None]:
"""
A context manager that prevents loggo from logging in that context. By
default, errors will still make it through, unless allow_errors==False
"""
original = self.allow_errors, self.stopped
self.stopped = True
self.allow_errors = allow_errors
try:
yield
finally:
self.allow_errors, self.stopped = original
@contextmanager
def log_errors(self) -> Generator[None, None, None]:
"""
Context manager that logs errors only
"""
original = self.allow_errors, self.stopped
try:
yield
finally:
self.allow_errors, self.stopped = original
def stop(self, allow_errors: bool = True) -> None:
"""
Normal function: manually stop loggo from logging, but by default allow
errors through
"""
self.stopped = True
self.allow_errors = allow_errors
def start(self, allow_errors: bool = True) -> None:
"""
Normal function: manually restart loggo, also allowing errors by default
"""
self.stopped = False
self.allow_errors = allow_errors
@staticmethod
def ignore(function: Callable) -> Callable:
"""
A decorator that will override Loggo class deco, in case you do not want
to log one particular method for some reason
"""
setattr(function, Loggo.no_logs_attribute_name, True)
return function
def errors(self, class_or_func: Union[Callable, type]) -> Union[Callable, type]:
"""
Decorator: only log errors within a given method
"""
if inspect.isclass(class_or_func):
return self._decorate_all_methods(cast(type, class_or_func), just_errors=True)
return self.logme(class_or_func, just_errors=True)
def logme(self, function: Callable, just_errors: bool = False) -> Callable:
"""
This the function decorator. After having instantiated Loggo, use it as a
decorator like so:
@Loggo.logme
def f(): pass
        It will log the call, the return value, and any errors that occur during the function/method
"""
# if logging has been turned off, just do nothing
if getattr(function, Loggo.no_logs_attribute_name, False):
return function
@wraps(function)
def full_decoration(*args: Any, **kwargs: Any) -> Any:
"""
Main decorator logic. Generate a log before running the callable,
then try to run it. If it errors, log the error. If it doesn't,
log the return value.
Args and kwargs are for/from the decorated function
"""
bound = self._params_to_dict(function, *args, **kwargs)
# bound will be none if inspect signature binding failed. in this
# case, error log was created, raised if self.raise_logging_errors
if bound is None:
return function(*args, **kwargs)
param_strings = self.sanitise(bound)
formatters = self._make_call_signature(function, param_strings)
privates = [key for key in param_strings if key not in bound]
# add more format strings
more = dict(decorated=True,
couplet=uuid.uuid1(),
number_of_params=len(args) + len(kwargs),
private_keys=', '.join(privates),
timestamp=datetime.now().strftime('%d.%m %Y %H:%M:%S'))
formatters.update(more)
# 'called' log tells you what was called and with what arguments
if not just_errors:
self._generate_log('called', None, formatters, param_strings)
try:
# where the original function is actually run
response = function(*args, **kwargs)
where = 'returned_none' if response is None else 'returned'
# the successful return log
if not just_errors:
self._generate_log(where, response, formatters, param_strings)
# return whatever the original callable did
return response
# handle any possible error
except Exception as error:
formatters['traceback'] = traceback.format_exc()
self._generate_log('errored', error, formatters, param_strings)
raise
return full_decoration
def _string_params(self, non_private_params: Dict, use_repr: bool = True) -> Dict[str, str]:
"""
Turn every entry in log_data into truncated strings
"""
params = dict()
for key, val in non_private_params.items():
truncation = self.truncation if key not in {'trace', 'traceback'} else None
safe_key = self._force_string_and_truncate(key, 50, use_repr=False)
safe_val = self._force_string_and_truncate(val, truncation, use_repr=use_repr)
params[safe_key] = safe_val
return params
@staticmethod
def _make_call_signature(function: Callable, param_strings: Dict[str, str]) -> Dict:
"""
Represent the call as a string mimicking how it is written in Python.
Return it within a dict containing some other format strings.
"""
signature = '{callable}({params})'
param_str = ', '.join(f'{k}={v}' for k, v in param_strings.items())
format_strings = dict(callable=getattr(function, '__qualname__', 'unknown_callable'),
params=param_str)
format_strings['call_signature'] = signature.format(**format_strings)
return format_strings
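    # For example (hypothetical call): for a decorated function `add(a, b)` invoked as
    # add(1, 2), param_strings would be {'a': '1', 'b': '2'} and the resulting
    # 'call_signature' entry would be "add(a=1, b=2)".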
def listen_to(loggo_self, facility: str) -> None:
"""
This method can hook the logger up to anything else that logs using the
Python logging module (i.e. another logger) and steals its logs
"""
class LoggoHandler(logging.Handler):
def emit(handler_self, record: logging.LogRecord) -> None:
attributes = {'msg', 'created', 'msecs', 'stack_info',
'levelname', 'filename', 'module', 'args',
'funcName', 'process', 'relativeCreated',
'exc_info', 'name', 'processName', 'threadName',
'lineno', 'exc_text', 'pathname', 'thread',
'levelno'}
extra = dict(record.__dict__)
[extra.pop(attrib, None) for attrib in attributes]
extra['sublogger'] = facility
loggo_self.log(record.levelno, record.msg, extra)
other_loggo = logging.getLogger(facility)
other_loggo.setLevel(Loggo.log_threshold)
other_loggo.addHandler(LoggoHandler())
def _params_to_dict(self, function: Callable, *args: Any, **kwargs: Any) -> Mapping:
"""
Turn args and kwargs into an OrderedDict of {param_name: value}
"""
sig = inspect.signature(function)
bound = sig.bind(*args, **kwargs).arguments
if bound:
first = list(bound)[0]
if first == 'self':
bound.pop('self')
elif first == 'cls':
bound.pop('cls')
return bound
def _obscure_private_keys(self, log_data: Any, dict_depth: int = 0) -> Any:
"""
Obscure any private values in a dictionary recursively
"""
if not isinstance(log_data, dict) or dict_depth >= self.max_dict_depth:
return log_data
out = dict()
for key, value in log_data.items():
if key in self.private_data:
out[key] = self.obscured
else:
out[key] = self._obscure_private_keys(value, dict_depth + 1)
return out
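    # For example (hypothetical data): with private_data={'password'} and the default
    # obscured string, {'user': 'jo', 'password': 'hunter2'} becomes
    # {'user': 'jo', 'password': '********'}; nested dicts are followed up to max_dict_depth.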
def _represent_return_value(self, response: Any, truncate: Optional[int] = 140) -> str:
"""
Make a string representation of whatever a method returns
"""
# some custom handling for request response objects
if str(type(response)) == "<class 'requests.models.Response'>":
response = response.text
return '({})'.format(self._force_string_and_truncate(response, truncate, use_repr=True))
def _generate_log(self,
where: str,
returned: Any,
formatters: Dict,
safe_log_data: Dict[str, str]) -> None:
"""
generate message, level and log data for automated logs
msg (str): the unformatted message
returned (ANY): what the decorated callable returned
formatters (dict): dict containing format strings needed for message
safe_log_data (dict): dict of stringified, truncated, censored parameters
"""
# if the user turned off logs of this type, do nothing immediately
msg = getattr(self, where)
if not msg:
return
# if errors not to be shown and this is an error, quit
if not self.allow_errors and where == 'errored':
return
# if state is stopped and not an error, quit
if self.stopped and where != 'errored':
return
# do not log loggo, because why would you ever want that?
if 'loggo.loggo' in formatters['call_signature']:
return
# return value for log message
if 'returned' in where:
ret_str = self._represent_return_value(returned, truncate=None)
formatters['return_value'] = ret_str
formatters['return_type'] = type(returned).__name__
# if what is 'returned' is an exception, get the error formatters
if where == 'errored':
formatters['exception_type'] = type(returned).__name__
formatters['exception_msg'] = str(returned)
formatters['level'] = self.error_level
else:
formatters['level'] = DEFAULT_LOG_LEVEL
# format the string template
        msg = msg.format(**formatters).replace('  ', ' ')  # collapse doubled spaces left by empty fields
# make the log data
log_data = {**formatters, **safe_log_data}
custom_log_data = self.add_custom_log_data()
log_data.update(custom_log_data)
# record if logging was on or off
original_state = bool(self.stopped)
# turn it on just for now, as if we shouldn't log we'd have returned
self.stopped = False
# do logging
self.log(DEFAULT_LOG_LEVEL, msg, extra=log_data, safe=True)
# restore old stopped state
self.stopped = original_state
def add_custom_log_data(self) -> Dict[str, str]:
"""
An overwritable method useful for adding custom log data
"""
return dict()
def write_to_file(self, line: str) -> None:
"""
        Very simple log writer; could be expanded. Simply appends the line to the file
"""
needed_dir = os.path.dirname(self.logfile)
if needed_dir and not os.path.isdir(needed_dir):
os.makedirs(os.path.dirname(self.logfile))
with open(self.logfile, 'a') as fo:
fo.write(line.rstrip('\n') + '\n')
def _add_graylog_handler(self) -> None:
if not self.ip or not self.port or not graypy:
if self.log_if_graylog_disabled:
self.warning('Graylog not configured! Disabling it')
return
handler = graypy.GELFUDPHandler(self.ip, self.port, debugging_fields=False)
self.logger.addHandler(handler)
def _force_string_and_truncate(self, obj: Any, truncate: Optional[int], use_repr: bool = False) -> str:
"""
Return stringified and truncated obj. If stringification fails, log a warning
and return the string '<<Unstringable input>>'
"""
try:
obj = str(obj) if not use_repr else repr(obj)
except Exception as exc:
self.warning('Object could not be cast to string', extra=dict(exception_type=type(exc),
exception=exc))
return '<<Unstringable input>>'
if truncate is None:
return obj
# truncate and return
return (obj[:truncate] + '...') if len(obj) > (truncate + 3) else obj
@staticmethod
def _rename_protected_keys(log_data: Dict) -> Dict:
"""
        Some keys cannot be passed to the logger as-is; rename them here by prefixing with 'protected_'
"""
out = dict()
# names that logger will not like
protected = {'name', 'message', 'asctime', 'msg', 'module', 'args', 'exc_info'}
for key, value in log_data.items():
if key in protected:
key = 'protected_' + key
out[key] = value
return out
def sanitise(self, unsafe_dict: Mapping, use_repr: bool = True) -> Dict[str, str]:
"""
Ensure that log data is safe to log:
- No private keys
- Rename protected keys
- Everything strings
"""
obscured = self._obscure_private_keys(unsafe_dict)
no_protected = self._rename_protected_keys(obscured)
return self._string_params(no_protected, use_repr=use_repr)
def sanitise_msg(self, msg: str) -> str:
"""
Overwritable method to clean or alter log messages
"""
return msg
def log(self, level: int, msg: str, extra: Optional[Dict] = None, safe: bool = False) -> None:
"""
Main logging method, called both in auto logs and manually by user
level: int, priority of log
msg: string to log
extra: dict of extra fields to log
safe: do we need to sanitise extra?
"""
# don't log in a stopped state
if self.stopped:
return
extra = extra or dict()
if not safe:
extra = self.sanitise(extra, use_repr=False)
msg = self.sanitise_msg(msg)
extra.update(dict(level=str(level), loggo=str(True)))
# format logs for printing/writing to file
if self.do_write or self.do_print:
ts = extra.get('timestamp', datetime.now().strftime('%d.%m %Y %H:%M:%S'))
line = f'{ts}\t{msg}\t{level}'
trace = extra.get('traceback')
if trace:
line = f'{line} -- see below: \n{trace}\n'
# do printing and writing to file
if self.do_print:
print(line)
if self.do_write:
self.write_to_file(line)
try:
self.logger.log(level, msg, extra=extra)
# it has been known to fail, e.g. when extra contains weird stuff
except Exception:
if self.raise_logging_errors:
raise
def debug(self, *args: Any, **kwargs: Any) -> None:
return self.log(logging.DEBUG, *args, **kwargs)
def info(self, *args: Any, **kwargs: Any) -> None:
return self.log(logging.INFO, *args, **kwargs)
def warning(self, *args: Any, **kwargs: Any) -> None:
return self.log(logging.WARNING, *args, **kwargs)
def error(self, *args: Any, **kwargs: Any) -> None:
return self.log(logging.ERROR, *args, **kwargs)
def critical(self, *args: Any, **kwargs: Any) -> None:
return self.log(logging.CRITICAL, *args, **kwargs)
| 40.016187
| 107
| 0.598274
|
c16d71bac9bcf73eaa34b84d3515e223ecf4ba87
| 489
|
py
|
Python
|
2019/09.py
|
GillesArcas/Advent_of_Code
|
1f57eb1686875df2684b0d56916b1d20724e9fb9
|
[
"MIT"
] | null | null | null |
2019/09.py
|
GillesArcas/Advent_of_Code
|
1f57eb1686875df2684b0d56916b1d20724e9fb9
|
[
"MIT"
] | null | null | null |
2019/09.py
|
GillesArcas/Advent_of_Code
|
1f57eb1686875df2684b0d56916b1d20724e9fb9
|
[
"MIT"
] | null | null | null |
import intcode
DATA = '09.txt'
def code1():
with open(DATA) as f:
strcode = f.readline().strip()
code = intcode.parse_data(strcode)
comp = intcode.Intcode(code)
comp.run([1])
print('1>', comp.outvalues)
def code2():
with open(DATA) as f:
strcode = f.readline().strip()
code = intcode.parse_data(strcode)
comp = intcode.Intcode(code)
comp.run([2])
print('2>', comp.outvalues)
code1()
code2()
| 18.111111
| 43
| 0.560327
|
3e09cc6a46357fe0edaed1230003c4fe33f0be3a
| 2,788
|
py
|
Python
|
odhobs/test.py
|
anabanami/Bender
|
2ec1e4a56c0252ca8d8603303ecea24295a768bb
|
[
"MIT"
] | 1
|
2022-03-04T00:07:48.000Z
|
2022-03-04T00:07:48.000Z
|
odhobs/test.py
|
anabanami/Bender
|
2ec1e4a56c0252ca8d8603303ecea24295a768bb
|
[
"MIT"
] | null | null | null |
odhobs/test.py
|
anabanami/Bender
|
2ec1e4a56c0252ca8d8603303ecea24295a768bb
|
[
"MIT"
] | 1
|
2022-03-04T00:07:42.000Z
|
2022-03-04T00:07:42.000Z
|
import numpy as np
from scipy.special import eval_hermite, factorial
from ho import psi as cpsi
# Python implementation. Adapted from
# https://www.numbercrunch.de/blog/2014/08/calculating-the-hermite-functions/ with
# wisdom from https://scicomp.stackexchange.com/questions/30896/
#
# I managed to get a ~50% speedup by using frexp to do the rescaling using the base-2
# exponent the floats already have, without actually calculating a log. This also avoids
# taking a log of zero (discussed in the stackexchange post), which is otherwise awkward
# to do.
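# As a reminder of how the frexp-based rescaling works: np.frexp(x) returns a mantissa m
# with 0.5 <= |m| < 1 and an integer exponent e such that x == m * 2**e, e.g.
# np.frexp(12.0) == (0.75, 4) because 12 == 0.75 * 2**4. Scaling h by 2**-e therefore
# keeps it in a safe floating-point range while sum_log_scale accumulates the discarded exponents.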
def psi(n, x):
h_prev = np.ones_like(x) * np.pi ** -0.25
h = np.sqrt(2.0) * x * np.pi ** -0.25
sum_log_scale = np.zeros_like(x)
if n == 0:
h = h_prev
for i in range(2, n + 1):
h, h_prev = np.sqrt(2 / i) * x * h - np.sqrt((i - 1) / i) * h_prev, h
_, log_scale = np.frexp(h)
scale = np.exp2(-log_scale)
h *= scale
h_prev *= scale
sum_log_scale += log_scale
return h * np.exp(-(x ** 2) / 2 + np.log(2) * sum_log_scale)
def psi_explicit(n, x):
c = 1 / (np.pi ** 0.25 * np.sqrt(2 ** n * factorial(n)))
return c * eval_hermite(n, x) * np.exp(-(x ** 2) / 2)
import time
n_small = 9
n_big = 1000
x = np.linspace(-10, 10, 1024, endpoint=False)
# Quick test for correctness
print("testing correctness of first ten states")
for n in range(10):
a = psi(n, x)
b = psi_explicit(n, x)
c = cpsi(n, x)
assert np.allclose(a, c)
assert np.allclose(b, c)
print('testing handling of dtypes')
a = psi(n_small, 10)
print(" np float")
c = cpsi(n_small, np.array([10.0])[0])
assert a == c
print(" np 0d float arr")
c = cpsi(n_small, np.array(10.0))
assert a == c
print(" np int")
c = cpsi(n_small, np.array([10])[0])
assert a == c
print(" np 0d int arr")
c = cpsi(n_small, np.array(10))
assert a == c
print(" pyint")
c = cpsi(n_small, 10)
assert a == c
print(" pyfloat")
c = cpsi(n_small, 10.0)
assert a == c
print(f"testing array speed with {x.shape} array")
start_time = time.time()
a = psi(n_big, x)
pytime = time.time() - start_time
print(f' pytime: {pytime}')
start_time = time.time()
c = cpsi(n_big, x)
cytime = time.time() - start_time
print(f' cytime: {cytime}')
assert np.allclose(a, c)
print(f' {(pytime / cytime):.1f}× speedup')
print(f"testing python loop speed with {x.size} points")
a = np.empty_like(x)
start_time = time.time()
for i, x_i in enumerate(x.flat):
a.flat[i] = psi(n_big, x_i)
pytime = time.time() - start_time
print(f' pytime: {pytime}')
c = np.empty_like(x)
start_time = time.time()
for i, x_i in enumerate(x.flat):
c.flat[i] = cpsi(n_big, x_i)
cytime = time.time() - start_time
print(f' cytime: {cytime}')
assert np.allclose(a, c)
print(f' {(pytime / cytime):.1f}× speedup')
| 25.345455
| 88
| 0.637374
|
fd0cae0972340533414a06ea290851d40685d078
| 168
|
py
|
Python
|
docs/examples_src/fields/optional.py
|
dynalz/odmantic
|
f20f08f8ab1768534c1e743f7539bfe4f8c73bdd
|
[
"0BSD"
] | 486
|
2020-10-19T05:33:53.000Z
|
2022-03-30T12:54:57.000Z
|
docs/examples_src/fields/optional.py
|
dynalz/odmantic
|
f20f08f8ab1768534c1e743f7539bfe4f8c73bdd
|
[
"0BSD"
] | 183
|
2020-10-19T18:15:25.000Z
|
2022-03-31T04:59:21.000Z
|
docs/examples_src/fields/optional.py
|
dynalz/odmantic
|
f20f08f8ab1768534c1e743f7539bfe4f8c73bdd
|
[
"0BSD"
] | 53
|
2020-10-19T09:35:01.000Z
|
2022-03-31T20:39:51.000Z
|
from typing import Optional
from odmantic import Model
class Person(Model):
name: str
age: Optional[int]
john = Person(name="John")
print(john.age)
#> None
| 12.923077
| 27
| 0.702381
|
1911d3a74b402e645f6d894aac2762bb649e97b3
| 10,828
|
py
|
Python
|
test/unit/managers/test_HDAManager.py
|
mmyschyshyn/Vocalaxy
|
9f501b49eaa96b90a39a05c74170b5cb78d88759
|
[
"CC-BY-3.0"
] | null | null | null |
test/unit/managers/test_HDAManager.py
|
mmyschyshyn/Vocalaxy
|
9f501b49eaa96b90a39a05c74170b5cb78d88759
|
[
"CC-BY-3.0"
] | 1
|
2015-02-21T18:48:19.000Z
|
2015-02-27T15:50:32.000Z
|
test/unit/managers/test_HDAManager.py
|
mmyschyshyn/Vocalaxy
|
9f501b49eaa96b90a39a05c74170b5cb78d88759
|
[
"CC-BY-3.0"
] | 3
|
2015-02-22T13:34:16.000Z
|
2020-10-01T01:28:04.000Z
|
import sys
import os
import pprint
import unittest
__GALAXY_ROOT__ = os.getcwd() + '/../../../'
sys.path.insert( 1, __GALAXY_ROOT__ + 'lib' )
from galaxy import eggs
eggs.require( 'SQLAlchemy >= 0.4' )
import sqlalchemy
from galaxy import model
from galaxy import exceptions
from galaxy.util.bunch import Bunch
import mock
from test_ModelManager import BaseTestCase
from galaxy.managers.histories import HistoryManager
from galaxy.managers.datasets import DatasetManager
from galaxy.managers.hdas import HDAManager
# =============================================================================
default_password = '123456'
user2_data = dict( email='user2@user2.user2', username='user2', password=default_password )
user3_data = dict( email='user3@user3.user3', username='user3', password=default_password )
# =============================================================================
class HDAManagerTestCase( BaseTestCase ):
def set_up_managers( self ):
super( HDAManagerTestCase, self ).set_up_managers()
self.history_mgr = HistoryManager( self.app )
self.dataset_mgr = DatasetManager( self.app )
self.hda_mgr = HDAManager( self.app )
def test_base( self ):
hda_model = model.HistoryDatasetAssociation
owner = self.user_mgr.create( self.trans, **user2_data )
history1 = self.history_mgr.create( self.trans, name='history1', user=owner )
hda1 = self.hda_mgr.create( self.trans, history=history1, hid=1 )
hda2 = self.hda_mgr.create( self.trans, history=history1, hid=2 )
hda3 = self.hda_mgr.create( self.trans, history=history1, hid=3 )
self.log( "should be able to query" )
hdas = self.trans.sa_session.query( hda_model ).all()
self.assertEqual( self.hda_mgr.list( self.trans ), hdas )
self.assertEqual( self.hda_mgr.one( self.trans, filters=( hda_model.id == hda1.id ) ), hda1 )
self.assertEqual( self.hda_mgr.by_id( self.trans, hda1.id ), hda1 )
self.assertEqual( self.hda_mgr.by_ids( self.trans, [ hda2.id, hda1.id ] ), [ hda2, hda1 ] )
self.log( "should be able to limit and offset" )
self.assertEqual( self.hda_mgr.list( self.trans, limit=1 ), hdas[0:1] )
self.assertEqual( self.hda_mgr.list( self.trans, offset=1 ), hdas[1:] )
self.assertEqual( self.hda_mgr.list( self.trans, limit=1, offset=1 ), hdas[1:2] )
self.assertEqual( self.hda_mgr.list( self.trans, limit=0 ), [] )
self.assertEqual( self.hda_mgr.list( self.trans, offset=3 ), [] )
self.log( "should be able to order" )
self.assertEqual( self.hda_mgr.list( self.trans, order_by=sqlalchemy.desc( hda_model.create_time ) ),
[ hda3, hda2, hda1 ] )
def test_create( self ):
owner = self.user_mgr.create( self.trans, **user2_data )
non_owner = self.user_mgr.create( self.trans, **user3_data )
history1 = self.history_mgr.create( self.trans, name='history1', user=owner )
dataset1 = self.dataset_mgr.create( self.trans )
self.log( "should be able to create a new HDA with a specified history and dataset" )
hda1 = self.hda_mgr.create( self.trans, history=history1, dataset=dataset1 )
self.assertIsInstance( hda1, model.HistoryDatasetAssociation )
self.assertEqual( hda1, self.trans.sa_session.query( model.HistoryDatasetAssociation ).get( hda1.id ) )
self.assertEqual( hda1.history, history1 )
self.assertEqual( hda1.dataset, dataset1 )
self.assertEqual( hda1.hid, 1 )
self.log( "should be able to create a new HDA with only a specified history and no dataset" )
hda2 = self.hda_mgr.create( self.trans, history=history1 )
self.assertIsInstance( hda2, model.HistoryDatasetAssociation )
self.assertIsInstance( hda2.dataset, model.Dataset )
self.assertEqual( hda2.history, history1 )
self.assertEqual( hda2.hid, 2 )
self.log( "should be able to create a new HDA with no history and no dataset" )
hda3 = self.hda_mgr.create( self.trans, hid=None )
self.assertIsInstance( hda3, model.HistoryDatasetAssociation )
self.assertIsInstance( hda3.dataset, model.Dataset, msg="dataset will be auto created" )
self.assertIsNone( hda3.history, msg="history will be None" )
self.assertEqual( hda3.hid, None, msg="should allow setting hid to None (or any other value)" )
def test_copy_from_hda( self ):
owner = self.user_mgr.create( self.trans, **user2_data )
history1 = self.history_mgr.create( self.trans, name='history1', user=owner )
dataset1 = self.dataset_mgr.create( self.trans )
hda1 = self.hda_mgr.create( self.trans, history=history1, dataset=dataset1 )
self.log( "should be able to copy an HDA" )
hda2 = self.hda_mgr.copy( self.trans, hda1, history=history1 )
self.assertIsInstance( hda2, model.HistoryDatasetAssociation )
self.assertEqual( hda2, self.trans.sa_session.query( model.HistoryDatasetAssociation ).get( hda2.id ) )
self.assertEqual( hda2.name, hda1.name )
self.assertEqual( hda2.history, hda1.history )
self.assertEqual( hda2.dataset, hda1.dataset )
self.assertNotEqual( hda2, hda1 )
#def test_copy_from_ldda( self ):
# owner = self.user_mgr.create( self.trans, **user2_data )
# history1 = self.history_mgr.create( self.trans, name='history1', user=owner )
#
# self.log( "should be able to copy an HDA" )
# hda2 = self.hda_mgr.copy_ldda( self.trans, history1, hda1 )
def test_delete( self ):
owner = self.user_mgr.create( self.trans, **user2_data )
history1 = self.history_mgr.create( self.trans, name='history1', user=owner )
dataset1 = self.dataset_mgr.create( self.trans )
item1 = self.hda_mgr.create( self.trans, history=history1, dataset=dataset1 )
self.log( "should be able to delete and undelete an hda" )
self.assertFalse( item1.deleted )
self.assertEqual( self.hda_mgr.delete( self.trans, item1 ), item1 )
self.assertTrue( item1.deleted )
self.assertEqual( self.hda_mgr.undelete( self.trans, item1 ), item1 )
self.assertFalse( item1.deleted )
def test_purge_allowed( self ):
self.trans.app.config.allow_user_dataset_purge = True
owner = self.user_mgr.create( self.trans, **user2_data )
history1 = self.history_mgr.create( self.trans, name='history1', user=owner )
dataset1 = self.dataset_mgr.create( self.trans )
item1 = self.hda_mgr.create( self.trans, history=history1, dataset=dataset1 )
self.log( "should purge an hda if config does allow" )
self.assertFalse( item1.purged )
self.assertEqual( self.hda_mgr.purge( self.trans, item1 ), item1 )
self.assertTrue( item1.purged )
def test_purge_not_allowed( self ):
self.trans.app.config.allow_user_dataset_purge = False
owner = self.user_mgr.create( self.trans, **user2_data )
history1 = self.history_mgr.create( self.trans, name='history1', user=owner )
dataset1 = self.dataset_mgr.create( self.trans )
item1 = self.hda_mgr.create( self.trans, history=history1, dataset=dataset1 )
self.log( "should raise an error when purging an hda if config does not allow" )
self.assertFalse( item1.purged )
self.assertRaises( exceptions.ConfigDoesNotAllowException, self.hda_mgr.purge, self.trans, item1 )
self.assertFalse( item1.purged )
def test_ownable( self ):
owner = self.user_mgr.create( self.trans, **user2_data )
non_owner = self.user_mgr.create( self.trans, **user3_data )
history1 = self.history_mgr.create( self.trans, name='history1', user=owner )
dataset1 = self.dataset_mgr.create( self.trans )
item1 = self.hda_mgr.create( self.trans, history1, dataset1 )
self.log( "should be able to poll whether a given user owns an item" )
self.assertTrue( self.hda_mgr.is_owner( self.trans, item1, owner ) )
self.assertFalse( self.hda_mgr.is_owner( self.trans, item1, non_owner ) )
self.log( "should raise an error when checking ownership with non-owner" )
self.assertRaises( exceptions.ItemOwnershipException,
self.hda_mgr.error_unless_owner, self.trans, item1, non_owner )
self.log( "should raise an error when checking ownership with anonymous" )
self.assertRaises( exceptions.ItemOwnershipException,
self.hda_mgr.error_unless_owner, self.trans, item1, None )
self.log( "should not raise an error when checking ownership with owner" )
self.assertEqual( self.hda_mgr.error_unless_owner( self.trans, item1, owner ), item1 )
self.log( "should not raise an error when checking ownership with admin" )
self.assertEqual( self.hda_mgr.error_unless_owner( self.trans, item1, self.admin_user ), item1 )
def test_accessible( self ):
owner = self.user_mgr.create( self.trans, **user2_data )
non_owner = self.user_mgr.create( self.trans, **user3_data )
history1 = self.history_mgr.create( self.trans, name='history1', user=owner )
dataset1 = self.dataset_mgr.create( self.trans )
item1 = self.hda_mgr.create( self.trans, history1, dataset1 )
self.log( "(by default, dataset permissions are lax) should be accessible to all" )
for user in self.user_mgr.list( self.trans ):
self.assertTrue( self.hda_mgr.is_accessible( self.trans, item1, user ) )
#TODO: set perms on underlying dataset and then test accessible
def test_anon( self ):
anon_user = None
self.trans.set_user( anon_user )
history1 = self.history_mgr.create( self.trans, name='anon_history', user=anon_user )
self.trans.set_history( history1 )
dataset1 = self.dataset_mgr.create( self.trans )
item1 = self.hda_mgr.create( self.trans, history1, dataset1 )
self.log( "should not raise an error when checking ownership/access on anonymous' own dataset" )
self.assertTrue( self.hda_mgr.is_accessible( self.trans, item1, anon_user ) )
self.assertEqual( self.hda_mgr.error_unless_owner( self.trans, item1, anon_user ), item1 )
self.log( "should raise an error when checking ownership on anonymous' dataset with other user" )
non_owner = self.user_mgr.create( self.trans, **user3_data )
self.assertRaises( exceptions.ItemOwnershipException,
self.hda_mgr.error_unless_owner, self.trans, item1, non_owner )
# =============================================================================
if __name__ == '__main__':
# or more generally, nosetests test_resourcemanagers.py -s -v
unittest.main()
| 49.442922
| 111
| 0.667067
|
31af6e2b99afe39ad4658abd405ef878f2e20fc5
| 74,510
|
py
|
Python
|
elasticsearch/client/__init__.py
|
johnistan/elasticsearch-py
|
7751fede5a4733130e210b62ab1f99ab9763c9c7
|
[
"Apache-2.0"
] | 46
|
2019-03-01T02:19:18.000Z
|
2021-12-18T12:37:02.000Z
|
elasticsearch/client/__init__.py
|
johnistan/elasticsearch-py
|
7751fede5a4733130e210b62ab1f99ab9763c9c7
|
[
"Apache-2.0"
] | 8
|
2019-01-22T10:46:43.000Z
|
2020-12-30T13:03:04.000Z
|
elasticsearch/client/__init__.py
|
johnistan/elasticsearch-py
|
7751fede5a4733130e210b62ab1f99ab9763c9c7
|
[
"Apache-2.0"
] | 67
|
2018-10-29T09:50:49.000Z
|
2022-01-06T07:35:56.000Z
|
from __future__ import unicode_literals
import logging
from ..transport import Transport
from ..exceptions import TransportError
from ..compat import string_types, urlparse, unquote
from .indices import IndicesClient
from .ingest import IngestClient
from .cluster import ClusterClient
from .cat import CatClient
from .nodes import NodesClient
from .remote import RemoteClient
from .snapshot import SnapshotClient
from .tasks import TasksClient
from .xpack import XPackClient
from .utils import query_params, _make_path, SKIP_IN_PATH
logger = logging.getLogger('elasticsearch')
def _normalize_hosts(hosts):
"""
Helper function to transform hosts argument to
:class:`~elasticsearch.Elasticsearch` to a list of dicts.
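    For illustration (hostname and credentials below are made up), a single
    URL-style host string is expanded roughly like this::

        _normalize_hosts("https://user:secret@example.org:9200/prefix")
        # -> [{'host': 'example.org', 'port': 9200, 'use_ssl': True,
        #      'http_auth': 'user:secret', 'url_prefix': '/prefix'}]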
"""
# if hosts are empty, just defer to defaults down the line
if hosts is None:
return [{}]
# passed in just one string
if isinstance(hosts, string_types):
hosts = [hosts]
out = []
# normalize hosts to dicts
for host in hosts:
if isinstance(host, string_types):
if '://' not in host:
host = "//%s" % host
parsed_url = urlparse(host)
h = {"host": parsed_url.hostname}
if parsed_url.port:
h["port"] = parsed_url.port
if parsed_url.scheme == "https":
h['port'] = parsed_url.port or 443
h['use_ssl'] = True
if parsed_url.username or parsed_url.password:
h['http_auth'] = '%s:%s' % (unquote(parsed_url.username),
unquote(parsed_url.password))
if parsed_url.path and parsed_url.path != '/':
h['url_prefix'] = parsed_url.path
out.append(h)
else:
out.append(host)
return out
class Elasticsearch(object):
"""
Elasticsearch low-level client. Provides a straightforward mapping from
Python to ES REST endpoints.
The instance has attributes ``cat``, ``cluster``, ``indices``, ``ingest``,
``nodes``, ``snapshot`` and ``tasks`` that provide access to instances of
:class:`~elasticsearch.client.CatClient`,
:class:`~elasticsearch.client.ClusterClient`,
:class:`~elasticsearch.client.IndicesClient`,
:class:`~elasticsearch.client.IngestClient`,
:class:`~elasticsearch.client.NodesClient`,
:class:`~elasticsearch.client.SnapshotClient` and
:class:`~elasticsearch.client.TasksClient` respectively. This is the
preferred (and only supported) way to get access to those classes and their
methods.
You can specify your own connection class which should be used by providing
the ``connection_class`` parameter::
# create connection to localhost using the ThriftConnection
es = Elasticsearch(connection_class=ThriftConnection)
If you want to turn on :ref:`sniffing` you have several options (described
in :class:`~elasticsearch.Transport`)::
# create connection that will automatically inspect the cluster to get
# the list of active nodes. Start with nodes running on 'esnode1' and
# 'esnode2'
es = Elasticsearch(
['esnode1', 'esnode2'],
# sniff before doing anything
sniff_on_start=True,
# refresh nodes after a node fails to respond
sniff_on_connection_fail=True,
# and also every 60 seconds
sniffer_timeout=60
)
Different hosts can have different parameters, use a dictionary per node to
specify those::
# connect to localhost directly and another node using SSL on port 443
# and an url_prefix. Note that ``port`` needs to be an int.
es = Elasticsearch([
{'host': 'localhost'},
{'host': 'othernode', 'port': 443, 'url_prefix': 'es', 'use_ssl': True},
])
If using SSL, there are several parameters that control how we deal with
certificates (see :class:`~elasticsearch.Urllib3HttpConnection` for
detailed description of the options)::
es = Elasticsearch(
['localhost:443', 'other_host:443'],
# turn on SSL
use_ssl=True,
# make sure we verify SSL certificates (off by default)
verify_certs=True,
# provide a path to CA certs on disk
ca_certs='/path/to/CA_certs'
)
SSL client authentication is supported
(see :class:`~elasticsearch.Urllib3HttpConnection` for
detailed description of the options)::
es = Elasticsearch(
['localhost:443', 'other_host:443'],
# turn on SSL
use_ssl=True,
# make sure we verify SSL certificates (off by default)
verify_certs=True,
# provide a path to CA certs on disk
ca_certs='/path/to/CA_certs',
# PEM formatted SSL client certificate
client_cert='/path/to/clientcert.pem',
# PEM formatted SSL client key
client_key='/path/to/clientkey.pem'
)
Alternatively you can use RFC-1738 formatted URLs, as long as they are not
in conflict with other options::
es = Elasticsearch(
[
'http://user:secret@localhost:9200/',
'https://user:secret@other_host:443/production'
],
verify_certs=True
)
By default, `JSONSerializer
<https://github.com/elastic/elasticsearch-py/blob/master/elasticsearch/serializer.py#L24>`_
is used to encode all outgoing requests.
However, you can implement your own custom serializer::
from elasticsearch.serializer import JSONSerializer
class SetEncoder(JSONSerializer):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, Something):
return 'CustomSomethingRepresentation'
return JSONSerializer.default(self, obj)
es = Elasticsearch(serializer=SetEncoder())
"""
def __init__(self, hosts=None, transport_class=Transport, **kwargs):
"""
:arg hosts: list of nodes we should connect to. Node should be a
dictionary ({"host": "localhost", "port": 9200}), the entire dictionary
will be passed to the :class:`~elasticsearch.Connection` class as
kwargs, or a string in the format of ``host[:port]`` which will be
translated to a dictionary automatically. If no value is given the
:class:`~elasticsearch.Urllib3HttpConnection` class defaults will be used.
:arg transport_class: :class:`~elasticsearch.Transport` subclass to use.
:arg kwargs: any additional arguments will be passed on to the
:class:`~elasticsearch.Transport` class and, subsequently, to the
:class:`~elasticsearch.Connection` instances.
"""
self.transport = transport_class(_normalize_hosts(hosts), **kwargs)
# namespaced clients for compatibility with API names
self.indices = IndicesClient(self)
self.ingest = IngestClient(self)
self.cluster = ClusterClient(self)
self.cat = CatClient(self)
self.nodes = NodesClient(self)
self.remote = RemoteClient(self)
self.snapshot = SnapshotClient(self)
self.tasks = TasksClient(self)
self.xpack = XPackClient(self)
def __repr__(self):
try:
# get a list of all connections
cons = self.transport.hosts
# truncate to 5 if there are too many
if len(cons) > 5:
cons = cons[:5] + ['...']
return '<{cls}({cons})>'.format(cls=self.__class__.__name__, cons=cons)
except:
# probably operating on custom transport and connection_pool, ignore
return super(Elasticsearch, self).__repr__()
def _bulk_body(self, body):
# if not passed in a string, serialize items and join by newline
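        # Illustrative sketch (the example payload is made up): a body such as
        #   [{"index": {"_id": 1}}, {"field": "value"}]
        # is serialized to the newline-delimited string
        #   '{"index": {"_id": 1}}\n{"field": "value"}\n'
        # before being sent as the request payload.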
if not isinstance(body, string_types):
body = '\n'.join(map(self.transport.serializer.dumps, body))
# bulk body must end with a newline
if not body.endswith('\n'):
body += '\n'
return body
@query_params()
def ping(self, params=None):
"""
Returns True if the cluster is up, False otherwise.
`<http://www.elastic.co/guide/>`_
"""
try:
return self.transport.perform_request('HEAD', '/', params=params)
except TransportError:
return False
@query_params()
def info(self, params=None):
"""
Get the basic info from the current cluster.
`<http://www.elastic.co/guide/>`_
"""
return self.transport.perform_request('GET', '/', params=params)
@query_params('parent', 'pipeline', 'refresh', 'routing', 'timeout',
'timestamp', 'ttl', 'version', 'version_type', 'wait_for_active_shards')
def create(self, index, doc_type, id, body, params=None):
"""
Adds a typed JSON document in a specific index, making it searchable.
Behind the scenes this method calls index(..., op_type='create')
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html>`_
:arg index: The name of the index
:arg doc_type: The type of the document
:arg id: Document ID
:arg body: The document
:arg parent: ID of the parent document
:arg pipeline: The pipeline id to preprocess incoming documents with
:arg refresh: If `true` then refresh the affected shards to make this
operation visible to search, if `wait_for` then wait for a refresh
to make this operation visible to search, if `false` (the default)
then do nothing with refreshes., valid choices are: 'true', 'false',
'wait_for'
:arg routing: Specific routing value
:arg timeout: Explicit operation timeout
:arg timestamp: Explicit timestamp for the document
:arg ttl: Expiration time for the document
:arg version: Explicit version number for concurrency control
:arg version_type: Specific version type, valid choices are: 'internal',
'external', 'external_gte', 'force'
:arg wait_for_active_shards: Sets the number of shard copies that must
be active before proceeding with the index operation. Defaults to 1,
meaning the primary shard only. Set to `all` for all shard copies,
otherwise set to any non-negative value less than or equal to the
total number of copies for the shard (number of replicas + 1)
"""
for param in (index, doc_type, id, body):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request('PUT', _make_path(index, doc_type,
id, '_create'), params=params, body=body)
@query_params('op_type', 'parent', 'pipeline', 'refresh', 'routing',
'timeout', 'timestamp', 'ttl', 'version', 'version_type',
'wait_for_active_shards')
def index(self, index, doc_type, body, id=None, params=None):
"""
Adds or updates a typed JSON document in a specific index, making it searchable.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html>`_
:arg index: The name of the index
:arg doc_type: The type of the document
:arg body: The document
:arg id: Document ID
:arg op_type: Explicit operation type, default 'index', valid choices
are: 'index', 'create'
:arg parent: ID of the parent document
:arg pipeline: The pipeline id to preprocess incoming documents with
:arg refresh: If `true` then refresh the affected shards to make this
operation visible to search, if `wait_for` then wait for a refresh
to make this operation visible to search, if `false` (the default)
then do nothing with refreshes., valid choices are: 'true', 'false',
'wait_for'
:arg routing: Specific routing value
:arg timeout: Explicit operation timeout
:arg timestamp: Explicit timestamp for the document
:arg ttl: Expiration time for the document
:arg version: Explicit version number for concurrency control
:arg version_type: Specific version type, valid choices are: 'internal',
'external', 'external_gte', 'force'
:arg wait_for_active_shards: Sets the number of shard copies that must
be active before proceeding with the index operation. Defaults to 1,
meaning the primary shard only. Set to `all` for all shard copies,
otherwise set to any non-negative value less than or equal to the
total number of copies for the shard (number of replicas + 1)
"""
for param in (index, doc_type, body):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request('POST' if id in SKIP_IN_PATH else 'PUT',
_make_path(index, doc_type, id), params=params, body=body)
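    # Illustrative usage sketch (index, type and document values are made up):
    #   es.index(index="my-index", doc_type="doc", id=1,
    #            body={"title": "Hello"}, refresh="true")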
@query_params('_source', '_source_exclude', '_source_include', 'parent',
'preference', 'realtime', 'refresh', 'routing', 'stored_fields',
'version', 'version_type')
def exists(self, index, doc_type, id, params=None):
"""
        Returns a boolean indicating whether or not the given document exists in Elasticsearch.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html>`_
:arg index: The name of the index
:arg doc_type: The type of the document (use `_all` to fetch the first
document matching the ID across all types)
:arg id: The document ID
:arg _source: True or false to return the _source field or not, or a
list of fields to return
:arg _source_exclude: A list of fields to exclude from the returned
_source field
:arg _source_include: A list of fields to extract and return from the
_source field
:arg parent: The ID of the parent document
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg realtime: Specify whether to perform the operation in realtime or
search mode
:arg refresh: Refresh the shard containing the document before
performing the operation
:arg routing: Specific routing value
:arg stored_fields: A comma-separated list of stored fields to return in
the response
:arg version: Explicit version number for concurrency control
:arg version_type: Specific version type, valid choices are: 'internal',
'external', 'external_gte', 'force'
"""
for param in (index, doc_type, id):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request('HEAD', _make_path(index,
doc_type, id), params=params)
@query_params('_source', '_source_exclude', '_source_include', 'parent',
'preference', 'realtime', 'refresh', 'routing', 'version',
'version_type')
def exists_source(self, index, doc_type, id, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html>`_
:arg index: The name of the index
:arg doc_type: The type of the document; use `_all` to fetch the first
document matching the ID across all types
:arg id: The document ID
:arg _source: True or false to return the _source field or not, or a
list of fields to return
:arg _source_exclude: A list of fields to exclude from the returned
_source field
:arg _source_include: A list of fields to extract and return from the
_source field
:arg parent: The ID of the parent document
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg realtime: Specify whether to perform the operation in realtime or
search mode
:arg refresh: Refresh the shard containing the document before
performing the operation
:arg routing: Specific routing value
:arg version: Explicit version number for concurrency control
:arg version_type: Specific version type, valid choices are: 'internal',
'external', 'external_gte', 'force'
"""
for param in (index, doc_type, id):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request('HEAD', _make_path(index,
doc_type, id, '_source'), params=params)
@query_params('_source', '_source_exclude', '_source_include', 'parent',
'preference', 'realtime', 'refresh', 'routing', 'stored_fields',
'version', 'version_type')
def get(self, index, doc_type, id, params=None):
"""
Get a typed JSON document from the index based on its id.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html>`_
:arg index: The name of the index
:arg doc_type: The type of the document (use `_all` to fetch the first
document matching the ID across all types)
:arg id: The document ID
:arg _source: True or false to return the _source field or not, or a
list of fields to return
:arg _source_exclude: A list of fields to exclude from the returned
_source field
:arg _source_include: A list of fields to extract and return from the
_source field
:arg parent: The ID of the parent document
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg realtime: Specify whether to perform the operation in realtime or
search mode
:arg refresh: Refresh the shard containing the document before
performing the operation
:arg routing: Specific routing value
:arg stored_fields: A comma-separated list of stored fields to return in
the response
:arg version: Explicit version number for concurrency control
:arg version_type: Specific version type, valid choices are: 'internal',
'external', 'external_gte', 'force'
"""
for param in (index, doc_type, id):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request('GET', _make_path(index,
doc_type, id), params=params)
@query_params('_source', '_source_exclude', '_source_include', 'parent',
'preference', 'realtime', 'refresh', 'routing', 'version',
'version_type')
def get_source(self, index, doc_type, id, params=None):
"""
        Get the source of a document by its index, type and id.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html>`_
:arg index: The name of the index
:arg doc_type: The type of the document; use `_all` to fetch the first
document matching the ID across all types
:arg id: The document ID
:arg _source: True or false to return the _source field or not, or a
list of fields to return
:arg _source_exclude: A list of fields to exclude from the returned
_source field
:arg _source_include: A list of fields to extract and return from the
_source field
:arg parent: The ID of the parent document
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg realtime: Specify whether to perform the operation in realtime or
search mode
:arg refresh: Refresh the shard containing the document before
performing the operation
:arg routing: Specific routing value
:arg version: Explicit version number for concurrency control
:arg version_type: Specific version type, valid choices are: 'internal',
'external', 'external_gte', 'force'
"""
for param in (index, doc_type, id):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request('GET', _make_path(index,
doc_type, id, '_source'), params=params)
@query_params('_source', '_source_exclude', '_source_include', 'preference',
'realtime', 'refresh', 'routing', 'stored_fields')
def mget(self, body, index=None, doc_type=None, params=None):
"""
Get multiple documents based on an index, type (optional) and ids.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-get.html>`_
:arg body: Document identifiers; can be either `docs` (containing full
            document information) or `ids` (when index and type are provided in
            the URL).
:arg index: The name of the index
:arg doc_type: The type of the document
:arg _source: True or false to return the _source field or not, or a
list of fields to return
:arg _source_exclude: A list of fields to exclude from the returned
_source field
:arg _source_include: A list of fields to extract and return from the
_source field
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg realtime: Specify whether to perform the operation in realtime or
search mode
:arg refresh: Refresh the shard containing the document before
performing the operation
:arg routing: Specific routing value
:arg stored_fields: A comma-separated list of stored fields to return in
the response
"""
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
return self.transport.perform_request('GET', _make_path(index,
doc_type, '_mget'), params=params, body=body)
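    # Illustrative usage sketch (index name and ids are made up); the body may
    # carry `ids` when index and type are given in the URL, as described above:
    #   es.mget(index="my-index", doc_type="doc", body={"ids": [1, 2, 3]})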
@query_params('_source', '_source_exclude', '_source_include', 'fields',
'lang', 'parent', 'refresh', 'retry_on_conflict', 'routing', 'timeout',
'timestamp', 'ttl', 'version', 'version_type', 'wait_for_active_shards')
def update(self, index, doc_type, id, body=None, params=None):
"""
Update a document based on a script or partial data provided.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update.html>`_
:arg index: The name of the index
:arg doc_type: The type of the document
:arg id: Document ID
:arg body: The request definition using either `script` or partial `doc`
:arg _source: True or false to return the _source field or not, or a
list of fields to return
:arg _source_exclude: A list of fields to exclude from the returned
_source field
:arg _source_include: A list of fields to extract and return from the
_source field
:arg fields: A comma-separated list of fields to return in the response
:arg lang: The script language (default: painless)
        :arg parent: ID of the parent document. It is only used for routing and
            for the upsert request
        :arg refresh: If `true` then refresh the affected shards to make this
operation visible to search, if `wait_for` then wait for a refresh
to make this operation visible to search, if `false` (the default)
then do nothing with refreshes., valid choices are: 'true', 'false',
'wait_for'
:arg retry_on_conflict: Specify how many times should the operation be
retried when a conflict occurs (default: 0)
:arg routing: Specific routing value
:arg timeout: Explicit operation timeout
:arg timestamp: Explicit timestamp for the document
:arg ttl: Expiration time for the document
:arg version: Explicit version number for concurrency control
:arg version_type: Specific version type, valid choices are: 'internal',
'force'
:arg wait_for_active_shards: Sets the number of shard copies that must
be active before proceeding with the update operation. Defaults to
1, meaning the primary shard only. Set to `all` for all shard
copies, otherwise set to any non-negative value less than or equal
to the total number of copies for the shard (number of replicas + 1)
"""
for param in (index, doc_type, id):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request('POST', _make_path(index,
doc_type, id, '_update'), params=params, body=body)
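    # Illustrative usage sketch (field values are made up); the body carries
    # either a partial `doc` or a `script`, as described above:
    #   es.update(index="my-index", doc_type="doc", id=1,
    #             body={"doc": {"title": "Updated"}})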
@query_params('_source', '_source_exclude', '_source_include',
'allow_no_indices', 'analyze_wildcard', 'analyzer',
'batched_reduce_size', 'default_operator', 'df', 'docvalue_fields',
'expand_wildcards', 'explain', 'from_', 'ignore_unavailable', 'lenient',
'max_concurrent_shard_requests', 'pre_filter_shard_size', 'preference',
'q', 'request_cache', 'routing', 'scroll', 'search_type', 'size',
'sort', 'stats', 'stored_fields', 'suggest_field', 'suggest_mode',
'suggest_size', 'suggest_text', 'terminate_after', 'timeout',
'track_scores', 'track_total_hits', 'typed_keys', 'version')
def search(self, index=None, doc_type=None, body=None, params=None):
"""
Execute a search query and get back search hits that match the query.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html>`_
:arg index: A comma-separated list of index names to search; use `_all`
or empty string to perform the operation on all indices
:arg doc_type: A comma-separated list of document types to search; leave
empty to perform the operation on all types
:arg body: The search definition using the Query DSL
:arg _source: True or false to return the _source field or not, or a
list of fields to return
:arg _source_exclude: A list of fields to exclude from the returned
_source field
:arg _source_include: A list of fields to extract and return from the
_source field
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg analyze_wildcard: Specify whether wildcard and prefix queries
should be analyzed (default: false)
:arg analyzer: The analyzer to use for the query string
:arg batched_reduce_size: The number of shard results that should be
reduced at once on the coordinating node. This value should be used
as a protection mechanism to reduce the memory overhead per search
request if the potential number of shards in the request can be
large., default 512
:arg default_operator: The default operator for query string query (AND
or OR), default 'OR', valid choices are: 'AND', 'OR'
:arg df: The field to use as default where no field prefix is given in
the query string
:arg docvalue_fields: A comma-separated list of fields to return as the
docvalue representation of a field for each hit
:arg expand_wildcards: Whether to expand wildcard expression to concrete
indices that are open, closed or both., default 'open', valid
choices are: 'open', 'closed', 'none', 'all'
:arg explain: Specify whether to return detailed information about score
computation as part of a hit
:arg from\\_: Starting offset (default: 0)
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
:arg lenient: Specify whether format-based query failures (such as
providing text to a numeric field) should be ignored
:arg max_concurrent_shard_requests: The number of concurrent shard
requests this search executes concurrently. This value should be
used to limit the impact of the search on the cluster in order to
limit the number of concurrent shard requests, default 'The default
grows with the number of nodes in the cluster but is at most 256.'
:arg pre_filter_shard_size: A threshold that enforces a pre-filter
roundtrip to prefilter search shards based on query rewriting if
the number of shards the search request expands to exceeds the
threshold. This filter roundtrip can limit the number of shards
significantly if for instance a shard can not match any documents
            based on its rewrite method, i.e. if date filters are mandatory to
match but the shard bounds and the query are disjoint., default 128
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg q: Query in the Lucene query string syntax
:arg request_cache: Specify if request cache should be used for this
request or not, defaults to index level setting
:arg routing: A comma-separated list of specific routing values
:arg scroll: Specify how long a consistent view of the index should be
maintained for scrolled search
:arg search_type: Search operation type, valid choices are:
'query_then_fetch', 'dfs_query_then_fetch'
:arg size: Number of hits to return (default: 10)
:arg sort: A comma-separated list of <field>:<direction> pairs
:arg stats: Specific 'tag' of the request for logging and statistical
purposes
:arg stored_fields: A comma-separated list of stored fields to return as
part of a hit
:arg suggest_field: Specify which field to use for suggestions
:arg suggest_mode: Specify suggest mode, default 'missing', valid
choices are: 'missing', 'popular', 'always'
:arg suggest_size: How many suggestions to return in response
:arg suggest_text: The source text for which the suggestions should be
returned
:arg terminate_after: The maximum number of documents to collect for
each shard, upon reaching which the query execution will terminate
early.
:arg timeout: Explicit operation timeout
:arg track_scores: Whether to calculate and return scores even if they
are not used for sorting
:arg track_total_hits: Indicate if the number of documents that match
the query should be tracked
:arg typed_keys: Specify whether aggregation and suggester names should
be prefixed by their respective types in the response
:arg version: Specify whether to return document version as part of a
hit
"""
# from is a reserved word so it cannot be used, use from_ instead
if 'from_' in params:
params['from'] = params.pop('from_')
if doc_type and not index:
index = '_all'
return self.transport.perform_request('GET', _make_path(index,
doc_type, '_search'), params=params, body=body)
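    # Illustrative usage sketch (index name and query are made up):
    #   es.search(index="my-index", size=10,
    #             body={"query": {"match": {"title": "hello"}}})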
@query_params('_source', '_source_exclude', '_source_include',
'allow_no_indices', 'analyze_wildcard', 'analyzer', 'conflicts',
'default_operator', 'df', 'expand_wildcards', 'from_',
'ignore_unavailable', 'lenient', 'pipeline', 'preference', 'q',
'refresh', 'request_cache', 'requests_per_second', 'routing', 'scroll',
'scroll_size', 'search_timeout', 'search_type', 'size', 'slices',
'sort', 'stats', 'terminate_after', 'timeout', 'version',
'version_type', 'wait_for_active_shards', 'wait_for_completion')
def update_by_query(self, index, doc_type=None, body=None, params=None):
"""
Perform an update on all documents matching a query.
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html>`_
:arg index: A comma-separated list of index names to search; use `_all`
or empty string to perform the operation on all indices
:arg doc_type: A comma-separated list of document types to search; leave
empty to perform the operation on all types
:arg body: The search definition using the Query DSL
:arg _source: True or false to return the _source field or not, or a
list of fields to return
:arg _source_exclude: A list of fields to exclude from the returned
_source field
:arg _source_include: A list of fields to extract and return from the
_source field
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg analyze_wildcard: Specify whether wildcard and prefix queries
should be analyzed (default: false)
:arg analyzer: The analyzer to use for the query string
:arg conflicts: What to do when the update by query hits version
conflicts?, default 'abort', valid choices are: 'abort', 'proceed'
:arg default_operator: The default operator for query string query (AND
or OR), default 'OR', valid choices are: 'AND', 'OR'
:arg df: The field to use as default where no field prefix is given in
the query string
:arg expand_wildcards: Whether to expand wildcard expression to concrete
indices that are open, closed or both., default 'open', valid
choices are: 'open', 'closed', 'none', 'all'
:arg from\\_: Starting offset (default: 0)
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
:arg lenient: Specify whether format-based query failures (such as
providing text to a numeric field) should be ignored
:arg pipeline: Ingest pipeline to set on index requests made by this
action. (default: none)
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg q: Query in the Lucene query string syntax
        :arg refresh: Should the affected indexes be refreshed?
:arg request_cache: Specify if request cache should be used for this
request or not, defaults to index level setting
:arg requests_per_second: The throttle to set on this request in sub-
requests per second. -1 means no throttle., default 0
:arg routing: A comma-separated list of specific routing values
:arg scroll: Specify how long a consistent view of the index should be
maintained for scrolled search
:arg scroll_size: Size on the scroll request powering the
update_by_query
:arg search_timeout: Explicit timeout for each search request. Defaults
to no timeout.
:arg search_type: Search operation type, valid choices are:
'query_then_fetch', 'dfs_query_then_fetch'
:arg size: Number of hits to return (default: 10)
:arg slices: The number of slices this task should be divided into.
Defaults to 1 meaning the task isn't sliced into subtasks., default
1
:arg sort: A comma-separated list of <field>:<direction> pairs
:arg stats: Specific 'tag' of the request for logging and statistical
purposes
:arg terminate_after: The maximum number of documents to collect for
each shard, upon reaching which the query execution will terminate
early.
:arg timeout: Time each individual bulk request should wait for shards
that are unavailable., default '1m'
:arg version: Specify whether to return document version as part of a
hit
:arg version_type: Should the document increment the version number
(internal) on hit or not (reindex)
:arg wait_for_active_shards: Sets the number of shard copies that must
be active before proceeding with the update by query operation.
Defaults to 1, meaning the primary shard only. Set to `all` for all
shard copies, otherwise set to any non-negative value less than or
equal to the total number of copies for the shard (number of
replicas + 1)
        :arg wait_for_completion: Should the request block until the
update by query operation is complete., default True
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'index'.")
return self.transport.perform_request('POST', _make_path(index,
doc_type, '_update_by_query'), params=params, body=body)
@query_params('refresh', 'requests_per_second', 'slices', 'timeout',
'wait_for_active_shards', 'wait_for_completion')
def reindex(self, body, params=None):
"""
Reindex all documents from one index to another.
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-reindex.html>`_
:arg body: The search definition using the Query DSL and the prototype
for the index request.
        :arg refresh: Should the affected indexes be refreshed?
:arg requests_per_second: The throttle to set on this request in sub-
requests per second. -1 means no throttle., default 0
:arg slices: The number of slices this task should be divided into.
Defaults to 1 meaning the task isn't sliced into subtasks., default
1
:arg timeout: Time each individual bulk request should wait for shards
that are unavailable., default '1m'
:arg wait_for_active_shards: Sets the number of shard copies that must
be active before proceeding with the reindex operation. Defaults to
1, meaning the primary shard only. Set to `all` for all shard
copies, otherwise set to any non-negative value less than or equal
to the total number of copies for the shard (number of replicas + 1)
        :arg wait_for_completion: Should the request block until the
reindex is complete., default True
"""
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
return self.transport.perform_request('POST', '/_reindex',
params=params, body=body)
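    # Illustrative usage sketch (index names are made up); the body combines a
    # source search definition with a destination index prototype:
    #   es.reindex(body={"source": {"index": "old-index"},
    #                    "dest": {"index": "new-index"}})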
@query_params('requests_per_second')
def reindex_rethrottle(self, task_id=None, params=None):
"""
Change the value of ``requests_per_second`` of a running ``reindex`` task.
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-reindex.html>`_
:arg task_id: The task id to rethrottle
:arg requests_per_second: The throttle to set on this request in
floating sub-requests per second. -1 means set no throttle.
"""
return self.transport.perform_request('POST', _make_path('_reindex',
task_id, '_rethrottle'), params=params)
@query_params('_source', '_source_exclude', '_source_include',
'allow_no_indices', 'analyze_wildcard', 'analyzer', 'conflicts',
'default_operator', 'df', 'expand_wildcards', 'from_',
'ignore_unavailable', 'lenient', 'preference', 'q', 'refresh',
'request_cache', 'requests_per_second', 'routing', 'scroll',
'scroll_size', 'search_timeout', 'search_type', 'size', 'slices',
'sort', 'stats', 'terminate_after', 'timeout', 'version',
'wait_for_active_shards', 'wait_for_completion')
def delete_by_query(self, index, body, doc_type=None, params=None):
"""
Delete all documents matching a query.
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html>`_
:arg index: A comma-separated list of index names to search; use `_all`
or empty string to perform the operation on all indices
:arg body: The search definition using the Query DSL
:arg doc_type: A comma-separated list of document types to search; leave
empty to perform the operation on all types
:arg _source: True or false to return the _source field or not, or a
list of fields to return
:arg _source_exclude: A list of fields to exclude from the returned
_source field
:arg _source_include: A list of fields to extract and return from the
_source field
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg analyze_wildcard: Specify whether wildcard and prefix queries
should be analyzed (default: false)
:arg analyzer: The analyzer to use for the query string
:arg conflicts: What to do when the delete-by-query hits version
conflicts?, default 'abort', valid choices are: 'abort', 'proceed'
:arg default_operator: The default operator for query string query (AND
or OR), default 'OR', valid choices are: 'AND', 'OR'
:arg df: The field to use as default where no field prefix is given in
the query string
:arg expand_wildcards: Whether to expand wildcard expression to concrete
indices that are open, closed or both., default 'open', valid
choices are: 'open', 'closed', 'none', 'all'
:arg from\\_: Starting offset (default: 0)
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
:arg lenient: Specify whether format-based query failures (such as
providing text to a numeric field) should be ignored
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg q: Query in the Lucene query string syntax
        :arg refresh: Should the affected indexes be refreshed?
:arg request_cache: Specify if request cache should be used for this
request or not, defaults to index level setting
:arg requests_per_second: The throttle for this request in sub-requests
per second. -1 means no throttle., default 0
:arg routing: A comma-separated list of specific routing values
:arg scroll: Specify how long a consistent view of the index should be
maintained for scrolled search
:arg scroll_size: Size on the scroll request powering the
update_by_query
:arg search_timeout: Explicit timeout for each search request. Defaults
to no timeout.
:arg search_type: Search operation type, valid choices are:
'query_then_fetch', 'dfs_query_then_fetch'
:arg size: Number of hits to return (default: 10)
:arg slices: The number of slices this task should be divided into.
Defaults to 1 meaning the task isn't sliced into subtasks., default
1
:arg sort: A comma-separated list of <field>:<direction> pairs
:arg stats: Specific 'tag' of the request for logging and statistical
purposes
:arg terminate_after: The maximum number of documents to collect for
each shard, upon reaching which the query execution will terminate
early.
:arg timeout: Time each individual bulk request should wait for shards
that are unavailable., default '1m'
:arg version: Specify whether to return document version as part of a
hit
:arg wait_for_active_shards: Sets the number of shard copies that must
be active before proceeding with the delete by query operation.
Defaults to 1, meaning the primary shard only. Set to `all` for all
shard copies, otherwise set to any non-negative value less than or
equal to the total number of copies for the shard (number of
replicas + 1)
        :arg wait_for_completion: Should the request block until the
delete-by-query is complete., default True
"""
for param in (index, body):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request('POST', _make_path(index,
doc_type, '_delete_by_query'), params=params, body=body)
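    # Illustrative usage sketch (index name and query are made up):
    #   es.delete_by_query(index="my-index",
    #                      body={"query": {"match": {"status": "stale"}}})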
@query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable',
'local', 'preference', 'routing')
def search_shards(self, index=None, doc_type=None, params=None):
"""
The search shards api returns the indices and shards that a search
request would be executed against. This can give useful feedback for working
out issues or planning optimizations with routing and shard preferences.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-shards.html>`_
:arg index: A comma-separated list of index names to search; use `_all`
or empty string to perform the operation on all indices
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to concrete
indices that are open, closed or both., default 'open', valid
choices are: 'open', 'closed', 'none', 'all'
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg routing: Specific routing value
"""
return self.transport.perform_request('GET', _make_path(index,
doc_type, '_search_shards'), params=params)
@query_params('allow_no_indices', 'expand_wildcards', 'explain',
'ignore_unavailable', 'preference', 'profile', 'routing', 'scroll',
'search_type', 'typed_keys')
def search_template(self, index=None, doc_type=None, body=None, params=None):
"""
A query that accepts a query template and a map of key/value pairs to
fill in template parameters.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html>`_
:arg index: A comma-separated list of index names to search; use `_all`
or empty string to perform the operation on all indices
:arg doc_type: A comma-separated list of document types to search; leave
empty to perform the operation on all types
:arg body: The search definition template and its params
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to concrete
indices that are open, closed or both., default 'open', valid
choices are: 'open', 'closed', 'none', 'all'
:arg explain: Specify whether to return detailed information about score
computation as part of a hit
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg profile: Specify whether to profile the query execution
:arg routing: A comma-separated list of specific routing values
:arg scroll: Specify how long a consistent view of the index should be
maintained for scrolled search
:arg search_type: Search operation type, valid choices are:
'query_then_fetch', 'query_and_fetch', 'dfs_query_then_fetch',
'dfs_query_and_fetch'
:arg typed_keys: Specify whether aggregation and suggester names should
be prefixed by their respective types in the response
"""
return self.transport.perform_request('GET', _make_path(index,
doc_type, '_search', 'template'), params=params, body=body)
@query_params('_source', '_source_exclude', '_source_include',
'analyze_wildcard', 'analyzer', 'default_operator', 'df', 'lenient',
'parent', 'preference', 'q', 'routing', 'stored_fields')
def explain(self, index, doc_type, id, body=None, params=None):
"""
The explain api computes a score explanation for a query and a specific
        document. This can give useful feedback on whether or not a document
        matches a specific query.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-explain.html>`_
:arg index: The name of the index
:arg doc_type: The type of the document
:arg id: The document ID
:arg body: The query definition using the Query DSL
:arg _source: True or false to return the _source field or not, or a
list of fields to return
:arg _source_exclude: A list of fields to exclude from the returned
_source field
:arg _source_include: A list of fields to extract and return from the
_source field
:arg analyze_wildcard: Specify whether wildcards and prefix queries in
the query string query should be analyzed (default: false)
:arg analyzer: The analyzer for the query string query
:arg default_operator: The default operator for query string query (AND
or OR), default 'OR', valid choices are: 'AND', 'OR'
:arg df: The default field for query string query (default: _all)
:arg lenient: Specify whether format-based query failures (such as
providing text to a numeric field) should be ignored
:arg parent: The ID of the parent document
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg q: Query in the Lucene query string syntax
:arg routing: Specific routing value
:arg stored_fields: A comma-separated list of stored fields to return in
the response
"""
for param in (index, doc_type, id):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request('GET', _make_path(index,
doc_type, id, '_explain'), params=params, body=body)
@query_params('scroll')
def scroll(self, scroll_id=None, body=None, params=None):
"""
Scroll a search request created by specifying the scroll parameter.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-scroll.html>`_
:arg scroll_id: The scroll ID
:arg body: The scroll ID if not passed by URL or query parameter.
:arg scroll: Specify how long a consistent view of the index should be
maintained for scrolled search
"""
if scroll_id in SKIP_IN_PATH and body in SKIP_IN_PATH:
raise ValueError("You need to supply scroll_id or body.")
elif scroll_id and not body:
            body = {'scroll_id': scroll_id}
elif scroll_id:
params['scroll_id'] = scroll_id
return self.transport.perform_request('GET', '/_search/scroll',
params=params, body=body)
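    # Illustrative paging sketch (index name, page size and keep-alive are made
    # up); the initial search returns a `_scroll_id` that is passed back here:
    #   page = es.search(index="my-index", scroll="2m", size=100)
    #   while page["hits"]["hits"]:
    #       # ... process page["hits"]["hits"] ...
    #       page = es.scroll(scroll_id=page["_scroll_id"], scroll="2m")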
@query_params()
def clear_scroll(self, scroll_id=None, body=None, params=None):
"""
Clear the scroll request created by specifying the scroll parameter to
search.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-scroll.html>`_
:arg scroll_id: A comma-separated list of scroll IDs to clear
:arg body: A comma-separated list of scroll IDs to clear if none was
specified via the scroll_id parameter
"""
if scroll_id in SKIP_IN_PATH and body in SKIP_IN_PATH:
raise ValueError("You need to supply scroll_id or body.")
elif scroll_id and not body:
            body = {'scroll_id': [scroll_id]}
elif scroll_id:
params['scroll_id'] = scroll_id
return self.transport.perform_request('DELETE', '/_search/scroll',
params=params, body=body)
@query_params('parent', 'refresh', 'routing', 'timeout', 'version',
'version_type', 'wait_for_active_shards')
def delete(self, index, doc_type, id, params=None):
"""
Delete a typed JSON document from a specific index based on its id.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete.html>`_
:arg index: The name of the index
:arg doc_type: The type of the document
:arg id: The document ID
:arg parent: ID of parent document
        :arg refresh: If `true` then refresh the affected shards to make this
operation visible to search, if `wait_for` then wait for a refresh
to make this operation visible to search, if `false` (the default)
then do nothing with refreshes., valid choices are: 'true', 'false',
'wait_for'
:arg routing: Specific routing value
:arg timeout: Explicit operation timeout
:arg version: Explicit version number for concurrency control
:arg version_type: Specific version type, valid choices are: 'internal',
'external', 'external_gte', 'force'
:arg wait_for_active_shards: Sets the number of shard copies that must
be active before proceeding with the delete operation. Defaults to
1, meaning the primary shard only. Set to `all` for all shard
copies, otherwise set to any non-negative value less than or equal
to the total number of copies for the shard (number of replicas + 1)
"""
for param in (index, doc_type, id):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request('DELETE', _make_path(index,
doc_type, id), params=params)
@query_params('allow_no_indices', 'analyze_wildcard', 'analyzer',
'default_operator', 'df', 'expand_wildcards', 'ignore_unavailable',
'lenient', 'min_score', 'preference', 'q', 'routing', 'terminate_after')
def count(self, index=None, doc_type=None, body=None, params=None):
"""
Execute a query and get the number of matches for that query.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-count.html>`_
:arg index: A comma-separated list of indices to restrict the results
:arg doc_type: A comma-separated list of types to restrict the results
:arg body: A query to restrict the results specified with the Query DSL
(optional)
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg analyze_wildcard: Specify whether wildcard and prefix queries
should be analyzed (default: false)
:arg analyzer: The analyzer to use for the query string
:arg default_operator: The default operator for query string query (AND
or OR), default 'OR', valid choices are: 'AND', 'OR'
:arg df: The field to use as default where no field prefix is given in
the query string
:arg expand_wildcards: Whether to expand wildcard expression to concrete
indices that are open, closed or both., default 'open', valid
choices are: 'open', 'closed', 'none', 'all'
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
:arg lenient: Specify whether format-based query failures (such as
providing text to a numeric field) should be ignored
:arg min_score: Include only documents with a specific `_score` value in
the result
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg q: Query in the Lucene query string syntax
:arg routing: Specific routing value
"""
if doc_type and not index:
index = '_all'
return self.transport.perform_request('GET', _make_path(index,
doc_type, '_count'), params=params, body=body)
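    # Usage sketch (hypothetical index and query; `es` is assumed to be an
    # Elasticsearch client instance). The response carries the match count
    # rather than the hits themselves:
    #
    #   resp = es.count(index='my-index',
    #                   body={'query': {'term': {'user': 'kimchy'}}})
    #   print(resp['count'])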
@query_params('_source', '_source_exclude', '_source_include', 'fields',
'pipeline', 'refresh', 'routing', 'timeout', 'wait_for_active_shards')
def bulk(self, body, index=None, doc_type=None, params=None):
"""
Perform many index/delete operations in a single API call.
See the :func:`~elasticsearch.helpers.bulk` helper function for a more
friendly API.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html>`_
:arg body: The operation definition and data (action-data pairs),
separated by newlines
:arg index: Default index for items which don't provide one
:arg doc_type: Default document type for items which don't provide one
:arg _source: True or false to return the _source field or not, or
default list of fields to return, can be overridden on each sub-
request
:arg _source_exclude: Default list of fields to exclude from the
returned _source field, can be overridden on each sub-request
:arg _source_include: Default list of fields to extract and return from
the _source field, can be overridden on each sub-request
:arg fields: Default comma-separated list of fields to return in the
response for updates, can be overridden on each sub-request
:arg pipeline: The pipeline id to preprocess incoming documents with
        :arg refresh: If `true` then refresh the affected shards to make this
operation visible to search, if `wait_for` then wait for a refresh
to make this operation visible to search, if `false` (the default)
then do nothing with refreshes., valid choices are: 'true', 'false',
'wait_for'
:arg routing: Specific routing value
:arg timeout: Explicit operation timeout
:arg wait_for_active_shards: Sets the number of shard copies that must
be active before proceeding with the bulk operation. Defaults to 1,
meaning the primary shard only. Set to `all` for all shard copies,
otherwise set to any non-negative value less than or equal to the
total number of copies for the shard (number of replicas + 1)
"""
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
return self.transport.perform_request('POST', _make_path(index,
doc_type, '_bulk'), params=params, body=self._bulk_body(body),
headers={'content-type': 'application/x-ndjson'})
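    # Usage sketch of the newline-delimited action/data body this method
    # expects (index and field names are made up; `es` is a client instance).
    # Each action line is followed by its source document, except for deletes:
    #
    #   body = (
    #       '{"index": {"_index": "test", "_type": "doc", "_id": "1"}}\n'
    #       '{"field1": "value1"}\n'
    #       '{"delete": {"_index": "test", "_type": "doc", "_id": "2"}}\n'
    #   )
    #   es.bulk(body=body)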
@query_params('max_concurrent_searches', 'pre_filter_shard_size',
'search_type', 'typed_keys')
def msearch(self, body, index=None, doc_type=None, params=None):
"""
Execute several search requests within the same API.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html>`_
:arg body: The request definitions (metadata-search request definition
pairs), separated by newlines
:arg index: A comma-separated list of index names to use as default
:arg doc_type: A comma-separated list of document types to use as
default
:arg max_concurrent_searches: Controls the maximum number of concurrent
searches the multi search api will execute
        :arg pre_filter_shard_size: A threshold that enforces a pre-filter
            roundtrip to prefilter search shards based on query rewriting if
            the number of shards the search request expands to exceeds the
            threshold. This filter roundtrip can limit the number of shards
            significantly if, for instance, a shard cannot match any documents
            based on its rewrite method, i.e. if date filters are mandatory to
            match but the shard bounds and the query are disjoint., default 128
:arg search_type: Search operation type, valid choices are:
'query_then_fetch', 'query_and_fetch', 'dfs_query_then_fetch',
'dfs_query_and_fetch'
:arg typed_keys: Specify whether aggregation and suggester names should
be prefixed by their respective types in the response
"""
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
return self.transport.perform_request('GET', _make_path(index,
doc_type, '_msearch'), params=params, body=self._bulk_body(body),
headers={'content-type': 'application/x-ndjson'})
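    # Usage sketch (hypothetical indices and queries): the body interleaves one
    # header line and one search-body line per request, newline separated, and
    # the response holds one entry per request under 'responses':
    #
    #   body = (
    #       '{"index": "test"}\n'
    #       '{"query": {"match_all": {}}}\n'
    #       '{"index": "other"}\n'
    #       '{"query": {"match": {"user": "kimchy"}}}\n'
    #   )
    #   results = es.msearch(body=body)['responses']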
@query_params('field_statistics', 'fields', 'offsets', 'parent', 'payloads',
'positions', 'preference', 'realtime', 'routing', 'term_statistics',
'version', 'version_type')
def termvectors(self, index, doc_type, id=None, body=None, params=None):
"""
Returns information and statistics on terms in the fields of a
particular document. The document could be stored in the index or
artificially provided by the user (Added in 1.4). Note that for
documents stored in the index, this is a near realtime API as the term
vectors are not available until the next refresh.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-termvectors.html>`_
:arg index: The index in which the document resides.
:arg doc_type: The type of the document.
:arg id: The id of the document, when not specified a doc param should
be supplied.
:arg body: Define parameters and or supply a document to get termvectors
for. See documentation.
:arg field_statistics: Specifies if document count, sum of document
frequencies and sum of total term frequencies should be returned.,
default True
:arg fields: A comma-separated list of fields to return.
:arg offsets: Specifies if term offsets should be returned., default
True
:arg parent: Parent id of documents.
:arg payloads: Specifies if term payloads should be returned., default
True
:arg positions: Specifies if term positions should be returned., default
True
:arg preference: Specify the node or shard the operation should be
performed on (default: random).
:arg realtime: Specifies if request is real-time as opposed to near-
real-time (default: true).
:arg routing: Specific routing value.
:arg term_statistics: Specifies if total term frequency and document
frequency should be returned., default False
:arg version: Explicit version number for concurrency control
:arg version_type: Specific version type, valid choices are: 'internal',
'external', 'external_gte', 'force'
"""
for param in (index, doc_type):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request('GET', _make_path(index,
doc_type, id, '_termvectors'), params=params, body=body)
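    # Usage sketch (made-up index/type/field names; `es` is a client instance):
    # request term statistics for a stored document; an artificial document can
    # be supplied via `body` instead of an id.
    #
    #   es.termvectors(index='test', doc_type='doc', id='1',
    #                  fields='text', term_statistics=True)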
@query_params('field_statistics', 'fields', 'ids', 'offsets', 'parent',
'payloads', 'positions', 'preference', 'realtime', 'routing',
'term_statistics', 'version', 'version_type')
def mtermvectors(self, index=None, doc_type=None, body=None, params=None):
"""
        The multi termvectors API allows getting multiple termvectors based on
        an index, type and id.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-termvectors.html>`_
:arg index: The index in which the document resides.
:arg doc_type: The type of the document.
:arg body: Define ids, documents, parameters or a list of parameters per
document here. You must at least provide a list of document ids. See
documentation.
:arg field_statistics: Specifies if document count, sum of document
frequencies and sum of total term frequencies should be returned.
Applies to all returned documents unless otherwise specified in body
"params" or "docs"., default True
:arg fields: A comma-separated list of fields to return. Applies to all
returned documents unless otherwise specified in body "params" or
"docs".
:arg ids: A comma-separated list of documents ids. You must define ids
as parameter or set "ids" or "docs" in the request body
:arg offsets: Specifies if term offsets should be returned. Applies to
all returned documents unless otherwise specified in body "params"
or "docs"., default True
:arg parent: Parent id of documents. Applies to all returned documents
unless otherwise specified in body "params" or "docs".
:arg payloads: Specifies if term payloads should be returned. Applies to
all returned documents unless otherwise specified in body "params"
or "docs"., default True
:arg positions: Specifies if term positions should be returned. Applies
to all returned documents unless otherwise specified in body
"params" or "docs"., default True
:arg preference: Specify the node or shard the operation should be
performed on (default: random) .Applies to all returned documents
unless otherwise specified in body "params" or "docs".
:arg realtime: Specifies if requests are real-time as opposed to near-
real-time (default: true).
:arg routing: Specific routing value. Applies to all returned documents
unless otherwise specified in body "params" or "docs".
:arg term_statistics: Specifies if total term frequency and document
frequency should be returned. Applies to all returned documents
unless otherwise specified in body "params" or "docs"., default
False
:arg version: Explicit version number for concurrency control
:arg version_type: Specific version type, valid choices are: 'internal',
'external', 'external_gte', 'force'
"""
return self.transport.perform_request('GET', _make_path(index,
doc_type, '_mtermvectors'), params=params, body=body)
@query_params()
def put_script(self, id, body, context=None, params=None):
"""
        Create a script in a given language with the specified ID.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html>`_
:arg id: Script ID
:arg body: The document
"""
for param in (id, body):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request('PUT', _make_path('_scripts', id,
context), params=params, body=body)
@query_params()
def get_script(self, id, params=None):
"""
Retrieve a script from the API.
`<http://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html>`_
:arg id: Script ID
"""
if id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'id'.")
return self.transport.perform_request('GET', _make_path('_scripts', id),
params=params)
@query_params()
def put_template(self, id, body, params=None):
"""
Create a search template.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html>`_
:arg id: Template ID
:arg body: The document
"""
for param in (id, body):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request('PUT', _make_path('_search',
'template', id), params=params, body=body)
@query_params()
def get_template(self, id, params=None):
"""
Retrieve a search template.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html>`_
:arg id: Template ID
"""
if id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'id'.")
return self.transport.perform_request('GET', _make_path('_search',
'template', id), params=params)
@query_params()
def delete_script(self, id, params=None):
"""
Remove a stored script from elasticsearch.
`<http://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html>`_
:arg id: Script ID
"""
if id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'id'.")
return self.transport.perform_request('DELETE', _make_path('_scripts',
id), params=params)
@query_params()
def render_search_template(self, id=None, body=None, params=None):
"""
`<http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-template.html>`_
:arg id: The id of the stored search template
:arg body: The search definition template and its params
"""
return self.transport.perform_request('GET', _make_path('_render',
'template', id), params=params, body=body)
@query_params('max_concurrent_searches', 'search_type', 'typed_keys')
def msearch_template(self, body, index=None, doc_type=None, params=None):
"""
        The /_search/template endpoint allows the use of the mustache language
        to pre-render search requests before they are executed, filling
        existing templates with template parameters.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html>`_
:arg body: The request definitions (metadata-search request definition
pairs), separated by newlines
:arg index: A comma-separated list of index names to use as default
:arg doc_type: A comma-separated list of document types to use as
default
:arg max_concurrent_searches: Controls the maximum number of concurrent
searches the multi search api will execute
:arg search_type: Search operation type, valid choices are:
'query_then_fetch', 'query_and_fetch', 'dfs_query_then_fetch',
'dfs_query_and_fetch'
:arg typed_keys: Specify whether aggregation and suggester names should
be prefixed by their respective types in the response
"""
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
return self.transport.perform_request('GET', _make_path(index, doc_type,
'_msearch', 'template'), params=params, body=self._bulk_body(body),
headers={'content-type': 'application/x-ndjson'})
@query_params('allow_no_indices', 'expand_wildcards', 'fields',
'ignore_unavailable')
def field_caps(self, index=None, body=None, params=None):
"""
        The field capabilities API allows retrieving the capabilities of fields across multiple indices.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-field-caps.html>`_
:arg index: A comma-separated list of index names; use `_all` or empty
string to perform the operation on all indices
:arg body: Field json objects containing an array of field names
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to concrete
indices that are open, closed or both., default 'open', valid
choices are: 'open', 'closed', 'none', 'all'
:arg fields: A comma-separated list of field names
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
"""
return self.transport.perform_request('GET', _make_path(index,
'_field_caps'), params=params, body=body)
| 52.731776
| 104
| 0.655241
|
37979de98e2fa371057dca67c0dcfd17b938f6ff
| 101
|
py
|
Python
|
python/testData/mover/multiLineSelection3_afterDown.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/mover/multiLineSelection3_afterDown.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/mover/multiLineSelection3_afterDown.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
class Test(object):
def q(self):
pass
<selection> a = 1
b = 2
<caret></selection>
| 14.428571
| 20
| 0.534653
|
7665efd14219da92035509943129d77c014c9aab
| 77
|
py
|
Python
|
asynced/__init__.py
|
jorenham/asynced
|
4b406ad9626078e903fd012d3bcc40f4e27af41d
|
[
"MIT"
] | 1
|
2022-03-15T19:56:13.000Z
|
2022-03-15T19:56:13.000Z
|
asynced/__init__.py
|
jorenham/asynced
|
4b406ad9626078e903fd012d3bcc40f4e27af41d
|
[
"MIT"
] | null | null | null |
asynced/__init__.py
|
jorenham/asynced
|
4b406ad9626078e903fd012d3bcc40f4e27af41d
|
[
"MIT"
] | null | null | null |
from .asyncio_utils import *
from .exceptions import *
from .states import *
| 19.25
| 28
| 0.766234
|
67338f5e3dd01ded464c34f24bf084239650ec9e
| 27,551
|
py
|
Python
|
digit.py
|
bsciolla/matchit_digit
|
147d7570a4856b2a594cf872f1535875c7f0fa98
|
[
"Apache-2.0"
] | null | null | null |
digit.py
|
bsciolla/matchit_digit
|
147d7570a4856b2a594cf872f1535875c7f0fa98
|
[
"Apache-2.0"
] | null | null | null |
digit.py
|
bsciolla/matchit_digit
|
147d7570a4856b2a594cf872f1535875c7f0fa98
|
[
"Apache-2.0"
] | 1
|
2022-03-22T07:38:40.000Z
|
2022-03-22T07:38:40.000Z
|
#!/usr/bin/env python
# Import Modules
import os
import pygame
import copy
import sys
import numpy
import math
from pygame.locals import *
from pygame.compat import geterror
import match_func
import sound
import imageloader
from spriteclass import *
from speedseq import Speedseq
# functions to create our resources
from globals import DELTAX, DELTAY, ANIMRATE, SLOWED_TIME
from globals import MATCH_VIEWX, MATCH_VIEWY
from globals import DEATHMODE, XSTART, YSTART, SCROLLING_DEATHX
from globals import HSCREEN, VSCREEN, HBLOCK, VBLOCK
from globals import LARGE_TIME, FADE_WAIT, KEYBOARD_WAIT, PROBA_HIT, DELAY_HURT, SCORING_ROW_TIME, COMBOVALIDATED, FACTORDISPLAYCOMBO
# COMBOX and TAG_NO_PRESSURE are referenced further down (MenuLife, updateBoard)
# but were never imported; they are assumed to be defined in the globals module
# alongside the other combo constants.
from globals import COMBOX, TAG_NO_PRESSURE
from globals import NKEYS
from globals import HITBOXX, HITBOXY
from globals import SPEEDSEQ
# Ellipsoidal patch for matching search
PATCH = numpy.zeros((MATCH_VIEWY*2, MATCH_VIEWX*2))
for j in range(PATCH.shape[0]):
for i in range(PATCH.shape[1]):
if (i-MATCH_VIEWX+0.5)**2.0/MATCH_VIEWX**2.0 +\
(j-MATCH_VIEWY+0.5)**2/MATCH_VIEWY**2.0 <= 1:
PATCH[j, i] = 1
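# The loop above marks a cell (i, j) of PATCH when its centre lies inside the
# ellipse ((i - MATCH_VIEWX + 0.5) / MATCH_VIEWX)^2 +
# ((j - MATCH_VIEWY + 0.5) / MATCH_VIEWY)^2 <= 1, i.e. an ellipse inscribed in
# the (2*MATCH_VIEWY) x (2*MATCH_VIEWX) search window used for tile matching.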
SCROLLING_MINX = 200 if DEATHMODE == 0 else 1
SCROLLING_MAXX = HSCREEN - 200
SCROLLING_MINY = 200
SCROLLING_MAXY = VSCREEN - 200
assert(DEATHMODE == 0 or SCROLLING_DEATHX > SCROLLING_MINX)
fontsize = 30
def get_ticks():
return(pygame.time.get_ticks())
if not pygame.font:
print('Warning, fonts disabled')
if not pygame.mixer:
print('Warning, sound disabled')
main_dir = os.path.split(os.path.abspath(__file__))[0]
data_dir = os.path.join(main_dir, 'data')
def coord_to_tiles(x, y):
cx = HSCREEN/2.0 - (HBLOCK/2.0) * DELTAX
cy = VSCREEN/2.0 - (VBLOCK/2.0) * DELTAY
i = (int)((float)(x-cx)/(float)(DELTAX))
j = (int)((float)(y-cy)/(float)(DELTAY))
return i, j
def tiles_to_coord(i, j):
return CX + i*DELTAX, CY + j*DELTAY
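# Illustrative note (not from the original file): both helpers share the CX/CY
# origin and the DELTAX/DELTAY grid spacing, so for any tile (i, j)
#   coord_to_tiles(*tiles_to_coord(i, j)) == (i, j)
# up to floating-point truncation in the int() casts.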
CX = HSCREEN/2.0 - HBLOCK/2.0 * DELTAX
CY = VSCREEN/2.0 - VBLOCK/2.0 * DELTAY
class MotionHandler():
def __init__(self):
self.factor = 1
self.timer = 0
def slowing(self):
self.factor = 0.3
self.timer = get_ticks()
def refresh(self):
if get_ticks() - self.timer > SLOWED_TIME:
self.factor = 1
class Companion():
def __init__(self, sprite):
self.sprite = sprite
self.x = -100
self.y = -100
self.startVisible = 0
def SetPosition(self, board, isAbove, x, y):
angular_speed = 3.14/200.
if isAbove:
if self.startVisible == 0:
self.startVisible = get_ticks()
if get_ticks() - self.startVisible < 1500:
radiusFactor = 1.0 - (get_ticks() - self.startVisible)/1500.0
self.sprite.rect.center = \
(x + board.SCROLLX + radiusFactor * 10 * math.cos(get_ticks() * angular_speed),
y + board.SCROLLY + radiusFactor * 4 * math.sin(get_ticks() * angular_speed))
else:
self.sprite.rect.center = (x + board.SCROLLX, \
y + board.SCROLLY)
else:
self.sprite.rect.center = (-100, -100)
self.startVisible = 0
class Hero():
def __init__(self, board, hero_loaded, lumina_loaded):
if DEATHMODE == 1:
self.x = CX + 0.4 + XSTART*DELTAX + board.SCROLLX
self.y = CY + YSTART*DELTAY + board.SCROLLY
else:
self.x = CX + HBLOCK/2 + 1 + 10 * HBLOCK
self.y = CY
self.targetX = self.x
self.targetY = self.y
self.vx = 0
self.vy = 0
self.speedseq = []
self.motionHandler = MotionHandler()
for i in range(4):
self.speedseq.append(Speedseq())
self.sprite = SpriteHero(hero_loaded)
self.skin = 0
self.SpeedMode = 4
sprite = SpriteCompanion(lumina_loaded[0])
self.companion1 = Companion(sprite)
# self.companion2 = SpriteCompanion("lumina.png")
def MoveTarget(self, deltax, deltay):
self.targetX = self.targetX + DELTAX * deltax
self.targetY = self.targetY + DELTAY * deltay
def updatecompanions(self, board):
self.companion1.SetPosition(board, board.scoring.IsAbove(0.5), self.x, self.y)
# self.companion2.rect.center = \
# (self.x + board.SCROLLX + MATCH_VIEWX*DELTAX*math.cos(get_ticks()*angular_speed + math.pi),
# self.y + board.SCROLLY + MATCH_VIEWY*DELTAY*math.sin(get_ticks()*angular_speed + math.pi))
def updateposition(self, board):
# scrolling
if DEATHMODE == 1 and board.playing is True:
if self.x + board.SCROLLX < SCROLLING_DEATHX:
print("DEAD")
if self.x + board.SCROLLX < SCROLLING_MINX:
board.scrolling(-(self.x + board.SCROLLX - SCROLLING_MINX), 0)
if self.x + board.SCROLLX > SCROLLING_MAXX:
board.scrolling(-(self.x + board.SCROLLX - SCROLLING_MAXX), 0)
if self.y + board.SCROLLY < SCROLLING_MINY:
board.scrolling(0, -(self.y + board.SCROLLY - SCROLLING_MINY))
if self.y + board.SCROLLY > SCROLLING_MAXY:
board.scrolling(0, -(self.y + board.SCROLLY - SCROLLING_MAXY))
self.updateposition_nockeck(board)
def updateposition_nockeck(self, board):
self.sprite.rect.center = \
(self.x + board.SCROLLX, self.y + board.SCROLLY)
self.updatecompanions(board)
def TargetReached(self):
return (self.x == self.targetX) and (self.y == self.targetY)
def get_speed(self, dx, dy):
if self.SpeedMode == 4:
jumpFactor = 0.35
if abs(self.targetX - self.x) + abs(self.targetY - self.y) < 10:
jumpFactor = 1
self.previousx = self.targetX
self.previousy = self.targetY
return (jumpFactor * (self.targetX - self.x),
jumpFactor * (self.targetY - self.y))
if self.SpeedMode == 0:
return(self.speedseq[2].speed*self.motionHandler.factor -
self.speedseq[3].speed*self.motionHandler.factor,
self.speedseq[1].speed*self.motionHandler.factor -
self.speedseq[0].speed*self.motionHandler.factor)
if self.SpeedMode == 2:
return(self.speedseq[2].speed*self.motionHandler.factor * 2 -
self.speedseq[3].speed*self.motionHandler.factor * 2,
self.speedseq[1].speed*self.motionHandler.factor * 2 -
self.speedseq[0].speed*self.motionHandler.factor * 2)
if dy == -1:
return(0, -DELTAX)
if dy == 1:
return(0, DELTAX)
if dx == -1:
return(-DELTAY, 0)
if dx == 1:
return(DELTAY, 0)
return(0, 0)
def accelerate(self, dx, dy):
if self.motionHandler.factor<1:
return
if dy == -1:
self.speedseq[0].increase_speed()
if dy == 1:
self.speedseq[1].increase_speed()
if dx == 1:
self.speedseq[2].increase_speed()
if dx == -1:
self.speedseq[3].increase_speed()
return
def stopping(self, dx, dy):
self.targetX = self.x
self.targetY = self.y
if dx != 0:
self.speedseq[2].reset_speed()
self.speedseq[3].reset_speed()
if dy != 0:
self.speedseq[0].reset_speed()
self.speedseq[1].reset_speed()
def recover_position(self, i, j):
x, y = tiles_to_coord(i, j)
self.x = self.previousx
self.y = self.previousy
self.targetX = self.previousx
self.targetY = self.previousy
def moving(self, dx, dy):
#self.accelerate(dx, dy)
deltax, deltay = self.get_speed(dx, dy)
self.x = self.x + deltax
self.y = self.y + deltay
self.motionHandler.refresh()
def moving_digging(self, dx, dy):
self.motionHandler.slowing()
if dy == -1:
self.speedseq[0].cut_speed(3)
if dy == 1:
self.speedseq[1].cut_speed(3)
if dx == 1:
self.speedseq[2].cut_speed(3)
if dx == -1:
self.speedseq[3].cut_speed(3)
deltax, deltay = self.get_speed(dx, dy)
self.x = self.x + deltax
self.y = self.y + deltay
def ChangeSkin(self, skin):
self.skin = skin
self.sprite.ChangeSkin(self.skin)
class Leveling():
def __init__(self):
self.factor = 1
self.level = 0
self.skin = 0
def GetSkin(self):
return (int)(self.level/2)
def UpdateSkin(self):
changeskin = (self.GetSkin() != self.skin)
self.skin = self.GetSkin()
return(changeskin)
def IncreaseLevel(self):
self.level += 1
self.factor = 1 + self.level * 0.2
print("Leveling to ", self.level)
return(self.UpdateSkin())
def DecreaseLevel(self):
self.level -= 1
if self.level <= 0:
self.level = 0
self.factor = 1 + self.level * 0.2
print("Down a level ", self.level)
return(self.UpdateSkin())
class Scoring():
def __init__(self):
self.health = 100
self.combo_timer = get_ticks()
self.in_a_row = 0
self.deathscroll = 0.1
self.deathtimer = get_ticks()
#self.factor = 1
self.timer_hurt = get_ticks()
self.comboscore = 0
self.level = Leveling()
self.hittimes = []
def empty_hit(self):
        if numpy.random.rand() <= PROBA_HIT and get_ticks() - self.timer_hurt > DELAY_HURT:
self.comboscore = self.comboscore - 20
if self.comboscore < 0:
self.comboscore = 0
#self.level.DecreaseLevel()
self.health = self.health - numpy.random.randint(30) - numpy.random.randint(30) - numpy.random.randint(30)
if self.health < 0:
self.dying()
self.timer_hurt = get_ticks()
return True
return False
def IsAbove(self, proportion):
return self.comboscore >= COMBOVALIDATED * proportion
def dying(self):
self.level.DecreaseLevel()
self.health = 100
def out_of_time(self):
if self.comboscore > 0:
self.comboscore = self.comboscore - 0.03*self.level.factor
if self.comboscore <= 0:
return True
return False
def combo_straight(self, move):
self.hittimes.append(get_ticks())
if len(self.hittimes) < 5:
return False
self.hittimes = self.hittimes[-5:]
if move.GetOldestKeyPressTime() - 20 < self.hittimes[-5]:
self.health = self.health + 3*self.level.factor
self.level.IncreaseLevel()
# Two more to get next combo
move.ResetKeyPressTime(self.hittimes[-3])
return True
return False
def combo_quick(self):
self.comboscore = self.comboscore + 10
if self.comboscore > COMBOVALIDATED:
self.comboscore = 0
self.level.IncreaseLevel()
self.health = self.health + 3*self.level.factor
return True
return False
def increase_deathscroll(self):
if get_ticks() - self.deathtimer > 1000:
self.deathscroll += 0.01
self.deathtimer = get_ticks()
    def GetSkin(self):
        return(self.level.skin)
class MenuLife(Scoring):
def __init__(self, group, board):
Scoring.__init__(self)
self.spritelist = []
self.group = group
img, rect = imageloader.load_image("life.png", -1)
for i in range(20):
sprite = SpriteTool(img)
self.spritelist.append(sprite)
group.add(sprite)
            sprite.place(10*i, 10)
self.combosprite = ComboSprite(board.images.fire_loaded, group)
self.combosprite2 = ComboSprite(board.images.fire_loaded, group, COMBOX)
self.combosprite3 = ComboSprite(board.images.fire_loaded, group, COMBOX + COMBOVALIDATED * FACTORDISPLAYCOMBO)
self.combosprite2.timer += 100
self.combosprite3.timer += 200
def show(self):
j = (int)(self.health/100*20)
if j < 0:
j = 0
if j > 20:
j = 20
[self.spritelist[i].remove(self.group) for i in range(j,20)]
[self.spritelist[i].add(self.group) for i in range(0,j)]
self.combosprite.score = self.comboscore
class Move():
# UP, DOWN, RIGHT, LEFT: 273, 274, 275, 276
def __init__(self):
self.push = numpy.array([0, 0, 0, 0])
self.when = numpy.array([1, 1, 1, 1])*(-1)
self.whenpressed = numpy.array([1, 1, 1, 1])*(-1)
def key_up(self, key, hero):
if not(self.is_a_move(key)):
return
self.push[key-273] = 0
self.when[key-273] = -1
self.whenpressed[key-273] = -1
print("up " ,key)
#if key == K_UP or key == K_DOWN:
# hero.stopping(0, 1)
#if key == K_LEFT or key == K_RIGHT:
# hero.stopping(1, 0)
def key_down(self, key, hero):
if not(self.is_a_move(key)):
return
self.push[key-273] = 1
self.when[key-273] = get_ticks()
self.whenpressed[key-273] = get_ticks()
if key == K_UP:
hero.vy = -5
hero.MoveTarget(0, -1)
if key == K_DOWN:
hero.vy = 5
hero.MoveTarget(0, 1)
if key == K_LEFT:
hero.vx = -5
hero.MoveTarget(-1, 0)
if key == K_RIGHT:
hero.vx = 5
hero.MoveTarget(1, 0)
def MoveRoutine(self, board):
if (get_ticks() - numpy.max(self.when)) > KEYBOARD_WAIT \
and (not numpy.all(self.push == 0)) and board.hero.TargetReached():
keypress = numpy.argmax(self.when)
if self.push[keypress] == 1:
self.key_down(keypress + 273, board.hero)
maxim = self.when.copy()
maxim[maxim<0] = numpy.max(self.when.copy()) + 1
key = numpy.argmin(maxim) + 273
if (get_ticks() - self.when[key-273]) > KEYBOARD_WAIT:
board.moveHero(key)
self.when[key-273] = get_ticks()
def is_a_move(self, key):
return(key == K_LEFT or key == K_RIGHT or
key == K_UP or key == K_DOWN)
def GetOldestKeyPressTime(self):
return(max(self.whenpressed))
def ResetKeyPressTime(self, timereset):
self.whenpressed[self.push==1] = timereset
class Board():
def __init__(self, images, nkeys=NKEYS):
self.images = images
self.blocks_loaded = self.images.blocks_loaded
self.anim_loaded = self.images.anim_loaded
self.playing = False
self.SCROLLX = 0
self.SCROLLY = 0
self.tiles = numpy.random.randint(0, nkeys, size=(VBLOCK, HBLOCK))
if DEATHMODE == 0:
self.tiles[0, :] = -1
self.tiles[-1, :] = -1
self.tiles[-2, :] = -1
self.tiles[:, 0] = -1
self.tiles[:, -1] = -1
self.tiles[:, -2] = -1
self.tilesid = numpy.zeros((VBLOCK, HBLOCK))
if DEATHMODE == 1:
self.tiles[YSTART-5:YSTART+5, XSTART-5:XSTART+5] = -1
self.hour = get_ticks()
self.hero = Hero(self, images.hero_loaded, images.lumina_loaded)
self.sound = sound.Sound()
self.spritegroup = pygame.sprite.Group()
self.spritegroup_other = pygame.sprite.Group()
self.scoring = MenuLife(self.spritegroup_other, self)
self.spritegroup_other.add(self.hero.sprite)
self.spritegroup_other.add(self.hero.companion1.sprite)
# self.spritegroup_other.add(self.hero.companion2)
self.explosiontiles = []
self.build_blocks_sprites()
self.place_tiles()
self.hero.updateposition(self)
self.move = Move()
def build_blocks_sprites(self):
self.spritelist = []
current_id = 0
for j in range(VBLOCK):
for i in range(HBLOCK):
if self.tiles[j, i] != -1:
self.tilesid[j, i] = current_id
current_id = current_id + 1
img, rect = self.blocks_loaded[self.tiles[j, i]]
self.spritelist.append(SpriteTile(img, rect))
# SpriteTile(tilenames[self.tiles[j,i]]) )
self.spritelist[-1].add(self.spritegroup)
self.tilesid = self.tilesid.astype(int)
def haveOnlyVisibleSpritesOnScreen(self):
return
i, j = coord_to_tiles(self.hero.x, self.hero.y)
for sprite in self.spritelist:
k, l = coord_to_tiles(sprite.rect.x, sprite.rect.y)
dist_x = (i - k)
dist_y = (j - l)
if math.fabs(dist_x) < 10 or \
math.fabs(dist_y) < 10 and self.tiles[i, j] != -1:
sprite.add(self.spritegroup)
else:
sprite.remove(self.spritegroup)
def scrolling(self, dx, dy):
self.SCROLLX += dx
self.SCROLLY += dy
self.hero.updateposition_nockeck(self)
self.place_tiles()
# used in DEATHMODE
def circular_warping(self):
ibound = (int)((HSCREEN - CX - self.SCROLLX)/DELTAX)
if ibound >= HBLOCK:
# This should be optimized:
# do not recreate ALL sprites but just the last column...
self.perform_circular_warping()
# used in DEATHMODE
def perform_circular_warping(self):
self.tiles[:, 0:-1] = self.tiles[:, 1:]
self.tiles[:, -1] = numpy.random.randint(0, NKEYS, size=(VBLOCK))
self.spritegroup.empty()
self.build_blocks_sprites()
self.scrolling(DELTAX, 0)
self.hero.x = self.hero.x - DELTAX
def Dash(self):
boo = 0
#self.hero.SpeedMode = 1
def Fast(self):
boo = 0
#self.hero.SpeedMode = 2
def moveHero(self, key):
dx = 0
dy = 0
i, j = coord_to_tiles(self.hero.x, self.hero.y)
if (key == K_LEFT):
dx = -1
# if (self.hero.x == 0):
# return
if (key == K_RIGHT):
dx = 1
# if (i >= HBLOCK-1):
# return
if (key == K_UP):
dy = -1
# if (j==0):
# return
if (key == K_DOWN):
dy = 1
# if (j >= VBLOCK-1):
# return
dist_x, dist_y = tiles_to_coord(i, j)
dist_x = self.hero.x - dist_x
dist_y = self.hero.y - dist_y
speed_x, speed_y = self.hero.get_speed(dx, dy)
collision = False
if dx == 1 and dist_x + HITBOXX + speed_x >= 0.5*DELTAX:
collision = True
if dist_y/DELTAY <= 0.5:
k, l = i+1, j
else:
k, l = i+1, j+1
if dx == -1 and dist_x - HITBOXX + speed_x <= 0.5*DELTAX:
collision = True
if dist_y/DELTAY <= 0.5:
k, l = i, j
else:
k, l = i, j+1
if dy == 1 and dist_y + HITBOXY + speed_y >= 0.5*DELTAY:
collision = True
if dist_x/DELTAX <= 0.5:
k, l = i, j+1
else:
k, l = i+1, j+1
if dy == -1 and dist_y - HITBOXY + speed_y <= 0.5*DELTAY:
collision = True
if dist_x/DELTAX <= 0.5:
k, l = i, j
else:
k, l = i+1, j
if collision is True and self.tiles[l, k] == -1:
collision = False
# step in an empty space
# if self.tiles[j+dy,i+dx] == -1:
if collision is False:
self.hero.moving(dx, dy)
self.sound.play_stepsound()
self.hero.updateposition(self)
# Attempt to dig
if collision is True:
# Dig is allowed
if self.find_match_to_one_tile(k, l, i, j):
self.hero.moving_digging(dx, dy)
self.hero.updateposition(self)
cond1 = self.scoring.combo_quick()
cond2 = self.scoring.combo_straight(self.move)
if cond1 is True:
self.sound.play_combosound()
if cond2 is True:
self.sound.play_combostraightsound()
if not(cond1) and not(cond2):
self.sound.play_digsound()
self.sound.play_hitsound()
else:
self.hero.recover_position(i, j)
if self.scoring.empty_hit():
self.sound.play_hurtsound()
else:
self.sound.play_hitsound()
if self.hero.SpeedMode == 1:
self.hero.SpeedMode = 0
self.hero.stopping(1, 1)
def place_tiles(self):
cx = HSCREEN/2.0 - HBLOCK/2.0 * DELTAX
cy = VSCREEN/2.0 - VBLOCK/2.0 * DELTAY
for j in range(VBLOCK):
for i in range(HBLOCK):
if self.tiles[j, i] != -1:
dx = i*DELTAX
dy = j*DELTAY
currentid = self.tilesid[j, i]
self.spritelist[currentid].rect.center = \
(cx + dx + self.SCROLLX, cy + dy + self.SCROLLY)
def find_match_to_one_tile(self, i, j, io, jo):
# reduced view
imin = io - MATCH_VIEWX if io - MATCH_VIEWX >= 0 else 0
imax = io + MATCH_VIEWX if io + MATCH_VIEWX < HBLOCK-1 else HBLOCK - 1
jmin = jo - MATCH_VIEWY if jo - MATCH_VIEWY >= 0 else 0
jmax = jo + MATCH_VIEWY if jo + MATCH_VIEWY < VBLOCK-1 else VBLOCK - 1
# position of the tile of interest in the view
i, j = i - imin, j - jmin
io, jo = io - imin, jo - jmin
local_tiles = copy.deepcopy(self.tiles[jmin:jmax, imin:imax])
# Just use a memorized patch if the view is full.
# Else, search in the square. Can be fixed later or never.
if jmax-jmin == 2*MATCH_VIEWY and imax-imin == 2*MATCH_VIEWX:
local_tiles[PATCH == 0] = -2
idx_list = numpy.argwhere(local_tiles == local_tiles[j, i])
list_match = []
for idx in range(idx_list.shape[0]):
k = idx_list[idx, 1]
l = idx_list[idx, 0]
connect = match_func.find_connection(i, j, k, l, local_tiles)
if connect is True and (i != k or j != l):
list_match.append([k, l])
if list_match == []:
return False
distances = []
for pair in list_match:
distances.append(((i-pair[0])**2.0 + (j-pair[1])**2.0)**0.5)
rank = numpy.argmin(distances)
k = list_match[rank][0]
l = list_match[rank][1]
self.do_digging(i+imin, j+jmin, k+imin, l+jmin)
return True
def do_digging(self, i, j, k, l):
self.group_dig(i, j, 'digged')
self.group_dig(k, l)
def group_dig(self, i, j, tag='not'):
xini, yini = (CX + i*DELTAX + self.SCROLLX,
CY + j*DELTAY + self.SCROLLY)
# Heavy explosion where the character is not!
if tag == 'not':
self.spritegroup_other.add(AnimatedTile(self.anim_loaded,
xini-self.SCROLLX, yini-self.SCROLLY, self))
for number in range(3):
self.add_destroy_sprite(i, j, self.tiles[j, i])
# Add a modest explosion close to the character
if tag == 'digged':
self.spritegroup_other.add(AnimatedTile(self.anim_loaded[:2],
xini-self.SCROLLX, yini-self.SCROLLY, self))
# Remove tile
index = self.tilesid[j, i]
self.spritelist[index].dig(self.spritegroup)
self.tiles[j, i] = -1
def add_destroy_sprite(self, i, j, flavor):
img, rect = self.blocks_loaded[flavor]
xini, yini = (CX + i*DELTAX + self.SCROLLX,
CY + j*DELTAY + self.SCROLLY)
self.explosiontiles.append(ExplosionTile(
img, xini-self.SCROLLX, yini-self.SCROLLY))
self.spritegroup_other.add(self.explosiontiles[-1])
def updateBoard(self):
self.scoring.show()
for t in self.explosiontiles:
t.moving(self)
if self.scoring.out_of_time():
if TAG_NO_PRESSURE == 0:
self.sound.play_outoftimesound()
nextskin = self.scoring.level.skin
if nextskin > self.hero.skin:
self.hero.ChangeSkin(nextskin)
self.sound.play_youpiesound()
if nextskin < self.hero.skin:
self.hero.ChangeSkin(nextskin)
self.sound.play_loselifesound()
def IsFire(key):
return key == K_f
def IsFast(key):
return key == K_d
def GetKeyboardEvents(move, board):
# Handle Input Events
for event in pygame.event.get():
if event.type == KEYDOWN:
move.key_down(event.key, board.hero)
if event.type == KEYUP:
move.key_up(event.key, board.hero)
if event.type == QUIT:
return True
if event.type == KEYDOWN and event.key == K_ESCAPE:
return True
if event.type == KEYDOWN and move.is_a_move(event.key):
board.moveHero(event.key)
if event.type == KEYDOWN and IsFire(event.key):
board.Dash()
if event.type == KEYDOWN and IsFast(event.key):
board.Fast()
if event.type == KEYUP and IsFast(event.key):
board.hero.SpeedMode = 0
return False
def main():
"""this function is called when the program starts.
it initializes everything it needs, then runs in
a loop until the function returns."""
# Initialize Everything
pygame.init()
screen = pygame.display.set_mode((HSCREEN, VSCREEN))
pygame.display.set_caption('Matchit_Digit')
pygame.mouse.set_visible(0)
    # Create The Background
background = pygame.Surface(screen.get_size())
background = background.convert()
background.fill((50, 20, 10))
# Create font
if pygame.font:
font = pygame.font.Font(None, fontsize)
# Display The Background
screen.blit(background, (0, 0))
pygame.display.flip()
# Prepare Game Objects
clock = pygame.time.Clock()
images = imageloader.Imageloader()
board = Board(images)
move = board.move
if DEATHMODE == 1:
board.scrolling(500, 0)
board.playing = True
# Main Loop
exitTag = False
while exitTag == False:
clock.tick(30)
exitTag = GetKeyboardEvents(move, board)
move.MoveRoutine(board)
board.scrolling(0, 0)
if DEATHMODE == 1:
board.scoring.increase_deathscroll()
board.scrolling(-board.scoring.deathscroll, 0)
board.circular_warping()
# Draw Everything
board.updateBoard()
board.haveOnlyVisibleSpritesOnScreen()
screen.blit(background, (0, 0))
board.spritegroup.draw(screen)
board.spritegroup_other.update()
board.spritegroup_other.draw(screen)
pygame.display.flip()
pygame.quit()
# Game Over
# this calls the 'main' function when this script is executed
if __name__ == '__main__':
main()
| 30.51052
| 133
| 0.556169
|
65393806c403c795d5c63709abca07232d4c3755
| 8,433
|
py
|
Python
|
examples/tf1_mnist_estimator.py
|
jmsalamy/KungFu
|
063ccaae3c0a2c6411f26d3641da262abd4eeab3
|
[
"Apache-2.0"
] | null | null | null |
examples/tf1_mnist_estimator.py
|
jmsalamy/KungFu
|
063ccaae3c0a2c6411f26d3641da262abd4eeab3
|
[
"Apache-2.0"
] | null | null | null |
examples/tf1_mnist_estimator.py
|
jmsalamy/KungFu
|
063ccaae3c0a2c6411f26d3641da262abd4eeab3
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import, division, print_function
import gzip
import os
import shutil
import tempfile
import numpy as np
import tensorflow as tf
from six.moves import urllib
flags = tf.app.flags
flags.DEFINE_string(
'data_dir', './mnist/data', 'Directory where mnist data will be downloaded'
' if the data is not already there')
flags.DEFINE_string('model_dir', './mnist/model',
'Directory where all models are saved')
flags.DEFINE_string('kf_optimizer', 'sync_sgd', 'KungFu optimizer')
flags.DEFINE_integer('batch_size', 100, 'Batch size.')
flags.DEFINE_integer('num_epochs', 1, 'Num of batches to train (epochs).')
flags.DEFINE_float('learning_rate', 0.001, 'Learning Rate')
FLAGS = flags.FLAGS
def read32(bytestream):
"""Read 4 bytes from bytestream as an unsigned 32-bit integer."""
dt = np.dtype(np.uint32).newbyteorder('>')
return np.frombuffer(bytestream.read(4), dtype=dt)[0]
def check_image_file_header(filename):
"""Validate that filename corresponds to images for the MNIST dataset."""
with tf.gfile.Open(filename, 'rb') as f:
magic = read32(f)
read32(f) # num_images, unused
rows = read32(f)
cols = read32(f)
if magic != 2051:
raise ValueError('Invalid magic number %d in MNIST file %s' %
(magic, f.name))
if rows != 28 or cols != 28:
raise ValueError(
'Invalid MNIST file %s: Expected 28x28 images, found %dx%d' %
(f.name, rows, cols))
def check_labels_file_header(filename):
"""Validate that filename corresponds to labels for the MNIST dataset."""
with tf.gfile.Open(filename, 'rb') as f:
magic = read32(f)
read32(f) # num_items, unused
if magic != 2049:
raise ValueError('Invalid magic number %d in MNIST file %s' %
(magic, f.name))
def download(directory, filename):
"""Download (and unzip) a file from the MNIST dataset if not already done."""
filepath = os.path.join(directory, filename)
if tf.gfile.Exists(filepath):
return filepath
if not tf.gfile.Exists(directory):
tf.gfile.MakeDirs(directory)
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
url = 'https://storage.googleapis.com/cvdf-datasets/mnist/' + filename + '.gz'
_, zipped_filepath = tempfile.mkstemp(suffix='.gz')
print('Downloading %s to %s' % (url, zipped_filepath))
urllib.request.urlretrieve(url, zipped_filepath)
with gzip.open(zipped_filepath, 'rb') as f_in, \
tf.gfile.Open(filepath, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(zipped_filepath)
return filepath
def dataset(directory, images_file, labels_file):
"""Download and parse MNIST dataset."""
images_file = download(directory, images_file)
labels_file = download(directory, labels_file)
check_image_file_header(images_file)
check_labels_file_header(labels_file)
def decode_image(image):
# Normalize from [0, 255] to [0.0, 1.0]
image = tf.decode_raw(image, tf.uint8)
image = tf.cast(image, tf.float32)
image = tf.reshape(image, [784])
return image / 255.0
def decode_label(label):
label = tf.decode_raw(label, tf.uint8) # tf.string -> [tf.uint8]
label = tf.reshape(label, []) # label is a scalar
return tf.to_int32(label)
images = tf.data.FixedLengthRecordDataset(
images_file, 28 * 28, header_bytes=16).map(decode_image)
labels = tf.data.FixedLengthRecordDataset(labels_file, 1,
header_bytes=8).map(decode_label)
return tf.data.Dataset.zip((images, labels))
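# Note on the header sizes used above: in the MNIST IDX format an image file
# starts with four 32-bit big-endian integers (magic, count, rows, cols), hence
# header_bytes=16, while a label file starts with two (magic, count), hence
# header_bytes=8 -- the same fields that check_image_file_header and
# check_labels_file_header validate.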
def train(directory):
"""tf.data.Dataset object for MNIST training data."""
return dataset(directory, 'train-images-idx3-ubyte',
'train-labels-idx1-ubyte')
def test(directory):
"""tf.data.Dataset object for MNIST test data."""
return dataset(directory, 't10k-images-idx3-ubyte',
't10k-labels-idx1-ubyte')
def train_data():
data = train(FLAGS.data_dir)
data = data.cache()
data = data.batch(FLAGS.batch_size)
return data
def eval_data():
data = test(FLAGS.data_dir)
data = data.cache()
data = data.batch(FLAGS.batch_size)
return data
def lenet():
layers = tf.keras.layers
model = tf.keras.Sequential([
layers.Reshape(target_shape=[28, 28, 1], input_shape=(28 * 28, )),
layers.Conv2D(filters=20,
kernel_size=[5, 5],
padding='same',
activation=tf.nn.relu),
layers.MaxPooling2D(pool_size=[2, 2]),
layers.Conv2D(filters=50,
kernel_size=[5, 5],
padding='same',
activation=tf.nn.relu),
layers.MaxPool2D(pool_size=[2, 2]),
layers.Flatten(),
layers.Dense(units=500, activation=tf.nn.relu),
layers.Dense(units=10),
])
return model
def model_function(features, labels, mode):
# get the model
model = lenet()
if mode == tf.estimator.ModeKeys.TRAIN:
# pass the input through the model
logits = model(features)
# get the cross-entropy loss and name it
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels,
logits=logits)
tf.identity(loss, 'train_loss')
# record the accuracy and name it
accuracy = tf.metrics.accuracy(labels=labels,
predictions=tf.argmax(logits, axis=1))
tf.identity(accuracy[1], name='train_accuracy')
# use Adam to optimize
optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
tf.identity(FLAGS.learning_rate, name='learning_rate')
# KungFu: Wrap the tf.train.optimizer with KungFu optimizers
if FLAGS.kf_optimizer == 'sync_sgd':
from kungfu.tensorflow.optimizers import SynchronousSGDOptimizer
optimizer = SynchronousSGDOptimizer(optimizer)
elif FLAGS.kf_optimizer == 'async_sgd':
from kungfu.tensorflow.optimizers import PairAveragingOptimizer
optimizer = PairAveragingOptimizer(optimizer)
else:
raise RuntimeError('Unknown kungfu optimizer')
# create an estimator spec to optimize the loss
estimator_spec = tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.TRAIN,
loss=loss,
train_op=optimizer.minimize(loss,
tf.train.get_or_create_global_step()))
elif mode == tf.estimator.ModeKeys.EVAL:
# pass the input through the model
logits = model(features, training=False)
# get the cross-entropy loss
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels,
logits=logits)
# use the accuracy as a metric
accuracy = tf.metrics.accuracy(labels=labels,
predictions=tf.argmax(logits, axis=1))
# create an estimator spec with the loss and accuracy
estimator_spec = tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.EVAL,
loss=loss,
eval_metric_ops={'accuracy': accuracy})
return estimator_spec
def main(_):
from kungfu.tensorflow.initializer import BroadcastGlobalVariablesHook
hooks = [
BroadcastGlobalVariablesHook(),
tf.train.LoggingTensorHook(['train_accuracy', 'train_loss'],
every_n_iter=10)
]
from kungfu import current_rank
save_checkpoints_secs = None if current_rank() != 0 else 30
config = tf.estimator.RunConfig(
save_checkpoints_secs=save_checkpoints_secs)
mnist_classifier = tf.estimator.Estimator(model_fn=model_function,
model_dir=FLAGS.model_dir,
config=config)
for _ in range(FLAGS.num_epochs):
mnist_classifier.train(
input_fn=train_data,
hooks=hooks,
)
mnist_classifier.evaluate(input_fn=eval_data)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
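# A typical multi-worker launch, assuming the kungfu-run launcher shipped with
# the KungFu project is installed (shown as an illustration, not taken from
# this file):
#
#   kungfu-run -np 4 python3 tf1_mnist_estimator.py --kf_optimizer=sync_sgd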
| 34.991701
| 82
| 0.620538
|
e8d1d73029317466d13f0f9f7d0311f9c5fe7ca4
| 23,254
|
py
|
Python
|
lib/RunTester/RunTesterServer.py
|
kbaseapps/RunTester
|
21a69a95378a65796bcaae48ed8ae0e26538e012
|
[
"MIT"
] | null | null | null |
lib/RunTester/RunTesterServer.py
|
kbaseapps/RunTester
|
21a69a95378a65796bcaae48ed8ae0e26538e012
|
[
"MIT"
] | null | null | null |
lib/RunTester/RunTesterServer.py
|
kbaseapps/RunTester
|
21a69a95378a65796bcaae48ed8ae0e26538e012
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from RunTester.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'RunTester'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from RunTester.RunTesterImpl import RunTester # noqa @IgnorePep8
impl_RunTester = RunTester(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
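    # Illustrative request shape accepted by call()/call_py() above (all values
    # are hypothetical); KBase SDK clients send JSON-RPC 1.1 style bodies with
    # a positional params list:
    #
    #   {"version": "1.1",
    #    "method": "RunTester.run_RunTester",
    #    "params": [{"workspace_name": "my_ws"}],
    #    "id": "12345"}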
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'RunTester'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_RunTester.run_RunTester,
name='RunTester.run_RunTester',
types=[dict])
self.method_authentication['RunTester.run_RunTester'] = 'required' # noqa
self.rpc_service.add(impl_RunTester.status,
name='RunTester.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
        except ValueError:
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'RunTester ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
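        # Round the local-to-UTC offset to the nearest minute (hence the +30
        # seconds) and split it into hours/minutes for the ISO 8601 "+HH:MM" suffix.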
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
    # configs specify patching all std routines to be async.
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
    in the main thread. Execution of the main thread will stay in the server
    main loop until interrupted. To run the server in a separate process, and
    thus allow the stop_server method to be called, set newprocess = True. This
    will also allow the port number to be returned.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
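# A minimal usage sketch (host/port values are hypothetical; start_server blocks
# in the main thread unless newprocess=True):
#
#     port = start_server(host='0.0.0.0', port=5000, newprocess=True)
#     ...  # exercise the JSON-RPC endpoint on the returned port
#     stop_server()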
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
| 36.678233
| 151
| 0.542186
|
2f2c5703fb2c96ad390fe514853fafb4ac59ae64
| 2,979
|
py
|
Python
|
tests/python/contrib/test_hexagon/conftest.py
|
666vulcan/tvm
|
ffd5f70370642c909222f9a4cae8400023dacbdc
|
[
"Apache-2.0"
] | 1
|
2022-01-29T21:05:36.000Z
|
2022-01-29T21:05:36.000Z
|
tests/python/contrib/test_hexagon/conftest.py
|
666vulcan/tvm
|
ffd5f70370642c909222f9a4cae8400023dacbdc
|
[
"Apache-2.0"
] | null | null | null |
tests/python/contrib/test_hexagon/conftest.py
|
666vulcan/tvm
|
ffd5f70370642c909222f9a4cae8400023dacbdc
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Hexagon testing fixtures used to deduce testing argument
values from testing parameters """
import os
import pytest
import tvm
from tvm import rpc
HEXAGON_TOOLCHAIN = "HEXAGON_TOOLCHAIN"
TVM_TRACKER_HOST = "TVM_TRACKER_HOST"
TVM_TRACKER_PORT = "TVM_TRACKER_PORT"
ANDROID_REMOTE_DIR = "ANDROID_REMOTE_DIR"
ANDROID_SERIAL_NUMBER = "ANDROID_SERIAL_NUMBER"
ADB_SERVER_SOCKET = "ADB_SERVER_SOCKET"
@tvm.testing.fixture
def shape_nhwc(batch, in_channel, in_size):
return (batch, in_size, in_size, in_channel)
def _compose(args, decs):
"""Helper to apply multiple markers"""
if len(args) > 0:
f = args[0]
for d in reversed(decs):
f = d(f)
return f
return decs
def requires_hexagon_toolchain(*args):
_requires_hexagon_toolchain = [
pytest.mark.skipif(
            os.environ.get(HEXAGON_TOOLCHAIN) is None,
reason=f"Missing environment variable {HEXAGON_TOOLCHAIN}.",
),
]
return _compose(args, _requires_hexagon_toolchain)
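# A minimal usage sketch (the test name and body are hypothetical): stacking the
# decorator skips the test when the HEXAGON_TOOLCHAIN environment variable is unset.
#
#     @requires_hexagon_toolchain
#     def test_needs_toolchain():
#         assert os.environ[HEXAGON_TOOLCHAIN]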
@tvm.testing.fixture
def android_serial_number() -> str:
return os.getenv(ANDROID_SERIAL_NUMBER, default=None)
@tvm.testing.fixture
def tvm_tracker_host() -> str:
return os.getenv(TVM_TRACKER_HOST, default=None)
@tvm.testing.fixture
def tvm_tracker_port() -> int:
port = os.getenv(TVM_TRACKER_PORT, default=None)
port = int(port) if port else None
return port
@tvm.testing.fixture
def adb_server_socket() -> str:
return os.getenv(ADB_SERVER_SOCKET, default="tcp:5037")
# If the execution aborts while an RPC server is running, the python
# code that is supposed to shut it down will never execute. This will
# keep pytest from terminating (indefinitely), so add a cleanup
# fixture to terminate any still-running servers.
@pytest.fixture(scope="session", autouse=True)
def terminate_rpc_servers():
# Since this is a fixture that runs regardless of whether the
# execution happens on simulator or on target, make sure the
# yield happens every time.
serial = os.environ.get(ANDROID_SERIAL_NUMBER)
yield []
if serial == "simulator":
os.system("ps ax | grep tvm_rpc_x86 | awk '{print $1}' | xargs kill")
| 31.03125
| 77
| 0.731789
|
0cc00bda15f3e9f766cfe028f816c9766ce67673
| 462
|
py
|
Python
|
setup.py
|
zhangshyue/Excutable
|
abe5c7a9fd65f2b7010b48db317a6ba5630a5022
|
[
"MIT"
] | null | null | null |
setup.py
|
zhangshyue/Excutable
|
abe5c7a9fd65f2b7010b48db317a6ba5630a5022
|
[
"MIT"
] | null | null | null |
setup.py
|
zhangshyue/Excutable
|
abe5c7a9fd65f2b7010b48db317a6ba5630a5022
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
setup(
name='listAllTheFile',
packages = find_packages(),
version='0.0.1',
license='MIT',
author="Shiyue Zhang",
author_email="zhangshiyuez@gmail.com",
description="A package for listing file names in directory",
url="https://github.com/zhangshyue/listFile",
entry_points={
'console_scripts': [
'list_all_the_file=listFile.list_file:extract'
]
}
)
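# A minimal usage note (assuming the package is installed with e.g. `pip install .`):
# the console_scripts entry point above exposes a `list_all_the_file` command that
# dispatches to the `extract` function in `listFile.list_file`.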
| 28.875
| 65
| 0.655844
|
a2ed186c4941bc3d9c00378ba34124a6c0c256ef
| 714
|
py
|
Python
|
examples/reference/models/Line.py
|
kevin1kevin1k/bokeh
|
9f34b5b710e2748ec803c12918ec1706098a3477
|
[
"BSD-3-Clause"
] | 17
|
2020-06-14T03:47:35.000Z
|
2022-03-07T00:25:23.000Z
|
examples/reference/models/Line.py
|
kevin1kevin1k/bokeh
|
9f34b5b710e2748ec803c12918ec1706098a3477
|
[
"BSD-3-Clause"
] | 12
|
2020-07-22T22:40:09.000Z
|
2021-03-17T14:10:27.000Z
|
examples/reference/models/Line.py
|
kevin1kevin1k/bokeh
|
9f34b5b710e2748ec803c12918ec1706098a3477
|
[
"BSD-3-Clause"
] | 8
|
2020-06-14T03:47:23.000Z
|
2021-11-20T15:14:04.000Z
|
import numpy as np
from bokeh.models import ColumnDataSource, Plot, LinearAxis, Grid
from bokeh.models.glyphs import Line
from bokeh.io import curdoc, show
N = 30
x = np.linspace(-2, 2, N)
y = x**2
source = ColumnDataSource(dict(x=x, y=y))
plot = Plot(
title=None, plot_width=300, plot_height=300,
min_border=0, toolbar_location=None)
glyph = Line(x="x", y="y", line_color="#f46d43", line_width=6, line_alpha=0.6)
plot.add_glyph(source, glyph)
xaxis = LinearAxis()
plot.add_layout(xaxis, 'below')
yaxis = LinearAxis()
plot.add_layout(yaxis, 'left')
plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))
curdoc().add_root(plot)
show(plot)
| 22.3125
| 78
| 0.731092
|
835a91cb4891fecbd5eaf674d03cbd51bc422e12
| 9,073
|
py
|
Python
|
telepresence/outbound/container.py
|
getbread/telepresence
|
d800e89675d39c958e4424f597854c995a40feeb
|
[
"Apache-2.0"
] | null | null | null |
telepresence/outbound/container.py
|
getbread/telepresence
|
d800e89675d39c958e4424f597854c995a40feeb
|
[
"Apache-2.0"
] | 1
|
2021-11-23T22:31:02.000Z
|
2021-11-23T22:31:02.000Z
|
telepresence/outbound/container.py
|
getbread/telepresence
|
d800e89675d39c958e4424f597854c995a40feeb
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import os.path
import subprocess
from typing import Callable, Dict, List, Optional, Tuple
from telepresence import TELEPRESENCE_LOCAL_IMAGE
from telepresence.cli import PortMapping
from telepresence.connect import SSH
from telepresence.proxy import RemoteInfo
from telepresence.runner import Runner
from telepresence.utilities import find_free_port, random_name
def make_docker_kill(runner: Runner, name: str) -> Callable[[], None]:
"""Return a function that will kill a named docker container."""
def kill() -> None:
runner.check_call(runner.docker("stop", "--time=1", name))
return kill
def parse_docker_args(docker_run: List[str]) -> Tuple[List[str], List[str]]:
"""Separate --publish flags from the rest of the docker arguments"""
parser = argparse.ArgumentParser(allow_abbrev=False)
parser.add_argument("--publish", "-p", action="append", default=[])
publish_ns, docker_args = parser.parse_known_args(docker_run)
publish_args = ["-p={}".format(pub) for pub in publish_ns.publish]
return docker_args, publish_args
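# A minimal sketch of the split performed above (argument values are hypothetical):
#
#     parse_docker_args(["-p", "8080:80", "--rm", "nginx"])
#     # -> (["--rm", "nginx"], ["-p=8080:80"])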
def parse_resolv_conf(contents: str) -> List[str]:
"""
Try to extract nameserver, search path, and ndots info from the pod's
resolv.conf file.
"""
res = []
for line in contents.splitlines():
line = line.strip()
if not line:
continue
tokens = line.split()
keyword = tokens[0].lower()
args = tokens[1:]
if keyword == "nameserver":
res.append("--dns={}".format(args[0]))
elif keyword == "search":
for arg in args:
res.append("--dns-search={}".format(arg))
elif keyword == "options":
for arg in args:
res.append("--dns-opt={}".format(arg))
else:
pass # Ignore the rest
return res
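# A minimal sketch of the translation performed above (sample resolv.conf content):
#
#     parse_resolv_conf("nameserver 10.0.0.10\n"
#                       "search default.svc.cluster.local\n"
#                       "options ndots:5")
#     # -> ["--dns=10.0.0.10", "--dns-search=default.svc.cluster.local",
#     #     "--dns-opt=ndots:5"]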
def parse_hosts_aliases(contents: str) -> List[str]:
"""
    Try to extract the IP and corresponding host names from the hosts file for
    each hostAlias, and create the corresponding --add-host docker run arguments.
"""
res = []
host_alias = False
for line in contents.splitlines():
line = line.strip()
if not line:
continue
if line.startswith("#"):
            host_alias = "HostAliases" in line
continue
if host_alias:
tokens = line.split()
ip = tokens[0]
hosts = tokens[1:]
for host in hosts:
res.append("--add-host={}:{}".format(host, ip))
return res
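# A minimal sketch of the translation performed above (sample hosts-file content;
# the comment line containing "HostAliases" is what enables the parsing):
#
#     parse_hosts_aliases("# Entries added by HostAliases.\n"
#                         "10.1.2.3 foo.local bar.local")
#     # -> ["--add-host=foo.local:10.1.2.3", "--add-host=bar.local:10.1.2.3"]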
def run_docker_command(
runner: Runner,
remote_info: RemoteInfo,
docker_run: List[str],
expose: PortMapping,
to_pod: List[int],
from_pod: List[int],
container_to_host: PortMapping,
remote_env: Dict[str, str],
docker_host: Optional[str],
ssh: SSH,
mount_dir: Optional[str],
use_docker_mount: Optional[bool],
pod_info: Dict[str, str],
exclude_proxy: List[str],
host_ip: str,
) -> "subprocess.Popen[bytes]":
"""
--docker-run support.
Connect using sshuttle running in a Docker container, and then run user
container.
:param remote_env: Dictionary with environment on remote pod.
:param mount_dir: Path to local directory where remote pod's filesystem is
mounted.
"""
# Update environment:
remote_env["TELEPRESENCE_METHOD"] = "container" # mostly just for tests :(
# Extract --publish flags and add them to the sshuttle container, which is
# responsible for defining the network entirely.
docker_args, publish_args = parse_docker_args(docker_run)
# Point a host port to the network container's sshd
container_sshd_port = find_free_port()
publish_args.append(
"--publish=127.0.0.1:{}:38022/tcp".format(container_sshd_port)
)
if not docker_host:
docker_host = "127.0.0.1"
local_ssh = SSH(runner, container_sshd_port, "root@{}".format(docker_host))
if host_ip:
exclude_proxy.append(host_ip)
# Start the network (sshuttle) container:
name = random_name()
config = {
"cidrs": ["0/0"],
"expose_ports": list(expose.local_to_remote()),
"to_pod": to_pod,
"from_pod": from_pod,
"exclude_proxy": exclude_proxy,
"host_ip": host_ip,
}
dns_args = []
if "hostname" in pod_info:
dns_args.append("--hostname={}".format(pod_info["hostname"].strip()))
if "hosts" in pod_info:
dns_args.extend(parse_hosts_aliases(pod_info["hosts"]))
if "resolv" in pod_info:
dns_args.extend(parse_resolv_conf(pod_info["resolv"]))
# Image already has tini init so doesn't need --init option:
span = runner.span()
runner.launch(
"Network container",
runner.docker(
"run", *publish_args, *dns_args, "--rm", "--privileged",
"--name=" + name, TELEPRESENCE_LOCAL_IMAGE, "proxy",
json.dumps(config)
),
killer=make_docker_kill(runner, name),
keep_session=runner.sudo_for_docker,
)
# Set up ssh tunnel to allow the container to reach the cluster
if not local_ssh.wait():
raise RuntimeError("SSH to the network container failed to start.")
container_forward_args = ["-R", "38023:127.0.0.1:{}".format(ssh.port)]
for container_port, host_port in container_to_host.local_to_remote():
if runner.chatty:
runner.show(
"Forwarding container port {} to host port {}.".format(
container_port, host_port
)
)
container_forward_args.extend([
"-R", "{}:127.0.0.1:{}".format(container_port, host_port)
])
runner.launch(
"Local SSH port forward", local_ssh.bg_command(container_forward_args)
)
# Wait for sshuttle to be running:
sshuttle_ok = False
for _ in runner.loop_until(120, 1):
try:
runner.check_call(
runner.docker(
"run", "--network=container:" + name, "--rm",
TELEPRESENCE_LOCAL_IMAGE, "wait"
)
)
except subprocess.CalledProcessError as e:
if e.returncode == 100:
# We're good!
sshuttle_ok = True
break
elif e.returncode == 125:
# Docker failure, probably due to original container not
# starting yet... so try again:
continue
else:
raise
else:
raise RuntimeError(
"Waiting container exited prematurely. File a bug, please!"
)
if not sshuttle_ok:
# This used to loop forever. Now we time out after two minutes.
raise RuntimeError(
"Waiting for network container timed out. File a bug, please!"
)
# Start the container specified by the user:
container_name = random_name()
docker_command = runner.docker(
"run",
"--name=" + container_name,
"--network=container:" + name,
env=True,
)
# Prepare container environment
for key in remote_env:
docker_command.append("-e={}".format(key))
docker_env = os.environ.copy()
docker_env.update(remote_env)
if mount_dir:
if use_docker_mount:
mount_volume = "telepresence-" + runner.session_id
else:
mount_volume = mount_dir
docker_command.append("--volume={}:{}".format(mount_volume, mount_dir))
# Don't add --init if the user is doing something with it
init_args = [
arg for arg in docker_args
if arg == "--init" or arg.startswith("--init=")
]
# Older versions of Docker don't have --init:
docker_run_help = runner.get_output(["docker", "run", "--help"])
if not init_args and "--init" in docker_run_help:
docker_command += ["--init"]
docker_command += docker_args
span.end()
runner.show("Setup complete. Launching your container.")
process = subprocess.Popen(docker_command, env=docker_env)
def terminate_if_alive() -> None:
runner.write("Shutting down containers...\n")
if process.poll() is None:
runner.write("Killing local container...\n")
make_docker_kill(runner, container_name)()
runner.add_cleanup("Terminate local container", terminate_if_alive)
return process
| 32.519713
| 79
| 0.622947
|
f67b2ab9906b015f9c71d439b01cde901e958ba4
| 1,553
|
py
|
Python
|
api/user/modify.py
|
lonkaars/po-4-op-een-rij
|
5cd9eaf116422c82ab68ffcf2dff22e04781b6c5
|
[
"MIT"
] | 4
|
2021-02-04T10:33:45.000Z
|
2021-03-25T11:26:27.000Z
|
api/user/modify.py
|
lonkaars/connect-4
|
5cd9eaf116422c82ab68ffcf2dff22e04781b6c5
|
[
"MIT"
] | 21
|
2021-04-05T17:16:04.000Z
|
2021-06-12T12:47:22.000Z
|
api/user/modify.py
|
lonkaars/connect-4
|
5cd9eaf116422c82ab68ffcf2dff22e04781b6c5
|
[
"MIT"
] | 2
|
2021-04-23T07:06:11.000Z
|
2021-04-27T08:44:26.000Z
|
from flask import Blueprint, request
from db import cursor, connection
from hierarchy import auth_required
from auth.login import login_password
def login_and_password(func):
@auth_required("user")
def wrapper(user_id):
data = request.get_json()
if not data: return "", 400
password = data.get("password")
if not password: return "", 401
if not login_password(user_id, password): return "", 401
return func(user_id)
return wrapper
def modify_user_info(type):
@login_and_password
def index(user_id):
data = request.get_json()
if not data: return "", 400
new_value = data.get(type)
if not new_value: return "", 401
# check if already taken
taken = cursor.execute(
f"select count(user_id) from users where lower({type}) = lower(?)",
[new_value]
).fetchone()
if taken[0] > 0: return "", 403
# update
cursor.execute(
f"update users set {type} = ? where user_id = ?",
[new_value, user_id]
)
connection.commit()
return "", 200
return index
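# A minimal request sketch for the routes registered below (paths assume the
# blueprints are mounted under /user as listed in dynamic_routes; field values
# are hypothetical):
#
#     POST /user/username  {"password": "...", "username": "new_name"}
#     -> 200 on success, 400 for a missing body, 401 for a bad password or
#        missing field, 403 if the name is already taken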
modify_username = Blueprint('modify_username', __name__)
modify_username.add_url_rule(
'/username', 'route', modify_user_info("username"), methods=["POST"]
)
modify_email = Blueprint('modify_email', __name__)
modify_email.add_url_rule(
'/email', 'route', modify_user_info("email"), methods=["POST"]
)
dynamic_routes = [["/user", modify_username], ["/user", modify_email]]
| 25.459016
| 79
| 0.626529
|
2eb1065aa7fcab74507a533404ad59acb87be0aa
| 30,802
|
py
|
Python
|
Packs/WindowsDefenderAdvancedThreatProtection/Integrations/WindowsDefenderAdvancedThreatProtection/WindowsDefenderAdvancedThreatProtection.py
|
ddi-danielsantander/content
|
67e2edc404f50c332d928dbdbce00a447bb5532f
|
[
"MIT"
] | 1
|
2020-04-19T11:05:42.000Z
|
2020-04-19T11:05:42.000Z
|
Packs/WindowsDefenderAdvancedThreatProtection/Integrations/WindowsDefenderAdvancedThreatProtection/WindowsDefenderAdvancedThreatProtection.py
|
ddi-danielsantander/content
|
67e2edc404f50c332d928dbdbce00a447bb5532f
|
[
"MIT"
] | 9
|
2021-02-08T20:51:18.000Z
|
2021-09-23T23:27:38.000Z
|
Packs/WindowsDefenderAdvancedThreatProtection/Integrations/WindowsDefenderAdvancedThreatProtection/WindowsDefenderAdvancedThreatProtection.py
|
ddi-danielsantander/content
|
67e2edc404f50c332d928dbdbce00a447bb5532f
|
[
"MIT"
] | 2
|
2020-07-15T06:41:52.000Z
|
2020-07-19T18:45:23.000Z
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import requests
import base64
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
requests.packages.urllib3.disable_warnings()
if not demisto.params()['proxy']:
del os.environ['HTTP_PROXY']
del os.environ['HTTPS_PROXY']
del os.environ['http_proxy']
del os.environ['https_proxy']
''' GLOBAL VARS '''
SERVER = demisto.params()['url'][:-1] if demisto.params()['url'].endswith('/') else demisto.params()['url']
BASE_URL = SERVER + '/api'
TENANT_ID = demisto.params()['tenant_id']
AUTH_AND_TOKEN_URL = demisto.params()['auth_id'].split('@')
AUTH_ID = AUTH_AND_TOKEN_URL[0]
ENC_KEY = demisto.params()['enc_key']
USE_SSL = not demisto.params().get('insecure', False)
FETCH_SEVERITY = demisto.params()['fetch_severity'].split(',')
FETCH_STATUS = demisto.params().get('fetch_status').split(',')
if len(AUTH_AND_TOKEN_URL) != 2:
TOKEN_RETRIEVAL_URL = 'https://oproxy.demisto.ninja/obtain-token' # disable-secrets-detection
else:
TOKEN_RETRIEVAL_URL = AUTH_AND_TOKEN_URL[1]
APP_NAME = 'ms-defender-atp'
''' HELPER FUNCTIONS '''
def epoch_seconds(d=None):
"""
    Return the number of seconds since the epoch for the given date.
    If no date is given, use the current time.
"""
if not d:
d = datetime.utcnow()
return int((d - datetime.utcfromtimestamp(0)).total_seconds())
def get_encrypted(content: str, key: str) -> str:
"""
Args:
content (str): content to encrypt. For a request to Demistobot for a new access token, content should be
the tenant id
key (str): encryption key from Demistobot
Returns:
encrypted timestamp:content
"""
def create_nonce() -> bytes:
return os.urandom(12)
def encrypt(string: str, enc_key: str) -> bytes:
"""
Args:
enc_key (str):
string (str):
Returns:
bytes:
"""
# String to bytes
enc_key = base64.b64decode(enc_key)
# Create key
aes_gcm = AESGCM(enc_key)
# Create nonce
nonce = create_nonce()
# Create ciphered data
data = string.encode()
ct = aes_gcm.encrypt(nonce, data, None)
return base64.b64encode(nonce + ct)
now = epoch_seconds()
encrypted = encrypt(f'{now}:{content}', key).decode('utf-8')
return encrypted
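# A minimal usage sketch (the key below is a freshly generated placeholder, not a
# real Demistobot secret):
#
#     key = base64.b64encode(AESGCM.generate_key(bit_length=256)).decode()
#     get_encrypted('my-tenant-id', key)
#     # -> base64(nonce + AES-GCM ciphertext) of "<epoch-seconds>:my-tenant-id"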
def get_access_token():
integration_context = demisto.getIntegrationContext()
access_token = integration_context.get('access_token')
valid_until = integration_context.get('valid_until')
calling_context = demisto.callingContext.get('context', {}) # type: ignore[attr-defined]
brand_name = calling_context.get('IntegrationBrand', '')
instance_name = calling_context.get('IntegrationInstance', '')
if access_token and valid_until:
if epoch_seconds() < valid_until:
return access_token
headers = {'Accept': 'application/json'}
headers['X-Content-Version'] = CONTENT_RELEASE_VERSION
headers['X-Branch-Name'] = CONTENT_BRANCH_NAME
headers['X-Content-Name'] = brand_name or instance_name or 'Name not found'
dbot_response = requests.post(
TOKEN_RETRIEVAL_URL,
headers=headers,
data=json.dumps({
'app_name': APP_NAME,
'registration_id': AUTH_ID,
'encrypted_token': get_encrypted(TENANT_ID, ENC_KEY)
}),
verify=USE_SSL
)
if dbot_response.status_code not in {200, 201}:
msg = 'Error in authentication. Try checking the credentials you entered.'
try:
demisto.info('Authentication failure from server: {} {} {}'.format(
dbot_response.status_code, dbot_response.reason, dbot_response.text))
err_response = dbot_response.json()
server_msg = err_response.get('message')
if not server_msg:
title = err_response.get('title')
detail = err_response.get('detail')
if title:
server_msg = f'{title}. {detail}'
if server_msg:
msg += ' Server message: {}'.format(server_msg)
except Exception as ex:
demisto.error('Failed parsing error response - Exception: {}'.format(ex))
raise Exception(msg)
try:
gcloud_function_exec_id = dbot_response.headers.get('Function-Execution-Id')
demisto.info(f'Google Cloud Function Execution ID: {gcloud_function_exec_id}')
parsed_response = dbot_response.json()
except ValueError:
raise Exception(
'There was a problem in retrieving an updated access token.\n'
'The response from the Demistobot server did not contain the expected content.'
)
access_token = parsed_response.get('access_token')
expires_in = parsed_response.get('expires_in', 3595)
time_now = epoch_seconds()
time_buffer = 5 # seconds by which to shorten the validity period
if expires_in - time_buffer > 0:
# err on the side of caution with a slightly shorter access token validity period
expires_in = expires_in - time_buffer
demisto.setIntegrationContext({
'access_token': access_token,
'valid_until': time_now + expires_in
})
return access_token
def http_request(method, url_suffix, json=None, params=None):
token = get_access_token()
r = requests.request(
method,
BASE_URL + url_suffix,
json=json,
headers={
'Authorization': 'Bearer ' + token,
'Content-Type': 'application/json'
},
verify=USE_SSL
)
if r.status_code not in {200, 201}:
try:
error = r.json().get('error')
msg = error['message'] if 'message' in error else r.reason
return_error('Error in API call to ATP [%d] - %s' % (r.status_code, msg))
except ValueError:
msg = r.text if r.text else r.reason
return_error('Error in API call to ATP [%d] - %s' % (r.status_code, msg))
if not r.text:
return {}
try:
return r.json()
except ValueError:
return {}
def alert_to_incident(alert):
incident = {}
incident['rawJSON'] = json.dumps(alert)
incident['name'] = 'Windows Defender ATP Alert ' + alert['id']
return incident
def capitalize_first_letter(string):
return string[:1].upper() + string[1:]
''' FUNCTIONS '''
def isolate_machine_command():
machine_id = demisto.args().get('machine_id')
comment = demisto.args().get('comment')
isolation_type = demisto.args().get('isolation_type')
response = isolate_machine(machine_id, comment, isolation_type)
ec = {
'MicrosoftATP.Machine(val.ID && val.ID === obj.ID)': {
'ID': machine_id,
'Isolation': {
'Isolated': True,
'Requestor': response.get('requestor'),
'RequestorComment': response.get('requestorComment')
}
}
}
entry = {
'Type': entryTypes['note'],
'Contents': response,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': 'The isolation request has been submitted successfully',
'EntryContext': ec
}
demisto.results(entry)
def isolate_machine(machine_id, comment, isolation_type):
cmd_url = '/machines/{}/isolate'.format(machine_id)
json = {
'Comment': comment
}
if isolation_type:
json['IsolationType'] = isolation_type
response = http_request('POST', cmd_url, json=json)
return response
def unisolate_machine_command():
machine_id = demisto.args().get('machine_id')
comment = demisto.args().get('comment')
response = unisolate_machine(machine_id, comment)
ec = {
'MicrosoftATP.Machine(val.ID && val.ID === obj.ID)': {
'ID': machine_id,
'Isolation': {
'Isolated': False,
'Requestor': response.get('requestor'),
'RequestorComment': response.get('requestorComment')
}
}
}
entry = {
'Type': entryTypes['note'],
'Contents': response,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': 'The request to stop the isolation has been submitted successfully',
'EntryContext': ec
}
demisto.results(entry)
def unisolate_machine(machine_id, comment):
cmd_url = '/machines/{}/unisolate'.format(machine_id)
json = {
'Comment': comment
}
response = http_request('POST', cmd_url, json=json)
return response
def get_machines_command():
machines = get_machines().get('value', [])
hostname = demisto.args().get('hostname')
ip = demisto.args().get('ip')
risk_score = demisto.args().get('risk_score')
health_status = demisto.args().get('health_status')
output = []
endpoint_context = []
for machine in machines:
computer_dns_name = machine.get('computerDnsName')
last_external_ip = machine.get('lastExternalIpAddress')
machine_risk_score = machine.get('riskScore')
machine_health_status = machine.get('healthStatus')
if (hostname and hostname != computer_dns_name) or (ip and ip != last_external_ip) or \
(risk_score and risk_score != machine_risk_score) or \
(health_status and health_status != machine_health_status):
continue
current_machine_output = {
'ComputerDNSName': computer_dns_name,
'ID': machine.get('id'),
'AgentVersion': machine.get('agentVersion'),
'FirstSeen': machine.get('firstSeen'),
'LastSeen': machine.get('lastSeen'),
'HealthStatus': machine_health_status,
'IsAADJoined': machine.get('isAadJoined'),
'LastExternalIPAddress': last_external_ip,
'LastIPAddress': machine.get('lastIpAddress'),
'Tags': machine.get('machineTags'),
'OSBuild': machine.get('osBuild'),
'OSPlatform': machine.get('osPlatform'),
'RBACGroupID': machine.get('rbacGroupId'),
'RiskScore': machine_risk_score
}
current_endpoint_output = {
'Hostname': machine.get('computerDnsName'),
'IPAddress': machine.get('lastExternalIpAddress'),
'OS': machine.get('osPlatform')
}
rbac_group_name = machine.get('rbacGroupName')
if rbac_group_name:
current_machine_output['RBACGroupName'] = rbac_group_name
aad_device_id = machine.get('aadDeviceId')
if aad_device_id:
current_machine_output['AADDeviceID'] = aad_device_id
os_version = machine.get('osVersion')
if os_version:
current_machine_output['OSVersion'] = os_version
current_endpoint_output['OSVersion'] = os_version
output.append(current_machine_output)
endpoint_context.append(current_endpoint_output)
if output:
ec = {
'MicrosoftATP.Machine(val.ID && val.ID === obj.ID)': output,
'Endpoint(val.Hostname && val.Hostname === obj.Hostname)': endpoint_context
}
entry = {
'Type': entryTypes['note'],
'Contents': machines,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Windows Defender ATP machines', output, removeNull=True),
'EntryContext': ec
}
else:
entry = 'No results found' # type: ignore
demisto.results(entry)
def get_machines():
cmd_url = '/machines'
response = http_request('GET', cmd_url)
return response
def get_file_related_machines_command():
file = demisto.args()['file']
machines = get_file_related_machines(file).get('value', [])
if machines:
output = []
endpoint_context = []
for machine in machines:
current_machine_output = {
'ComputerDNSName': machine.get('computerDnsName'),
'ID': machine.get('id'),
'AgentVersion': machine.get('agentVersion'),
'FirstSeen': machine.get('firstSeen'),
'LastSeen': machine.get('lastSeen'),
'HealthStatus': machine.get('healthStatus'),
'IsAADJoined': machine.get('isAadJoined'),
'LastExternalIPAddress': machine.get('lastExternalIpAddress'),
'LastIPAddress': machine.get('lastIpAddress'),
'Tags': machine.get('machineTags'),
'OSBuild': machine.get('osBuild'),
'OSPlatform': machine.get('osPlatform'),
'RBACGroupID': machine.get('rbacGroupId'),
'RiskScore': machine.get('riskScore'),
'RelatedFile': file
}
current_endpoint_output = {
'Hostname': machine.get('computerDnsName'),
'IPAddress': machine.get('lastExternalIpAddress'),
'OS': machine.get('osPlatform')
}
rbac_group_name = machine.get('rbacGroupName')
if rbac_group_name:
current_machine_output['RBACGroupName'] = rbac_group_name
aad_device_id = machine.get('aadDeviceId')
if aad_device_id:
current_machine_output['AADDeviceID'] = aad_device_id
os_version = machine.get('osVersion')
if os_version:
current_machine_output['OSVersion'] = os_version
current_endpoint_output['OSVersion'] = os_version
output.append(current_machine_output)
endpoint_context.append(current_endpoint_output)
ec = {
'MicrosoftATP.Machine(val.ID && val.ID === obj.ID)': output,
'Endpoint(val.Hostname && val.Hostname === obj.Hostname)': endpoint_context
}
title = 'Windows Defender ATP machines related to file {}'.format(file)
entry = {
'Type': entryTypes['note'],
'Contents': machines,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(title, output, removeNull=True),
'EntryContext': ec
}
else:
entry = 'No results found' # type: ignore
demisto.results(entry)
def get_file_related_machines(file):
cmd_url = '/files/{}/machines'.format(file)
response = http_request('GET', cmd_url)
return response
def get_machine_details_command():
machine_id = demisto.args()['machine_id']
machine = get_machine_details(machine_id)
if machine:
output = []
endpoint_context = []
current_machine_output = {
'ComputerDNSName': machine.get('computerDnsName'),
'ID': machine.get('id'),
'AgentVersion': machine.get('agentVersion'),
'FirstSeen': machine.get('firstSeen'),
'LastSeen': machine.get('lastSeen'),
'HealthStatus': machine.get('healthStatus'),
'IsAADJoined': machine.get('isAadJoined'),
'LastExternalIPAddress': machine.get('lastExternalIpAddress'),
'LastIPAddress': machine.get('lastIpAddress'),
'Tags': machine.get('machineTags'),
'OSBuild': machine.get('osBuild'),
'OSPlatform': machine.get('osPlatform'),
'RBACGroupID': machine.get('rbacGroupId'),
'RiskScore': machine.get('riskScore')
}
current_endpoint_output = {
'Hostname': machine.get('computerDnsName'),
'IPAddress': machine.get('lastExternalIpAddress'),
'OS': machine.get('osPlatform')
}
rbac_group_name = machine.get('rbacGroupName')
if rbac_group_name:
current_machine_output['RBACGroupName'] = rbac_group_name
aad_device_id = machine.get('aadDeviceId')
if aad_device_id:
current_machine_output['AADDeviceID'] = aad_device_id
os_version = machine.get('osVersion')
if os_version:
current_machine_output['OSVersion'] = os_version
current_endpoint_output['OSVersion'] = os_version
output.append(current_machine_output)
endpoint_context.append(current_endpoint_output)
ec = {
'MicrosoftATP.Machine(val.ID && val.ID === obj.ID)': output,
'Endpoint(val.Hostname && val.Hostname === obj.Hostname)': endpoint_context
}
title = 'Windows Defender ATP machine {} details'.format(machine_id)
entry = {
'Type': entryTypes['note'],
'Contents': machine,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(title, output, removeNull=True),
'EntryContext': ec
}
else:
entry = 'No results found' # type: ignore
demisto.results(entry)
def get_machine_details(machine_id):
cmd_url = '/machines/{}'.format(machine_id)
response = http_request('GET', cmd_url)
return response
def block_file_command():
file_sha1 = demisto.args().get('sha1')
comment = demisto.args().get('comment')
title = demisto.args().get('title')
expiration_time = demisto.args().get('expiration_time')
severity = demisto.args().get('severity')
recommended_actions = demisto.args().get('recommended_actions')
block_file(file_sha1, comment, title, expiration_time, severity, recommended_actions)
def block_file(file_sha1, comment, title, expiration_time, severity, recommended_actions):
cmd_url = '/tiindicators'
json = {
'indicator': file_sha1,
'indicatorType': 'FileSha1',
'action': 'AlertAndBlock',
'title': title,
'expirationTime': expiration_time,
'severity': severity,
'description': comment,
'recommendedActions': recommended_actions
}
response = http_request('POST', cmd_url, json=json)
return response
def get_user_related_machines(user_id):
cmd_url = '/users/{}/machines'.format(user_id)
response = http_request('GET', cmd_url)
return response
def stop_and_quarantine_file_command():
machine_id = demisto.args().get('machine_id')
file_sha1 = demisto.args().get('file')
comment = demisto.args().get('comment')
stop_and_quarantine_file(machine_id, file_sha1, comment)
def stop_and_quarantine_file(machine_id, file_sha1, comment):
cmd_url = '/machines/{}/stopAndQuarantineFile'.format(machine_id)
json = {
'Comment': comment,
'Sha1': file_sha1
}
response = http_request('POST', cmd_url, json=json)
return response
def run_antivirus_scan_command():
machine_id = demisto.args().get('machine_id')
scan_type = demisto.args().get('scan_type')
comment = demisto.args().get('comment')
run_antivirus_scan(machine_id, comment, scan_type)
demisto.results('Antivirus scan successfully triggered')
def run_antivirus_scan(machine_id, comment, scan_type):
cmd_url = '/machines/{}/runAntiVirusScan'.format(machine_id)
json = {
'Comment': comment,
'ScanType': scan_type
}
response = http_request('POST', cmd_url, json=json)
return response
def list_alerts_command():
alerts = list_alerts().get('value', [])
severity = demisto.args().get('severity')
status = demisto.args().get('status')
output = []
for alert in alerts:
alert_severity = alert.get('severity')
alert_status = alert.get('status')
if (severity and severity != alert_severity) or (status and status != alert_status):
continue
current_alert_output = {}
for key, value in alert.items():
if value or value is False:
current_alert_output[capitalize_first_letter(key).replace('Id', 'ID')] = value
output.append(current_alert_output)
if output:
ec = {
'MicrosoftATP.Alert(val.ID && val.ID === obj.ID)': output
}
title = 'Windows Defender ATP alerts'
entry = {
'Type': entryTypes['note'],
'Contents': alerts,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(title, output, removeNull=True),
'EntryContext': ec
}
else:
entry = 'No results found' # type: ignore
demisto.results(entry)
def list_alerts():
cmd_url = '/alerts'
response = http_request('GET', cmd_url)
return response
def update_alert_command():
alert_id = demisto.args()['alert_id']
assigned_to = demisto.args().get('assigned_to')
status = demisto.args().get('status')
classification = demisto.args().get('classification')
determination = demisto.args().get('determination')
if all(v is None for v in [assigned_to, status, classification, determination]):
return_error('No arguments were given to update the alert')
json = {}
context = {
'ID': alert_id
}
if assigned_to:
json['assignedTo'] = assigned_to
context['AssignedTo'] = assigned_to
if status:
json['status'] = status
context['Status'] = status
if classification:
json['classification'] = classification
context['Classification'] = classification
if determination:
json['determination'] = determination
context['Determination'] = determination
update_alert(alert_id, json)
ec = {
'MicrosoftATP.Alert(val.ID && val.ID === obj.ID)': context
}
entry = {
'Type': entryTypes['note'],
'Contents': '',
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': 'Alert {0} was updated successfully'.format(alert_id),
'EntryContext': ec
}
demisto.results(entry)
def update_alert(alert_id, json):
cmd_url = '/alerts/' + alert_id
response = http_request('PATCH', cmd_url, json=json)
return response
def get_alert_related_domains(alert_id):
cmd_url = '/alerts/{}/domains'.format(alert_id)
response = http_request('GET', cmd_url)
return response
def get_alert_related_files(alert_id):
cmd_url = '/alerts/{}/files'.format(alert_id)
response = http_request('GET', cmd_url)['value']
return response
def get_alert_related_ips(alert_id):
cmd_url = '/alerts/{}/ips'.format(alert_id)
response = http_request('GET', cmd_url)
return response
def get_advanced_hunting_command():
query = demisto.args().get('query')
response = get_advanced_hunting(query)
results = response.get('Results')
if isinstance(results, list) and len(results) == 1:
report_id = results[0].get('ReportId')
if report_id:
results[0]['ReportId'] = str(report_id)
ec = {
'MicrosoftATP.Hunt.Result': results
}
hr = tableToMarkdown('Hunt results', results, removeNull=True)
entry = {
'Type': entryTypes['note'],
'Contents': response,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': hr,
'EntryContext': ec
}
demisto.results(entry)
def get_advanced_hunting(query):
cmd_url = '/advancedqueries/run'
json = {
'Query': query
}
response = http_request('POST', cmd_url, json=json)
return response
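# A minimal usage sketch (the query text is hypothetical): the helper above simply
# wraps the query string and POSTs it to /advancedqueries/run.
#
#     get_advanced_hunting('DeviceNetworkEvents | take 10')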
def create_alert_command():
args = demisto.args()
response = create_alert(
args.get('machine_id'),
args.get('severity'),
args.get('title'),
args.get('description'),
args.get('event_time'),
args.get('report_id'),
args.get('recommended_action'),
args.get('category')
)
output = {
'MachineID': response.get('machineId'),
'RecommendedAction': response.get('recommendedAction'),
'Title': response.get('title'),
'Description': response.get('description'),
'Severity': response.get('severity'),
        'Category': response.get('category'),
'ReportID': response.get('reportId'),
'ID': response.get('id'),
'Status': response.get('status')
}
output = {k: v for k, v in output.items() if v is not None}
ec = {
'MicrosoftATP.Alert': output
}
hr = tableToMarkdown('Alert created:', output)
entry = {
'Type': entryTypes['note'],
'Contents': response,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': hr,
'EntryContext': ec
}
demisto.results(entry)
def create_alert(machine_id, severity, title, description, event_time, report_id, rec_action, category):
cmd_url = '/alerts/CreateAlertByReference'
json = {
'machineId': machine_id,
'severity': severity,
'title': title,
'description': description,
'eventTime': event_time,
'reportId': report_id
}
if rec_action:
json['recommendedAction'] = rec_action
if category:
json['category'] = category
response = http_request('POST', cmd_url, json=json)
return response
def get_alert_related_user_command():
alert_id = demisto.args().get('id')
response = get_alert_related_user(alert_id)
output = {
'ID': response.get('id'),
'AlertID': alert_id,
'FirstSeen': response.get('firstSeen'),
'LastSeen': response.get('lastSeen'),
'MostPrevalentMachineID': response.get('mostPrevalentMachineId'),
'LogonTypes': response.get('logonTypes'),
'LogonCount': response.get('logOnMachinesCount'),
'DomainAdmin': response.get('isDomainAdmin'),
'NetworkUser': response.get('isOnlyNetworkUser')
}
ec = {
'MicrosoftATP.User(val.AlertID === obj.AlertID && val.ID === obj.ID)': output
}
hr = tableToMarkdown('Alert Related User:', output, removeNull=True)
entry = {
'Type': entryTypes['note'],
'Contents': response,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': hr,
'EntryContext': ec
}
demisto.results(entry)
def get_alert_related_user(alert_id):
cmd_url = '/alerts/{}/user'.format(alert_id)
response = http_request('GET', cmd_url)
return response
def fetch_incidents():
last_run = demisto.getLastRun()
if last_run and last_run['last_alert_fetched_time']:
last_alert_fetched_time = datetime.strptime(last_run['last_alert_fetched_time'], '%Y-%m-%dT%H:%M:%S.%f')
else:
last_alert_fetched_time = datetime.now() - timedelta(days=300)
previous_ids = last_run.get('last_ids', [])
latest_creation_time = last_alert_fetched_time
alerts = list_alerts()['value']
incidents = []
last_ids = []
for alert in alerts:
        # Trim the trailing 'Z' (and one extra precision digit) so the timestamp
        # parses with the '%f' (microseconds) directive below
alert_creation_time = datetime.strptime(alert['alertCreationTime'][:-2], '%Y-%m-%dT%H:%M:%S.%f')
alert_status = alert['status']
alert_severity = alert['severity']
if alert_creation_time >= last_alert_fetched_time and alert_status in FETCH_STATUS and \
alert_severity in FETCH_SEVERITY and alert['id'] not in previous_ids:
incident = alert_to_incident(alert)
incidents.append(incident)
if alert_creation_time == latest_creation_time:
last_ids.append(alert["id"])
if alert_creation_time > latest_creation_time:
latest_creation_time = alert_creation_time
last_ids = [alert['id']]
if not last_ids:
last_ids = previous_ids
demisto.setLastRun({
'last_alert_fetched_time': datetime.strftime(latest_creation_time, '%Y-%m-%dT%H:%M:%S.%f'),
"last_ids": last_ids
})
demisto.incidents(incidents)
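# Note on the bookkeeping above: last_ids records the alerts whose creation time
# equals the stored watermark, so those alerts are not re-ingested on the next run
# when their timestamp matches last_alert_fetched_time exactly.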
def test_function():
token = get_access_token()
response = requests.get(
BASE_URL + '/alerts',
headers={
'Authorization': 'Bearer ' + token,
'Content-Type': 'application/json',
'Accept': 'application/json'
},
params={'$top': '1'},
verify=USE_SSL
)
try:
_ = response.json() if response.text else {}
if not response.ok:
            return_error(f'API call to Windows Advanced Threat Protection failed. '
f'Please check authentication related parameters. '
f'[{response.status_code}] - {response.reason}')
demisto.results('ok')
except TypeError as ex:
demisto.debug(str(ex))
return_error(f'API call to Windows Advanced Threat Protection failed, could not parse result. '
f'Please check authentication related parameters. [{response.status_code}]')
''' EXECUTION CODE '''
LOG('command is %s' % (demisto.command(), ))
try:
if demisto.command() == 'test-module':
test_function()
elif demisto.command() == 'fetch-incidents':
fetch_incidents()
elif demisto.command() == 'microsoft-atp-isolate-machine':
isolate_machine_command()
elif demisto.command() == 'microsoft-atp-unisolate-machine':
unisolate_machine_command()
elif demisto.command() == 'microsoft-atp-get-machines':
get_machines_command()
elif demisto.command() == 'microsoft-atp-get-file-related-machines':
get_file_related_machines_command()
elif demisto.command() == 'microsoft-atp-get-machine-details':
get_machine_details_command()
elif demisto.command() == 'microsoft-atp-block-file':
block_file_command()
elif demisto.command() == 'microsoft-atp-stop-and-quarantine-file':
stop_and_quarantine_file_command()
elif demisto.command() == 'microsoft-atp-run-antivirus-scan':
run_antivirus_scan_command()
elif demisto.command() == 'microsoft-atp-list-alerts':
list_alerts_command()
elif demisto.command() == 'microsoft-atp-update-alert':
update_alert_command()
elif demisto.command() == 'microsoft-atp-advanced-hunting':
get_advanced_hunting_command()
elif demisto.command() == 'microsoft-atp-create-alert':
create_alert_command()
elif demisto.command() == 'microsoft-atp-get-alert-related-user':
get_alert_related_user_command()
except Exception as e:
return_error(str(e))
| 33.19181
| 112
| 0.620999
|
d9ba62bc91cdbb31ea515ed4a79509812372c1f8
| 1,215
|
py
|
Python
|
page_objects/contact_page.py
|
Himanshu-tester/Online_Shopping_Portal
|
1299be0abea923fdb366603f9fb089902689d8d3
|
[
"MIT"
] | null | null | null |
page_objects/contact_page.py
|
Himanshu-tester/Online_Shopping_Portal
|
1299be0abea923fdb366603f9fb089902689d8d3
|
[
"MIT"
] | 1
|
2020-08-03T02:02:15.000Z
|
2020-08-03T02:02:15.000Z
|
page_objects/contact_page.py
|
Himanshu-tester/Online_Shopping_Portal
|
1299be0abea923fdb366603f9fb089902689d8d3
|
[
"MIT"
] | 1
|
2020-08-30T05:19:34.000Z
|
2020-08-30T05:19:34.000Z
|
import config
from page_objects.common import Common
from utilities.data_factory import DataRead
from utilities.locator_strategy import LocatorStrategy
class Contact(Common):
def __init__(self, driver):
super().__init__(driver)
self.driver = driver
self.data = DataRead.json_read('data.json')
contact_button = LocatorStrategy.locator_by_id("contact-link")
text_message = LocatorStrategy.locator_by_id("message")
subject_heading = LocatorStrategy.locator_by_id("id_contact")
email_address = LocatorStrategy.locator_by_id("email")
send_button = LocatorStrategy.locator_by_id("email")
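    # NOTE: this locator re-uses the "email" id declared for email_address above;
    # the page's actual submit-button id is not shown here.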
upload_file = LocatorStrategy.locator_by_id("fileUpload")
def contact_us_form(self):
self.click(Contact.contact_button)
self.enter_text(Contact.text_message, text="This is a test.")
select = self.select_option_from_drop_down(Contact.subject_heading)
select.select_by_index(1)
self.clear_text(Contact.email_address)
        self.enter_text(Contact.email_address, text=self.data['contact_email'])
        self.enter_text(Contact.upload_file, text=config.file_path + self.data['upload_file'])
self.click(Contact.send_button)
| 35.735294
| 93
| 0.745679
|
32ba126acdc50c5b9fea95e6a5d13ad599e6f4e5
| 12,868
|
py
|
Python
|
OttBands5minFixedOtt/jessepickerdata/dnafiles/ATOM-USDT 2021-09-11 2021-10-11.py
|
ysdede/jesse_strategies
|
ade9f4ba42cec11207c766d267b9d8feb8bce648
|
[
"CC0-1.0"
] | 38
|
2021-09-18T15:33:28.000Z
|
2022-02-21T17:29:08.000Z
|
OttBands5minFixedOtt/jessepickerdata/dnafiles/ATOM-USDT 2021-09-11 2021-10-11.py
|
ysdede/jesse_strategies
|
ade9f4ba42cec11207c766d267b9d8feb8bce648
|
[
"CC0-1.0"
] | 4
|
2022-01-02T14:46:12.000Z
|
2022-02-16T18:39:41.000Z
|
OttBands5minFixedOtt/jessepickerdata/dnafiles/ATOM-USDT 2021-09-11 2021-10-11.py
|
ysdede/jesse_strategies
|
ade9f4ba42cec11207c766d267b9d8feb8bce648
|
[
"CC0-1.0"
] | 11
|
2021-10-19T06:21:43.000Z
|
2022-02-21T17:29:10.000Z
|
dnas = [
['_W6,U', 40, 84, 9.31, 50, 14, 6.21, {'ott_len': 32, 'ott_percent': 210, 'ott_bw': 79, 'tps_qty_index': 6, 'max_risk': 43}],
['o4,@X', 42, 98, 8.28, 45, 20, 5.45, {'ott_len': 34, 'ott_percent': 166, 'ott_bw': 66, 'tps_qty_index': 38, 'max_risk': 45}],
['ftUQf', 46, 66, 10.18, 58, 12, 3.51, {'ott_len': 33, 'ott_percent': 246, 'ott_bw': 117, 'tps_qty_index': 65, 'max_risk': 54}],
['QYRcn', 50, 65, 19.63, 58, 12, 3.49, {'ott_len': 30, 'ott_percent': 212, 'ott_bw': 114, 'tps_qty_index': 93, 'max_risk': 59}],
['=bVNC', 46, 62, 22.8, 50, 12, -0.59, {'ott_len': 28, 'ott_percent': 224, 'ott_bw': 119, 'tps_qty_index': 60, 'max_risk': 32}],
['YOR9c', 51, 60, 21.87, 58, 12, 2.39, {'ott_len': 31, 'ott_percent': 200, 'ott_bw': 114, 'tps_qty_index': 27, 'max_risk': 52}],
['`RNWv', 51, 68, 21.49, 53, 13, 1.56, {'ott_len': 32, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 64}],
['cRNWv', 51, 68, 21.49, 53, 13, 1.56, {'ott_len': 32, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 64}],
['\\RNWv', 51, 68, 21.49, 53, 13, 1.56, {'ott_len': 32, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 64}],
[']RNWv', 51, 68, 21.49, 53, 13, 1.56, {'ott_len': 32, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 64}],
['aRNWv', 51, 68, 21.49, 53, 13, 1.56, {'ott_len': 32, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 64}],
['^RNWv', 51, 68, 21.49, 53, 13, 1.56, {'ott_len': 32, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 64}],
['_RNWv', 51, 68, 21.49, 53, 13, 1.56, {'ott_len': 32, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 64}],
['ui*5<', 44, 84, 12.12, 42, 14, 6.81, {'ott_len': 35, 'ott_percent': 232, 'ott_bw': 64, 'tps_qty_index': 21, 'max_risk': 28}],
['eRNWZ', 52, 67, 17.52, 53, 13, 2.3, {'ott_len': 33, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 47}],
['@cW*?', 47, 61, 22.93, 50, 12, 0.29, {'ott_len': 28, 'ott_percent': 225, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 30}],
['O=ITi', 49, 69, 21.32, 61, 13, 4.06, {'ott_len': 30, 'ott_percent': 177, 'ott_bw': 102, 'tps_qty_index': 70, 'max_risk': 56}],
['eRNWk', 52, 67, 17.52, 53, 13, 2.3, {'ott_len': 33, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 57}],
['eRQWv', 52, 63, 17.59, 63, 11, 4.81, {'ott_len': 33, 'ott_percent': 204, 'ott_bw': 112, 'tps_qty_index': 74, 'max_risk': 64}],
['g^VGt', 57, 61, 16.78, 63, 11, 5.52, {'ott_len': 33, 'ott_percent': 219, 'ott_bw': 119, 'tps_qty_index': 49, 'max_risk': 63}],
['ePRWv', 53, 60, 20.61, 63, 11, 4.2, {'ott_len': 33, 'ott_percent': 201, 'ott_bw': 114, 'tps_qty_index': 74, 'max_risk': 64}],
['ePPW\\', 52, 63, 19.94, 63, 11, 4.8, {'ott_len': 33, 'ott_percent': 201, 'ott_bw': 111, 'tps_qty_index': 74, 'max_risk': 48}],
['@_W*?', 44, 58, 22.34, 55, 9, 4.25, {'ott_len': 28, 'ott_percent': 220, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 30}],
['3fWHn', 56, 64, 27.28, 58, 12, 4.26, {'ott_len': 26, 'ott_percent': 229, 'ott_bw': 120, 'tps_qty_index': 51, 'max_risk': 59}],
['WrVZ;', 49, 65, 9.97, 50, 10, -1.45, {'ott_len': 31, 'ott_percent': 244, 'ott_bw': 119, 'tps_qty_index': 79, 'max_risk': 27}],
['@^W*?', 45, 57, 21.06, 55, 9, 4.26, {'ott_len': 28, 'ott_percent': 219, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 30}],
['CMNWv', 52, 71, 22.36, 58, 12, 4.3, {'ott_len': 28, 'ott_percent': 197, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 64}],
['eVPWc', 52, 65, 21.16, 53, 13, 1.82, {'ott_len': 33, 'ott_percent': 209, 'ott_bw': 111, 'tps_qty_index': 74, 'max_risk': 52}],
['ePPWb', 52, 63, 19.94, 63, 11, 4.8, {'ott_len': 33, 'ott_percent': 201, 'ott_bw': 111, 'tps_qty_index': 74, 'max_risk': 52}],
['t]bik', 57, 35, 9.33, 62, 8, 4.47, {'ott_len': 35, 'ott_percent': 217, 'ott_bw': 134, 'tps_qty_index': 103, 'max_risk': 57}],
['eRNDv', 53, 67, 17.86, 53, 13, 3.08, {'ott_len': 33, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 44, 'max_risk': 64}],
['wVW*?', 48, 52, 15.52, 40, 10, -0.23, {'ott_len': 35, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 30}],
['3OWXC', 54, 57, 20.13, 63, 11, 5.57, {'ott_len': 26, 'ott_percent': 200, 'ott_bw': 120, 'tps_qty_index': 76, 'max_risk': 32}],
['3OWXE', 55, 58, 20.61, 63, 11, 5.57, {'ott_len': 26, 'ott_percent': 200, 'ott_bw': 120, 'tps_qty_index': 76, 'max_risk': 33}],
['eSNWd', 50, 67, 18.68, 53, 13, 2.22, {'ott_len': 33, 'ott_percent': 205, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 53}],
['fVPWv', 52, 65, 21.16, 53, 13, 1.82, {'ott_len': 33, 'ott_percent': 209, 'ott_bw': 111, 'tps_qty_index': 74, 'max_risk': 64}],
['gVPWv', 52, 65, 21.16, 53, 13, 1.82, {'ott_len': 33, 'ott_percent': 209, 'ott_bw': 111, 'tps_qty_index': 74, 'max_risk': 64}],
['-VUWv', 53, 66, 19.51, 58, 12, 3.47, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 117, 'tps_qty_index': 74, 'max_risk': 64}],
['BVV.a', 50, 59, 27.82, 58, 12, 2.71, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 119, 'tps_qty_index': 9, 'max_risk': 51}],
['<VV.a', 50, 59, 27.82, 58, 12, 2.71, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 119, 'tps_qty_index': 9, 'max_risk': 51}],
['XRV.a', 50, 54, 25.07, 58, 12, 1.52, {'ott_len': 31, 'ott_percent': 204, 'ott_bw': 119, 'tps_qty_index': 9, 'max_risk': 51}],
['ePPHa', 57, 63, 21.8, 63, 11, 5.36, {'ott_len': 33, 'ott_percent': 201, 'ott_bw': 111, 'tps_qty_index': 51, 'max_risk': 51}],
['FVW*?', 50, 53, 22.75, 36, 11, -1.52, {'ott_len': 29, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 30}],
['3eWXn', 53, 64, 29.51, 58, 12, 4.39, {'ott_len': 26, 'ott_percent': 227, 'ott_bw': 120, 'tps_qty_index': 76, 'max_risk': 59}],
['?^WWv', 53, 60, 21.94, 63, 11, 6.61, {'ott_len': 28, 'ott_percent': 219, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 64}],
['ePPH]', 57, 63, 21.8, 63, 11, 5.36, {'ott_len': 33, 'ott_percent': 201, 'ott_bw': 111, 'tps_qty_index': 51, 'max_risk': 49}],
['hPmHf', 73, 19, 19.46, 75, 4, 4.96, {'ott_len': 33, 'ott_percent': 201, 'ott_bw': 147, 'tps_qty_index': 51, 'max_risk': 54}],
['ePjWv', 68, 22, 19.21, 66, 6, 5.68, {'ott_len': 33, 'ott_percent': 201, 'ott_bw': 144, 'tps_qty_index': 74, 'max_risk': 64}],
['?dpMr', 61, 26, 28.05, 50, 8, 2.43, {'ott_len': 28, 'ott_percent': 226, 'ott_bw': 151, 'tps_qty_index': 59, 'max_risk': 62}],
['IVWWv', 51, 58, 22.46, 58, 12, 1.85, {'ott_len': 29, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 64}],
['eEsWv', 66, 9, 10.3, 75, 4, 5.13, {'ott_len': 33, 'ott_percent': 187, 'ott_bw': 155, 'tps_qty_index': 74, 'max_risk': 64}],
['[VsWv', 63, 19, 14.24, 75, 4, 6.76, {'ott_len': 31, 'ott_percent': 209, 'ott_bw': 155, 'tps_qty_index': 74, 'max_risk': 64}],
['WVsWv', 63, 19, 14.24, 75, 4, 6.76, {'ott_len': 31, 'ott_percent': 209, 'ott_bw': 155, 'tps_qty_index': 74, 'max_risk': 64}],
['eVkWv', 72, 22, 20.19, 66, 6, 5.82, {'ott_len': 33, 'ott_percent': 209, 'ott_bw': 145, 'tps_qty_index': 74, 'max_risk': 64}],
[':YY:_', 54, 59, 21.43, 58, 12, 3.52, {'ott_len': 27, 'ott_percent': 212, 'ott_bw': 122, 'tps_qty_index': 28, 'max_risk': 50}],
['-VWW^', 58, 60, 22.96, 58, 12, 3.47, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 49}],
['-dW6n', 51, 64, 27.68, 58, 12, 5.23, {'ott_len': 26, 'ott_percent': 226, 'ott_bw': 120, 'tps_qty_index': 22, 'max_risk': 59}],
['q9da]', 71, 14, 11.37, 75, 4, 3.13, {'ott_len': 34, 'ott_percent': 172, 'ott_bw': 136, 'tps_qty_index': 90, 'max_risk': 49}],
['7Fpob', 66, 12, 12.15, 75, 4, 3.62, {'ott_len': 27, 'ott_percent': 189, 'ott_bw': 151, 'tps_qty_index': 112, 'max_risk': 52}],
['HPqWv', 64, 17, 16.65, 60, 5, 2.69, {'ott_len': 29, 'ott_percent': 201, 'ott_bw': 152, 'tps_qty_index': 74, 'max_risk': 64}],
['?VW.v', 49, 59, 25.96, 58, 12, 2.45, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 9, 'max_risk': 64}],
['LewDb', 76, 17, 19.15, 80, 5, 8.45, {'ott_len': 30, 'ott_percent': 227, 'ott_bw': 160, 'tps_qty_index': 44, 'max_risk': 52}],
['[\\sta', 66, 18, 12.71, 80, 5, 5.61, {'ott_len': 31, 'ott_percent': 216, 'ott_bw': 155, 'tps_qty_index': 120, 'max_risk': 51}],
['eVswv', 63, 19, 11.55, 100, 4, 5.34, {'ott_len': 33, 'ott_percent': 209, 'ott_bw': 155, 'tps_qty_index': 125, 'max_risk': 64}],
['@VW*A', 49, 55, 25.8, 45, 11, -0.99, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 31}],
['-VW=v', 55, 61, 21.99, 58, 12, 3.27, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 33, 'max_risk': 64}],
[':VWWv', 55, 61, 23.82, 58, 12, 3.32, {'ott_len': 27, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 64}],
['7VWWv', 55, 61, 23.82, 58, 12, 3.32, {'ott_len': 27, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 64}],
['-VW;v', 56, 60, 21.81, 58, 12, 3.24, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 30, 'max_risk': 64}],
['MVsWv', 66, 18, 17.72, 60, 5, 4.17, {'ott_len': 30, 'ott_percent': 209, 'ott_bw': 155, 'tps_qty_index': 74, 'max_risk': 64}],
['=ptVt', 73, 26, 30.29, 50, 8, 1.89, {'ott_len': 28, 'ott_percent': 241, 'ott_bw': 156, 'tps_qty_index': 73, 'max_risk': 63}],
['-VW?v', 55, 61, 23.02, 58, 12, 3.04, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 36, 'max_risk': 64}],
['-VW.v', 49, 61, 23.86, 58, 12, 4.35, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 9, 'max_risk': 64}],
['?VW2v', 52, 59, 27.13, 58, 12, 2.6, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 16, 'max_risk': 64}],
['-VW<v', 58, 60, 23.78, 58, 12, 3.9, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 32, 'max_risk': 64}],
['-SUNv', 51, 64, 24.47, 58, 12, 3.76, {'ott_len': 26, 'ott_percent': 205, 'ott_bw': 117, 'tps_qty_index': 60, 'max_risk': 64}],
['-VWLv', 50, 60, 24.44, 58, 12, 2.84, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 57, 'max_risk': 64}],
['@VW)?', 48, 54, 23.4, 45, 11, -1.08, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 2, 'max_risk': 30}],
['@XW*?', 50, 54, 25.83, 50, 10, 1.55, {'ott_len': 28, 'ott_percent': 211, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 30}],
['-VWMv', 50, 61, 23.08, 58, 12, 3.48, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 59, 'max_risk': 64}],
['-VWWu', 57, 61, 24.09, 58, 12, 3.47, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 64}],
[',VWWv', 57, 61, 24.09, 58, 12, 3.47, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 64}],
['vNqn]', 81, 11, 12.65, 100, 4, 9.27, {'ott_len': 35, 'ott_percent': 199, 'ott_bw': 152, 'tps_qty_index': 111, 'max_risk': 49}],
['@VW*F', 49, 55, 26.22, 45, 11, -0.99, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 34}],
['?VuWv', 62, 16, 15.34, 60, 5, 2.75, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 157, 'tps_qty_index': 74, 'max_risk': 64}],
['@VW.?', 49, 55, 20.38, 45, 11, -0.98, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 9, 'max_risk': 30}],
['@VWF?', 54, 55, 19.17, 45, 11, -1.64, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 47, 'max_risk': 30}],
['-VWWl', 57, 61, 24.09, 58, 12, 3.47, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 58}],
['-VWWs', 57, 61, 24.09, 58, 12, 3.47, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 62}],
['@VX*?', 50, 53, 24.04, 50, 10, 1.23, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 121, 'tps_qty_index': 3, 'max_risk': 30}],
['-VW*9', 57, 49, 23.19, 27, 11, -0.52, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 26}],
['hPPHs', 57, 63, 21.8, 63, 11, 5.36, {'ott_len': 33, 'ott_percent': 201, 'ott_bw': 111, 'tps_qty_index': 51, 'max_risk': 62}],
['ePPHt', 57, 63, 21.8, 63, 11, 5.36, {'ott_len': 33, 'ott_percent': 201, 'ott_bw': 111, 'tps_qty_index': 51, 'max_risk': 63}],
['@VW4?', 49, 55, 17.59, 45, 11, -1.73, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 19, 'max_risk': 30}],
['3OW?n', 54, 59, 24.5, 66, 12, 3.73, {'ott_len': 26, 'ott_percent': 200, 'ott_bw': 120, 'tps_qty_index': 36, 'max_risk': 59}],
['-VWWa', 58, 60, 22.96, 58, 12, 3.47, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 51}],
['-VW*=', 55, 54, 29.83, 33, 12, -1.75, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 28}],
['E^c[A', 58, 34, 10.18, 50, 10, -1.0, {'ott_len': 29, 'ott_percent': 219, 'ott_bw': 135, 'tps_qty_index': 81, 'max_risk': 31}],
['@TW*?', 51, 56, 14.24, 45, 11, -1.0, {'ott_len': 28, 'ott_percent': 206, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 30}],
['3OW5n', 50, 59, 24.24, 66, 12, 4.05, {'ott_len': 26, 'ott_percent': 200, 'ott_bw': 120, 'tps_qty_index': 21, 'max_risk': 59}],
]
| 124.932039
| 129
| 0.586027
|
ffa134341a503e28229eebcf0841bf696436eb7d
| 1,706
|
py
|
Python
|
boa3/model/builtin/interop/oracle/oracleresponsecodetype.py
|
OnBlockIO/neo3-boa
|
cb317292a67532a52ed26f2b0f0f7d0b10ac5f5f
|
[
"Apache-2.0"
] | 25
|
2020-07-22T19:37:43.000Z
|
2022-03-08T03:23:55.000Z
|
boa3/model/builtin/interop/oracle/oracleresponsecodetype.py
|
OnBlockIO/neo3-boa
|
cb317292a67532a52ed26f2b0f0f7d0b10ac5f5f
|
[
"Apache-2.0"
] | 419
|
2020-04-23T17:48:14.000Z
|
2022-03-31T13:17:45.000Z
|
boa3/model/builtin/interop/oracle/oracleresponsecodetype.py
|
OnBlockIO/neo3-boa
|
cb317292a67532a52ed26f2b0f0f7d0b10ac5f5f
|
[
"Apache-2.0"
] | 15
|
2020-05-21T21:54:24.000Z
|
2021-11-18T06:17:24.000Z
|
from typing import Any, Dict
from boa3.model.symbol import ISymbol
from boa3.model.type.itype import IType
from boa3.model.type.primitive.inttype import IntType
class OracleResponseCodeType(IntType):
"""
A class used to represent Neo's OracleResponseCode.
"""
def __init__(self):
super().__init__()
self._identifier = 'OracleResponseCode'
@classmethod
def build(cls, value: Any = None) -> IType:
if value is None or cls._is_type_of(value):
return _OracleResponseCode
@classmethod
def _is_type_of(cls, value: Any):
from boa3.builtin.interop.oracle.oracleresponsecode import OracleResponseCode
return isinstance(value, (OracleResponseCode, OracleResponseCodeType))
@property
def symbols(self) -> Dict[str, ISymbol]:
"""
Gets the class symbols of this type.
        :return: a dictionary that maps the name of each symbol in the module to its symbol
"""
        from boa3.builtin.interop.oracle.oracleresponsecode import OracleResponseCode
        from boa3.model.variable import Variable
        _symbols = super().symbols
        _symbols.update({name: Variable(self) for name in OracleResponseCode.__members__.keys()})
return _symbols
def get_value(self, symbol_id) -> Any:
"""
Gets the literal value of a symbol.
:return: the value if this type has this symbol. None otherwise
"""
from boa3.builtin.interop.oracle.oracleresponsecode import OracleResponseCode
if symbol_id in self.symbols and symbol_id in OracleResponseCode.__members__:
return OracleResponseCode.__members__[symbol_id]
return None
_OracleResponseCode = OracleResponseCodeType()
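# Illustrative usage (a sketch, not part of the original module): how the
# singleton above could be consumed. The enum member name 'Success' is assumed
# here purely for illustration.
#
#     from boa3.builtin.interop.oracle.oracleresponsecode import OracleResponseCode
#     response_type = OracleResponseCodeType.build(OracleResponseCode.Success)
#     assert 'Success' in response_type.symbols
#     assert response_type.get_value('Success') == OracleResponseCode.Success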
| 29.929825
| 85
| 0.684642
|
e89cea9c6c7e556929ee7656fdfcae391c0ce2ee
| 880
|
py
|
Python
|
sourcecode/raspberrypi_node/pong.py
|
sansastra/Animal-Human-Cohabitation
|
50662240e3387da5b41c2f52745d710783485dcf
|
[
"MIT"
] | 5
|
2020-12-22T10:17:35.000Z
|
2020-12-22T11:23:39.000Z
|
sourcecode/raspberrypi_node/pong.py
|
sansastra/Animal-Human-Cohabitation
|
50662240e3387da5b41c2f52745d710783485dcf
|
[
"MIT"
] | null | null | null |
sourcecode/raspberrypi_node/pong.py
|
sansastra/Animal-Human-Cohabitation
|
50662240e3387da5b41c2f52745d710783485dcf
|
[
"MIT"
] | null | null | null |
"""import serial
import time
from xbee import XBee
serial_port = serial.Serial('/dev/ttyUSB0', 9600)
def print_data(data):
print (data)
xbee = XBee(serial_port, callback=print_data)
while True:
try:
time.sleep(0.001)
except KeyboardInterrupt:
break
xbee.halt()
serial_port.close()
"""
import serial
from xbee import XBee
import struct
from time import sleep
SERVER_ADDRESS = "\x00\x01"
BAUDRATE = 57600
PORT = '/dev/ttyAMA0'
def pong(msg):
    print(msg)
    payload = struct.pack(">B", 2)
    payload += "Hi".encode()
    xbee.tx(dest_addr=SERVER_ADDRESS, data=payload)
    print("ping received")
serial_port = serial.Serial(PORT, BAUDRATE)
xbee = XBee(serial_port, callback=pong)
while True:
try:
sleep(0.001)
except KeyboardInterrupt:
print("Bye!")
break
#THE END!
xbee.halt()
serial_port.close()
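# Illustrative counterpart (a sketch, not part of this node's code): the peer at
# SERVER_ADDRESS would trigger the pong() callback above by transmitting a frame
# with the same python-xbee API used in this file. NODE_ADDRESS, the port name
# and the baud rate below are assumed placeholders.
#
#     import serial
#     import struct
#     from xbee import XBee
#
#     NODE_ADDRESS = "\x00\x02"  # assumed 16-bit address of the pong node
#     server_port = serial.Serial('/dev/ttyUSB0', 57600)
#     server_xbee = XBee(server_port, callback=print)
#     ping_payload = struct.pack(">B", 1) + "ping".encode()
#     server_xbee.tx(dest_addr=NODE_ADDRESS, data=ping_payload)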
| 17.959184
| 50
| 0.65
|
8deee3da9f4f86b94730574cb01990426de7e707
| 8,283
|
py
|
Python
|
venv/Lib/site-packages/websocket/tests/test_http.py
|
GuilhermeJC13/storIA
|
eeecbe9030426f70c6aa73ca0ce8382860c8495c
|
[
"MIT"
] | 4
|
2021-07-27T23:39:02.000Z
|
2021-09-23T04:17:08.000Z
|
venv/Lib/site-packages/websocket/tests/test_http.py
|
GuilhermeJC13/storIA
|
eeecbe9030426f70c6aa73ca0ce8382860c8495c
|
[
"MIT"
] | 12
|
2021-04-11T19:46:06.000Z
|
2021-06-18T16:08:37.000Z
|
venv/Lib/site-packages/websocket/tests/test_http.py
|
GuilhermeJC13/storIA
|
eeecbe9030426f70c6aa73ca0ce8382860c8495c
|
[
"MIT"
] | 3
|
2021-07-27T17:33:58.000Z
|
2021-07-29T12:46:59.000Z
|
# -*- coding: utf-8 -*-
#
"""
websocket - WebSocket client library for Python
Copyright (C) 2010 Hiroki Ohtani(liris)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
import os
import os.path
import websocket as ws
from websocket._http import proxy_info, read_headers, _open_proxied_socket, _tunnel, _get_addrinfo_list, connect
import sys
import unittest
import ssl
import websocket
import socks
import socket
sys.path[0:0] = [""]
# Skip tests that require internet access unless explicitly enabled.
TEST_WITH_INTERNET = os.environ.get('TEST_WITH_INTERNET', '0') == '1'
TEST_WITH_PROXY = os.environ.get('TEST_WITH_PROXY', '0') == '1'
class SockMock(object):
def __init__(self):
self.data = []
self.sent = []
def add_packet(self, data):
self.data.append(data)
def gettimeout(self):
return None
def recv(self, bufsize):
if self.data:
e = self.data.pop(0)
if isinstance(e, Exception):
raise e
if len(e) > bufsize:
self.data.insert(0, e[bufsize:])
return e[:bufsize]
def send(self, data):
self.sent.append(data)
return len(data)
def close(self):
pass
class HeaderSockMock(SockMock):
def __init__(self, fname):
SockMock.__init__(self)
path = os.path.join(os.path.dirname(__file__), fname)
with open(path, "rb") as f:
self.add_packet(f.read())
class OptsList():
def __init__(self):
self.timeout = 1
self.sockopt = []
self.sslopt = {"cert_reqs": ssl.CERT_NONE}
class HttpTest(unittest.TestCase):
def testReadHeader(self):
status, header, status_message = read_headers(HeaderSockMock("data/header01.txt"))
self.assertEqual(status, 101)
self.assertEqual(header["connection"], "Upgrade")
# header02.txt is intentionally malformed
self.assertRaises(ws.WebSocketException, read_headers, HeaderSockMock("data/header02.txt"))
def testTunnel(self):
self.assertRaises(ws.WebSocketProxyException, _tunnel, HeaderSockMock("data/header01.txt"), "example.com", 80, ("username", "password"))
self.assertRaises(ws.WebSocketProxyException, _tunnel, HeaderSockMock("data/header02.txt"), "example.com", 80, ("username", "password"))
@unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
def testConnect(self):
# Not currently testing an actual proxy connection, so just check whether TypeError is raised. This requires internet for a DNS lookup
self.assertRaises(TypeError, _open_proxied_socket, "wss://example.com", OptsList(), proxy_info(http_proxy_host=None, http_proxy_port=None, proxy_type=None))
self.assertRaises(socks.ProxyConnectionError, _open_proxied_socket, "wss://example.com", OptsList(), proxy_info(http_proxy_host="example.com", http_proxy_port="8080", proxy_type="http"))
self.assertRaises(socks.ProxyConnectionError, _open_proxied_socket, "wss://example.com", OptsList(), proxy_info(http_proxy_host="example.com", http_proxy_port="8080", proxy_type="socks4"))
self.assertRaises(socks.ProxyConnectionError, _open_proxied_socket, "wss://example.com", OptsList(), proxy_info(http_proxy_host="example.com", http_proxy_port="8080", proxy_type="socks5h"))
self.assertRaises(TypeError, _get_addrinfo_list, None, 80, True, proxy_info(http_proxy_host="127.0.0.1", http_proxy_port="8080", proxy_type="http"))
self.assertRaises(TypeError, _get_addrinfo_list, None, 80, True, proxy_info(http_proxy_host="127.0.0.1", http_proxy_port="8080", proxy_type="http"))
self.assertRaises(socks.ProxyConnectionError, connect, "wss://example.com", OptsList(), proxy_info(http_proxy_host="127.0.0.1", http_proxy_port=8080, proxy_type="socks4"), None)
self.assertRaises(socket.timeout, connect, "wss://google.com", OptsList(), proxy_info(http_proxy_host="8.8.8.8", http_proxy_port=8080, proxy_type="http"), None)
self.assertEqual(
connect("wss://google.com", OptsList(), proxy_info(http_proxy_host="8.8.8.8", http_proxy_port=8080, proxy_type="http"), True),
(True, ("google.com", 443, "/")))
# The following test fails on Mac OS with a gaierror, not an OverflowError
# self.assertRaises(OverflowError, connect, "wss://example.com", OptsList(), proxy_info(http_proxy_host="127.0.0.1", http_proxy_port=99999, proxy_type="socks4", timeout=2), False)
@unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
@unittest.skipUnless(TEST_WITH_PROXY, "This test requires a HTTP proxy to be running on port 8899")
def testProxyConnect(self):
self.assertEqual(_open_proxied_socket("wss://api.bitfinex.com/ws/2", OptsList(), proxy_info(http_proxy_host="127.0.0.1", http_proxy_port="8899", proxy_type="http"))[1], ("api.bitfinex.com", 443, '/ws/2'))
self.assertEqual(_get_addrinfo_list("api.bitfinex.com", 443, True, proxy_info(http_proxy_host="127.0.0.1", http_proxy_port="8899", proxy_type="http")),
(socket.getaddrinfo("127.0.0.1", 8899, 0, socket.SOCK_STREAM, socket.SOL_TCP), True, None))
self.assertEqual(connect("wss://api.bitfinex.com/ws/2", OptsList(), proxy_info(http_proxy_host="127.0.0.1", http_proxy_port=8899, proxy_type="http"), None)[1], ("api.bitfinex.com", 443, '/ws/2'))
        # TODO: Test SOCKS4 and SOCKS5 proxies with unit tests
@unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
def testSSLopt(self):
ssloptions = {
"cert_reqs": ssl.CERT_NONE,
"check_hostname": False,
"server_hostname": "ServerName",
"ssl_version": ssl.PROTOCOL_TLS,
"ciphers": "TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256:\
TLS_AES_128_GCM_SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:\
ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES256-GCM-SHA384:\
ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:\
DHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:\
ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES128-GCM-SHA256:\
ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:\
DHE-RSA-AES256-SHA256:ECDHE-ECDSA-AES128-SHA256:\
ECDHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA256:\
ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA",
"ecdh_curve": "prime256v1"
}
ws_ssl1 = websocket.WebSocket(sslopt=ssloptions)
ws_ssl1.connect("wss://api.bitfinex.com/ws/2")
ws_ssl1.send("Hello")
ws_ssl1.close()
ws_ssl2 = websocket.WebSocket(sslopt={"check_hostname": True})
ws_ssl2.connect("wss://api.bitfinex.com/ws/2")
        ws_ssl2.close()
def testProxyInfo(self):
self.assertEqual(proxy_info(http_proxy_host="127.0.0.1", http_proxy_port="8080", proxy_type="http").type, "http")
self.assertRaises(ValueError, proxy_info, http_proxy_host="127.0.0.1", http_proxy_port="8080", proxy_type="badval")
self.assertEqual(proxy_info(http_proxy_host="example.com", http_proxy_port="8080", proxy_type="http").host, "example.com")
self.assertEqual(proxy_info(http_proxy_host="127.0.0.1", http_proxy_port="8080", proxy_type="http").port, "8080")
self.assertEqual(proxy_info(http_proxy_host="127.0.0.1", http_proxy_port="8080", proxy_type="http").auth, None)
if __name__ == "__main__":
unittest.main()
| 50.815951
| 212
| 0.68212
|
acfdb0cf2b3947bda82b7f5ee751eb3022dc939d
| 22,544
|
py
|
Python
|
pgmpy/models/DynamicBayesianNetwork.py
|
NunoEdgarGFlowHub/pgmpy
|
ac0ecc8f5bdd14999c386c6b00a3ce77407b83ce
|
[
"MIT"
] | 1
|
2016-08-27T18:30:57.000Z
|
2016-08-27T18:30:57.000Z
|
pgmpy/models/DynamicBayesianNetwork.py
|
NunoEdgarGFlowHub/pgmpy
|
ac0ecc8f5bdd14999c386c6b00a3ce77407b83ce
|
[
"MIT"
] | null | null | null |
pgmpy/models/DynamicBayesianNetwork.py
|
NunoEdgarGFlowHub/pgmpy
|
ac0ecc8f5bdd14999c386c6b00a3ce77407b83ce
|
[
"MIT"
] | 1
|
2016-08-27T18:31:00.000Z
|
2016-08-27T18:31:00.000Z
|
from itertools import combinations
from collections import defaultdict
import numpy as np
import networkx as nx
from pgmpy.factors import TabularCPD
from pgmpy.base import DirectedGraph, UndirectedGraph
class DynamicBayesianNetwork(DirectedGraph):
def __init__(self, ebunch=None):
"""
Base class for Dynamic Bayesian Network
        This is a time-variant model of a static Bayesian network, where each
        time slice has some static nodes and the structure is replicated over a
        certain time period.
The nodes can be any hashable python objects.
Parameters:
----------
        ebunch: Data to initialize the graph. If ebunch=None (default) an empty
        graph is created. The data can be an edge list or any NetworkX
        graph object.
Examples:
--------
Create an empty Dynamic Bayesian Network with no nodes and no edges:
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> dbn = DBN()
Adding nodes and edges inside the dynamic bayesian network. A single
node can be added using the method below. For adding edges we need to
specify the time slice since edges can be across different time slices.
For example for a network as [image](http://s8.postimg.org/aaybw4x2t/Blank_Flowchart_New_Page_1.png),
we will need to add all the edges in the 2-TBN as:
>>> dbn.add_edges_from([(('D', 0), ('G', 0)), (('I', 0), ('G', 0)),
... (('G', 0), ('L', 0)), (('D', 0), ('D', 1)),
... (('I', 0), ('I', 1)), (('G', 0), ('G', 1)),
... (('G', 0), ('L', 1)), (('L', 0), ('L', 1))])
We can query the edges and nodes in the network as:
>>> dbn.nodes()
['G', 'D', 'I', 'L']
>>> dbn.edges()
[(('D', 1), ('G', 1)), (('I', 0), ('G', 0)), (('I', 0), ('I', 1)),
(('I', 1), ('G', 1)), (('G', 0), ('L', 0)), (('G', 0), ('G', 1)),
(('G', 0), ('L', 1)), (('D', 0), ('G', 0)), (('D', 0), ('D', 1)),
(('L', 0), ('L', 1)), (('G', 1), ('L', 1))]
If any variable is not present in the network while adding an edge,
pgmpy will automatically add that variable to the network.
        But for adding nodes to the model we don't need to specify the time
        slice, since a node is common to all the time slices, and pgmpy therefore
        automatically replicates it across all the time slices. For example, to
        add a new variable `S` to the above network we can simply do:
>>> dbn.add_node('S')
>>> dbn.nodes()
['S', 'G', 'D', 'I', 'L']
Public Methods:
---------------
add_node
add_nodes_from
add_edges
add_edges_from
add_cpds
initialize_initial_state
inter_slice
intra_slice
"""
super(DynamicBayesianNetwork, self).__init__()
if ebunch:
self.add_edges_from(ebunch)
self.cpds = []
self.cardinalities = defaultdict(int)
def add_node(self, node, **attr):
"""
Adds a single node to the Network
Parameters
----------
node: node
A node can be any hashable Python object.
Examples
--------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> dbn = DBN()
>>> dbn.add_node('A')
['A']
"""
super(DynamicBayesianNetwork, self).add_node((node, 0), **attr)
def add_nodes_from(self, nodes, **attr):
"""
Add multiple nodes to the Network.
Parameters
----------
nodes: iterable container
A container of nodes (list, dict, set, etc.).
Examples
--------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> dbn = DBN()
>>> dbn.add_nodes_from(['A', 'B', 'C'])
"""
for node in nodes:
self.add_node(node)
def nodes(self):
"""
Returns the list of nodes present in the network
Examples
--------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> dbn = DBN()
>>> dbn.add_nodes_from(['A', 'B', 'C'])
>>> dbn.nodes()
['B', 'A', 'C']
"""
return list(set([node for node, timeslice in
super(DynamicBayesianNetwork, self).nodes()]))
def add_edge(self, start, end, **kwargs):
"""
Add an edge between two nodes.
The nodes will be automatically added if they are not present in the network.
Parameters
----------
start: tuple
Both the start and end nodes should specify the time slice as
(node_name, time_slice). Here, node_name can be any hashable
python object while the time_slice is an integer value,
which denotes the time slice that the node belongs to.
end: tuple
Both the start and end nodes should specify the time slice as
(node_name, time_slice). Here, node_name can be any hashable
python object while the time_slice is an integer value,
which denotes the time slice that the node belongs to.
Examples
--------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> model = DBN()
>>> model.add_nodes_from(['D', 'I'])
>>> model.add_edge(('D',0), ('I',0))
>>> model.edges()
[(('D', 1), ('I', 1)), (('D', 0), ('I', 0))]
"""
try:
            if len(start) != 2 or len(end) != 2:
raise ValueError('Nodes must be of type (node, time_slice).')
elif not isinstance(start[1], int) or not isinstance(end[1], int):
raise ValueError('Nodes must be of type (node, time_slice).')
elif start[1] == end[1]:
start = (start[0], 0)
end = (end[0], 0)
elif start[1] == end[1] - 1:
start = (start[0], 0)
end = (end[0], 1)
elif start[1] > end[1]:
raise NotImplementedError('Edges in backward direction are not allowed.')
elif start[1] != end[1]:
raise ValueError("Edges over multiple time slices is not currently supported")
except TypeError:
raise ValueError('Nodes must be of type (node, time_slice).')
if start == end:
raise ValueError('Self Loops are not allowed')
elif start in super(DynamicBayesianNetwork, self).nodes() and end \
in super(DynamicBayesianNetwork, self).nodes() and \
nx.has_path(self, end, start):
raise ValueError(
'Loops are not allowed. Adding the edge from ({start} --> {end}) forms a loop.'.format(
start=str(start), end=str(end)))
super(DynamicBayesianNetwork, self).add_edge(start, end, **kwargs)
if start[1] == end[1]:
super(DynamicBayesianNetwork, self).add_edge((start[0], 1 - start[1]), (end[0], 1 - end[1]))
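    # Descriptive note on the normalization above (no behavioural change): an
    # intra-slice edge given in either time slice is stored in slice 0 and then
    # mirrored into slice 1, so add_edge(('D', 1), ('G', 1)) yields both
    # (('D', 0), ('G', 0)) and (('D', 1), ('G', 1)); an inter-slice edge such as
    # (('D', 0), ('D', 1)) is stored only once.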
def add_edges_from(self, ebunch, **kwargs):
"""
Add all the edges in ebunch.
If nodes referred in the ebunch are not already present, they
will be automatically added. Node names can be any hashable python object.
Parameters
----------
ebunch : list, array-like
List of edges to add. Each edge must be of the form of
((start, time_slice), (end, time_slice)).
Examples
--------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> dbn = DBN()
>>> dbn.add_edges_from([(('D', 0), ('G', 0)), (('I', 0), ('G', 0))])
>>> dbn.nodes()
['G', 'I', 'D']
>>> dbn.edges()
[(('D', 1), ('G', 1)),
(('I', 1), ('G', 1)),
(('D', 0), ('G', 0)),
(('I', 0), ('G', 0))]
"""
for edge in ebunch:
self.add_edge(edge[0], edge[1])
def get_intra_edges(self, time_slice=0):
"""
Returns the intra slice edges present in the 2-TBN.
Parameter
---------
time_slice: int (whole number)
The time slice for which to get intra edges. The timeslice
should be a positive value or zero.
Examples:
-------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> dbn = DBN()
>>> dbn.add_nodes_from(['D', 'G', 'I', 'S', 'L'])
>>> dbn.add_edges_from([(('D', 0), ('G', 0)), (('I', 0), ('G', 0)),
... (('G', 0), ('L', 0)), (('D', 0), ('D', 1)),
... (('I', 0), ('I', 1)), (('G', 0), ('G', 1)),
... (('G', 0), ('L', 1)), (('L', 0), ('L', 1))])
>>> dbn.get_intra_edges()
        [(('D', 0), ('G', 0)), (('G', 0), ('L', 0)), (('I', 0), ('G', 0))]
"""
if not isinstance(time_slice, int) or time_slice < 0:
raise ValueError("The timeslice should be a positive value greater than or equal to zero")
return [tuple((x[0], time_slice) for x in edge) for edge in self.edges() if edge[0][1] == edge[1][1] == 0]
def get_inter_edges(self):
"""
Returns the inter-slice edges present in the 2-TBN.
Examples:
-------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> dbn = DBN()
>>> dbn.add_edges_from([(('D', 0), ('G', 0)), (('I', 0), ('G', 0)),
... (('G', 0), ('L', 0)), (('D', 0), ('D', 1)),
... (('I', 0), ('I', 1)), (('G', 0), ('G', 1)),
... (('G', 0), ('L', 1)), (('L', 0), ('L', 1))])
>>> dbn.get_inter_edges()
[(('D', 0), ('D', 1)),
(('G', 0), ('G', 1)),
(('G', 0), ('L', 1)),
(('I', 0), ('I', 1)),
(('L', 0), ('L', 1))]
"""
return [edge for edge in self.edges() if edge[0][1] != edge[1][1]]
def get_interface_nodes(self, time_slice=0):
"""
        Returns the nodes in the first timeslice whose children are present in the second timeslice.
Parameter
---------
time_slice:int
The timeslice should be a positive value greater than or equal to zero
Examples:
-------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> dbn = DBN()
>>> dbn.add_nodes_from(['D', 'G', 'I', 'S', 'L'])
>>> dbn.add_edges_from([(('D',0),('G',0)),(('I',0),('G',0)),(('G',0),('L',0)),(('D',0),('D',1))])
>>> dbn.get_interface_nodes()
[('D', 0)]
"""
if not isinstance(time_slice, int) or time_slice < 0:
raise ValueError("The timeslice should be a positive value greater than or equal to zero")
return [(edge[0][0], time_slice) for edge in self.get_inter_edges()]
def get_slice_nodes(self, time_slice=0):
"""
Returns the nodes present in a particular timeslice
Parameter
---------
time_slice:int
The timeslice should be a positive value greater than or equal to zero
Examples:
-------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> dbn = DBN()
>>> dbn.add_nodes_from(['D', 'G', 'I', 'S', 'L'])
>>> dbn.add_edges_from([(('D', 0),('G', 0)),(('I', 0),('G', 0)),(('G', 0),('L', 0)),(('D', 0),('D', 1))])
>>> dbn.get_slice_nodes()
"""
if not isinstance(time_slice, int) or time_slice < 0:
raise ValueError("The timeslice should be a positive value greater than or equal to zero")
return [(node, time_slice) for node in self.nodes()]
def add_cpds(self, *cpds):
"""
This method adds the cpds to the dynamic bayesian network.
Note that while adding variables and the evidence in cpd,
they have to be of the following form
(node_name, time_slice)
Here, node_name is the node that is inserted
while the time_slice is an integer value, which denotes
the index of the time_slice that the node belongs to.
Parameter
---------
cpds : list, set, tuple (array-like)
List of CPDs which are to be associated with the model. Each CPD
should be an instance of `TabularCPD`.
Examples:
-------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> from pgmpy.factors import TabularCPD
>>> dbn = DBN()
>>> dbn.add_edges_from([(('D', 0),('G', 0)),(('I', 0),('G', 0)),(('D', 0),('D', 1)),(('I', 0),('I', 1))])
>>> grade_cpd = TabularCPD(('G', 0), 3, [[0.3, 0.05, 0.9, 0.5],
... [0.4, 0.25, 0.8, 0.03],
... [0.3, 0.7, 0.02, 0.2]],
... evidence=[('I', 0),('D', 0)],
... evidence_card=[2, 2])
>>> d_i_cpd = TabularCPD(('D',1), 2, [[0.6, 0.3],
... [0.4, 0.7]],
... evidence=[('D',0)],
... evidence_card=2)
>>> diff_cpd = TabularCPD(('D', 0), 2, [[0.6, 0.4]])
>>> intel_cpd = TabularCPD(('I', 0), 2, [[0.7, 0.3]])
>>> i_i_cpd = TabularCPD(('I', 1), 2, [[0.5, 0.4],
... [0.5, 0.6]],
... evidence=[('I', 0)],
... evidence_card=2)
>>> dbn.add_cpds(grade_cpd, d_i_cpd, diff_cpd, intel_cpd, i_i_cpd)
>>> dbn.get_cpds()
[<TabularCPD representing P(('G', 0):3 | ('I', 0):2, ('D', 0):2) at 0x7ff7f27b0cf8>,
<TabularCPD representing P(('D', 1):2 | ('D', 0):2) at 0x7ff810b9c2e8>,
<TabularCPD representing P(('D', 0):2) at 0x7ff7f27e6f98>,
<TabularCPD representing P(('I', 0):2) at 0x7ff7f27e6ba8>,
<TabularCPD representing P(('I', 1):2 | ('I', 0):2) at 0x7ff7f27e6668>]
"""
for cpd in cpds:
if not isinstance(cpd, TabularCPD):
raise ValueError('cpd should be an instance of TabularCPD')
if set(cpd.variables) - set(cpd.variables).intersection(set(
super(DynamicBayesianNetwork, self).nodes())):
raise ValueError('CPD defined on variable not in the model', cpd)
self.cpds.extend(cpds)
def get_cpds(self, node=None, time_slice=0):
"""
Returns the CPDs that have been associated with the network.
Parameter
---------
node: tuple (node_name, time_slice)
The node should be in the following form (node_name, time_slice).
Here, node_name is the node that is inserted while the time_slice is
an integer value, which denotes the index of the time_slice that the
node belongs to.
time_slice: int
The time_slice should be a positive integer greater than or equal to zero.
Examples:
-------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> from pgmpy.factors import TabularCPD
>>> dbn = DBN()
>>> dbn.add_edges_from([(('D',0),('G',0)),(('I',0),('G',0)),(('D',0),('D',1)),(('I',0),('I',1))])
>>> grade_cpd = TabularCPD(('G',0), 3, [[0.3,0.05,0.9,0.5],
... [0.4,0.25,0.8,0.03],
... [0.3,0.7,0.02,0.2]], [('I', 0),('D', 0)],[2,2])
>>> dbn.add_cpds(grade_cpd)
>>> dbn.get_cpds()
"""
# TODO: fix bugs in this
if node:
if node not in super(DynamicBayesianNetwork, self).nodes():
raise ValueError('Node not present in the model.')
else:
for cpd in self.cpds:
if cpd.variable == node:
return cpd
else:
return [cpd for cpd in self.cpds if set(list(cpd.variables)).issubset(self.get_slice_nodes(time_slice))]
def remove_cpds(self, *cpds):
"""
Removes the cpds that are provided in the argument.
Parameters
----------
*cpds : list, set, tuple (array-like)
List of CPDs which are to be associated with the model. Each CPD
should be an instance of `TabularCPD`.
Examples
--------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> from pgmpy.factors import TabularCPD
>>> dbn = DBN()
>>> dbn.add_edges_from([(('D',0),('G',0)),(('I',0),('G',0)),(('D',0),('D',1)),(('I',0),('I',1))])
>>> grade_cpd = TabularCPD(('G',0), 3, [[0.3,0.05,0.9,0.5],
... [0.4,0.25,0.8,0.03],
... [0.3,0.7,0.02,0.2]], [('I', 0),('D', 0)],[2,2])
>>> dbn.add_cpds(grade_cpd)
>>> dbn.get_cpds()
[<TabularCPD representing P(('G', 0):3 | ('I', 0):2, ('D', 0):2) at 0x3348ab0>]
>>> dbn.remove_cpds(grade_cpd)
>>> dbn.get_cpds()
[]
"""
for cpd in cpds:
if isinstance(cpd, (tuple, list)):
cpd = self.get_cpds(cpd)
self.cpds.remove(cpd)
def check_model(self):
"""
Check the model for various errors. This method checks for the following
errors.
* Checks if the sum of the probabilities in each associated CPD for each
state is equal to 1 (tol=0.01).
* Checks if the CPDs associated with nodes are consistent with their parents.
Returns
-------
        boolean: True if everything seems to be in order. Otherwise raises an error
according to the problem.
"""
for node in super(DynamicBayesianNetwork, self).nodes():
cpd = self.get_cpds(node=node)
if isinstance(cpd, TabularCPD):
evidence = cpd.evidence
parents = self.get_parents(node)
if set(evidence if evidence else []) != set(parents if parents else []):
raise ValueError("CPD associated with {node} doesn't have "
"proper parents associated with it.".format(node=node))
if not np.allclose(cpd.to_factor().marginalize([node], inplace=False).values.flatten('C'),
np.ones(np.product(cpd.evidence_card)),
atol=0.01):
raise ValueError('Sum of probabilities of states for node {node}'
' is not equal to 1'.format(node=node))
return True
def initialize_initial_state(self):
"""
        This method will automatically re-adjust the cpds and the edges added to the bayesian network.
        If an edge is added as an intra-time-slice edge in the 0th timeslice, this method will
        automatically add it in the 1st timeslice. It will also add the cpds. However, to call this
        method, one needs to add the cpds as well as the edges in the bayesian network of the whole
        skeleton, including the 0th and the 1st timeslices.
Examples:
-------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> from pgmpy.factors import TabularCPD
>>> student = DBN()
>>> student.add_nodes_from(['D', 'G', 'I', 'S', 'L'])
>>> student.add_edges_from([(('D', 0),('G', 0)),(('I', 0),('G', 0)),(('D', 0),('D', 1)),(('I', 0),('I', 1))])
>>> grade_cpd = TabularCPD(('G', 0), 3, [[0.3, 0.05, 0.9, 0.5],
... [0.4, 0.25, 0.8, 0.03],
... [0.3, 0.7, 0.02, 0.2]],
... evidence=[('I', 0),('D', 0)],
... evidence_card=[2, 2])
>>> d_i_cpd = TabularCPD(('D', 1), 2, [[0.6, 0.3],
... [0.4, 0.7]],
... evidence=[('D', 0)],
... evidence_card=2)
>>> diff_cpd = TabularCPD(('D', 0), 2, [[0.6, 0.4]])
>>> intel_cpd = TabularCPD(('I',0), 2, [[0.7, 0.3]])
>>> i_i_cpd = TabularCPD(('I', 1), 2, [[0.5, 0.4],
... [0.5, 0.6]],
... evidence=[('I', 0)],
... evidence_card=2)
>>> student.add_cpds(grade_cpd, d_i_cpd, diff_cpd, intel_cpd, i_i_cpd)
>>> student.initialize_initial_state()
"""
for cpd in self.cpds:
temp_var = (cpd.variable[0], 1 - cpd.variable[1])
parents = self.get_parents(temp_var)
if not any(x.variable == temp_var for x in self.cpds):
if all(x[1] == parents[0][1] for x in parents):
if parents:
new_cpd = TabularCPD(temp_var, cpd.variable_card,
cpd.values.reshape(cpd.variable_card, np.prod(cpd.evidence_card)),
parents, cpd.evidence_card)
else:
new_cpd = TabularCPD(temp_var, cpd.variable_card, np.split(cpd.values, cpd.variable_card))
self.add_cpds(new_cpd)
self.check_model()
def moralize(self):
"""
Removes all the immoralities in the Network and creates a moral
graph (UndirectedGraph).
A v-structure X->Z<-Y is an immorality if there is no directed edge
between X and Y.
Examples
--------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> dbn = DBN([(('D',0), ('G',0)), (('I',0), ('G',0))])
>>> moral_graph = dbn.moralize()
>>> moral_graph.edges()
[(('G', 0), ('I', 0)),
(('G', 0), ('D', 0)),
(('D', 1), ('I', 1)),
(('D', 1), ('G', 1)),
(('I', 0), ('D', 0)),
(('G', 1), ('I', 1))]
"""
moral_graph = self.to_undirected()
for node in super(DynamicBayesianNetwork, self).nodes():
moral_graph.add_edges_from(combinations(
self.get_parents(node), 2))
return moral_graph
| 41.365138
| 117
| 0.490596
|
62b12507ed3710e5cf4c8c7dc898a98d1d5aae9d
| 96,809
|
py
|
Python
|
test/functional/p2p_segwit.py
|
Altcoin-Cash/bitcoinV
|
bbb0ded0ea81e8a418c439fb32ac655713d58795
|
[
"MIT"
] | 122
|
2019-05-08T22:15:42.000Z
|
2022-03-18T08:12:48.000Z
|
test/functional/p2p_segwit.py
|
Altcoin-Cash/bitcoinV
|
bbb0ded0ea81e8a418c439fb32ac655713d58795
|
[
"MIT"
] | 91
|
2016-03-02T12:24:46.000Z
|
2021-02-20T13:45:05.000Z
|
test/functional/p2p_segwit.py
|
Altcoin-Cash/bitcoinV
|
bbb0ded0ea81e8a418c439fb32ac655713d58795
|
[
"MIT"
] | 58
|
2019-05-24T10:27:19.000Z
|
2022-03-10T20:55:29.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2016-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test segwit transactions and blocks on P2P network."""
from binascii import hexlify
import math
import random
import struct
import time
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment, get_witness_script, WITNESS_COMMITMENT_HEADER
from test_framework.key import ECKey
from test_framework.messages import (
BIP125_SEQUENCE_NUMBER,
CBlock,
CBlockHeader,
CInv,
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
CTxWitness,
MAX_BLOCK_BASE_SIZE,
MSG_WITNESS_FLAG,
NODE_NETWORK,
NODE_WITNESS,
msg_block,
msg_getdata,
msg_headers,
msg_inv,
msg_tx,
msg_witness_block,
msg_witness_tx,
ser_uint256,
ser_vector,
sha256,
uint256_from_str,
FromHex,
)
from test_framework.mininode import (
P2PInterface,
mininode_lock,
)
from test_framework.script import (
CScript,
CScriptNum,
CScriptOp,
MAX_SCRIPT_ELEMENT_SIZE,
OP_0,
OP_1,
OP_16,
OP_2DROP,
OP_CHECKMULTISIG,
OP_CHECKSIG,
OP_DROP,
OP_DUP,
OP_ELSE,
OP_ENDIF,
OP_EQUAL,
OP_EQUALVERIFY,
OP_HASH160,
OP_IF,
OP_RETURN,
OP_TRUE,
SIGHASH_ALL,
SIGHASH_ANYONECANPAY,
SIGHASH_NONE,
SIGHASH_SINGLE,
SegwitVersion1SignatureHash,
SignatureHash,
hash160,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
bytes_to_hex_str,
connect_nodes,
disconnect_nodes,
get_bip9_status,
hex_str_to_bytes,
sync_blocks,
sync_mempools,
assert_raises_rpc_error,
)
# The versionbit bit used to signal activation of SegWit
VB_WITNESS_BIT = 1
VB_PERIOD = 144
VB_TOP_BITS = 0x20000000
MAX_SIGOP_COST = 80000
class UTXO():
"""Used to keep track of anyone-can-spend outputs that we can use in the tests."""
def __init__(self, sha256, n, value):
self.sha256 = sha256
self.n = n
self.nValue = value
def get_p2pkh_script(pubkeyhash):
"""Get the script associated with a P2PKH."""
return CScript([CScriptOp(OP_DUP), CScriptOp(OP_HASH160), pubkeyhash, CScriptOp(OP_EQUALVERIFY), CScriptOp(OP_CHECKSIG)])
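# For reference (standard template, not additional behaviour): the script built
# above is the canonical P2PKH form
#     OP_DUP OP_HASH160 <pubkeyhash> OP_EQUALVERIFY OP_CHECKSIG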
def sign_p2pk_witness_input(script, tx_to, in_idx, hashtype, value, key):
"""Add signature for a P2PK witness program."""
tx_hash = SegwitVersion1SignatureHash(script, tx_to, in_idx, hashtype, value)
signature = key.sign_ecdsa(tx_hash) + chr(hashtype).encode('latin-1')
tx_to.wit.vtxinwit[in_idx].scriptWitness.stack = [signature, script]
tx_to.rehash()
def get_virtual_size(witness_block):
"""Calculate the virtual size of a witness block.
Virtual size is base + witness/4."""
base_size = len(witness_block.serialize(with_witness=False))
total_size = len(witness_block.serialize(with_witness=True))
# the "+3" is so we round up
vsize = int((3 * base_size + total_size + 3) / 4)
return vsize
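# Worked example for the rounding above (hypothetical sizes): with base_size =
# 1000 and total_size = 1400, vsize = int((3 * 1000 + 1400 + 3) / 4)
# = int(4403 / 4) = 1100.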
def test_transaction_acceptance(node, p2p, tx, with_witness, accepted, reason=None):
"""Send a transaction to the node and check that it's accepted to the mempool
- Submit the transaction over the p2p interface
- use the getrawmempool rpc to check for acceptance."""
reason = [reason] if reason else []
with node.assert_debug_log(expected_msgs=reason):
p2p.send_message(msg_witness_tx(tx) if with_witness else msg_tx(tx))
p2p.sync_with_ping()
assert_equal(tx.hash in node.getrawmempool(), accepted)
def test_witness_block(node, p2p, block, accepted, with_witness=True, reason=None):
"""Send a block to the node and check that it's accepted
- Submit the block over the p2p interface
- use the getbestblockhash rpc to check for acceptance."""
reason = [reason] if reason else []
with node.assert_debug_log(expected_msgs=reason):
p2p.send_message(msg_witness_block(block) if with_witness else msg_block(block))
p2p.sync_with_ping()
assert_equal(node.getbestblockhash() == block.hash, accepted)
class TestP2PConn(P2PInterface):
def __init__(self):
super().__init__()
self.getdataset = set()
def on_getdata(self, message):
for inv in message.inv:
self.getdataset.add(inv.hash)
def announce_tx_and_wait_for_getdata(self, tx, timeout=60, success=True):
with mininode_lock:
self.last_message.pop("getdata", None)
self.send_message(msg_inv(inv=[CInv(1, tx.sha256)]))
if success:
self.wait_for_getdata(timeout)
else:
time.sleep(timeout)
assert not self.last_message.get("getdata")
def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60):
with mininode_lock:
self.last_message.pop("getdata", None)
self.last_message.pop("getheaders", None)
msg = msg_headers()
msg.headers = [CBlockHeader(block)]
if use_header:
self.send_message(msg)
else:
self.send_message(msg_inv(inv=[CInv(2, block.sha256)]))
self.wait_for_getheaders()
self.send_message(msg)
self.wait_for_getdata()
def request_block(self, blockhash, inv_type, timeout=60):
with mininode_lock:
self.last_message.pop("block", None)
self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)]))
self.wait_for_block(blockhash, timeout)
return self.last_message["block"].block
class SegWitTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
# This test tests SegWit both pre and post-activation, so use the normal BIP9 activation.
self.extra_args = [["-whitelist=127.0.0.1", "-vbparams=segwit:0:999999999999"], ["-whitelist=127.0.0.1", "-acceptnonstdtxn=0", "-vbparams=segwit:0:999999999999"], ["-whitelist=127.0.0.1", "-vbparams=segwit:0:0"]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[0], 2)
self.sync_all()
# Helper functions
def build_next_block(self, version=4):
"""Build a block on top of node0's tip."""
tip = self.nodes[0].getbestblockhash()
height = self.nodes[0].getblockcount() + 1
block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1
block = create_block(int(tip, 16), create_coinbase(height), block_time)
block.nVersion = version
block.rehash()
return block
def update_witness_block_with_transactions(self, block, tx_list, nonce=0):
"""Add list of transactions to block, adds witness commitment, then solves."""
block.vtx.extend(tx_list)
add_witness_commitment(block, nonce)
block.solve()
def run_test(self):
# Setup the p2p connections
# self.test_node sets NODE_WITNESS|NODE_NETWORK
self.test_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS)
# self.old_node sets only NODE_NETWORK
self.old_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK)
# self.std_node is for testing node1 (fRequireStandard=true)
self.std_node = self.nodes[1].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS)
assert self.test_node.nServices & NODE_WITNESS != 0
# Keep a place to store utxo's that can be used in later tests
self.utxo = []
# Segwit status 'defined'
self.segwit_status = 'defined'
self.test_non_witness_transaction()
self.test_unnecessary_witness_before_segwit_activation()
self.test_v0_outputs_arent_spendable()
self.test_block_relay()
self.advance_to_segwit_started()
# Segwit status 'started'
self.test_getblocktemplate_before_lockin()
self.advance_to_segwit_lockin()
# Segwit status 'locked_in'
self.test_unnecessary_witness_before_segwit_activation()
self.test_witness_tx_relay_before_segwit_activation()
self.test_block_relay()
self.test_standardness_v0()
self.advance_to_segwit_active()
# Segwit status 'active'
self.test_p2sh_witness()
self.test_witness_commitments()
self.test_block_malleability()
self.test_witness_block_size()
self.test_submit_block()
self.test_extra_witness_data()
self.test_max_witness_push_length()
self.test_max_witness_program_length()
self.test_witness_input_length()
self.test_block_relay()
self.test_tx_relay_after_segwit_activation()
self.test_standardness_v0()
self.test_segwit_versions()
self.test_premature_coinbase_witness_spend()
self.test_uncompressed_pubkey()
self.test_signature_version_1()
self.test_non_standard_witness_blinding()
self.test_non_standard_witness()
self.test_upgrade_after_activation()
self.test_witness_sigops()
self.test_superfluous_witness()
# Individual tests
def subtest(func): # noqa: N805
"""Wraps the subtests for logging and state assertions."""
def func_wrapper(self, *args, **kwargs):
self.log.info("Subtest: {} (Segwit status = {})".format(func.__name__, self.segwit_status))
# Assert segwit status is as expected
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], self.segwit_status)
func(self, *args, **kwargs)
# Each subtest should leave some utxos for the next subtest
assert self.utxo
sync_blocks(self.nodes)
# Assert segwit status is as expected at end of subtest
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], self.segwit_status)
return func_wrapper
@subtest
def test_non_witness_transaction(self):
"""See if sending a regular transaction works, and create a utxo to use in later tests."""
# Mine a block with an anyone-can-spend coinbase,
# let it mature, then try to spend it.
block = self.build_next_block(version=1)
block.solve()
self.test_node.send_message(msg_block(block))
self.test_node.sync_with_ping() # make sure the block was processed
txid = block.vtx[0].sha256
self.nodes[0].generate(99) # let the block mature
# Create a transaction that spends the coinbase
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(txid, 0), b""))
tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.calc_sha256()
# Check that serializing it with or without witness is the same
# This is a sanity check of our testing framework.
assert_equal(msg_tx(tx).serialize(), msg_witness_tx(tx).serialize())
self.test_node.send_message(msg_witness_tx(tx))
self.test_node.sync_with_ping() # make sure the tx was processed
assert(tx.hash in self.nodes[0].getrawmempool())
# Save this transaction for later
self.utxo.append(UTXO(tx.sha256, 0, 49 * 100000000))
self.nodes[0].generate(1)
@subtest
def test_unnecessary_witness_before_segwit_activation(self):
"""Verify that blocks with witnesses are rejected before activation."""
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)])]
# Verify the hash with witness differs from the txid
# (otherwise our testing framework must be broken!)
tx.rehash()
assert(tx.sha256 != tx.calc_sha256(with_witness=True))
# Construct a segwit-signaling block that includes the transaction.
block = self.build_next_block(version=(VB_TOP_BITS | (1 << VB_WITNESS_BIT)))
self.update_witness_block_with_transactions(block, [tx])
# Sending witness data before activation is not allowed (anti-spam
# rule).
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, reason='unexpected-witness')
# But it should not be permanently marked bad...
# Resend without witness information.
self.test_node.send_message(msg_block(block))
self.test_node.sync_with_ping()
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
self.utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
@subtest
def test_block_relay(self):
"""Test that block requests to NODE_WITNESS peer are with MSG_WITNESS_FLAG.
This is true regardless of segwit activation.
Also test that we don't ask for blocks from unupgraded peers."""
blocktype = 2 | MSG_WITNESS_FLAG
# test_node has set NODE_WITNESS, so all getdata requests should be for
# witness blocks.
# Test announcing a block via inv results in a getdata, and that
# announcing a version 4 or random VB block with a header results in a getdata
block1 = self.build_next_block()
block1.solve()
self.test_node.announce_block_and_wait_for_getdata(block1, use_header=False)
assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
test_witness_block(self.nodes[0], self.test_node, block1, True)
block2 = self.build_next_block(version=4)
block2.solve()
self.test_node.announce_block_and_wait_for_getdata(block2, use_header=True)
assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
test_witness_block(self.nodes[0], self.test_node, block2, True)
block3 = self.build_next_block(version=(VB_TOP_BITS | (1 << 15)))
block3.solve()
self.test_node.announce_block_and_wait_for_getdata(block3, use_header=True)
assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
test_witness_block(self.nodes[0], self.test_node, block3, True)
# Check that we can getdata for witness blocks or regular blocks,
# and the right thing happens.
if self.segwit_status != 'active':
# Before activation, we should be able to request old blocks with
# or without witness, and they should be the same.
chain_height = self.nodes[0].getblockcount()
# Pick 10 random blocks on main chain, and verify that getdata's
# for MSG_BLOCK, MSG_WITNESS_BLOCK, and rpc getblock() are equal.
all_heights = list(range(chain_height + 1))
random.shuffle(all_heights)
all_heights = all_heights[0:10]
for height in all_heights:
block_hash = self.nodes[0].getblockhash(height)
rpc_block = self.nodes[0].getblock(block_hash, False)
block_hash = int(block_hash, 16)
block = self.test_node.request_block(block_hash, 2)
wit_block = self.test_node.request_block(block_hash, 2 | MSG_WITNESS_FLAG)
assert_equal(block.serialize(True), wit_block.serialize(True))
assert_equal(block.serialize(), hex_str_to_bytes(rpc_block))
else:
# After activation, witness blocks and non-witness blocks should
# be different. Verify rpc getblock() returns witness blocks, while
# getdata respects the requested type.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [])
# This gives us a witness commitment.
assert(len(block.vtx[0].wit.vtxinwit) == 1)
assert(len(block.vtx[0].wit.vtxinwit[0].scriptWitness.stack) == 1)
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now try to retrieve it...
rpc_block = self.nodes[0].getblock(block.hash, False)
non_wit_block = self.test_node.request_block(block.sha256, 2)
wit_block = self.test_node.request_block(block.sha256, 2 | MSG_WITNESS_FLAG)
assert_equal(wit_block.serialize(True), hex_str_to_bytes(rpc_block))
assert_equal(wit_block.serialize(False), non_wit_block.serialize())
assert_equal(wit_block.serialize(True), block.serialize(True))
# Test size, vsize, weight
rpc_details = self.nodes[0].getblock(block.hash, True)
assert_equal(rpc_details["size"], len(block.serialize(True)))
assert_equal(rpc_details["strippedsize"], len(block.serialize(False)))
weight = 3 * len(block.serialize(False)) + len(block.serialize(True))
assert_equal(rpc_details["weight"], weight)
        # Upgraded node should not ask for blocks from unupgraded peers
block4 = self.build_next_block(version=4)
block4.solve()
self.old_node.getdataset = set()
# Blocks can be requested via direct-fetch (immediately upon processing the announcement)
# or via parallel download (with an indeterminate delay from processing the announcement)
# so to test that a block is NOT requested, we could guess a time period to sleep for,
# and then check. We can avoid the sleep() by taking advantage of transaction getdata's
# being processed after block getdata's, and announce a transaction as well,
# and then check to see if that particular getdata has been received.
# Since 0.14, inv's will only be responded to with a getheaders, so send a header
# to announce this block.
msg = msg_headers()
msg.headers = [CBlockHeader(block4)]
self.old_node.send_message(msg)
self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0])
assert(block4.sha256 not in self.old_node.getdataset)
@subtest
def test_v0_outputs_arent_spendable(self):
"""Test that v0 outputs aren't spendable before segwit activation.
~6 months after segwit activation, the SCRIPT_VERIFY_WITNESS flag was
backdated so that it applies to all blocks, going back to the genesis
block.
Consequently, version 0 witness outputs are never spendable without
witness, and so can't be spent before segwit activation (the point at which
blocks are permitted to contain witnesses)."""
# node2 doesn't need to be connected for this test.
# (If it's connected, node0 may propagate an invalid block to it over
# compact blocks and the nodes would have inconsistent tips.)
disconnect_nodes(self.nodes[0], 2)
# Create two outputs, a p2wsh and p2sh-p2wsh
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
p2sh_pubkey = hash160(script_pubkey)
p2sh_script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
value = self.utxo[0].nValue // 3
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b'')]
tx.vout = [CTxOut(value, script_pubkey), CTxOut(value, p2sh_script_pubkey)]
tx.vout.append(CTxOut(value, CScript([OP_TRUE])))
tx.rehash()
txid = tx.sha256
# Add it to a block
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
# Verify that segwit isn't activated. A block serialized with witness
# should be rejected prior to activation.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=True, reason='unexpected-witness')
# Now send the block without witness. It should be accepted
test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=False)
# Now try to spend the outputs. This should fail since SCRIPT_VERIFY_WITNESS is always enabled.
p2wsh_tx = CTransaction()
p2wsh_tx.vin = [CTxIn(COutPoint(txid, 0), b'')]
p2wsh_tx.vout = [CTxOut(value, CScript([OP_TRUE]))]
p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2wsh_tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
p2wsh_tx.rehash()
p2sh_p2wsh_tx = CTransaction()
p2sh_p2wsh_tx.vin = [CTxIn(COutPoint(txid, 1), CScript([script_pubkey]))]
p2sh_p2wsh_tx.vout = [CTxOut(value, CScript([OP_TRUE]))]
p2sh_p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2sh_p2wsh_tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
p2sh_p2wsh_tx.rehash()
for tx in [p2wsh_tx, p2sh_p2wsh_tx]:
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
# When the block is serialized with a witness, the block will be rejected because witness
# data isn't allowed in blocks that don't commit to witness data.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=True, reason='unexpected-witness')
# When the block is serialized without witness, validation fails because the transaction is
# invalid (transactions are always validated with SCRIPT_VERIFY_WITNESS so a segwit v0 transaction
# without a witness is invalid).
# Note: The reject reason for this failure could be
# 'block-validation-failed' (if script check threads > 1) or
# 'non-mandatory-script-verify-flag (Witness program was passed an
# empty witness)' (otherwise).
# TODO: support multiple acceptable reject reasons.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=False)
connect_nodes(self.nodes[0], 2)
self.utxo.pop(0)
self.utxo.append(UTXO(txid, 2, value))
@subtest
def advance_to_segwit_started(self):
"""Mine enough blocks for segwit's vb state to be 'started'."""
height = self.nodes[0].getblockcount()
# Will need to rewrite the tests here if we are past the first period
assert(height < VB_PERIOD - 1)
# Advance to end of period, status should now be 'started'
self.nodes[0].generate(VB_PERIOD - height - 1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
self.segwit_status = 'started'
@subtest
def test_getblocktemplate_before_lockin(self):
txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16)
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate({"rules": ["segwit"]})
block_version = gbt_results['version']
if node == self.nodes[2]:
# If this is a non-segwit node, we should not get a witness
# commitment, nor a version bit signalling segwit.
assert_equal(block_version & (1 << VB_WITNESS_BIT), 0)
assert('default_witness_commitment' not in gbt_results)
else:
# For segwit-aware nodes, check the version bit and the witness
# commitment are correct.
assert(block_version & (1 << VB_WITNESS_BIT) != 0)
assert('default_witness_commitment' in gbt_results)
witness_commitment = gbt_results['default_witness_commitment']
# Check that default_witness_commitment is present.
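# Per BIP141 the commitment is built from the witness merkle root (with the
# coinbase's wtxid counted as all zeros), hash256'd together with the 32-byte
# witness reserved value (0 here), and placed in an OP_RETURN output whose
# pushed data starts with the 4-byte header 0xaa21a9ed. get_witness_script()
# reproduces exactly that scriptPubKey for comparison.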
witness_root = CBlock.get_merkle_root([ser_uint256(0),
ser_uint256(txid)])
script = get_witness_script(witness_root, 0)
assert_equal(witness_commitment, bytes_to_hex_str(script))
@subtest
def advance_to_segwit_lockin(self):
"""Mine enough blocks to lock in segwit, but don't activate."""
height = self.nodes[0].getblockcount()
# Advance to end of period, and verify lock-in happens at the end
self.nodes[0].generate(VB_PERIOD - 1)
height = self.nodes[0].getblockcount()
assert((height % VB_PERIOD) == VB_PERIOD - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
self.segwit_status = 'locked_in'
@subtest
def test_witness_tx_relay_before_segwit_activation(self):
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected for premature-witness, but should
# not be added to the recently rejected list.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [b'a']
tx.rehash()
tx_hash = tx.sha256
tx_value = tx.vout[0].nValue
# Verify that if a peer doesn't set nServices to include NODE_WITNESS,
# the getdata is just for the non-witness portion.
self.old_node.announce_tx_and_wait_for_getdata(tx)
assert(self.old_node.last_message["getdata"].inv[0].type == 1)
# Since we haven't delivered the tx yet, inv'ing the same tx from
# a witness transaction ought not result in a getdata.
self.test_node.announce_tx_and_wait_for_getdata(tx, timeout=2, success=False)
# Delivering this transaction with witness should fail (no matter who
# it's from)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
test_transaction_acceptance(self.nodes[0], self.old_node, tx, with_witness=True, accepted=False)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=False)
# But eliminating the witness should fix it
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
# Cleanup: mine the first transaction and update utxo
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx_hash, 0, tx_value))
@subtest
def test_standardness_v0(self):
"""Test V0 txout standardness.
V0 segwit outputs and inputs are always standard.
V0 segwit inputs may only be mined after activation, not before."""
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
p2sh_pubkey = hash160(witness_program)
p2sh_script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# First prepare a p2sh output (so that spending it will pass standardness)
p2sh_tx = CTransaction()
p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
p2sh_tx.vout = [CTxOut(self.utxo[0].nValue - 1000, p2sh_script_pubkey)]
p2sh_tx.rehash()
# Mine it on test_node to create the confirmed output.
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_tx, with_witness=True, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Now test standardness of v0 P2WSH outputs.
# Start by creating a transaction with two outputs.
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx.vout = [CTxOut(p2sh_tx.vout[0].nValue - 10000, script_pubkey)]
tx.vout.append(CTxOut(8000, script_pubkey)) # Might burn this later
tx.vin[0].nSequence = BIP125_SEQUENCE_NUMBER # Just to have the option to bump this tx from the mempool
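# (An nSequence below 0xfffffffe opts the transaction in to BIP125 replace-by-fee.)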
tx.rehash()
# This is always accepted, since the mempool policy is to consider segwit as always active
# and thus allow segwit outputs
test_transaction_acceptance(self.nodes[1], self.std_node, tx, with_witness=True, accepted=True)
# Now create something that looks like a P2PKH output. This won't be spendable.
script_pubkey = CScript([OP_0, hash160(witness_hash)])
tx2 = CTransaction()
# tx was accepted, so we spend the second output.
tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")]
tx2.vout = [CTxOut(7000, script_pubkey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=True)
# Now update self.utxo for later tests.
tx3 = CTransaction()
# tx and tx2 were both accepted. Don't bother trying to reclaim the
# P2PKH output; just send tx's first output back to an anyone-can-spend.
sync_mempools([self.nodes[0], self.nodes[1]])
tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx3.vout = [CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))]
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx3.rehash()
if self.segwit_status != 'active':
# Just check mempool acceptance, but don't add the transaction to the mempool, since witness is disallowed
# in blocks and the tx is impossible to mine right now.
assert_equal(self.nodes[0].testmempoolaccept([bytes_to_hex_str(tx3.serialize_with_witness())]), [{'txid': tx3.hash, 'allowed': True}])
# Create the same output as tx3, but by replacing tx
tx3_out = tx3.vout[0]
tx3 = tx
tx3.vout = [tx3_out]
tx3.rehash()
assert_equal(self.nodes[0].testmempoolaccept([bytes_to_hex_str(tx3.serialize_with_witness())]), [{'txid': tx3.hash, 'allowed': True}])
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
assert_equal(len(self.nodes[1].getrawmempool()), 0)
@subtest
def advance_to_segwit_active(self):
"""Mine enough blocks to activate segwit."""
height = self.nodes[0].getblockcount()
self.nodes[0].generate(VB_PERIOD - (height % VB_PERIOD) - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'active')
self.segwit_status = 'active'
@subtest
def test_p2sh_witness(self):
"""Test P2SH wrapped witness programs."""
# Prepare the p2sh-wrapped witness output
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
p2wsh_pubkey = CScript([OP_0, witness_hash])
p2sh_witness_hash = hash160(p2wsh_pubkey)
script_pubkey = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
script_sig = CScript([p2wsh_pubkey]) # a push of the redeem script
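# For P2SH-P2WSH the scriptSig must contain nothing but this single push of the
# redeem script (the OP_0 <sha256(witness_script)> program); all of the actual
# unlocking data goes into the input's witness instead.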
# Fund the P2SH output
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
# Verify mempool acceptance and block validity
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=True)
sync_blocks(self.nodes)
# Now test attempts to spend the output.
spend_tx = CTransaction()
spend_tx.vin.append(CTxIn(COutPoint(tx.sha256, 0), script_sig))
spend_tx.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
spend_tx.rehash()
# This transaction should not be accepted into the mempool pre- or
# post-segwit. Mempool acceptance will use SCRIPT_VERIFY_WITNESS which
# will require a witness to spend a witness program regardless of
# segwit activation. Note that older bitcoind versions that are not
# segwit-aware would also reject this for failing CLEANSTACK.
with self.nodes[0].assert_debug_log(
expected_msgs=(spend_tx.hash, 'was not accepted: non-mandatory-script-verify-flag (Witness program was passed an empty witness)')):
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False)
# Try to put the witness script in the scriptSig, should also fail.
spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a'])
spend_tx.rehash()
with self.nodes[0].assert_debug_log(
expected_msgs=(spend_tx.hash, 'was not accepted: mandatory-script-verify-flag-failed (Script evaluated without error but finished with a false/empty top stack element)')):
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False)
# Now put the witness script in the witness, should succeed after
# segwit activates.
spend_tx.vin[0].scriptSig = script_sig
spend_tx.rehash()
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [b'a', witness_program]
# Verify mempool acceptance
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=True, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [spend_tx])
# If we're after activation, then sending this with witnesses should be valid.
# This no longer works before activation, because SCRIPT_VERIFY_WITNESS
# is always set.
# TODO: rewrite this test to make clear that it only works after activation.
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update self.utxo
self.utxo.pop(0)
self.utxo.append(UTXO(spend_tx.sha256, 0, spend_tx.vout[0].nValue))
@subtest
def test_witness_commitments(self):
"""Test witness commitments.
This test can only be run after segwit has activated."""
# First try a correct witness commitment.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Test the test -- witness serialization should be different
assert(msg_witness_block(block).serialize() != msg_block(block).serialize())
# This empty block should be valid.
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Try to tweak the nonce
block_2 = self.build_next_block()
add_witness_commitment(block_2, nonce=28)
block_2.solve()
# The commitment should have changed!
assert(block_2.vtx[0].vout[-1] != block.vtx[0].vout[-1])
# This should also be valid.
test_witness_block(self.nodes[0], self.test_node, block_2, accepted=True)
# Now test commitments with actual transactions
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# Let's construct a witness program
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
# tx2 will spend tx, and send back to a regular anyone-can-spend output
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_program))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx, tx2], nonce=1)
# Add an extra OP_RETURN output that matches the witness commitment template,
# even though it has extra data after the incorrect commitment.
# This block should fail.
block_3.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, WITNESS_COMMITMENT_HEADER + ser_uint256(2), 10])))
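# BIP141 treats the *last* output matching the commitment template as the
# commitment, so this bogus output (committing to ser_uint256(2)) shadows the
# valid commitment added above and the block must be rejected.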
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
block_3.solve()
test_witness_block(self.nodes[0], self.test_node, block_3, accepted=False)
# Add a different commitment with different nonce, but in the
# right location, and with some funds burned(!).
# This should succeed (nValue shouldn't affect finding the
# witness commitment).
add_witness_commitment(block_3, nonce=0)
block_3.vtx[0].vout[0].nValue -= 1
block_3.vtx[0].vout[-1].nValue += 1
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
assert(len(block_3.vtx[0].vout) == 4) # 3 OP_returns
block_3.solve()
test_witness_block(self.nodes[0], self.test_node, block_3, accepted=True)
# Finally test that a block with no witness transactions can
# omit the commitment.
block_4 = self.build_next_block()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_program))
tx3.rehash()
block_4.vtx.append(tx3)
block_4.hashMerkleRoot = block_4.calc_merkle_root()
block_4.solve()
test_witness_block(self.nodes[0], self.test_node, block_4, with_witness=False, accepted=True)
# Update available utxo's for use in later test.
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_block_malleability(self):
# Make sure that a block that has too big a virtual size
# because of a too-large coinbase witness is not permanently
# marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a' * 5000000)
assert(get_virtual_size(block) > MAX_BLOCK_BASE_SIZE)
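# Witness bytes count one weight unit each, so 5,000,000 extra witness bytes add
# roughly 1,250,000 vbytes (weight / 4), pushing the block past
# MAX_BLOCK_BASE_SIZE even though its base (stripped) size is unchanged.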
# We can't send over the p2p network, because this is too big to relay
# TODO: repeat this test with a block that can be relayed
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() != block.hash)
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop()
assert(get_virtual_size(block) < MAX_BLOCK_BASE_SIZE)
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() == block.hash)
# Now make sure that malleating the witness reserved value doesn't
# result in a block permanently marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Change the nonce -- this should not cause the block to be permanently
# marked as failed
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(1)]
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Changing the witness reserved value doesn't change the block hash
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)]
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
@subtest
def test_witness_block_size(self):
# TODO: Test that non-witness carrying blocks can't exceed 1MB
# Skipping this test for now; this is covered in p2p-fullblocktest.py
# Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB.
block = self.build_next_block()
assert(len(self.utxo) > 0)
# Create a P2WSH transaction.
# The witness program will be a bunch of OP_2DROP's, followed by OP_TRUE.
# This should give us plenty of room to tweak the spending tx's
# virtual size.
NUM_DROPS = 200 # 201 max ops per script!
NUM_OUTPUTS = 50
witness_program = CScript([OP_2DROP] * NUM_DROPS + [OP_TRUE])
witness_hash = uint256_from_str(sha256(witness_program))
script_pubkey = CScript([OP_0, ser_uint256(witness_hash)])
prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n)
value = self.utxo[0].nValue
parent_tx = CTransaction()
parent_tx.vin.append(CTxIn(prevout, b""))
child_value = int(value / NUM_OUTPUTS)
for i in range(NUM_OUTPUTS):
parent_tx.vout.append(CTxOut(child_value, script_pubkey))
parent_tx.vout[0].nValue -= 50000
assert(parent_tx.vout[0].nValue > 0)
parent_tx.rehash()
child_tx = CTransaction()
for i in range(NUM_OUTPUTS):
child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b""))
child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))]
for i in range(NUM_OUTPUTS):
child_tx.wit.vtxinwit.append(CTxInWitness())
child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a' * 195] * (2 * NUM_DROPS) + [witness_program]
child_tx.rehash()
self.update_witness_block_with_transactions(block, [parent_tx, child_tx])
vsize = get_virtual_size(block)
additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize) * 4
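# vsize = ceil(weight / 4) with weight = 3 * base_size + total_size, so each
# extra witness byte adds exactly one weight unit. (MAX_BLOCK_BASE_SIZE - vsize) * 4
# witness bytes therefore reaches the limit; the loop below adds slightly more
# than that to push vsize to MAX_BLOCK_BASE_SIZE + 1.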
i = 0
while additional_bytes > 0:
# Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1
extra_bytes = min(additional_bytes + 1, 55)
block.vtx[-1].wit.vtxinwit[int(i / (2 * NUM_DROPS))].scriptWitness.stack[i % (2 * NUM_DROPS)] = b'a' * (195 + extra_bytes)
additional_bytes -= extra_bytes
i += 1
block.vtx[0].vout.pop() # Remove old commitment
add_witness_commitment(block)
block.solve()
vsize = get_virtual_size(block)
assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1)
# Make sure that our test case would exceed the old max-network-message
# limit
assert(len(block.serialize(True)) > 2 * 1024 * 1024)
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now resize the second transaction to make the block fit.
cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0])
block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (cur_length - 1)
block.vtx[0].vout.pop()
add_witness_commitment(block)
block.solve()
assert(get_virtual_size(block) == MAX_BLOCK_BASE_SIZE)
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update available utxo's
self.utxo.pop(0)
self.utxo.append(UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue))
@subtest
def test_submit_block(self):
"""Test that submitblock adds the nonce automatically when possible."""
block = self.build_next_block()
# Try using a custom nonce and then don't supply it.
# This can't possibly work.
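# bitcoind only knows how to fill in the default all-zero witness reserved
# value, so a commitment built with a non-zero nonce can't be reconstructed.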
add_witness_commitment(block, nonce=1)
block.vtx[0].wit = CTxWitness() # drop the nonce
block.solve()
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() != block.hash)
# Now redo commitment with the standard nonce, but let bitcoind fill it in.
add_witness_commitment(block, nonce=0)
block.vtx[0].wit = CTxWitness()
block.solve()
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# This time, add a tx with non-empty witness, but don't supply
# the commitment.
block_2 = self.build_next_block()
add_witness_commitment(block_2)
block_2.solve()
# Drop commitment and nonce -- submitblock should not fill in.
block_2.vtx[0].vout.pop()
block_2.vtx[0].wit = CTxWitness()
self.nodes[0].submitblock(bytes_to_hex_str(block_2.serialize(True)))
# Tip should not advance!
assert(self.nodes[0].getbestblockhash() != block_2.hash)
@subtest
def test_extra_witness_data(self):
"""Test extra witness data in a transaction."""
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
# First try extra witness data on a tx that doesn't require a witness
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 2000, script_pubkey))
tx.vout.append(CTxOut(1000, CScript([OP_TRUE]))) # non-witness output
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([])]
tx.rehash()
self.update_witness_block_with_transactions(block, [tx])
# Extra witness data should not be allowed.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Try extra signature data. Ok if we're not spending a witness output.
block.vtx[1].wit.vtxinwit = []
block.vtx[1].vin[0].scriptSig = CScript([OP_0])
block.vtx[1].rehash()
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now try extra witness/signature data on an input that DOES require a
# witness
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) # witness output
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b"")) # non-witness
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()])
tx2.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_program]
tx2.wit.vtxinwit[1].scriptWitness.stack = [CScript([OP_TRUE])]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
# This has extra witness data, so it should fail.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now get rid of the extra witness, but add extra scriptSig data
tx2.vin[0].scriptSig = CScript([OP_TRUE])
tx2.vin[1].scriptSig = CScript([OP_TRUE])
tx2.wit.vtxinwit[0].scriptWitness.stack.pop(0)
tx2.wit.vtxinwit[1].scriptWitness.stack = []
tx2.rehash()
add_witness_commitment(block)
block.solve()
# This has extra signature data for a witness input, so it should fail.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now get rid of the extra scriptsig on the witness input, and verify
# success (even with extra scriptsig data in the non-witness input)
tx2.vin[0].scriptSig = b""
tx2.rehash()
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update utxo for later tests
self.utxo.pop(0)
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_max_witness_push_length(self):
"""Test that witness stack can only allow up to 520 byte pushes."""
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
# First try a 521-byte stack element
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * (MAX_SCRIPT_ELEMENT_SIZE + 1), witness_program]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now reduce the length of the stack element
tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (MAX_SCRIPT_ELEMENT_SIZE)
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update the utxo for later tests
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_max_witness_program_length(self):
"""Test that witness outputs greater than 10kB can't be spent."""
MAX_PROGRAM_LENGTH = 10000
# This program is 19 max pushes (9937 bytes), then 64 more opcode-bytes.
long_witness_program = CScript([b'a' * 520] * 19 + [OP_DROP] * 63 + [OP_TRUE])
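# Size check: 19 pushes of 520 bytes (3-byte PUSHDATA2 prefix + 520 data = 523
# bytes each) come to 9937 bytes; 63 OP_DROPs plus OP_TRUE add 64 more, for a
# total of 10001 bytes = MAX_PROGRAM_LENGTH + 1.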
assert(len(long_witness_program) == MAX_PROGRAM_LENGTH + 1)
long_witness_hash = sha256(long_witness_program)
long_script_pubkey = CScript([OP_0, long_witness_hash])
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, long_script_pubkey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 44 + [long_witness_program]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Try again with one less byte in the witness program
witness_program = CScript([b'a' * 520] * 19 + [OP_DROP] * 62 + [OP_TRUE])
assert(len(witness_program) == MAX_PROGRAM_LENGTH)
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx.vout[0] = CTxOut(tx.vout[0].nValue, script_pubkey)
tx.rehash()
tx2.vin[0].prevout.hash = tx.sha256
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 43 + [witness_program]
tx2.rehash()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_witness_input_length(self):
"""Test that vin length must match vtxinwit length."""
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
# Create a transaction that splits our utxo into many outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
value = self.utxo[0].nValue
for i in range(10):
tx.vout.append(CTxOut(int(value / 10), script_pubkey))
tx.vout[0].nValue -= 1000
assert(tx.vout[0].nValue >= 0)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Try various ways to spend tx that should all break.
# This "broken" transaction serializer will not normalize
# the length of vtxinwit.
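# (BIP144 extended serialization is [nVersion][marker=0x00][flag=0x01][vin]
# [vout][witness][nLockTime]; ser_vector(dummy) below emits the 0x00 marker
# byte. The "broken" part is that self.wit is written out as-is rather than
# being resized to one CTxInWitness per input.)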
class BrokenCTransaction(CTransaction):
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
tx2 = BrokenCTransaction()
for i in range(10):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.vout.append(CTxOut(value - 3000, CScript([OP_TRUE])))
# First try using a too long vtxinwit
for i in range(11):
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_program]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now try using a too short vtxinwit
tx2.wit.vtxinwit.pop()
tx2.wit.vtxinwit.pop()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now make one of the intermediate witnesses be incorrect
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_program]
tx2.wit.vtxinwit[5].scriptWitness.stack = [witness_program]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Fix the broken witness and the block should be accepted.
tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_program]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_tx_relay_after_segwit_activation(self):
"""Test transaction relay after segwit activation.
After segwit activates, verify that mempool:
- rejects transactions with unnecessary/extra witnesses
- accepts transactions with valid witnesses
and that witness transactions are relayed to non-upgraded peers."""
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected because we can't use a witness
# when spending a non-witness output.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [b'a']
tx.rehash()
tx_hash = tx.sha256
# Verify that unnecessary witnesses are rejected.
self.test_node.announce_tx_and_wait_for_getdata(tx)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=False)
# Verify that removing the witness succeeds.
self.test_node.announce_tx_and_wait_for_getdata(tx)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
# Now try to add extra witness data to a valid witness tx.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx_hash, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey))
tx2.rehash()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
# Add a witness that is too large for IsStandard and check that the tx does not enter the reject filter
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
witness_program2 = CScript([b'a' * 400000])
tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])))
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2]
tx3.rehash()
# Node will not be blinded to the transaction
self.std_node.announce_tx_and_wait_for_getdata(tx3)
test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, 'tx-size')
self.std_node.announce_tx_and_wait_for_getdata(tx3)
test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, 'tx-size')
# Remove witness stuffing, instead add extra witness push on stack
tx3.vout[0] = CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))
tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_program]
tx3.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True)
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False)
# Get rid of the extra witness, and verify acceptance.
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
# Also check that old_node gets a tx announcement, even though this is
# a witness transaction.
self.old_node.wait_for_inv([CInv(1, tx2.sha256)]) # wait until tx2 was inv'ed
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=True)
self.old_node.wait_for_inv([CInv(1, tx3.sha256)])
# Test that getrawtransaction returns correct witness information
# hash, size, vsize
raw_tx = self.nodes[0].getrawtransaction(tx3.hash, 1)
assert_equal(int(raw_tx["hash"], 16), tx3.calc_sha256(True))
assert_equal(raw_tx["size"], len(tx3.serialize_with_witness()))
weight = len(tx3.serialize_with_witness()) + 3 * len(tx3.serialize_without_witness())
vsize = math.ceil(weight / 4)
assert_equal(raw_tx["vsize"], vsize)
assert_equal(raw_tx["weight"], weight)
assert_equal(len(raw_tx["vin"][0]["txinwitness"]), 1)
assert_equal(raw_tx["vin"][0]["txinwitness"][0], hexlify(witness_program).decode('ascii'))
assert(vsize != raw_tx["size"])
# Cleanup: mine the transactions and update utxo for next test
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_segwit_versions(self):
"""Test validity of future segwit version transactions.
Future segwit version transactions are non-standard, but valid in blocks.
Can run this before and after segwit activation."""
NUM_SEGWIT_VERSIONS = 17 # will test OP_0, OP_1, ..., OP_16
if len(self.utxo) < NUM_SEGWIT_VERSIONS:
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
split_value = (self.utxo[0].nValue - 4000) // NUM_SEGWIT_VERSIONS
for i in range(NUM_SEGWIT_VERSIONS):
tx.vout.append(CTxOut(split_value, CScript([OP_TRUE])))
tx.rehash()
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.pop(0)
for i in range(NUM_SEGWIT_VERSIONS):
self.utxo.append(UTXO(tx.sha256, i, split_value))
sync_blocks(self.nodes)
temp_utxo = []
tx = CTransaction()
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
for version in list(range(OP_1, OP_16 + 1)) + [OP_0]:
# First try to spend to a future version segwit script_pubkey.
script_pubkey = CScript([CScriptOp(version), witness_hash])
tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
tx.vout = [CTxOut(self.utxo[0].nValue - 1000, script_pubkey)]
tx.rehash()
test_transaction_acceptance(self.nodes[1], self.std_node, tx, with_witness=True, accepted=False)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=True)
self.utxo.pop(0)
temp_utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
self.nodes[0].generate(1) # Mine all the transactions
sync_blocks(self.nodes)
assert(len(self.nodes[0].getrawmempool()) == 0)
# Finally, verify that version 0 -> version 1 transactions
# are non-standard
script_pubkey = CScript([CScriptOp(OP_1), witness_hash])
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx2.vout = [CTxOut(tx.vout[0].nValue - 1000, script_pubkey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
# Gets accepted to test_node, because standardness of outputs isn't
# checked with fRequireStandard
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True)
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=False)
temp_utxo.pop() # last entry in temp_utxo was the output we just spent
temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
# Spend everything in temp_utxo back to an OP_TRUE output.
tx3 = CTransaction()
total_value = 0
for i in temp_utxo:
tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
total_value += i.nValue
tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
tx3.vout.append(CTxOut(total_value - 1000, CScript([OP_TRUE])))
tx3.rehash()
# Spending a higher version witness output is not allowed by policy,
# even with fRequireStandard=false.
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False, reason="reserved for soft-fork upgrades")
# Building a block with the transaction must be valid, however.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2, tx3])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
sync_blocks(self.nodes)
# Add utxo to our list
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_premature_coinbase_witness_spend(self):
block = self.build_next_block()
# Change the output of the block to be a witness output.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
block.vtx[0].vout[0].scriptPubKey = script_pubkey
# This next line will rehash the coinbase and update the merkle
# root, and solve.
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
spend_tx = CTransaction()
spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")]
spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_program)]
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
spend_tx.rehash()
# Now test a premature spend.
self.nodes[0].generate(98)
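# A coinbase output can only be spent in a block at least COINBASE_MATURITY
# (100) blocks above the block that created it. After 98 more blocks the spend
# would land only 99 blocks above, so it must fail; one further block makes it valid.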
sync_blocks(self.nodes)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
test_witness_block(self.nodes[0], self.test_node, block2, accepted=False)
# Advancing one more block should allow the spend.
self.nodes[0].generate(1)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
test_witness_block(self.nodes[0], self.test_node, block2, accepted=True)
sync_blocks(self.nodes)
@subtest
def test_uncompressed_pubkey(self):
"""Test uncompressed pubkey validity in segwit transactions.
Uncompressed pubkeys are no longer supported in default relay policy,
but (for now) are still valid in blocks."""
# Segwit transactions using uncompressed pubkeys are not accepted
# under default policy, but should still pass consensus.
key = ECKey()
key.generate(False)
pubkey = key.get_pubkey().get_bytes()
assert_equal(len(pubkey), 65) # This should be an uncompressed pubkey
utxo = self.utxo.pop(0)
# Test 1: P2WPKH
# First create a P2WPKH output that uses an uncompressed pubkey
pubkeyhash = hash160(pubkey)
script_pkh = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b""))
tx.vout.append(CTxOut(utxo.nValue - 1000, script_pkh))
tx.rehash()
# Confirm it in a block.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now try to spend it. Send it to a P2WSH output, which we'll
# use in the next test.
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
script_wsh = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_wsh))
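# P2WPKH inputs are signed with the BIP143 (segwit v0) digest: the scriptCode
# is the corresponding P2PKH script and the digest commits to the amount of the
# output being spent. The witness is just [signature, pubkey].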
script = get_p2pkh_script(pubkeyhash)
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign_ecdsa(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
tx2.rehash()
# Should fail policy test.
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Test 2: P2WSH
# Try to spend the P2WSH output created in last test.
# Send it to a P2SH(P2WSH) output, which we'll use in the next test.
p2sh_witness_hash = hash160(script_wsh)
script_p2sh = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
script_sig = CScript([script_wsh])
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, script_p2sh))
tx3.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key)
# Should fail policy test.
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx3])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Test 3: P2SH(P2WSH)
# Try to spend the P2SH output created in the last test.
# Send it to a P2PKH output, which we'll use in the next test.
script_pubkey = get_p2pkh_script(pubkeyhash)
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), script_sig))
tx4.vout.append(CTxOut(tx3.vout[0].nValue - 1000, script_pubkey))
tx4.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key)
# Should fail policy test.
test_transaction_acceptance(self.nodes[0], self.test_node, tx4, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx4])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Test 4: Uncompressed pubkeys should still be valid in non-segwit
# transactions.
tx5 = CTransaction()
tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b""))
tx5.vout.append(CTxOut(tx4.vout[0].nValue - 1000, CScript([OP_TRUE])))
(sig_hash, err) = SignatureHash(script_pubkey, tx5, 0, SIGHASH_ALL)
signature = key.sign_ecdsa(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx5.vin[0].scriptSig = CScript([signature, pubkey])
tx5.rehash()
# Should pass policy and consensus.
test_transaction_acceptance(self.nodes[0], self.test_node, tx5, True, True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx5])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue))
@subtest
def test_signature_version_1(self):
key = ECKey()
key.generate()
pubkey = key.get_pubkey().get_bytes()
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
# First create a witness output for use in the tests.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=True)
# Mine this transaction in preparation for following tests.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
sync_blocks(self.nodes)
self.utxo.pop(0)
# Test each hashtype
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
for sigflag in [0, SIGHASH_ANYONECANPAY]:
for hashtype in [SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE]:
hashtype |= sigflag
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
tx.vout.append(CTxOut(prev_utxo.nValue - 1000, script_pubkey))
tx.wit.vtxinwit.append(CTxInWitness())
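# The BIP143 digest commits to the value of the output being spent, so a
# signature made over the wrong amount (one satoshi too high or too low)
# must make the input invalid.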
# Too-large input value
sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue + 1, key)
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Too-small input value
sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue - 1, key)
block.vtx.pop() # remove last tx
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now try correct value
sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key)
block.vtx.pop()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
# Test combinations of signature hashes.
# Split the utxo into a lot of outputs.
# Randomly choose up to 10 to spend, sign with different hashtypes, and
# pay to a random number of outputs. Repeat NUM_SIGHASH_TESTS times.
# Ensure that we've tested a situation where we use SIGHASH_SINGLE with
# an input index > number of outputs.
NUM_SIGHASH_TESTS = 500
temp_utxos = []
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
split_value = prev_utxo.nValue // NUM_SIGHASH_TESTS
for i in range(NUM_SIGHASH_TESTS):
tx.vout.append(CTxOut(split_value, script_pubkey))
tx.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key)
for i in range(NUM_SIGHASH_TESTS):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
block = self.build_next_block()
used_sighash_single_out_of_bounds = False
for i in range(NUM_SIGHASH_TESTS):
# Ping regularly to keep the connection alive
if (not i % 100):
self.test_node.sync_with_ping()
# Choose random number of inputs to use.
num_inputs = random.randint(1, 10)
# Create a slight bias for producing more utxos
num_outputs = random.randint(1, 11)
random.shuffle(temp_utxos)
assert(len(temp_utxos) > num_inputs)
tx = CTransaction()
total_value = 0
for i in range(num_inputs):
tx.vin.append(CTxIn(COutPoint(temp_utxos[i].sha256, temp_utxos[i].n), b""))
tx.wit.vtxinwit.append(CTxInWitness())
total_value += temp_utxos[i].nValue
split_value = total_value // num_outputs
for i in range(num_outputs):
tx.vout.append(CTxOut(split_value, script_pubkey))
for i in range(num_inputs):
# Now try to sign each input, using a random hashtype.
anyonecanpay = 0
if random.randint(0, 1):
anyonecanpay = SIGHASH_ANYONECANPAY
hashtype = random.randint(1, 3) | anyonecanpay
sign_p2pk_witness_input(witness_program, tx, i, hashtype, temp_utxos[i].nValue, key)
if (hashtype == SIGHASH_SINGLE and i >= num_outputs):
used_sighash_single_out_of_bounds = True
tx.rehash()
for i in range(num_outputs):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
temp_utxos = temp_utxos[num_inputs:]
block.vtx.append(tx)
# Test the block periodically, if we're close to maxblocksize
if (get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000):
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
block = self.build_next_block()
if (not used_sighash_single_out_of_bounds):
self.log.info("WARNING: this test run didn't attempt SIGHASH_SINGLE with out-of-bounds index value")
# Test the transactions we've added to the block
if (len(block.vtx) > 1):
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now test witness version 0 P2PKH transactions
pubkeyhash = hash160(pubkey)
script_pkh = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b""))
tx.vout.append(CTxOut(temp_utxos[0].nValue, script_pkh))
tx.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key)
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
script = get_p2pkh_script(pubkeyhash)
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign_ecdsa(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
# Check that we can't have a scriptSig
tx2.vin[0].scriptSig = CScript([signature, pubkey])
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Move the signature to the witness.
block.vtx.pop()
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
tx2.vin[0].scriptSig = b""
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
temp_utxos.pop(0)
# Update self.utxos for later tests by creating two outputs
# that consolidate all the coins in temp_utxos.
output_value = sum(i.nValue for i in temp_utxos) // 2
tx = CTransaction()
index = 0
# Just spend to our usual anyone-can-spend output
tx.vout = [CTxOut(output_value, CScript([OP_TRUE]))] * 2
for i in temp_utxos:
# Use SIGHASH_ALL|SIGHASH_ANYONECANPAY so we can build up
# the signatures as we go.
tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx, index, SIGHASH_ALL | SIGHASH_ANYONECANPAY, i.nValue, key)
index += 1
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
for i in range(len(tx.vout)):
self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue))
@subtest
def test_non_standard_witness_blinding(self):
"""Test behavior of unnecessary witnesses in transactions does not blind the node for the transaction"""
# Create a p2sh output -- this is so we can pass the standardness
# rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped
# in P2SH).
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# Now check that unnecessary witnesses can't be used to blind a node
# to a transaction, e.g. by violating standardness checks.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx, False, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# We'll add an unnecessary witness to this transaction that would cause
# it to be non-standard, to test that violating policy with a witness
# doesn't blind a node to a transaction. Transactions
# rejected for having a witness shouldn't be added
# to the rejection cache.
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), CScript([p2sh_program])))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * 400]
tx2.rehash()
# This will be rejected due to a policy check:
# No witness is allowed, since it is not a witness program but a p2sh program
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, True, False, 'bad-witness-nonstandard')
# If we send without witness, it should be accepted.
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, False, True)
# Now create a new anyone-can-spend utxo for the next test.
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program])))
tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx3.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, False, True)
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, False, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_non_standard_witness(self):
"""Test detection of non-standard P2WSH witness"""
pad = chr(1).encode('latin-1')
# Create scripts for tests
scripts = []
scripts.append(CScript([OP_DROP] * 100))
scripts.append(CScript([OP_DROP] * 99))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61))
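# scripts[0]/[1] exercise the standardness limits on the witness stack
# (at most 100 items excluding the witnessScript, each at most 80 bytes);
# scripts[2]/[3] are 3600 vs 3601 bytes, straddling the witnessScript size limit.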
p2wsh_scripts = []
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# For each script, generate a pair of P2WSH and P2SH-P2WSH output.
outputvalue = (self.utxo[0].nValue - 1000) // (len(scripts) * 2)
for i in scripts:
p2wsh = CScript([OP_0, sha256(i)])
p2sh = hash160(p2wsh)
p2wsh_scripts.append(p2wsh)
tx.vout.append(CTxOut(outputvalue, p2wsh))
tx.vout.append(CTxOut(outputvalue, CScript([OP_HASH160, p2sh, OP_EQUAL])))
tx.rehash()
txid = tx.sha256
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Creating transactions for tests
p2wsh_txs = []
p2sh_txs = []
for i in range(len(scripts)):
p2wsh_tx = CTransaction()
p2wsh_tx.vin.append(CTxIn(COutPoint(txid, i * 2)))
p2wsh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2wsh_tx.rehash()
p2wsh_txs.append(p2wsh_tx)
p2sh_tx = CTransaction()
p2sh_tx.vin.append(CTxIn(COutPoint(txid, i * 2 + 1), CScript([p2wsh_scripts[i]])))
p2sh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2sh_tx.wit.vtxinwit.append(CTxInWitness())
p2sh_tx.rehash()
p2sh_txs.append(p2sh_tx)
# Testing native P2WSH
# Witness stack size, excluding witnessScript, over 100 is non-standard
p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[0], True, False, 'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[0], True, True)
# Stack element size over 80 bytes is non-standard
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[1], True, False, 'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[1], True, True)
# Standard nodes should accept if element size is not over 80 bytes
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[1], True, True)
# witnessScript size at 3600 bytes is standard
p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[2], True, True)
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[2], True, True)
# witnessScript size at 3601 bytes is non-standard
p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[3], True, False, 'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[3], True, True)
# Repeating the same tests with P2SH-P2WSH
p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[0], True, False, 'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[0], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[1], True, False, 'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[1], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[1], True, True)
p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[2], True, True)
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[2], True, True)
p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[3], True, False, 'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[3], True, True)
self.nodes[0].generate(1) # Mine and clean up the mempool of non-standard node
# Valid but non-standard transactions in a block should be accepted by standard node
sync_blocks(self.nodes)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.utxo.pop(0)
@subtest
def test_upgrade_after_activation(self):
"""Test the behavior of starting up a segwit-aware node after the softfork has activated."""
# Restart with the new binary
self.stop_node(2)
self.start_node(2, extra_args=["-vbparams=segwit:0:999999999999"])
connect_nodes(self.nodes[0], 2)
sync_blocks(self.nodes)
# Make sure that this peer thinks segwit has activated.
assert(get_bip9_status(self.nodes[2], 'segwit')['status'] == "active")
# Make sure this peer's blocks match those of node0.
height = self.nodes[2].getblockcount()
while height >= 0:
block_hash = self.nodes[2].getblockhash(height)
assert_equal(block_hash, self.nodes[0].getblockhash(height))
assert_equal(self.nodes[0].getblock(block_hash), self.nodes[2].getblock(block_hash))
height -= 1
@subtest
def test_witness_sigops(self):
"""Test sigop counting is correct inside witnesses."""
# Keep this under MAX_OPS_PER_SCRIPT (201)
witness_program = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG] * 5 + [OP_CHECKSIG] * 193 + [OP_ENDIF])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
sigops_per_script = 20 * 5 + 193 * 1
# We'll produce 2 extra outputs, one with a program that would take us
# over max sig ops, and one with a program that would exactly reach max
# sig ops
outputs = (MAX_SIGOP_COST // sigops_per_script) + 2
extra_sigops_available = MAX_SIGOP_COST % sigops_per_script
# We chose the number of checkmultisigs/checksigs to make this work:
assert(extra_sigops_available < 100) # steer clear of MAX_OPS_PER_SCRIPT
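        # Worked numbers (illustrative, assuming MAX_SIGOP_COST = 80000 as in the test framework):
        # sigops_per_script = 20*5 + 193 = 293, so outputs = 80000 // 293 + 2 = 275,
        # and extra_sigops_available = 80000 % 293 = 11, comfortably below 100.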
# This script, when spent with the first
# N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction,
# would push us just over the block sigop limit.
witness_program_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available + 1) + [OP_ENDIF])
witness_hash_toomany = sha256(witness_program_toomany)
script_pubkey_toomany = CScript([OP_0, witness_hash_toomany])
# If we spend this script instead, we would exactly reach our sigop
# limit (for witness sigops).
witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available) + [OP_ENDIF])
witness_hash_justright = sha256(witness_program_justright)
script_pubkey_justright = CScript([OP_0, witness_hash_justright])
# First split our available utxo into a bunch of outputs
split_value = self.utxo[0].nValue // outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
for i in range(outputs):
tx.vout.append(CTxOut(split_value, script_pubkey))
tx.vout[-2].scriptPubKey = script_pubkey_toomany
tx.vout[-1].scriptPubKey = script_pubkey_justright
tx.rehash()
block_1 = self.build_next_block()
self.update_witness_block_with_transactions(block_1, [tx])
test_witness_block(self.nodes[0], self.test_node, block_1, accepted=True)
tx2 = CTransaction()
# If we try to spend the first n-1 outputs from tx, that should be
# too many sigops.
total_value = 0
for i in range(outputs - 1):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
total_value += tx.vout[i].nValue
tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program_toomany]
tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE])))
tx2.rehash()
block_2 = self.build_next_block()
self.update_witness_block_with_transactions(block_2, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_2, accepted=False)
# Try dropping the last input in tx2, and add an output that has
# too many sigops (contributing to legacy sigop count).
checksig_count = (extra_sigops_available // 4) + 1
script_pubkey_checksigs = CScript([OP_CHECKSIG] * checksig_count)
tx2.vout.append(CTxOut(0, script_pubkey_checksigs))
tx2.vin.pop()
tx2.wit.vtxinwit.pop()
tx2.vout[0].nValue -= tx.vout[-2].nValue
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_3, accepted=False)
# If we drop the last checksig in this output, the tx should succeed.
block_4 = self.build_next_block()
tx2.vout[-1].scriptPubKey = CScript([OP_CHECKSIG] * (checksig_count - 1))
tx2.rehash()
self.update_witness_block_with_transactions(block_4, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_4, accepted=True)
# Reset the tip back down for the next test
sync_blocks(self.nodes)
for x in self.nodes:
x.invalidateblock(block_4.hash)
# Try replacing the last input of tx2 to be spending the last
# output of tx
block_5 = self.build_next_block()
tx2.vout.pop()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs - 1), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program_justright]
tx2.rehash()
self.update_witness_block_with_transactions(block_5, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_5, accepted=True)
# TODO: test p2sh sigop counting
def test_superfluous_witness(self):
        # Serialize the transaction with the witness flag byte always set to 3
def serialize_with_bogus_witness(tx):
flags = 3
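            # Illustrative note: bit 0 of the flag byte marks witness data as present; any other
            # bit (here bit 1, making the value 3) is unknown to the deserializer and should be rejected.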
r = b""
r += struct.pack("<i", tx.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(tx.vin)
r += ser_vector(tx.vout)
if flags & 1:
if (len(tx.wit.vtxinwit) != len(tx.vin)):
# vtxinwit must have the same length as vin
tx.wit.vtxinwit = tx.wit.vtxinwit[:len(tx.vin)]
for i in range(len(tx.wit.vtxinwit), len(tx.vin)):
tx.wit.vtxinwit.append(CTxInWitness())
r += tx.wit.serialize()
r += struct.pack("<I", tx.nLockTime)
return r
class msg_bogus_tx(msg_tx):
def serialize(self):
return serialize_with_bogus_witness(self.tx)
self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(address_type='bech32'), 5)
self.nodes[0].generate(1)
unspent = next(u for u in self.nodes[0].listunspent() if u['spendable'] and u['address'].startswith('bcrt'))
raw = self.nodes[0].createrawtransaction([{"txid": unspent['txid'], "vout": unspent['vout']}], {self.nodes[0].getnewaddress(): 1})
tx = FromHex(CTransaction(), raw)
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, serialize_with_bogus_witness(tx).hex())
with self.nodes[0].assert_debug_log(['Superfluous witness record']):
self.nodes[0].p2p.send_message(msg_bogus_tx(tx))
self.nodes[0].p2p.sync_with_ping()
raw = self.nodes[0].signrawtransactionwithwallet(raw)
assert raw['complete']
raw = raw['hex']
tx = FromHex(CTransaction(), raw)
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, serialize_with_bogus_witness(tx).hex())
with self.nodes[0].assert_debug_log(['Unknown transaction optional data']):
self.nodes[0].p2p.send_message(msg_bogus_tx(tx))
self.nodes[0].p2p.sync_with_ping()
if __name__ == '__main__':
SegWitTest().main()
| 46.275813
| 220
| 0.655053
|
cada63a4bdf3240a90fbd29d70e442eb0a4e9e51
| 349
|
py
|
Python
|
MATA37-ILP 2021.2/JUDE/Lista 3 e Prova 3 - Loop/lista3_E.py
|
jeffersonraimon/Programming-UFBA
|
6a6803bfd0e6aa72f8c2b9ffa120792d73c727ca
|
[
"MIT"
] | 1
|
2021-12-09T12:55:56.000Z
|
2021-12-09T12:55:56.000Z
|
MATA37-ILP 2021.2/JUDE/Lista 3 e Prova 3 - Loop/lista3_E.py
|
jeffersonraimon/Programming-UFBA
|
6a6803bfd0e6aa72f8c2b9ffa120792d73c727ca
|
[
"MIT"
] | null | null | null |
MATA37-ILP 2021.2/JUDE/Lista 3 e Prova 3 - Loop/lista3_E.py
|
jeffersonraimon/Programming-UFBA
|
6a6803bfd0e6aa72f8c2b9ffa120792d73c727ca
|
[
"MIT"
] | 1
|
2022-02-21T12:01:53.000Z
|
2022-02-21T12:01:53.000Z
|
E, P = input().split()
E = int(E)
P = int(P)
cont = E - P
contador = 1
contadorP = P - 1
if contadorP > 0:
while cont > 0:
cont = cont - contadorP
contador = contador + 1
if contadorP <= 0:
print("F")
break
contadorP = contadorP - 1
else:
print(contador)
else:
print("F")
| 19.388889
| 33
| 0.495702
|
a3ea11876ff415908cf5730e548a0e6cc5d6ec81
| 2,949
|
py
|
Python
|
turtletrans/pl.py
|
wrwrwr/turtle-trans
|
3b906bfaa45912d16a8b8877649388539b02eb75
|
[
"MIT"
] | null | null | null |
turtletrans/pl.py
|
wrwrwr/turtle-trans
|
3b906bfaa45912d16a8b8877649388539b02eb75
|
[
"MIT"
] | null | null | null |
turtletrans/pl.py
|
wrwrwr/turtle-trans
|
3b906bfaa45912d16a8b8877649388539b02eb75
|
[
"MIT"
] | null | null | null |
from .translate import translate_methods, turtle_subclass
# Translate the class name itself.
Żółw = Żółwik = turtle_subclass("Żółw")
# Original method --> list of aliases (order as in turtle docs).
translate_methods(Żółw, {
# Turtle motion.
'forward': ("naprzód", "np", "doprzodu", "dp"),
'back': ("wstecz", "ws", "dotyłu", "dt"),
'right': ("naprawo", "prawo", "prawa", "pw"),
'left': ("nalewo", "lewo", "lewa", "lw"),
'goto': ("idźdo", "napoz", "zmieńpoz", "zpoz"),
'setx': ("zmieńx", "zx"),
'sety': ("zmieńy", "zy"),
'setheading': ("obróć", "skieruj", "zmieńkierunek", "zk"),
'home': ("dodomu", "dom", "naśrodek", "środek", "wróć"),
'circle': ("okrąg", "koło", "kółko"),
'dot': ("kropka", "punkt"),
'stamp': ("stempel", "pieczęć", "pieczątka"),
'clearstamp': ("usuństempel", "usuńpieczęć", "usuńpieczątkę"),
'clearstamps': ("usuństemple", "usuńpieczęci", "usuńpieczątki"),
'undo': ("cofnij", "c"),
'speed': ("szybkość", "prędkość"),
# Turtle state.
'position': ("pozycja", "poz"),
'towards': ("wkierunku", "kierunek"),
'xcor': ("wspx", "x"),
'ycor': ("wspy", "y"),
'heading': ("kierunek",),
'distance': ("odległość",),
# Measurement.
'degrees': ("stopnie",),
'radians': ("radiany",),
# Pen control.
'pendown': ("opuśćpisak", "opuść", "opu"),
'penup': ("podnieśpisak", "podnieś", "pod"),
'pensize': ("rozmiarpisaka", "rozmiar", "roz", "ugp"),
'pen': ("pisak",),
'isdown': ("czyopuszczony", "opuszczony", "czypisze"),
'pencolor': ("kolorpisaka", "kolpis", "ukp"),
'fillcolor': ("kolormalowania", "kolmal", "ukm"),
'color': ("kolor", "kol", "uk"),
'filling': ("malowanie", "czymaluje", "wypełnianie", "czywypełnia"),
'begin_fill': ("rozpocznij_malowanie", "maluj", "wypełniaj"),
'end_fill': ("zakończ_malowanie", "niemaluj", "niewypełniaj"),
'reset': ("resetuj", "cs"),
'clear': ("wyczyść", "czyść"),
'write': ("pisz", "napisz", "tekst"),
# Turtle state.
'hideturtle': ("schowaj", "chowaj", "ukryj", "sż"),
'showturtle': ("pokaż", "pż"),
'isvisible': ("czywidoczny", "widoczny"),
# Appearance.
'shape': ("kształt",),
'resizemode': ("trybrozmiaru",),
'shapesize': ("rozmiarkształtu", "rozmiarżółwia"),
'shearfactor': ("ścięcie",),
'tilt': ("odchylenie",),
'tiltangle': ("kątodchylenia",),
'shapetransform': ("przekształcenie",),
'get_shapepoly': ("wielokąt_kształtu",),
# Events.
'onclick': ("pokliknięciu",),
'onrelease': ("popuszczeniu",),
'ondrag': ("poprzeciągnięciu",),
# Special.
'begin_poly': ("rozpocznijwielokąt",),
'end_poly': ("zakończwielokąt",),
'get_poly': ("nagranywielokąt",),
'clone': ("klonuj",),
'getturtle': ("zółw",),
'getscreen': ("obraz",),
'setundobuffer': ("buforcofania",),
'undobufferentries': ("wpisybuforacofania", "wpisybuforucofania"),
})
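# Usage sketch (illustrative only; names taken from the alias table above):
#   ż = Żółw()
#   ż.naprzód(100)   # forward(100)
#   ż.prawo(90)      # right(90)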
| 35.107143
| 72
| 0.565955
|
ad44640302c6d34e8e326934a920c13e45f93daa
| 12,824
|
py
|
Python
|
plgx-esp-ui/migrations/versions/d2b00dd93241_initial_revision.py
|
dhoomakethu/plgx-esp
|
b466b52a5e16a0d12a61e505e48add83bee5bad4
|
[
"MIT"
] | 20
|
2019-12-09T13:55:13.000Z
|
2022-01-10T09:10:42.000Z
|
plgx-esp-ui/migrations/versions/d2b00dd93241_initial_revision.py
|
dhoomakethu/plgx-esp
|
b466b52a5e16a0d12a61e505e48add83bee5bad4
|
[
"MIT"
] | 13
|
2019-12-03T13:27:27.000Z
|
2021-12-03T05:22:49.000Z
|
plgx-esp-ui/migrations/versions/d2b00dd93241_initial_revision.py
|
dhoomakethu/plgx-esp
|
b466b52a5e16a0d12a61e505e48add83bee5bad4
|
[
"MIT"
] | 16
|
2019-11-15T11:45:06.000Z
|
2022-01-07T08:07:11.000Z
|
"""Initial revision for migrations going forward.
Revision ID: d2b00dd93241
Revises: None
Create Date: 2016-05-01 09:57:32.779107
"""
# revision identifiers, used by Alembic.
revision = 'd2b00dd93241'
down_revision = None
from alembic import op
import sqlalchemy as sa
import polylogyx.database
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('file_path',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('category', sa.String(), nullable=False),
sa.Column('target_paths', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('category')
)
op.create_table('node_config',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=True, unique=True),
sa.Column('type', sa.String(), nullable=True),
sa.Column('config', sa.String(), nullable=True),
sa.Column('apply_by_default', sa.Boolean(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id'), )
op.create_table('options',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=False, unique=True),
sa.Column('option', sa.String(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id'), )
op.create_table('settings',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=False, unique=True),
sa.Column('setting', sa.String(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id'), )
op.create_table('node',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('os_info', postgresql.JSONB(), nullable=False),
sa.Column('network_info', postgresql.JSONB(), nullable=False),
sa.Column('node_key', sa.String(), nullable=False),
sa.Column('platform', sa.String(), nullable=True),
sa.Column('enroll_secret', sa.String(), nullable=True),
sa.Column('enrolled_on', sa.DateTime(), nullable=True),
sa.Column('host_identifier', sa.String(), nullable=True),
sa.Column('last_checkin', sa.DateTime(), nullable=True),
sa.Column('last_results_update_date', sa.DateTime(), nullable=True),
sa.Column('last_results_seen_date', sa.DateTime(), nullable=True),
sa.Column('config_id', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.ForeignKeyConstraint(['config_id'], ['node_config.id'], ),
sa.UniqueConstraint('node_key')
)
op.create_table('alerts',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('message', postgresql.JSONB(), nullable=False),
sa.Column('sql', sa.String(), nullable=True),
sa.Column('query_name', sa.String(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('node_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['node_id'], ['node.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('pack',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.Column('platform', sa.String(), nullable=True),
sa.Column('version', sa.String(), nullable=True),
sa.Column('description', sa.String(), nullable=True),
sa.Column('shard', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('query',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.Column('sql', sa.String(), nullable=False),
sa.Column('interval', sa.Integer(), nullable=True),
sa.Column('platform', sa.String(), nullable=True),
sa.Column('version', sa.String(), nullable=True),
sa.Column('description', sa.String(), nullable=True),
sa.Column('value', sa.String(), nullable=True),
sa.Column('removed', sa.Boolean(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table('tag',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('value', sa.String(), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('value')
)
op.create_table('distributed_query',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('guid', sa.String(), nullable=False),
sa.Column('status', sa.Integer(), nullable=False),
sa.Column('sql', sa.String(), nullable=False),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('not_before', sa.DateTime(), nullable=True),
sa.Column('retrieved', sa.DateTime(), nullable=True),
sa.Column('node_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['node_id'], ['node.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('guid')
)
op.create_table('file_path_tags',
sa.Column('tag.id', sa.Integer(), nullable=True),
sa.Column('file_path.id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['file_path.id'], ['file_path.id'], ),
sa.ForeignKeyConstraint(['tag.id'], ['tag.id'], )
)
op.create_table('node_tags',
sa.Column('tag.id', sa.Integer(), nullable=True),
sa.Column('node.id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['node.id'], ['node.id'], ),
sa.ForeignKeyConstraint(['tag.id'], ['tag.id'], )
)
op.create_table('pack_tags',
sa.Column('tag.id', sa.Integer(), nullable=True),
sa.Column('pack.id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['pack.id'], ['pack.id'], ),
sa.ForeignKeyConstraint(['tag.id'], ['tag.id'], )
)
op.create_table('query_packs',
sa.Column('pack.id', sa.Integer(), nullable=True),
sa.Column('query.id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['pack.id'], ['pack.id'], ),
sa.ForeignKeyConstraint(['query.id'], ['query.id'], )
)
op.create_table('query_tags',
sa.Column('tag.id', sa.Integer(), nullable=True),
sa.Column('query.id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['query.id'], ['query.id'], ),
sa.ForeignKeyConstraint(['tag.id'], ['tag.id'], )
)
op.create_table('result_log',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('action', sa.String(), nullable=True),
sa.Column('columns', postgresql.JSONB(), nullable=True),
sa.Column('node_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['node_id'], ['node.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('status_log',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('line', sa.Integer(), nullable=True),
sa.Column('message', sa.String(), nullable=True),
sa.Column('severity', sa.Integer(), nullable=True),
sa.Column('filename', sa.String(), nullable=True),
sa.Column('created', sa.DateTime(), nullable=True),
sa.Column('node_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['node_id'], ['node.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('distributed_query_result',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('columns', postgresql.JSONB(), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('distributed_query_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['distributed_query_id'], ['distributed_query.id'], ),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
op.create_table('email_recipient',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('recipient', sa.String(), nullable=False),
sa.Column('status', sa.String(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('recipient')
)
op.create_table('carve_session',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('session_id', sa.String(), nullable=True),
sa.Column('carve_guid', sa.String(), nullable=True),
sa.Column('archive', sa.String(), nullable=True),
sa.Column('carve_size', sa.Integer(), nullable=True),
sa.Column('block_size', sa.Integer(), nullable=True),
sa.Column('block_count', sa.Integer(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('node_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['node_id'], ['node.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('node_data',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.Column('data', postgresql.JSONB(), nullable=True),
sa.Column('node_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['node_id'], ['node.id'], ),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=False),
sa.PrimaryKeyConstraint('id'),
)
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # Drop tables that hold foreign keys before the tables they reference.
    op.drop_table('distributed_query_result')
    op.drop_table('status_log')
    op.drop_table('result_log')
    op.drop_table('query_tags')
    op.drop_table('query_packs')
    op.drop_table('pack_tags')
    op.drop_table('node_tags')
    op.drop_table('file_path_tags')
    op.drop_table('distributed_query')
    op.drop_table('alerts')
    op.drop_table('carve_session')
    op.drop_table('node_data')
    op.drop_table('tag')
    op.drop_table('query')
    op.drop_table('pack')
    op.drop_table('node')
    op.drop_table('node_config')
    op.drop_table('file_path')
    op.drop_table('options')
    op.drop_table('settings')
    op.drop_table('email_recipient')
    ### end Alembic commands ###
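# Typical invocation (illustrative; this project may wrap Alembic via Flask-Migrate):
#   alembic upgrade head      # apply this revision
#   alembic downgrade base    # revert it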
| 50.488189
| 98
| 0.514738
|
e122da9a7daca39fe4d287f08eb1ff30f9af0927
| 2,460
|
py
|
Python
|
carin/app.py
|
fiskurgit/Carin
|
41f5e8003d169f1f0454e7b674daf341d238f061
|
[
"Unlicense"
] | null | null | null |
carin/app.py
|
fiskurgit/Carin
|
41f5e8003d169f1f0454e7b674daf341d238f061
|
[
"Unlicense"
] | null | null | null |
carin/app.py
|
fiskurgit/Carin
|
41f5e8003d169f1f0454e7b674daf341d238f061
|
[
"Unlicense"
] | null | null | null |
from carin.help import show_help
from carin.help import show_bad_argument_help
from enum import Enum, auto
import sys
import getopt
import urllib.request
import json
debug = False
base_url = "https://api.carbonintensity.org.uk"
def log(message):
if debug:
print(message)
def run():
log("Carbon Intensity API")
class Endpoint(Enum):
UNKNOWN = auto()
GENERATION = auto()
INTENSITY = auto()
REGIONAL = auto()
endpoint = Endpoint.UNKNOWN
postcode = None
argument_count = len(sys.argv)
if argument_count == 1:
show_help()
else:
log("Parse Arguments")
log(sys.argv)
try:
opts, args = getopt.getopt(sys.argv[1:], "e:p:", ["endpoint=", "postcode="])
except getopt.GetoptError:
show_bad_argument_help()
sys.exit(2)
for opt, arg in opts:
log("option: " + opt + " argument: " + arg)
            if opt in ('-e', '--endpoint'):
if arg == 'generation':
endpoint = Endpoint.GENERATION
elif arg == "intensity":
endpoint = Endpoint.INTENSITY
elif arg == "regional":
endpoint = Endpoint.REGIONAL
            if opt in ('-p', '--postcode'):
postcode = arg
if endpoint == Endpoint.GENERATION:
generation()
elif endpoint == Endpoint.INTENSITY:
intensity()
elif endpoint == Endpoint.REGIONAL:
regional(postcode=postcode)
elif endpoint == Endpoint.UNKNOWN:
show_bad_argument_help()
def generation():
log("Endpoint: generation")
request = urllib.request.urlopen(base_url + "/generation")
generation_json = json.loads(request.read())
print(json.dumps(generation_json, indent=2, sort_keys=True))
def intensity():
log("Endpoint: intensity")
request = urllib.request.urlopen(base_url + "/intensity")
intensity_json = json.loads(request.read())
print(json.dumps(intensity_json, indent=2, sort_keys=True))
def regional(**kwargs):
log("Endpoint: regional")
postcode = kwargs.get('postcode', None)
if postcode is None:
request = urllib.request.urlopen(base_url + "/regional")
else:
request = urllib.request.urlopen(base_url + "/regional/postcode/" + postcode)
regional_json = json.loads(request.read())
print(json.dumps(regional_json, indent=2, sort_keys=True))
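# Example invocations (illustrative; assumes an entry point that calls run()):
#   <entry-point> -e intensity
#   <entry-point> -e regional -p SW1A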
| 27.954545
| 88
| 0.600813
|
05d1496f28cbbe6287d3a21c15caa2d5a5f7d05e
| 10,078
|
py
|
Python
|
mlep/mlep_process.py
|
NREL/python-mlep
|
3d8284d04b551093005c162680a72523911bfeaa
|
[
"BSD-3-Clause"
] | 1
|
2021-04-28T01:51:21.000Z
|
2021-04-28T01:51:21.000Z
|
mlep/mlep_process.py
|
NREL/python-mlep
|
3d8284d04b551093005c162680a72523911bfeaa
|
[
"BSD-3-Clause"
] | 1
|
2020-12-28T16:28:46.000Z
|
2020-12-28T16:28:46.000Z
|
mlep/mlep_process.py
|
NREL/python-mlep
|
3d8284d04b551093005c162680a72523911bfeaa
|
[
"BSD-3-Clause"
] | null | null | null |
########################################################################################################################
# Copyright (c) 2008-2020, Alliance for Sustainable Energy, LLC, and other contributors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# (1) Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the distribution.
#
# (3) Neither the name of the copyright holder nor the names of any contributors may be used to endorse or promote products
# derived from this software without specific prior written permission from the respective party.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND ANY CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S), ANY CONTRIBUTORS, THE UNITED STATES GOVERNMENT, OR THE UNITED
# STATES DEPARTMENT OF ENERGY, NOR ANY OF THEIR EMPLOYEES, BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
########################################################################################################################
# -*- coding: utf-8 -*-
"""
mlepProcess
~~~~~~~~~~~
A class of a co-simulation process. This class represents a co-simulation
process. It enables data exchanges between the host (in Python) and the
client (the co-simulation process - E+), using the communication protocol
defined in BCVTB.
This class wraps the mlep* functions.
See also:
<a href="https://gaia.lbl.gov/bcvtb">BCVTB (hyperlink)</a>
Usage:
> mlepProcess()
Note: This class is based on the MATLAB implementation of mlep. The original
files were written by Nghiem Truong and Willy Bernal
Protocol Version 1 & 2:
Packet has the form:
v f dr di db t r1 r2 ... i1 i2 ... b1 b2 ... \n
where
v - version number (1,2)
f - flag (0: communicate, 1: finish, -10: initialization error,
-20: time integration error, -1: unknown error)
dr - number of real values
di - number of integer values
db - number of boolean values
t - current simulation time in seconds (format 20.15e)
r1 r2 ... are real values (format 20.15e)
i1 i2 ... are integer values (format d)
b1 b2 ... are boolean values (format d)
\n - carriage return
Note that if f is non-zero, other values after it will not be processed.
:author: Willy Bernal Heredia
:copyright: (c) 2016 by The Alliance for Sustainable Energy
:license: BSD-3
"""
import mlep
import socket
class MlepProcess:
def __init__(self):
self.description = 'mlepProcess Object'
self.version = 2.0 # Current Version of the Protocol
        self.program = 'runenergyplus'  # Executable
self.env = {'BCVTB_HOME': '/Users/wbernalh/Documents/Projects/J2/Code/TCP/bcvtb/'} # Environment
# Arguments to the client program
self.arguments = ('/Applications/EnergyPlus-8-6-0/ExampleFiles/1ZoneUncontrolled.idf',
'/Applications/EnergyPlus-8-6-0/WeatherData/USA_CO_Golden-NREL.724666_TMY3.epw')
self.workDir = './' # Working directory (default is current directory)
self.port = 0 # Socket port (default 0 = any free port)
self.host = 'localhost' # Host name (default '' = localhost)
self.bcvtbDir = '/Users/wbernalh/Documents/Projects/J2/Code/TCP/bcvtb/' # Directory to BCVTB
# (default '' means that if no environment variable exist, set it to current directory)
self.configFile = 'socket.cfg' # Name of socket configuration file
self.configFileWriteOnce = False # if true, only write the socket config file
# for the first time and when server socket changes.
self.accept_timeout = 20000 # Timeout for waiting for the client to connect
        self.exe_cmd = 'subprocess'  # How to execute EnergyPlus (e.g. subprocess/system)
self.status = 0
self.msg = ''
# Property
self.rwTimeout = 0 # Timeout for sending/receiving data (0 = infinite)
self.is_running = False # Is co-simulation running?
self.server_socket = None # Server socket to listen to client
self.comm_socket = None # Socket for sending/receiving data
self.writer = '' # Buffered writer stream
self.reader = '' # Buffered reader stream
self.pid = () # Process ID for E+
self.deltaT = 0 # Time step for E+
self.kStep = 0 # E+ simulation step
self.flag = 0 # Co-simulation flag
self.MAX_STEPS = 0 # Co-simulation max. steps
self.inputs_list = [] # Co-simulation input list
self.outputs_list = [] # Co-simulation output list
self.client_address = None # Client Address
self.inputs = [] # E+ Simulation Inputs
self.outputs = [] # E+ Simulation Outputs
self.mapping = '' # Path to the haystack mapping file
# Start
# ==============================================================
def start(self):
# status and msg are returned from the client process
# status = 0 --> success
if self.is_running:
return
# Check parameters
if self.program is None:
print('Program name must be specified.')
# Call mlepCreate
try:
if self.server_socket is not None:
the_port = self.server_socket
if self.configFileWriteOnce:
the_config_file = -1 # Do not write socket config file
else:
the_config_file = self.configFile
else:
the_port = self.port
the_config_file = self.configFile
# Call MLEPCreate function
[self.server_socket, self.comm_socket, status, msg] = \
mlep.mlep_create(self.program, self.arguments, self.workDir, self.accept_timeout, the_port,
self.host, self.bcvtbDir, the_config_file, self.env, self.exe_cmd)
except BaseException:
import traceback
traceback.print_exc()
print('Throw Error/Close Socket')
status = 1
msg = 'Could not start the process.'
# Return
return status, msg
# Accept Socket
# ==============================================================
def accept_socket(self):
# status and msg are returned from the client process
# status = 0 --> success
status = self.status
msg = self.msg
# Accept Socket
(self.comm_socket, self.client_address) = self.server_socket.accept()
# Create Streams
if status == 0 and isinstance(self.comm_socket, socket.socket):
self.is_running = True
msg = ''
# Return
return status, msg
# Stop
# ==============================================================
def stop(self, stop_signal):
# Not Running
if not self.is_running:
return
try:
# Send stop signal
if stop_signal:
self.write(mlep.mlep_encode_real_data(2, 1, None, (1,)))
# Close connection
if self.comm_socket:
# self.comm_socket.stop() #original way of terminating a socket connection
# time.sleep(10) # add some extra time for buffer to finish energyplus post-processing
# pass
self.comm_socket.close() # the correct way by Yanfei
self.comm_socket = None
except Exception as e:
print('Error {0}'.format(e))
# Update
self.is_running = False
# Read
# ==============================================================
def read(self, packet_size=4096):
"""
        Read data from the co-simulation socket. The BCVTB protocol is designed
        to end a message with a newline (\n).
:param packet_size: [int]
:return: [bytes]
"""
packet = []
# Read Packet
if self.is_running and isinstance(self.comm_socket, socket.socket):
while True:
piece = self.comm_socket.recv(packet_size)
packet.append(piece)
decoded = piece.decode('utf-8')
if decoded.endswith('\n'):
packet = b''.join(packet)
break
else:
packet = b''
print('Co-simulation is not running.')
# Return
return packet
# Write
# ==============================================================
def write(self, packet):
if self.is_running:
packet = packet.encode(encoding='UTF-8')
self.comm_socket.sendall(packet)
else:
print('Co-simulation is not running.')
| 43.627706
| 124
| 0.571344
|
177f285cbbcf4bbac268526b21c11d505366d5cf
| 5,174
|
py
|
Python
|
plugins/modules/panos_commit_firewall.py
|
bkarypid/pan-os-ansible
|
d7b376192b24fd7c8f0af6debc099a0aa676b6fd
|
[
"Apache-2.0"
] | null | null | null |
plugins/modules/panos_commit_firewall.py
|
bkarypid/pan-os-ansible
|
d7b376192b24fd7c8f0af6debc099a0aa676b6fd
|
[
"Apache-2.0"
] | 22
|
2020-10-19T06:12:10.000Z
|
2022-03-07T10:04:30.000Z
|
plugins/modules/panos_commit_firewall.py
|
patrickdaj/pan-os-ansible
|
1e3daf5fe0d862516561cc95e420691c03a38403
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2020 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: panos_commit_firewall
short_description: Commit the firewall's candidate configuration.
description:
- Module that will commit the candidate configuration of a PAN-OS firewall.
- The new configuration will become active immediately.
author:
- Robert Hagen (@stealthllama)
version_added: '2.0.0'
requirements:
- pan-os-python
extends_documentation_fragment:
- paloaltonetworks.panos.fragments.provider
options:
description:
description:
- A description of the commit.
type: str
admins:
description:
- Commit only the changes made by specified list of administrators.
type: list
elements: str
exclude_device_and_network:
description:
- Exclude network and device configuration changes.
type: bool
default: False
exclude_shared_objects:
description:
- Exclude shared object configuration changes.
type: bool
default: False
exclude_policy_and_objects:
description:
- Exclude policy and object configuration changes.
type: bool
default: False
force:
description:
- Force the commit.
type: bool
default: False
sync:
description:
- Wait for the commit to complete.
type: bool
default: True
'''
EXAMPLES = r'''
- name: commit candidate configs on firewall
panos_commit_firewall:
provider: '{{ credentials }}'
- name: commit changes by specified admins on firewall
panos_commit_firewall:
provider: '{{ credentials }}'
admins: ['netops','secops','cloudops']
description: 'Saturday change window'
- name: commit only policy and object changes on firewall
panos_commit_firewall:
provider: '{{ credentials }}'
exclude_device_and_network: True
'''
RETURN = r'''
jobid:
description: The ID of the PAN-OS commit job.
type: int
returned: always
sample: 49152
details:
description: Commit job completion messages.
type: str
returned: on success
sample: Configuration committed successfully
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.paloaltonetworks.panos.plugins.module_utils.panos import get_connection
try:
from panos.firewall import FirewallCommit
except ImportError:
pass
def main():
# Instantiate the connection helper
helper = get_connection(
min_pandevice_version=(1, 0, 0),
min_panos_version=(8, 0, 0),
argument_spec=dict(
description=dict(type='str'),
admins=dict(type='list', elements='str'),
exclude_device_and_network=dict(type='bool'),
exclude_shared_objects=dict(type='bool'),
exclude_policy_and_objects=dict(type='bool'),
force=dict(type='bool'),
sync=dict(type='bool', default=True)
)
)
# Initialize the Ansible module
module = AnsibleModule(
argument_spec=helper.argument_spec,
supports_check_mode=False,
required_one_of=helper.required_one_of
)
# Verify libs are present, get the parent object.
parent = helper.get_pandevice_parent(module)
# Construct the commit command
cmd = FirewallCommit(
description=module.params['description'],
admins=module.params['admins'],
exclude_device_and_network=module.params['exclude_device_and_network'],
exclude_shared_objects=module.params['exclude_shared_objects'],
exclude_policy_and_objects=module.params['exclude_policy_and_objects'],
force=module.params['force']
)
# Execute the commit
commit_results = dict(changed=False, jobid=0)
# commit_results = {}
sync = module.params['sync']
result = parent.commit(cmd=cmd, sync=sync)
# Exit with status
if result is None:
# No commit was needed
pass
elif not sync:
# When sync is False only jobid is returned
commit_results['jobid'] = int(result)
elif not result['success']:
# The commit failed
module.fail_json(msg=' | '.join(result["messages"]))
else:
# The commit succeeded
commit_results['changed'] = True
commit_results['jobid'] = result['jobid']
commit_results['details'] = result['messages']
module.exit_json(**commit_results)
if __name__ == '__main__':
main()
| 29.735632
| 96
| 0.674333
|
2c3d8fb0d1fea47d6d386165cb5190df9261201d
| 156
|
py
|
Python
|
whiteboxgui/__init__.py
|
dongyi1996/whiteboxgui
|
7c4a7fcc4efb4e53dabe9770b330afe56e8a3060
|
[
"MIT"
] | null | null | null |
whiteboxgui/__init__.py
|
dongyi1996/whiteboxgui
|
7c4a7fcc4efb4e53dabe9770b330afe56e8a3060
|
[
"MIT"
] | null | null | null |
whiteboxgui/__init__.py
|
dongyi1996/whiteboxgui
|
7c4a7fcc4efb4e53dabe9770b330afe56e8a3060
|
[
"MIT"
] | 1
|
2021-01-23T18:07:32.000Z
|
2021-01-23T18:07:32.000Z
|
"""Top-level package for whiteboxgui."""
__author__ = """Qiusheng Wu"""
__email__ = 'giswqs@gmail.com'
__version__ = '0.1.3'
from .whiteboxgui import show
| 22.285714
| 40
| 0.711538
|
eeba0764a2e307d930f0c634fc8620a6c3ae2a8a
| 307
|
py
|
Python
|
jiraticketing/models.py
|
GMedian/archerysec
|
9591fdb6b21ca56d77364d1433acbaff84437c7f
|
[
"BSD-3-Clause"
] | 4
|
2019-03-19T16:53:49.000Z
|
2021-12-18T22:12:55.000Z
|
jiraticketing/models.py
|
GMedian/archerysec
|
9591fdb6b21ca56d77364d1433acbaff84437c7f
|
[
"BSD-3-Clause"
] | 8
|
2020-02-12T00:43:21.000Z
|
2022-03-11T23:25:08.000Z
|
jiraticketing/models.py
|
GMedian/archerysec
|
9591fdb6b21ca56d77364d1433acbaff84437c7f
|
[
"BSD-3-Clause"
] | 1
|
2018-08-12T17:29:35.000Z
|
2018-08-12T17:29:35.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
class jirasetting(models.Model):
jira_server = models.TextField(blank=True, null=True)
jira_username = models.TextField(blank=True, null=True)
jira_password = models.TextField(blank=True, null=True)
| 27.909091
| 59
| 0.749186
|
74583fc16f93b4f5e52c2c15a580f54a7c28af65
| 48
|
py
|
Python
|
woke/woke/d_compile/solc_frontend/exceptions.py
|
Ackee-Blockchain/woke
|
0d27de25720142beb9619a89619b7a94c3556af1
|
[
"ISC"
] | 7
|
2022-01-28T06:50:00.000Z
|
2022-02-14T11:34:32.000Z
|
woke/woke/d_compile/solc_frontend/exceptions.py
|
Ackee-Blockchain/woke
|
0d27de25720142beb9619a89619b7a94c3556af1
|
[
"ISC"
] | 30
|
2022-01-26T17:54:48.000Z
|
2022-03-21T12:33:53.000Z
|
woke/woke/d_compile/solc_frontend/exceptions.py
|
Ackee-Blockchain/woke
|
0d27de25720142beb9619a89619b7a94c3556af1
|
[
"ISC"
] | null | null | null |
class SolcCompilationError(Exception):
pass
| 16
| 38
| 0.791667
|
b1e4631a16ce2602984be2f4ddb4ac76d4c51952
| 8,614
|
py
|
Python
|
simple_history/tests/tests/test_manager.py
|
john-parton/django-simple-history
|
8255458909e10e31f91cb671893b29975c648141
|
[
"BSD-3-Clause"
] | 1
|
2021-03-02T11:57:12.000Z
|
2021-03-02T11:57:12.000Z
|
simple_history/tests/tests/test_manager.py
|
john-parton/django-simple-history
|
8255458909e10e31f91cb671893b29975c648141
|
[
"BSD-3-Clause"
] | 55
|
2020-12-25T06:47:15.000Z
|
2022-03-28T20:06:13.000Z
|
simple_history/tests/tests/test_manager.py
|
john-parton/django-simple-history
|
8255458909e10e31f91cb671893b29975c648141
|
[
"BSD-3-Clause"
] | null | null | null |
from datetime import datetime, timedelta
from operator import attrgetter
from django.contrib.auth import get_user_model
from django.db import IntegrityError
from django.test import TestCase, skipUnlessDBFeature
from ..models import Document, Poll
User = get_user_model()
class AsOfTest(TestCase):
model = Document
def setUp(self):
user = User.objects.create_user("tester", "tester@example.com")
self.now = datetime.now()
self.yesterday = self.now - timedelta(days=1)
self.obj = self.model.objects.create()
self.obj.changed_by = user
self.obj.save()
self.model.objects.all().delete() # allows us to leave PK on instance
(
self.delete_history,
self.change_history,
self.create_history,
) = self.model.history.all()
self.create_history.history_date = self.now - timedelta(days=2)
self.create_history.save()
self.change_history.history_date = self.now - timedelta(days=1)
self.change_history.save()
self.delete_history.history_date = self.now
self.delete_history.save()
def test_created_after(self):
"""An object created after the 'as of' date should not be
included.
"""
as_of_list = list(self.model.history.as_of(self.now - timedelta(days=5)))
self.assertFalse(as_of_list)
def test_deleted_before(self):
"""An object deleted before the 'as of' date should not be
included.
"""
as_of_list = list(self.model.history.as_of(self.now + timedelta(days=1)))
self.assertFalse(as_of_list)
def test_deleted_after(self):
"""An object created before, but deleted after the 'as of'
date should be included.
"""
as_of_list = list(self.model.history.as_of(self.now - timedelta(days=1)))
self.assertEqual(len(as_of_list), 1)
self.assertEqual(as_of_list[0].pk, self.obj.pk)
def test_modified(self):
"""An object modified before the 'as of' date should reflect
the last version.
"""
as_of_list = list(self.model.history.as_of(self.now - timedelta(days=1)))
self.assertEqual(as_of_list[0].changed_by, self.obj.changed_by)
class AsOfAdditionalTestCase(TestCase):
def test_create_and_delete(self):
now = datetime.now()
document = Document.objects.create()
document.delete()
for doc_change in Document.history.all():
doc_change.history_date = now
doc_change.save()
docs_as_of_tmw = Document.history.as_of(now + timedelta(days=1))
self.assertFalse(list(docs_as_of_tmw))
def test_multiple(self):
document1 = Document.objects.create()
document2 = Document.objects.create()
historical = Document.history.as_of(datetime.now() + timedelta(days=1))
self.assertEqual(list(historical), [document1, document2])
class BulkHistoryCreateTestCase(TestCase):
def setUp(self):
self.data = [
Poll(id=1, question="Question 1", pub_date=datetime.now()),
Poll(id=2, question="Question 2", pub_date=datetime.now()),
Poll(id=3, question="Question 3", pub_date=datetime.now()),
Poll(id=4, question="Question 4", pub_date=datetime.now()),
]
def test_simple_bulk_history_create(self):
created = Poll.history.bulk_history_create(self.data)
self.assertEqual(len(created), 4)
self.assertQuerysetEqual(
Poll.history.order_by("question"),
["Question 1", "Question 2", "Question 3", "Question 4"],
attrgetter("question"),
)
self.assertTrue(
all([history.history_type == "+" for history in Poll.history.all()])
)
created = Poll.history.bulk_create([])
self.assertEqual(created, [])
self.assertEqual(Poll.history.count(), 4)
def test_bulk_history_create_with_change_reason(self):
for poll in self.data:
poll._change_reason = "reason"
Poll.history.bulk_history_create(self.data)
self.assertTrue(
all(
[
history.history_change_reason == "reason"
for history in Poll.history.all()
]
)
)
def test_bulk_history_create_with_default_user(self):
user = User.objects.create_user("tester", "tester@example.com")
Poll.history.bulk_history_create(self.data, default_user=user)
self.assertTrue(
all([history.history_user == user for history in Poll.history.all()])
)
def test_bulk_history_create_with_default_change_reason(self):
Poll.history.bulk_history_create(self.data, default_change_reason="test")
self.assertTrue(
all(
[
history.history_change_reason == "test"
for history in Poll.history.all()
]
)
)
def test_bulk_history_create_history_user_overrides_default(self):
user1 = User.objects.create_user("tester1", "tester1@example.com")
user2 = User.objects.create_user("tester2", "tester2@example.com")
for data in self.data:
data._history_user = user1
Poll.history.bulk_history_create(self.data, default_user=user2)
self.assertTrue(
all([history.history_user == user1 for history in Poll.history.all()])
)
def test_bulk_history_create_change_reason_overrides_default(self):
for data in self.data:
data._change_reason = "my_reason"
Poll.history.bulk_history_create(self.data, default_change_reason="test")
self.assertTrue(
all(
[
history.history_change_reason == "my_reason"
for history in Poll.history.all()
]
)
)
def test_bulk_history_create_on_objs_without_ids(self):
self.data = [
Poll(question="Question 1", pub_date=datetime.now()),
Poll(question="Question 2", pub_date=datetime.now()),
Poll(question="Question 3", pub_date=datetime.now()),
Poll(question="Question 4", pub_date=datetime.now()),
]
with self.assertRaises(IntegrityError):
Poll.history.bulk_history_create(self.data)
def test_set_custom_history_date_on_first_obj(self):
self.data[0]._history_date = datetime(2000, 1, 1)
Poll.history.bulk_history_create(self.data)
self.assertEqual(
Poll.history.order_by("question")[0].history_date, datetime(2000, 1, 1)
)
def test_set_custom_history_user_on_first_obj(self):
user = User.objects.create_user("tester", "tester@example.com")
self.data[0]._history_user = user
Poll.history.bulk_history_create(self.data)
self.assertEqual(Poll.history.order_by("question")[0].history_user, user)
@skipUnlessDBFeature("has_bulk_insert")
def test_efficiency(self):
with self.assertNumQueries(1):
Poll.history.bulk_history_create(self.data)
class BulkHistoryUpdateTestCase(TestCase):
def setUp(self):
self.data = [
Poll(id=1, question="Question 1", pub_date=datetime.now()),
Poll(id=2, question="Question 2", pub_date=datetime.now()),
Poll(id=3, question="Question 3", pub_date=datetime.now()),
Poll(id=4, question="Question 4", pub_date=datetime.now()),
]
def test_simple_bulk_history_create(self):
created = Poll.history.bulk_history_create(self.data, update=True)
self.assertEqual(len(created), 4)
self.assertQuerysetEqual(
Poll.history.order_by("question"),
["Question 1", "Question 2", "Question 3", "Question 4"],
attrgetter("question"),
)
self.assertTrue(
all([history.history_type == "~" for history in Poll.history.all()])
)
created = Poll.history.bulk_create([])
self.assertEqual(created, [])
self.assertEqual(Poll.history.count(), 4)
def test_bulk_history_create_with_change_reason(self):
for poll in self.data:
poll._change_reason = "reason"
Poll.history.bulk_history_create(self.data)
self.assertTrue(
all(
[
history.history_change_reason == "reason"
for history in Poll.history.all()
]
)
)
| 34.318725
| 83
| 0.620385
|
716aa2c1ed37224ff178a527d2b02f3fd7ec35b0
| 9,014
|
py
|
Python
|
traversals/binary_tree_traversals.py
|
writtik/Python
|
cead6d77e1f084557dcf7ef659c23b54bdb286b9
|
[
"MIT"
] | null | null | null |
traversals/binary_tree_traversals.py
|
writtik/Python
|
cead6d77e1f084557dcf7ef659c23b54bdb286b9
|
[
"MIT"
] | null | null | null |
traversals/binary_tree_traversals.py
|
writtik/Python
|
cead6d77e1f084557dcf7ef659c23b54bdb286b9
|
[
"MIT"
] | null | null | null |
# flake8: noqa
"""
This is a pure Python implementation of tree traversal algorithms
"""
from __future__ import annotations
import queue
from typing import List
class TreeNode:
def __init__(self, data):
self.data = data
self.right = None
self.left = None
def build_tree():
print("\n********Press N to stop entering at any point of time********\n")
check = input("Enter the value of the root node: ").strip().lower() or "n"
if check == "n":
return None
q: queue.Queue = queue.Queue()
tree_node = TreeNode(int(check))
q.put(tree_node)
while not q.empty():
node_found = q.get()
msg = "Enter the left node of %s: " % node_found.data
check = input(msg).strip().lower() or "n"
if check == "n":
return tree_node
left_node = TreeNode(int(check))
node_found.left = left_node
q.put(left_node)
msg = "Enter the right node of %s: " % node_found.data
check = input(msg).strip().lower() or "n"
if check == "n":
return tree_node
right_node = TreeNode(int(check))
node_found.right = right_node
q.put(right_node)
def pre_order(node: TreeNode) -> None:
"""
>>> root = TreeNode(1)
>>> tree_node2 = TreeNode(2)
>>> tree_node3 = TreeNode(3)
>>> tree_node4 = TreeNode(4)
>>> tree_node5 = TreeNode(5)
>>> tree_node6 = TreeNode(6)
>>> tree_node7 = TreeNode(7)
>>> root.left, root.right = tree_node2, tree_node3
>>> tree_node2.left, tree_node2.right = tree_node4 , tree_node5
>>> tree_node3.left, tree_node3.right = tree_node6 , tree_node7
>>> pre_order(root)
1,2,4,5,3,6,7,
"""
if not isinstance(node, TreeNode) or not node:
return
print(node.data, end=",")
pre_order(node.left)
pre_order(node.right)
def in_order(node: TreeNode) -> None:
"""
>>> root = TreeNode(1)
>>> tree_node2 = TreeNode(2)
>>> tree_node3 = TreeNode(3)
>>> tree_node4 = TreeNode(4)
>>> tree_node5 = TreeNode(5)
>>> tree_node6 = TreeNode(6)
>>> tree_node7 = TreeNode(7)
>>> root.left, root.right = tree_node2, tree_node3
>>> tree_node2.left, tree_node2.right = tree_node4 , tree_node5
>>> tree_node3.left, tree_node3.right = tree_node6 , tree_node7
>>> in_order(root)
4,2,5,1,6,3,7,
"""
if not isinstance(node, TreeNode) or not node:
return
in_order(node.left)
print(node.data, end=",")
in_order(node.right)
def post_order(node: TreeNode) -> None:
"""
>>> root = TreeNode(1)
>>> tree_node2 = TreeNode(2)
>>> tree_node3 = TreeNode(3)
>>> tree_node4 = TreeNode(4)
>>> tree_node5 = TreeNode(5)
>>> tree_node6 = TreeNode(6)
>>> tree_node7 = TreeNode(7)
>>> root.left, root.right = tree_node2, tree_node3
>>> tree_node2.left, tree_node2.right = tree_node4 , tree_node5
>>> tree_node3.left, tree_node3.right = tree_node6 , tree_node7
>>> post_order(root)
4,5,2,6,7,3,1,
"""
if not isinstance(node, TreeNode) or not node:
return
post_order(node.left)
post_order(node.right)
print(node.data, end=",")
def level_order(node: TreeNode) -> None:
"""
>>> root = TreeNode(1)
>>> tree_node2 = TreeNode(2)
>>> tree_node3 = TreeNode(3)
>>> tree_node4 = TreeNode(4)
>>> tree_node5 = TreeNode(5)
>>> tree_node6 = TreeNode(6)
>>> tree_node7 = TreeNode(7)
>>> root.left, root.right = tree_node2, tree_node3
>>> tree_node2.left, tree_node2.right = tree_node4 , tree_node5
>>> tree_node3.left, tree_node3.right = tree_node6 , tree_node7
>>> level_order(root)
1,2,3,4,5,6,7,
"""
if not isinstance(node, TreeNode) or not node:
return
q: queue.Queue = queue.Queue()
q.put(node)
while not q.empty():
node_dequeued = q.get()
print(node_dequeued.data, end=",")
if node_dequeued.left:
q.put(node_dequeued.left)
if node_dequeued.right:
q.put(node_dequeued.right)
def level_order_actual(node: TreeNode) -> None:
"""
>>> root = TreeNode(1)
>>> tree_node2 = TreeNode(2)
>>> tree_node3 = TreeNode(3)
>>> tree_node4 = TreeNode(4)
>>> tree_node5 = TreeNode(5)
>>> tree_node6 = TreeNode(6)
>>> tree_node7 = TreeNode(7)
>>> root.left, root.right = tree_node2, tree_node3
>>> tree_node2.left, tree_node2.right = tree_node4 , tree_node5
>>> tree_node3.left, tree_node3.right = tree_node6 , tree_node7
>>> level_order_actual(root)
1,
2,3,
4,5,6,7,
"""
if not isinstance(node, TreeNode) or not node:
return
q: queue.Queue = queue.Queue()
q.put(node)
while not q.empty():
        next_level = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                next_level.append(node_dequeued.left)
            if node_dequeued.right:
                next_level.append(node_dequeued.right)
        print()
        for node in next_level:
            q.put(node)
# iteration version
def pre_order_iter(node: TreeNode) -> None:
"""
>>> root = TreeNode(1)
>>> tree_node2 = TreeNode(2)
>>> tree_node3 = TreeNode(3)
>>> tree_node4 = TreeNode(4)
>>> tree_node5 = TreeNode(5)
>>> tree_node6 = TreeNode(6)
>>> tree_node7 = TreeNode(7)
>>> root.left, root.right = tree_node2, tree_node3
>>> tree_node2.left, tree_node2.right = tree_node4 , tree_node5
>>> tree_node3.left, tree_node3.right = tree_node6 , tree_node7
>>> pre_order_iter(root)
1,2,4,5,3,6,7,
"""
if not isinstance(node, TreeNode) or not node:
return
stack: List[TreeNode] = []
n = node
while n or stack:
while n: # start from root node, find its left child
print(n.data, end=",")
stack.append(n)
n = n.left
# end of while means current node doesn't have left child
n = stack.pop()
# start to traverse its right child
n = n.right
def in_order_iter(node: TreeNode) -> None:
"""
>>> root = TreeNode(1)
>>> tree_node2 = TreeNode(2)
>>> tree_node3 = TreeNode(3)
>>> tree_node4 = TreeNode(4)
>>> tree_node5 = TreeNode(5)
>>> tree_node6 = TreeNode(6)
>>> tree_node7 = TreeNode(7)
>>> root.left, root.right = tree_node2, tree_node3
>>> tree_node2.left, tree_node2.right = tree_node4 , tree_node5
>>> tree_node3.left, tree_node3.right = tree_node6 , tree_node7
>>> in_order_iter(root)
4,2,5,1,6,3,7,
"""
if not isinstance(node, TreeNode) or not node:
return
stack: List[TreeNode] = []
n = node
while n or stack:
while n:
stack.append(n)
n = n.left
n = stack.pop()
print(n.data, end=",")
n = n.right
def post_order_iter(node: TreeNode) -> None:
"""
>>> root = TreeNode(1)
>>> tree_node2 = TreeNode(2)
>>> tree_node3 = TreeNode(3)
>>> tree_node4 = TreeNode(4)
>>> tree_node5 = TreeNode(5)
>>> tree_node6 = TreeNode(6)
>>> tree_node7 = TreeNode(7)
>>> root.left, root.right = tree_node2, tree_node3
>>> tree_node2.left, tree_node2.right = tree_node4 , tree_node5
>>> tree_node3.left, tree_node3.right = tree_node6 , tree_node7
>>> post_order_iter(root)
4,5,2,6,7,3,1,
"""
if not isinstance(node, TreeNode) or not node:
return
stack1, stack2 = [], []
n = node
stack1.append(n)
while stack1: # to find the reversed order of post order, store it in stack2
n = stack1.pop()
if n.left:
stack1.append(n.left)
if n.right:
stack1.append(n.right)
stack2.append(n)
while stack2: # pop up from stack2 will be the post order
print(stack2.pop().data, end=",")
def prompt(s: str = "", width=50, char="*") -> str:
if not s:
return "\n" + width * char
left, extra = divmod(width - len(s) - 2, 2)
return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
node = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 50 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
| 29.077419
| 81
| 0.589749
|
24dea8c23635079d76503504a11ec99b74c28151
| 1,839
|
py
|
Python
|
fuzzytools/matplotlib/lims.py
|
oscarpimentel/fuzzy-tools
|
edbde6a1e56c1c564cca609e4d0cc9cda906b992
|
[
"MIT"
] | null | null | null |
fuzzytools/matplotlib/lims.py
|
oscarpimentel/fuzzy-tools
|
edbde6a1e56c1c564cca609e4d0cc9cda906b992
|
[
"MIT"
] | null | null | null |
fuzzytools/matplotlib/lims.py
|
oscarpimentel/fuzzy-tools
|
edbde6a1e56c1c564cca609e4d0cc9cda906b992
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from __future__ import division
from . import _C
import matplotlib.pyplot as plt
import numpy as np
from copy import copy, deepcopy
EXTENDED_PERCENT = 0.1
###################################################################################################################################################
def get_xlim(_x_values, axis_extended_percent):
x_values = np.array(_x_values)
assert len(x_values.shape)==1
assert axis_extended_percent<=1
x_min, x_max = x_values.min(), x_values.max()
dx = x_max-x_min
x_margin = axis_extended_percent*dx
xlim = (x_min-x_margin, x_max+x_margin)
return xlim
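# Worked example (illustrative): x_values spanning [0, 10] with axis_extended_percent=0.1
# give dx = 10, a margin of 1.0, and therefore xlim = (-1.0, 11.0).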
###################################################################################################################################################
class AxisLims(object):
def __init__(self, axis_clip_values,
axis_extended_percent=EXTENDED_PERCENT,
):
self.axis_clip_values = axis_clip_values
self.axis_extended_percent = {k:axis_extended_percent for k in axis_clip_values.keys()} if not isinstance(axis_extended_percent, dict) else axis_extended_percent
self.reset()
def reset(self):
self.axis_d = {k:[] for k in self.axis_clip_values.keys()}
def append(self, axis_name, axis_values):
self.axis_d[axis_name] += [x for x in axis_values]
def get_axis_lim(self, axis_name):
axis_extended_percent = self.axis_extended_percent[axis_name]
axis_clip_values = self.axis_clip_values[axis_name]
axis_lim = get_xlim(self.axis_d[axis_name], axis_extended_percent)
axis_lim = np.clip(axis_lim, axis_clip_values[0], axis_clip_values[1]) if not (axis_clip_values[0] is None and axis_clip_values[1] is None) else axis_lim
return axis_lim
def set_ax_axis_lims(self, ax):
for k in self.axis_d.keys():
getattr(ax, f'set_{k}lim')(self.get_axis_lim(k))
return ax
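# Usage sketch (illustrative; 'x'/'y' keys map onto ax.set_xlim/ax.set_ylim):
#   axis_lims = AxisLims({'x': (None, None), 'y': (0, None)})
#   axis_lims.append('x', x_values)
#   axis_lims.append('y', y_values)
#   ax = axis_lims.set_ax_axis_lims(ax)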
| 35.365385
| 163
| 0.650353
|
e2c038b51b502520cbffea59fbb6286fd618ea75
| 1,169
|
py
|
Python
|
pyplanes/fem/meshutils.py
|
pyplanes/pyplanes
|
0b69ac4cfff0d278497fe2ad5ae096721c983f6f
|
[
"MIT"
] | null | null | null |
pyplanes/fem/meshutils.py
|
pyplanes/pyplanes
|
0b69ac4cfff0d278497fe2ad5ae096721c983f6f
|
[
"MIT"
] | 1
|
2021-06-01T23:05:58.000Z
|
2021-06-01T23:05:58.000Z
|
pyplanes/fem/meshutils.py
|
pyplanes/pyplanes
|
0b69ac4cfff0d278497fe2ad5ae096721c983f6f
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
# -*- coding:utf8 -*-
#
# meshutils.py
#
# This file is part of pyplanes, a software distributed under the MIT license.
# For any question, please contact mathieu@matael.org.
#
# Copyright (c) 2018 The pyplanes authors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
from pyplanes.mesh import MeshPart
class FEMMesh(MeshPart):
"""Holds a mesh and utility methods for querying FEM-aware features
Parameters
----------
base_mesh: pyplanes.mesh.Mesh
mesh instance upon which the FEMMesh is based
"""
def __init__(self, base_mesh):
super().__init__(base_mesh=base_mesh)
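FEMMesh above is a thin wrapper, so usage reduces to handing it an existing mesh; a hypothetical sketch under that assumption (how the base Mesh is constructed is not shown in this file):

# Hypothetical sketch: the docstring above names pyplanes.mesh.Mesh as the
# expected base type; how such a mesh is built is outside this file.
def wrap_in_fem_mesh(base_mesh):
    """Wrap an already-constructed mesh in the FEM-aware view defined above."""
    return FEMMesh(base_mesh)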
| 32.472222 | 80 | 0.735672 |
493c096969654a485be8247efcec814ee8d383c5 | 4,029 | py | Python | rpi_src/src/nmea_navsat_driver/src/libnmea_navsat_driver/nodes/nmea_socket_driver.py | HyunCello/Tutorial | 1254b2ce66a5fe7450e254ac636064877b2416f7 | ["Apache-2.0"] | 3 | 2020-07-31T07:10:46.000Z | 2021-11-14T16:21:17.000Z | rpi_src/src/nmea_navsat_driver/src/libnmea_navsat_driver/nodes/nmea_socket_driver.py | HyunCello/Tutorial | 1254b2ce66a5fe7450e254ac636064877b2416f7 | ["Apache-2.0"] | 6 | 2020-08-19T21:21:58.000Z | 2020-10-05T13:33:19.000Z | rpi_src/src/nmea_navsat_driver/src/libnmea_navsat_driver/nodes/nmea_socket_driver.py | HyunCello/Tutorial | 1254b2ce66a5fe7450e254ac636064877b2416f7 | ["Apache-2.0"] | 1 | 2021-04-17T14:10:43.000Z | 2021-04-17T14:10:43.000Z |
# Software License Agreement (BSD License)
#
# Copyright (c) 2016, Rein Appeldoorn
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the names of the authors nor the names of their
# affiliated organizations may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Defines the main method for the nmea_socket_driver executable."""
import select
import sys
import traceback
try:
import socketserver
except ImportError:
import SocketServer as socketserver # Python 2.7
import rospy
from libnmea_navsat_driver.driver import RosNMEADriver
class NMEAMessageHandler(socketserver.DatagramRequestHandler):
def handle(self):
for line in self.rfile:
line = line.strip()
if not line:
continue
try:
self.server.driver.add_sentence(line, self.server.frame_id)
except ValueError:
rospy.logwarn(
"ValueError, likely due to missing fields in the NMEA "
"message. Please report this issue at "
"https://github.com/ros-drivers/nmea_navsat_driver"
", including the following:\n\n"
"```\n" +
repr(line) + "\n\n" +
traceback.format_exc() +
"```")
def main():
"""Create and run the nmea_socket_driver ROS node.
Creates a ROS NMEA Driver and feeds it NMEA sentence strings from a UDP socket.
ROS parameters:
        ~ip (str): IPv4 address of the socket to open.
        ~port (int): Local port of the socket to open.
        ~timeout_sec (float): The timeout period for the socket, in seconds.
"""
rospy.init_node('nmea_socket_driver')
try:
local_ip = rospy.get_param('~ip', '0.0.0.0')
local_port = rospy.get_param('~port', 10110)
timeout = rospy.get_param('~timeout_sec', 2)
except KeyError as e:
rospy.logerr("Parameter %s not found" % e)
sys.exit(1)
# Create a socket
server = socketserver.UDPServer((local_ip, local_port), NMEAMessageHandler,
bind_and_activate=False)
server.frame_id = RosNMEADriver.get_frame_id()
server.driver = RosNMEADriver()
    # Bind the socket and start receiving datagrams
server.server_bind()
server.server_activate()
    # Handle incoming datagrams until ROS shuts down
try:
while not rospy.is_shutdown():
rlist, _, _ = select.select([server], [], [], timeout)
if server in rlist:
server.handle_request()
except Exception:
rospy.logerr(traceback.format_exc())
finally:
server.server_close()
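The node can be exercised without GNSS hardware by pushing a sentence at its UDP socket from another process. A minimal sender sketch using only the standard library is shown below; the GGA sentence is a canned example and the address matches the ~ip/~port defaults read in main() above.

# Hypothetical test sender (run from a separate process); the sentence and
# address are placeholders matching the ~ip/~port defaults read by main().
import socket

def send_test_sentence(host='127.0.0.1', port=10110):
    """Send one canned GGA sentence to the driver's UDP socket."""
    sentence = b"$GPGGA,123519,4807.038,N,01131.000,E,1,08,0.9,545.4,M,46.9,M,,*47\r\n"
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        sock.sendto(sentence, (host, port))
    finally:
        sock.close()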
| 36.297297 | 83 | 0.671134 |