repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
aldookware/auto-send-tweets | kinesis_read.py | <filename>kinesis_read.py
import boto3
import json
import uuid
import time
stream_name = 'twitter-stream'
kinesis_client = boto3.client('kinesis', region_name='eu-west-2')
# iterator
# shard id
# describe the stream
stream_description = kinesis_client.describe_stream(StreamName=stream_name)
print(f'\n{stream_description}')
# get the shard ID
shard_id = stream_description['StreamDescription']['Shards'][0]['ShardId']
print(f' \nShard ID: {shard_id}')
# get iterator
shard_iterator = kinesis_client.get_shard_iterator(
StreamName=stream_name,
ShardId=shard_id,
ShardIteratorType='TRIM_HORIZON'
)
# iterator hash
shard_iterator_key = shard_iterator['ShardIterator']
# data required to work this
record_response = kinesis_client.get_records(ShardIterator=shard_iterator_key, Limit=2)
while 'NextShardIterator' in record_response:
record_response = kinesis_client.get_records(ShardIterator=record_response['NextShardIterator'])
print(f'\n \n {record_response}')
time.sleep(5) |
aldookware/auto-send-tweets | encrypt.py | import boto3
import base64
# Use this function in the python shell to encrypt the
# values you will store in the config file
def encrypt(b_plaintext, key_id):
    """Encrypt *b_plaintext* under the given KMS key.

    key_id may be a key ID, ARN, or an alias such as 'alias/MyAliasName'.
    Returns the base64-encoded ciphertext (bytes), suitable for storing
    in the config file.
    """
    kms_response = boto3.client('kms').encrypt(KeyId=key_id, Plaintext=b_plaintext)
    return base64.b64encode(kms_response['CiphertextBlob'])
|
marpie/PythonForWindows | windows/winobject/network.py | import windows
import ctypes
import socket
import struct
from windows import winproxy
import windows.generated_def as gdef
from windows.com import interfaces as cominterfaces
from windows.generated_def.winstructs import *
from windows.generated_def.windef import *
class TCP4Connection(MIB_TCPROW_OWNER_PID):
    """A TCP4 socket (connected or listening)"""

    @property
    def established(self):
        """``True`` if connection is established else it's a listening socket"""
        return self.dwState == MIB_TCP_STATE_ESTAB

    @property
    def remote_port(self):
        """:type: :class:`int`"""
        # A listening socket has no meaningful remote endpoint.
        return socket.ntohs(self.dwRemotePort) if self.established else None

    @property
    def local_port(self):
        """:type: :class:`int`"""
        return socket.ntohs(self.dwLocalPort)

    @property
    def local_addr(self):
        """Local address IP (x.x.x.x)

        :type: :class:`str`"""
        packed_addr = struct.pack("<I", self.dwLocalAddr)
        return socket.inet_ntoa(packed_addr)

    @property
    def remote_addr(self):
        """remote address IP (x.x.x.x)

        :type: :class:`str`"""
        if not self.established:
            return None
        packed_addr = struct.pack("<I", self.dwRemoteAddr)
        return socket.inet_ntoa(packed_addr)

    @property
    def remote_proto(self):
        """Identification of the protocol associated with the remote port.
        Equals ``remote_port`` if no protocol is associated with it.

        :type: :class:`str` or :class:`int`
        """
        try:
            return socket.getservbyport(self.remote_port, 'tcp')
        except socket.error:
            return self.remote_port

    @property
    def remote_host(self):
        """Identification of the remote hostname.
        Equals ``remote_addr`` if the resolution fails.

        :type: :class:`str` or :class:`int`
        """
        try:
            return socket.gethostbyaddr(self.remote_addr)
        except socket.error:
            return self.remote_addr

    def close(self):
        """Close the connection <require elevated process>"""
        # SetTcpEntry closes a connection by writing a row whose state is
        # MIB_TCP_STATE_DELETE_TCB for the same 4-tuple.
        row = MIB_TCPROW()
        row.dwState = MIB_TCP_STATE_DELETE_TCB
        row.dwLocalAddr = self.dwLocalAddr
        row.dwLocalPort = self.dwLocalPort
        row.dwRemoteAddr = self.dwRemoteAddr
        row.dwRemotePort = self.dwRemotePort
        return winproxy.SetTcpEntry(ctypes.byref(row))

    def __repr__(self):
        if not self.established:
            return "<TCP IPV4 Listening socket on {0}:{1}>".format(self.local_addr, self.local_port)
        return "<TCP IPV4 Connection {s.local_addr}:{s.local_port} -> {s.remote_addr}:{s.remote_port}>".format(s=self)
class TCP6Connection(MIB_TCP6ROW_OWNER_PID):
    """A TCP6 socket (connected or listening)"""

    @staticmethod
    def _str_ipv6_addr(addr):
        # NOTE(review): str.encode('hex') is Python 2 only (consistent with
        # this codebase's use of `basestring` elsewhere); a py3 port would
        # need binascii.hexlify per byte.
        return ":".join(c.encode('hex') for c in addr)

    @property
    def established(self):
        """``True`` if connection is established else it's a listening socket"""
        return self.dwState == MIB_TCP_STATE_ESTAB

    @property
    def remote_port(self):
        """:type: :class:`int`"""
        # A listening socket has no meaningful remote endpoint.
        return socket.ntohs(self.dwRemotePort) if self.established else None

    @property
    def local_port(self):
        """:type: :class:`int`"""
        return socket.ntohs(self.dwLocalPort)

    @property
    def local_addr(self):
        """Local address IP

        :type: :class:`str`"""
        return self._str_ipv6_addr(self.ucLocalAddr)

    @property
    def remote_addr(self):
        """remote address IP

        :type: :class:`str`"""
        return self._str_ipv6_addr(self.ucRemoteAddr) if self.established else None

    @property
    def remote_proto(self):
        """Equals to ``self.remote_port`` for Ipv6"""
        return self.remote_port

    @property
    def remote_host(self):
        """Equals to ``self.remote_addr`` for Ipv6"""
        return self.remote_addr

    def close(self):
        raise NotImplementedError("Closing IPV6 connection non implemented")

    def __repr__(self):
        if not self.established:
            return "<TCP IPV6 Listening socket on {0}:{1}>".format(self.local_addr, self.local_port)
        return "<TCP IPV6 Connection {0}:{1} -> {2}:{3}>".format(self.local_addr, self.local_port, self.remote_addr, self.remote_port)
def get_MIB_TCPTABLE_OWNER_PID_from_buffer(buffer):
    """Map *buffer* as a MIB_TCPTABLE_OWNER_PID whose variable-length
    ``table`` array is sized from the ``dwNumEntries`` field found in the
    buffer itself."""
    header = windows.generated_def.winstructs.MIB_TCPTABLE_OWNER_PID.from_buffer(buffer)
    entry_count = header.dwNumEntries

    # Re-declare the struct with the concrete array length so every row is
    # exposed as a TCP4Connection.
    class _GENERATED_MIB_TCPTABLE_OWNER_PID(ctypes.Structure):
        _fields_ = [
            ("dwNumEntries", DWORD),
            ("table", TCP4Connection * entry_count),
        ]

    return _GENERATED_MIB_TCPTABLE_OWNER_PID.from_buffer(buffer)
def get_MIB_TCP6TABLE_OWNER_PID_from_buffer(buffer):
    """Map *buffer* as a MIB_TCP6TABLE_OWNER_PID whose variable-length
    ``table`` array is sized from the ``dwNumEntries`` field found in the
    buffer itself."""
    header = windows.generated_def.winstructs.MIB_TCP6TABLE_OWNER_PID.from_buffer(buffer)
    entry_count = header.dwNumEntries

    # Re-declare the struct with the concrete array length so every row is
    # exposed as a TCP6Connection.
    class _GENERATED_MIB_TCP6TABLE_OWNER_PID(Structure):
        _fields_ = [
            ("dwNumEntries", DWORD),
            ("table", TCP6Connection * entry_count),
        ]

    return _GENERATED_MIB_TCP6TABLE_OWNER_PID.from_buffer(buffer)
class Firewall(cominterfaces.INetFwPolicy2):
    """The windows firewall"""

    @property
    def rules(self):
        """The rules of the firewall

        :type: [:class:`FirewallRule`] -- A list of rule
        """
        ifw_rules = cominterfaces.INetFwRules()
        self.get_Rules(ifw_rules)

        rule_count = gdef.LONG()
        ifw_rules.get_Count(rule_count)

        # Grab the IEnumVARIANT enumerator hidden behind the _NewEnum property.
        unknown = cominterfaces.IUnknown()
        ifw_rules.get__NewEnum(unknown)
        enumerator = cominterfaces.IEnumVARIANT()
        unknown.QueryInterface(enumerator.IID, enumerator)

        fetched = gdef.ULONG()
        variant = windows.com.ImprovedVariant()
        result = []
        for _ in range(rule_count.value):
            enumerator.Next(1, variant, fetched)
            if not fetched.value:
                break  # Enumerator exhausted early
            rule = FirewallRule()
            variant.asdispatch.QueryInterface(rule.IID, rule)
            result.append(rule)
        return result

    @property
    def current_profile_types(self):
        """Mask of the profiles currently enabled

        :type: :class:`long`
        """
        profile_mask = gdef.LONG()
        self.get_CurrentProfileTypes(profile_mask)
        return profile_mask.value

    @property
    def enabled(self):
        """A mapping of the active firewall profiles

        {
        ``NET_FW_PROFILE_TYPE2_.NET_FW_PROFILE2_DOMAIN(0x1L)``: ``True`` or ``False``,
        ``NET_FW_PROFILE_TYPE2_.NET_FW_PROFILE2_PRIVATE(0x2L)``: ``True`` or ``False``,
        ``NET_FW_PROFILE_TYPE2_.NET_FW_PROFILE2_PUBLIC(0x4L)``: ``True`` or ``False``,
        }

        :type: :class:`dict`
        """
        all_profiles = [gdef.NET_FW_PROFILE2_DOMAIN, gdef.NET_FW_PROFILE2_PRIVATE, gdef.NET_FW_PROFILE2_PUBLIC]
        return {profile: self.enabled_for_profile_type(profile) for profile in all_profiles}

    def enabled_for_profile_type(self, profile_type):
        """Return whether the firewall is enabled for *profile_type*."""
        state = gdef.VARIANT_BOOL()
        self.get_FirewallEnabled(profile_type, state)
        return state.value
class FirewallRule(cominterfaces.INetFwRule):
    """A rule of the firewall"""

    def _get_bstr_property(self, com_getter):
        """Call a COM getter taking an out-param BSTR and return its Python value."""
        value = gdef.BSTR()
        com_getter(value)
        return value.value

    @property
    def name(self):
        """Name of the rule

        :type: :class:`unicode`
        """
        return self._get_bstr_property(self.get_Name)

    @property
    def description(self):
        """Description of the rule

        :type: :class:`unicode`
        """
        return self._get_bstr_property(self.get_Description)

    @property
    def application_name(self):
        """Name of the application to which apply the rule

        :type: :class:`unicode`
        """
        return self._get_bstr_property(self.get_ApplicationName)

    @property
    def service_name(self):
        """Name of the service to which apply the rule

        :type: :class:`unicode`
        """
        return self._get_bstr_property(self.get_ServiceName)

    @property
    def protocol(self):
        """Protocol to which apply the rule

        :type: :class:`long`
        """
        protocol = gdef.LONG()
        self.get_Protocol(protocol)
        return protocol.value

    @property
    def local_address(self):
        """Local address of the rule

        :type: :class:`unicode`
        """
        return self._get_bstr_property(self.get_LocalAddresses)

    @property
    def remote_address(self):
        """Remote address of the rule

        :type: :class:`unicode`
        """
        return self._get_bstr_property(self.get_RemoteAddresses)

    @property
    def direction(self):
        """Direction of the rule, values might be:

        * ``NET_FW_RULE_DIRECTION_.NET_FW_RULE_DIR_IN(0x1L)``
        * ``NET_FW_RULE_DIRECTION_.NET_FW_RULE_DIR_OUT(0x2L)``

        subclass of :class:`long`
        """
        direction = gdef.NET_FW_RULE_DIRECTION()
        self.get_Direction(direction)
        return direction.value

    @property
    def interface_types(self):
        """Types of interface of the rule

        :type: :class:`unicode`
        """
        return self._get_bstr_property(self.get_InterfaceTypes)

    @property
    def local_port(self):
        """Local port of the rule

        :type: :class:`unicode`
        """
        return self._get_bstr_property(self.get_LocalPorts)

    @property
    def remote_port(self):
        """Remote port of the rule

        :type: :class:`unicode`
        """
        return self._get_bstr_property(self.get_RemotePorts)

    @property
    def action(self):
        """Action of the rule, values might be:

        * ``NET_FW_ACTION_.NET_FW_ACTION_BLOCK(0x0L)``
        * ``NET_FW_ACTION_.NET_FW_ACTION_ALLOW(0x1L)``

        subclass of :class:`long`
        """
        action = gdef.NET_FW_ACTION()
        self.get_Action(action)
        return action.value

    @property
    def enabled(self):
        """``True`` if rule is enabled"""
        enabled = gdef.VARIANT_BOOL()
        self.get_Enabled(enabled)
        return enabled.value

    @property
    def grouping(self):
        """Grouping of the rule

        :type: :class:`unicode`
        """
        # BUG FIX: previously called self.get_RemotePorts (copy/paste error),
        # silently returning the remote-ports string. INetFwRule exposes
        # get_Grouping for this property.
        return self._get_bstr_property(self.get_Grouping)

    @property
    def icmp_type_and_code(self):
        """ICMP types and codes of the rule

        :type: :class:`unicode`
        """
        # BUG FIX: previously called self.get_RemotePorts (copy/paste error).
        # INetFwRule exposes get_IcmpTypesAndCodes for this property.
        return self._get_bstr_property(self.get_IcmpTypesAndCodes)

    def __repr__(self):
        return u'<{0} "{1}">'.format(type(self).__name__, self.name).encode("ascii", errors='backslashreplace')
class Network(object):
    # CLSID of the NetFwPolicy2 COM object.
    NetFwPolicy2 = windows.com.IID.from_string("E2B3C97F-6AE1-41AC-817A-F6F92166D7DD")

    @property
    def firewall(self):
        """The firewall of the system

        :type: :class:`Firewall`
        """
        windows.com.init()
        fw = Firewall()
        windows.com.create_instance(self.NetFwPolicy2, fw)
        return fw

    @staticmethod
    def _get_tcp_ipv4_sockets():
        # First call with a NULL buffer is expected to fail while filling
        # `needed` with the required size; the second call gets the table.
        needed = ctypes.c_uint(0)
        try:
            winproxy.GetExtendedTcpTable(None, ctypes.byref(needed), ulAf=AF_INET)
        except winproxy.IphlpapiError:
            pass  # Allow us to set size to the needed value
        raw_table = (ctypes.c_char * needed.value)()
        winproxy.GetExtendedTcpTable(raw_table, ctypes.byref(needed), ulAf=AF_INET)
        return list(get_MIB_TCPTABLE_OWNER_PID_from_buffer(raw_table).table)

    @staticmethod
    def _get_tcp_ipv6_sockets():
        # Same size-probing dance as _get_tcp_ipv4_sockets, for AF_INET6.
        needed = ctypes.c_uint(0)
        try:
            winproxy.GetExtendedTcpTable(None, ctypes.byref(needed), ulAf=AF_INET6)
        except winproxy.IphlpapiError:
            pass  # Allow us to set size to the needed value
        raw_table = (ctypes.c_char * needed.value)()
        winproxy.GetExtendedTcpTable(raw_table, ctypes.byref(needed), ulAf=AF_INET6)
        return list(get_MIB_TCP6TABLE_OWNER_PID_from_buffer(raw_table).table)

    ipv4 = property(lambda self: self._get_tcp_ipv4_sockets())
    """List of TCP IPv4 socket (connection and listening)

    :type: [:class:`TCP4Connection`]"""

    ipv6 = property(lambda self: self._get_tcp_ipv6_sockets())
    """List of TCP IPv6 socket (connection and listening)

    :type: [:class:`TCP6Connection`]
    """
|
marpie/PythonForWindows | windows/debug/debugger.py | import os.path
from collections import defaultdict, namedtuple
from contextlib import contextmanager
import windows
import windows.winobject.exception as winexception
import windows.native_exec.simple_x86 as x86
import windows.native_exec.simple_x64 as x64
from windows.winobject.process import WinProcess, WinThread
from windows.dbgprint import dbgprint
from windows import winproxy
from windows.generated_def.winstructs import *
from windows.generated_def import windef
from .breakpoints import *
#from windows.syswow64 import CS_32bits
from windows.winobject.exception import VectoredException
PAGE_SIZE = 0x1000
class DEBUG_EVENT(DEBUG_EVENT):
    """DEBUG_EVENT extended with a ``code`` property mapping the raw
    ``dwDebugEventCode`` onto the known flag constants."""
    KNOWN_EVENT_CODE = {evt: evt for evt in [EXCEPTION_DEBUG_EVENT,
        CREATE_THREAD_DEBUG_EVENT, CREATE_PROCESS_DEBUG_EVENT,
        EXIT_THREAD_DEBUG_EVENT, EXIT_PROCESS_DEBUG_EVENT, LOAD_DLL_DEBUG_EVENT,
        UNLOAD_DLL_DEBUG_EVENT, OUTPUT_DEBUG_STRING_EVENT, RIP_EVENT]}

    @property
    def code(self):
        """The known event-code constant, or the raw value if it is unknown."""
        return self.KNOWN_EVENT_CODE.get(self.dwDebugEventCode, self.dwDebugEventCode)
WatchedPage = namedtuple('WatchedPage', ["original_prot", "bps"])
class Debugger(object):
"""A debugger based on standard Win32 API. Handle :
* Standard BP (int3)
* Hardware-Exec BP (DrX)
* Memory BP (virtual_protect)"""
def __init__(self, target):
"""``target`` must be a debuggable :class:`WinProcess`."""
self._init_dispatch_handlers()
self.target = target
self.is_target_launched = False
#if not already_debuggable:
# winproxy.DebugActiveProcess(target.pid)
self.processes = {}
self.threads = {}
self.current_process = None
self.current_thread = None
# List of breakpoints
self.breakpoints = {}
self._pending_breakpoints = {} #Breakpoints to put in new process / threads
# Values rewritten by "\xcc"
self._memory_save = dict()
# Dict of {tid : {drx taken : BP}}
self._hardware_breakpoint = {}
# Breakpoints to reput..
self._breakpoint_to_reput = {}
self._module_by_process = {}
self._pending_breakpoints_new = defaultdict(list)
self._explicit_single_step = {}
self._watched_pages = {}# Dict [page_modif] -> [mem bp on the page]
# [start] -> (size, current_proctection, original_prot)
self._virtual_protected_memory = [] # List of memory-range modified by a MemBP
@classmethod
def attach(cls, target):
"""attach to ``target`` (must be a :class:`WinProcess`)
:rtype: :class:`Debugger`
.. note::
see :ref:`Debugger.attach sample <sample_debugger_attach>`"""
winproxy.DebugActiveProcess(target.pid)
return cls(target)
def detach(self, target=None):
"""Detach from all debugged processes or process ``target``"""
if target is None:
targets = self.processes.values()
if not targets:
# We are not following any process
# maybe a attach/detach with Debugger.loop
# Just detach from the initial target
if self.target:
tpid = self.target.pid
self.target = None # Remove ref to process -> GC -> CloseHandle -> process is destroyed
windows.winproxy.DebugActiveProcessStop(tpid)
return
for proc in targets:
self.detach(proc)
del targets
return
if not isinstance(target, WinProcess):
raise ValueError("Detach accept only WinProcess")
self.disable_all_memory_breakpoints(target)
for bp in self.breakpoints[target.pid].values():
if not bp.apply_to_target(target):
target_threads = [t for t in target.threads if t.tid in self.threads]
bp_threads = []
# TODO: clean API tu request HXBP on a thread
for t in target_threads:
t_bps = [pos for pos, hbp in self._hardware_breakpoint[t.tid].items() if hbp == bp]
if t_bps:
bp_threads.append(t)
self.del_bp(bp, bp_threads)
else:
self.del_bp(bp, [target])
for thread in [t for t in target.threads if t.tid in self.threads]:
del self._explicit_single_step[thread.tid]
del self._breakpoint_to_reput[thread.tid]
del self.threads[thread.tid]
ctx = thread.context
if ctx.EEFlags.TF: # Remove TRAPFlag before detaching (or it will lead to a crash)
ctx.EEFlags.TF = 0
thread.set_context(ctx)
del self.processes[target.pid]
del self._watched_pages[target.pid]
del self._module_by_process[target.pid]
if target is self.current_process:
self._finish_debug_event(self.REMOVE_ME_debug_event, DBG_CONTINUE)
self.current_process = None
self.current_thread = None
if self.target and target.pid == self.target.pid:
self.target = None
windows.winproxy.DebugActiveProcessStop(target.pid)
def _killed_in_action(self):
"""Return ``True`` if current process have been detached by user callback"""
# Fix ? _handle_exit_process remove from processes but need a FinishDebugEvent
return self.current_process is None or self.current_process.pid not in self.processes
@classmethod
def debug(cls, path, args=None, dwCreationFlags=0, show_windows=False):
"""Create a process and debug it.
:rtype: :class:`Debugger`"""
dwCreationFlags |= DEBUG_PROCESS
c = windows.utils.create_process(path, args=args, dwCreationFlags=dwCreationFlags, show_windows=show_windows)
return cls(c)
def _init_dispatch_handlers(self):
dbg_evt_dispatch = {}
dbg_evt_dispatch[EXCEPTION_DEBUG_EVENT] = self._handle_exception
dbg_evt_dispatch[CREATE_THREAD_DEBUG_EVENT] = self._handle_create_thread
dbg_evt_dispatch[CREATE_PROCESS_DEBUG_EVENT] = self._handle_create_process
dbg_evt_dispatch[EXIT_PROCESS_DEBUG_EVENT] = self._handle_exit_process
dbg_evt_dispatch[EXIT_THREAD_DEBUG_EVENT] = self._handle_exit_thread
dbg_evt_dispatch[LOAD_DLL_DEBUG_EVENT] = self._handle_load_dll
dbg_evt_dispatch[UNLOAD_DLL_DEBUG_EVENT] = self._handle_unload_dll
dbg_evt_dispatch[RIP_EVENT] = self._handle_rip
dbg_evt_dispatch[OUTPUT_DEBUG_STRING_EVENT] = self._handle_output_debug_string
self._DebugEventCode_dispatch = dbg_evt_dispatch
def _debug_event_generator(self):
while True:
debug_event = DEBUG_EVENT()
winproxy.WaitForDebugEvent(debug_event)
yield debug_event
def _finish_debug_event(self, event, action):
if action not in [windef.DBG_CONTINUE, windef.DBG_EXCEPTION_NOT_HANDLED]:
raise ValueError('Unknow action : <0>'.format(action))
winproxy.ContinueDebugEvent(event.dwProcessId, event.dwThreadId, action)
def _add_exe_to_module_list(self, create_process_event):
"""Add the intial exe file described by create_process_event to the list of module in the process"""
exe_path = self.current_process.get_mapped_filename(create_process_event.lpBaseOfImage)
exe_name = os.path.basename(exe_path)
#print("Exe name is {0}".format(exe_name))
self._module_by_process[self.current_process.pid][exe_name] = windows.pe_parse.GetPEFile(create_process_event.lpBaseOfImage, self.current_process)
#self._setup_pending_breakpoints_load_dll(exe_name) # Already setup in _setup_pending_breakpoints_new_process
def _update_debugger_state(self, debug_event):
self.current_process = self.processes[debug_event.dwProcessId]
self.current_thread = self.threads[debug_event.dwThreadId]
def _dispatch_debug_event(self, debug_event):
#print("DISPATCH {0}".format(DEBUG_EVENT.KNOWN_EVENT_CODE.get(debug_event.dwDebugEventCode)))
handler = self._DebugEventCode_dispatch.get(debug_event.dwDebugEventCode, self._handle_unknown_debug_event)
return handler(debug_event)
def _dispatch_breakpoint(self, exception, addr):
bp = self.breakpoints[self.current_process.pid][addr]
with self.DisabledMemoryBreakpoint():
x = bp.trigger(self, exception)
return x
def _resolve(self, addr, target):
dbgprint("Resolving <{0}> in <{1}>".format(addr, target), "DBG")
if not isinstance(addr, basestring):
return addr
dll, api = addr.split("!")
dll = dll.lower()
modules = self._module_by_process[target.pid]
mod = None
if dll in modules:
mod = [modules[dll]]
if not mod:
return None
# TODO: optim exports are the same for whole system (32 vs 64 bits)
# I don't have to reparse the exports each time..
# Try to interpret api as an int
try:
api_int = int(api, 0)
return mod[0].baseaddr + api_int
except ValueError:
pass
exports = mod[0].exports
if api not in exports:
dbgprint("Error resolving <{0}> in <{1}>".format(addr, target), "DBG")
raise ValueError("Unknown API <{0}> in DLL {1}".format(api, dll))
return exports[api]
def add_pending_breakpoint(self, bp, target):
self._pending_breakpoints_new[target].append(bp)
def remove_pending_breakpoint(self, bp, target):
self._pending_breakpoints_new[target].remove(bp)
def _setup_breakpoint(self, bp, target):
_setup_method = getattr(self, "_setup_breakpoint_" + bp.type)
if target is None:
if bp.type in [STANDARD_BP, MEMORY_BREAKPOINT]: #TODO: better..
targets = self.processes.values()
else:
targets = self.threads.values()
else:
targets = [target]
for target in targets:
return _setup_method(bp, target)
def _restore_breakpoints(self):
for bp in self._breakpoint_to_reput[self.current_thread.tid]:
if bp.type == HARDWARE_EXEC_BP:
raise NotImplementedError("Why is this here ? we use RF flags to pass HXBP")
restore = getattr(self, "_restore_breakpoint_" + bp.type)
restore(bp, self.current_process)
del self._breakpoint_to_reput[self.current_thread.tid][:]
return
def _setup_breakpoint_BP(self, bp, target):
if not isinstance(target, WinProcess):
raise ValueError("Cannot setup STANDARD_BP on {0}".format(target))
addr = self._resolve(bp.addr, target)
if addr is None:
return False
bp._addr = addr
self._memory_save[target.pid][addr] = target.read_memory(addr, 1)
self.breakpoints[target.pid][addr] = bp
target.write_memory(addr, "\xcc")
return True
def _restore_breakpoint_BP(self, bp, target):
self._memory_save[target.pid][bp._addr] = target.read_memory(bp._addr, 1)
return target.write_memory(bp._addr, "\xcc")
def _remove_breakpoint_BP(self, bp, target):
if not isinstance(target, WinProcess):
raise ValueError("SETUP STANDARD_BP on {0}".format(target))
addr = self._resolve(bp.addr, target)
target.write_memory(addr, self._memory_save[target.pid][addr])
del self._memory_save[target.pid][addr]
del self.breakpoints[target.pid][addr]
return True
def _setup_breakpoint_HXBP(self, bp, target):
#print("Setup {0} into {1}".format(bp, target))
if not isinstance(target, WinThread):
raise ValueError("SETUP HXBP_BP on {0}".format(target))
# Todo: opti, not reparse exports for all thread of the same process..
addr = self._resolve(bp.addr, target.owner)
if addr is None:
return False
x = self._hardware_breakpoint[target.tid]
if all(pos in x for pos in range(4)):
raise ValueError("Cannot put {0} in {1} (DRx full)".format(bp, target))
empty_drx = str([pos for pos in range(4) if pos not in x][0])
ctx = target.context
# Windows DebugCtl aliasing in DR7
# See https://www.codeproject.com/Articles/517466/Last-branch-records-and-branch-tracing
ctx.EDr7.LE = 0 # bit 8 of DR7 represents bit 0 of DebugCtl. This is the LBR bit. (last branch record, will explain)
ctx.EDr7.GE = 0 # bit 9 of DR7 represents bit 1 of DebugCtl. This is the BTF bit. (single-step on branches)
setattr(ctx.EDr7, "L" + empty_drx, 1)
setattr(ctx, "Dr" + empty_drx, addr)
x[int(empty_drx)] = bp
target.set_context(ctx)
self.breakpoints[target.owner.pid][addr] = bp
return True
def _remove_breakpoint_HXBP(self, bp, target):
if not isinstance(target, WinThread):
raise ValueError("SETUP HXBP_BP on {0}".format(target))
addr = self._resolve(bp.addr, target.owner)
bp_pos = [pos for pos, hbp in self._hardware_breakpoint[target.tid].items() if hbp == bp]
if not bp_pos:
raise ValueError("Asked to remove {0} from {1} but not present in hbp_list".format(bp, target))
bp_pos_str = str(bp_pos[0])
ctx = target.context
setattr(ctx.EDr7, "L" + bp_pos_str, 0)
setattr(ctx, "Dr" + bp_pos_str, 0)
target.set_context(ctx)
try: # TODO: vraiment faire les HXBP par thread ? ...
del self.breakpoints[target.owner.pid][addr]
except:
pass
return True
## MemBP internal helpers
def _compute_page_access_for_event(self, target, events):
if "R" in events:
return PAGE_NOACCESS
if set("WX").issubset(events):
return PAGE_READONLY
if events == set("W"):
return PAGE_EXECUTE_READ
if events == set("X"):
# Might have problem if DEP is not enabled
if target.bitness == 64:
has_DEP = True
elif windows.winproxy.is_implemented(windows.winproxy.GetProcessDEPPolicy):
has_DEP = DWORD()
permaned = LONG()
windows.winproxy.GetProcessDEPPolicy(target.handle, has_DEP, permaned)
has_DEP = has_DEP.value
else:
has_DEP = False
return PAGE_READWRITE if has_DEP else PAGE_NOACCESS
raise ValueError("Unexpected set of event for Membp: {0}".format(events))
def _setup_breakpoint_MEMBP(self, bp, target):
addr = self._resolve(bp.addr, target)
bp._addr = addr
self._events = set(bp.events)
if addr is None:
return False
# Split in affected pages:
protection_for_bp = self._compute_page_access_for_event(target, self._events)
affected_pages = range((addr >> 12) << 12, addr + bp.size, PAGE_SIZE)
old_prot = DWORD()
cp_watch_page = self._watched_pages[self.current_process.pid]
for page_addr in affected_pages:
if page_addr not in cp_watch_page:
target.virtual_protect(page_addr, PAGE_SIZE, protection_for_bp, old_prot)
# Page with no other MemBP
cp_watch_page[page_addr] = WatchedPage(old_prot.value, [bp])
else:
# Reduce the right of the page to the common need
cp_watch_page[page_addr].bps.append(bp)
full_page_events = set.union(*[bp.events for bp in cp_watch_page[page_addr].bps])
protection_for_page = self._compute_page_access_for_event(target, full_page_events)
target.virtual_protect(page_addr, PAGE_SIZE, protection_for_page, None)
# TODO: watch for overlap with other MEM breakpoints
return True
def _restore_breakpoint_MEMBP(self, bp, target):
(page_addr, page_prot) = bp._reput_page
return target.virtual_protect(page_addr, PAGE_SIZE, page_prot, None)
def _remove_breakpoint_MEMBP(self, bp, target):
affected_pages = range((bp._addr >> 12) << 12, bp._addr + bp.size, PAGE_SIZE)
vprot_begin = affected_pages[0]
vprot_size = PAGE_SIZE * len(affected_pages)
cp_watch_page = self._watched_pages[self.current_process.pid]
for page_addr in affected_pages:
cp_watch_page[page_addr].bps.remove(bp)
if not cp_watch_page[page_addr].bps:
try:
target.virtual_protect(page_addr, PAGE_SIZE, cp_watch_page[page_addr].original_prot, None)
except WindowsError as e:
# TODO
# What should we do if the virtual protect fail on a Non-Free page ?
# It may be because the page was dealloc + map as a view..
# For now: keep the page as-is
if not target.query_memory(page_addr).State == MEM_FREE:
pass
# If page is MEM_FREE ignore the error
del cp_watch_page[page_addr]
else:
full_page_events = set.union(*[bp.events for bp in cp_watch_page[page_addr].bps])
protection_for_page = self._compute_page_access_for_event(target, full_page_events)
try:
target.virtual_protect(page_addr, PAGE_SIZE, protection_for_page, None)
except Exception as e:
# if not target.query_memory(page_addr).State == MEM_FREE:
# raise
for bp in cp_watch_page[page_addr].bps:
bp.on_error(self, page_addr)
# TODO: handle case were it is mem-free ?
return True
def _setup_pending_breakpoints_new_process(self, new_process):
for bp in self._pending_breakpoints_new[None]:
if bp.apply_to_target(new_process): #BP for thread or process ?
_setup_method = getattr(self, "_setup_breakpoint_" + bp.type)
_setup_method(bp, new_process)
for bp in list(self._pending_breakpoints_new[new_process.pid]):
if bp.apply_to_target(new_process):
_setup_method = getattr(self, "_setup_breakpoint_" + bp.type)
if _setup_method(bp, new_process):
self._pending_breakpoints_new[new_process.pid].remove(bp)
def _setup_pending_breakpoints_new_thread(self, new_thread):
for bp in self._pending_breakpoints_new[None]:
if bp.apply_to_target(new_thread): #BP for thread or process ?
_setup_method = getattr(self, "_setup_breakpoint_" + bp.type)
_setup_method(bp, new_thread)
for bp in self._pending_breakpoints_new[new_thread.owner.pid]:
if bp.apply_to_target(new_thread):
_setup_method = getattr(self, "_setup_breakpoint_" + bp.type)
_setup_method(bp, new_thread)
for bp in list(self._pending_breakpoints_new[new_thread.tid]):
_setup_method = getattr(self, "_setup_breakpoint_" + bp.type)
if _setup_method(bp, new_thread):
self._pending_breakpoints_new[new_thread.tid].remove(bp)
def _setup_pending_breakpoints_load_dll(self, dll_name):
for bp in self._pending_breakpoints_new[None]:
if isinstance(bp.addr, basestring):
target_dll = bp.addr.lower().split("!")[0]
# Cannot work AS-IS yet. Implement it ?
# if target_dll == "*" or target_dll == dll_name:
if target_dll == dll_name:
_setup_method = getattr(self, "_setup_breakpoint_" + bp.type)
if bp.apply_to_target(self.current_process):
_setup_method(bp, self.current_process)
else:
for t in [t for t in self.current_process.threads if t.tid in self.threads]:
_setup_method(bp, t)
for bp in self._pending_breakpoints_new[self.current_process.pid]:
if isinstance(bp.addr, basestring):
target_dll = bp.addr.split("!")[0]
if target_dll == dll_name:
_setup_method = getattr(self, "_setup_breakpoint_" + bp.type)
_setup_method(bp, self.current_process)
for thread in self.current_process.threads:
for bp in self._pending_breakpoints_new[thread.tid]:
if isinstance(bp.addr, basestring):
target_dll = bp.addr.split("!")[0]
if target_dll == dll_name:
_setup_method = getattr(self, "_setup_breakpoint_" + bp.type)
_setup_method(bp, self.thread)
def _pass_breakpoint(self, addr):
process = self.current_process
thread = self.current_thread
process.write_memory(addr, self._memory_save[process.pid][addr])
regs = thread.context
regs.EFlags |= (1 << 8)
#regs.pc -= 1 # Done in _handle_exception_breakpoint before dispatch
thread.set_context(regs)
bp = self.breakpoints[self.current_process.pid][addr]
self._breakpoint_to_reput[thread.tid].append(bp) #Register pending breakpoint for next single step
def _pass_memory_breakpoint(self, bp, page_protect, fault_page):
cp = self.current_process
page_prot = DWORD()
cp.virtual_protect(fault_page, PAGE_SIZE, page_protect, page_prot)
thread = self.current_thread
ctx = thread.context
ctx.EEFlags.TF = 1
thread.set_context(ctx)
bp._reput_page = (fault_page, page_prot.value)
self._breakpoint_to_reput[thread.tid].append(bp)
# debug event handlers
def _handle_unknown_debug_event(self, debug_event):
raise NotImplementedError("dwDebugEventCode = {0}".format(debug_event.dwDebugEventCode))
def _handle_exception_breakpoint(self, exception, excp_addr):
excp_bitness = self.get_exception_bitness(exception)
if excp_addr in self.breakpoints[self.current_process.pid]:
thread = self.current_thread
if self.current_process.bitness == 32 and excp_bitness == 64:
ctx = thread.context_syswow
else:
ctx = thread.context
ctx.pc -= 1
if self.current_process.bitness == 32 and excp_bitness == 64:
thread.set_syswow_context(ctx)
else:
thread.set_context(ctx)
del thread
continue_flag = self._dispatch_breakpoint(exception, excp_addr)
if self._killed_in_action():
return continue_flag
self._explicit_single_step[self.current_thread.tid] = self.current_thread.context.EEFlags.TF
if excp_addr in self.breakpoints[self.current_process.pid]:
# Setup BP if not suppressed
self._pass_breakpoint(excp_addr)
return continue_flag
with self.DisabledMemoryBreakpoint():
return self.on_exception(exception)
def _handle_exception_singlestep(self, exception, excp_addr):
if self.current_thread.tid in self._breakpoint_to_reput and self._breakpoint_to_reput[self.current_thread.tid]:
self._restore_breakpoints()
if self._explicit_single_step[self.current_thread.tid]:
with self.DisabledMemoryBreakpoint():
self.on_single_step(exception)
if not self._killed_in_action():
self._explicit_single_step[self.current_thread.tid] = self.current_thread.context.EEFlags.TF
return DBG_CONTINUE
elif excp_addr in self.breakpoints[self.current_process.pid]:
# Verif that's not a standard BP ?
bp = self.breakpoints[self.current_process.pid][excp_addr]
with self.DisabledMemoryBreakpoint():
bp.trigger(self, exception)
if self._killed_in_action():
return DBG_CONTINUE
ctx = self.current_thread.context
self._explicit_single_step[self.current_thread.tid] = ctx.EEFlags.TF
if excp_addr in self.breakpoints[self.current_process.pid]:
ctx.EEFlags.RF = 1
self.current_thread.set_context(ctx)
return DBG_CONTINUE
elif self._explicit_single_step[self.current_thread.tid]:
with self.DisabledMemoryBreakpoint():
continue_flag = self.on_single_step(exception)
if self._killed_in_action():
return continue_flag
# Does not handle case where EEFlags.TF was by the debugge before trigering the exception
# Should set the flag explicitly in single_step ? and not just use EEFlags.TF ?
self._explicit_single_step[self.current_thread.tid] = self.current_thread.context.EEFlags.TF
return continue_flag
else:
with self.DisabledMemoryBreakpoint():
continue_flag = self.on_exception(exception)
if self._killed_in_action():
return continue_flag
# Does not handle case where EEFlags.TF was by the debugge before trigering the exception
# Should set the flag explicitly in single_step ? and not just use EEFlags.TF ?
self._explicit_single_step[self.current_thread.tid] = self.current_thread.context.EEFlags.TF
return continue_flag
# === Testing PAGE_NOACCESS(0x1L) ===
# exception: access violation reading 0x00470000
# exception: access violation writing 0x00470000
# === Testing PAGE_READONLY(0x2L) ===
# exception: access violation writing 0x00470000
# === Testing PAGE_READWRITE(0x4L) ===
# === Testing PAGE_EXECUTE(0x10L) ===
# exception: access violation writing 0x00470000
# === Testing PAGE_EXECUTE_READ(0x20L) ===
# exception: access violation writing 0x00470000
# === Testing PAGE_EXECUTE_READWRITE(0x40L) ===
def _handle_exception_access_violation(self, exception, excp_addr):
    """Handle an EXCEPTION_ACCESS_VIOLATION: dispatch to the memory breakpoint
    covering the faulting address, or to ``on_exception`` for a genuine fault.

    :param exception: the EEXCEPTION_DEBUG_INFO describing the fault
    :param excp_addr: the exception address (the fault address itself comes
        from ``ExceptionInformation``)
    """
    READ = 0
    WRITE = 1
    EXEC = 2
    EVENT_STR = "RWX"
    # ExceptionInformation[0]: operation type (0 == read, 1 == write);
    # ExceptionInformation[1]: the inaccessible address.
    fault_type = exception.ExceptionRecord.ExceptionInformation[0]
    fault_addr = exception.ExceptionRecord.ExceptionInformation[1]
    pc_addr = self.current_thread.context.pc
    # Faulting on the current PC means the access was an instruction fetch.
    if fault_addr == pc_addr:
        fault_type = EXEC
    event = EVENT_STR[fault_type]
    fault_page = (fault_addr >> 12) << 12  # round down to the 4K page base
    cp_watch_page = self._watched_pages[self.current_process.pid]
    mem_bp = self.get_memory_breakpoint_at(fault_addr, self.current_process)
    if mem_bp is False:  # No BP on this page: genuine access violation
        with self.DisabledMemoryBreakpoint():
            return self.on_exception(exception)
    original_prot = cp_watch_page[fault_page].original_prot
    if mem_bp is None or event not in mem_bp.events:  # Page has MEMBP but none handles this address | event not asked by membp
        # This hack is bad, find a BP on the page to restore original access..
        bp = cp_watch_page[fault_page].bps[-1]
        self._pass_memory_breakpoint(bp, original_prot, fault_page)
        return DBG_CONTINUE
    with self.DisabledMemoryBreakpoint():
        continue_flag = mem_bp.trigger(self, exception)
    if self._killed_in_action():
        return continue_flag
    self._explicit_single_step[self.current_thread.tid] = self.current_thread.context.EEFlags.TF
    # If BP has not been removed in trigger, pass over it (re-protect after stepping)
    if fault_page in cp_watch_page and mem_bp in cp_watch_page[fault_page].bps:
        self._pass_memory_breakpoint(mem_bp, original_prot, fault_page)
    return continue_flag
# TODO: self._explicit_single_step setup by single_step() ? check at the end ? finally ?
def _handle_exception(self, debug_event):
    """Handle EXCEPTION_DEBUG_EVENT: classify the exception and dispatch to the
    specialized breakpoint / single-step / access-violation handlers, or to the
    generic ``on_exception`` callback."""
    exception = debug_event.u.Exception
    self._update_debugger_state(debug_event)
    # Re-type the raw structure into the extended 32/64-bit wrapper matching
    # the debugger's own bitness.
    if windows.current_process.bitness == 32:
        exception.__class__ = winexception.EEXCEPTION_DEBUG_INFO32
    else:
        exception.__class__ = winexception.EEXCEPTION_DEBUG_INFO64
    excp_code = exception.ExceptionRecord.ExceptionCode
    excp_addr = exception.ExceptionRecord.ExceptionAddress
    # STATUS_WX86_* are the wow64 (32-bit world) variants of the codes.
    if excp_code in [EXCEPTION_BREAKPOINT, STATUS_WX86_BREAKPOINT] and excp_addr in self.breakpoints[self.current_process.pid]:
        return self._handle_exception_breakpoint(exception, excp_addr)
    elif excp_code in [EXCEPTION_SINGLE_STEP, STATUS_WX86_SINGLE_STEP]:
        return self._handle_exception_singlestep(exception, excp_addr)
    elif excp_code == EXCEPTION_ACCESS_VIOLATION:
        return self._handle_exception_access_violation(exception, excp_addr)
    else:
        with self.DisabledMemoryBreakpoint():
            continue_flag = self.on_exception(exception)
        if self._killed_in_action():
            return continue_flag
        # Does not handle case where EEFlags.TF was set by the debuggee before
        # triggering the exception.
        self._explicit_single_step[self.current_thread.tid] = self.current_thread.context.EEFlags.TF
        return continue_flag
def _get_loaded_dll(self, load_dll):
    """Return the name of the DLL described by a ``LOAD_DLL_DEBUG_INFO``.

    Resolution order: the string pointed to by ``lpImageName`` (unicode or
    ansi depending on ``fUnicode``), else the PE export name, else the mapped
    filename. A "64" suffix is appended for a 64-bit DLL loaded in a 32-bit
    debuggee (wow64), so it does not collide with its 32-bit sibling.

    :param load_dll: the LOAD_DLL_DEBUG_INFO of the event
    :return: :class:`str` -- the dll name (possibly "64"-suffixed)
    """
    name_sufix = ""
    pe = windows.pe_parse.GetPEFile(load_dll.lpBaseOfDll, self.current_process)
    if self.current_process.bitness == 32 and pe.bitness == 64:
        name_sufix = "64"
    addr = None
    if load_dll.lpImageName:
        try:
            addr = self.current_process.read_ptr(load_dll.lpImageName)
        except Exception:
            # BUGFIX: was a bare `except:` (also swallowed KeyboardInterrupt).
            # lpImageName may point to unreadable memory; fall back to the PE.
            pass
    if not addr:
        # BUGFIX: the PE was needlessly re-parsed here; reuse the one above.
        dll_name = pe.export_name
        if not dll_name:
            dll_name = os.path.basename(self.current_process.get_mapped_filename(load_dll.lpBaseOfDll))
        return dll_name + name_sufix
    if load_dll.fUnicode:
        return self.current_process.read_wstring(addr) + name_sufix
    return self.current_process.read_string(addr) + name_sufix
def _handle_create_process(self, debug_event):
    """Handle CREATE_PROCESS_DEBUG_EVENT: duplicate the process/thread handles,
    register the new process in every bookkeeping dict, resolve pending
    breakpoints, then call ``on_create_process``."""
    create_process = debug_event.u.CreateProcessInfo
    # Duplicate handle, so garbage collection of the process/thread does not
    # break the debug API invariant (those x_event handle are close by the debug API itself)
    proc_handle = HANDLE()
    thread_handle = HANDLE()
    cp_handle = windows.current_process.handle
    winproxy.DuplicateHandle(cp_handle, create_process.hProcess, cp_handle, ctypes.byref(proc_handle), dwOptions=DUPLICATE_SAME_ACCESS)
    winproxy.DuplicateHandle(cp_handle, create_process.hThread, cp_handle, ctypes.byref(thread_handle), dwOptions=DUPLICATE_SAME_ACCESS)
    dbgprint(" Got PROC handle {0:#x}".format(create_process.hProcess, self), "HANDLE")
    dbgprint(" PROC handle duplicated: {0:#x}".format(proc_handle.value), "HANDLE")
    dbgprint(" Got THREAD handle {0:#x}".format(create_process.hThread, self), "HANDLE")
    dbgprint(" THREAD handle duplicated: {0:#x}".format(thread_handle.value), "HANDLE")
    self.current_process = WinProcess._from_handle(proc_handle.value)
    self.current_thread = WinThread._from_handle(thread_handle.value)
    dbgprint("New process: {0}".format(self.current_process), "DBG")
    # Per-thread bookkeeping for the process' initial thread.
    self.threads[self.current_thread.tid] = self.current_thread
    self._explicit_single_step[self.current_thread.tid] = False
    self._hardware_breakpoint[self.current_thread.tid] = {}
    self._breakpoint_to_reput[self.current_thread.tid] = []
    # Per-process bookkeeping.
    self.processes[self.current_process.pid] = self.current_process
    self._watched_pages[self.current_process.pid] = {} #defaultdict(list)
    self.breakpoints[self.current_process.pid] = {}
    self._memory_save[self.current_process.pid] = {}
    self._module_by_process[self.current_process.pid] = {}
    self._update_debugger_state(debug_event)
    self._add_exe_to_module_list(create_process)
    # Resolve breakpoints that were waiting for this process / thread to exist.
    self._setup_pending_breakpoints_new_process(self.current_process)
    self._setup_pending_breakpoints_new_thread(self.current_thread)
    with self.DisabledMemoryBreakpoint():
        try:
            return self.on_create_process(create_process)
        finally:
            # hFile ownership is ours: close it to avoid a handle leak.
            if create_process.hFile:
                winproxy.CloseHandle(create_process.hFile)
def _handle_exit_process(self, debug_event):
    """Handle EXIT_PROCESS_DEBUG_EVENT: call ``on_exit_process`` then drop the
    bookkeeping for the dead process and its last thread."""
    self._update_debugger_state(debug_event)
    exit_process = debug_event.u.ExitProcess
    retvalue = self.on_exit_process(exit_process)
    # Per-thread cleanup (the exiting process' last remaining thread).
    del self.threads[self.current_thread.tid]
    del self._explicit_single_step[self.current_thread.tid]
    del self._hardware_breakpoint[self.current_thread.tid]
    del self._breakpoint_to_reput[self.current_thread.tid]
    # Per-process cleanup.
    del self.processes[self.current_process.pid]
    del self._watched_pages[self.current_process.pid]
    del self._memory_save[self.current_process.pid]
    del self._module_by_process[self.current_process.pid]
    # NOTE(review): self.breakpoints[pid] (created in _handle_create_process)
    # is NOT deleted here -- confirm whether this is intentional or a leak.
    cpid = self.current_process.pid
    self.current_thread = None
    self.current_process = None
    # If the initial target just died, forget it.
    if self.target and cpid == self.target.pid:
        self.target = None
    return retvalue
def _handle_create_thread(self, debug_event):
    """Handle CREATE_THREAD_DEBUG_EVENT: duplicate the thread handle, register
    the new thread, resolve pending thread breakpoints, then call
    ``on_create_thread``."""
    create_thread = debug_event.u.CreateThread
    # Duplicate handle, so garbage collection of the thread does not
    # break the debug API invariant (those x_event handle are close by the debug API itself)
    thread_handle = HANDLE()
    cp_handle = windows.current_process.handle
    winproxy.DuplicateHandle(cp_handle, create_thread.hThread, cp_handle, ctypes.byref(thread_handle), dwOptions=DUPLICATE_SAME_ACCESS)
    new_thread = WinThread._from_handle(thread_handle.value)
    self.threads[new_thread.tid] = new_thread
    # The new thread is on the thread pool: we can now update the debugger state
    self._update_debugger_state(debug_event)
    self._explicit_single_step[self.current_thread.tid] = False
    self._breakpoint_to_reput[self.current_thread.tid] = []
    self._hardware_breakpoint[self.current_thread.tid] = {}
    self._setup_pending_breakpoints_new_thread(self.current_thread)
    with self.DisabledMemoryBreakpoint():
        return self.on_create_thread(create_thread)
def _handle_exit_thread(self, debug_event):
    """Handle EXIT_THREAD_DEBUG_EVENT: call ``on_exit_thread`` then drop the
    per-thread bookkeeping."""
    self._update_debugger_state(debug_event)
    exit_thread = debug_event.u.ExitThread
    with self.DisabledMemoryBreakpoint():
        retvalue = self.on_exit_thread(exit_thread)
    del self.threads[self.current_thread.tid]
    del self._hardware_breakpoint[self.current_thread.tid]
    del self._explicit_single_step[self.current_thread.tid]
    del self._breakpoint_to_reput[self.current_thread.tid]
    return retvalue
def _handle_load_dll(self, debug_event):
    """Handle LOAD_DLL_DEBUG_EVENT: register the module under a normalized
    name, resolve pending breakpoints targeting it, then call ``on_load_dll``."""
    self._update_debugger_state(debug_event)
    load_dll = debug_event.u.LoadDll
    dll = self._get_loaded_dll(load_dll)
    dll_name = os.path.basename(dll).lower()
    if dll_name.endswith(".dll"):
        dll_name = dll_name[:-4]
    # But this is silly: if both ntdll and ntdll64 are loaded, the two would
    # end up with the same key...
    if dll_name.endswith(".dll64"):
        dll_name = dll_name[:-6] + "64"  # Ugly..
    #print("Load {0} -> {1}".format(dll, dll_name))
    self._module_by_process[self.current_process.pid][dll_name] = windows.pe_parse.GetPEFile(load_dll.lpBaseOfDll, self.current_process)
    self._setup_pending_breakpoints_load_dll(dll_name)
    with self.DisabledMemoryBreakpoint():
        try:
            return self.on_load_dll(load_dll)
        finally:
            # hFile ownership is ours: close it to avoid a handle leak.
            if load_dll.hFile:
                winproxy.CloseHandle(load_dll.hFile)
def _handle_unload_dll(self, debug_event):
    """Handle UNLOAD_DLL_DEBUG_EVENT: refresh the debugger state and forward
    the event to ``on_unload_dll`` with memory breakpoints disabled."""
    self._update_debugger_state(debug_event)
    unload_info = debug_event.u.UnloadDll
    with self.DisabledMemoryBreakpoint():
        return self.on_unload_dll(unload_info)
def _handle_output_debug_string(self, debug_event):
    """Handle OUTPUT_DEBUG_STRING_EVENT: refresh the debugger state and forward
    the string info to ``on_output_debug_string`` with memory breakpoints disabled."""
    self._update_debugger_state(debug_event)
    string_info = debug_event.u.DebugString
    with self.DisabledMemoryBreakpoint():
        return self.on_output_debug_string(string_info)
def _handle_rip(self, debug_event):
    """Handle RIP_EVENT (system debugging error): refresh the debugger state
    and forward the info to ``on_rip`` with memory breakpoints disabled."""
    self._update_debugger_state(debug_event)
    rip = debug_event.u.RipInfo
    with self.DisabledMemoryBreakpoint():
        return self.on_rip(rip)
## Public API
def loop(self):
"""Debugging loop: handle event / dispatch to breakpoint. Returns when all targets are dead/detached"""
for debug_event in self._debug_event_generator():
self.REMOVE_ME_debug_event = debug_event
dbg_continue_flag = self._dispatch_debug_event(debug_event)
if dbg_continue_flag is None:
dbg_continue_flag = DBG_CONTINUE
if debug_event.dwDebugEventCode == EXIT_PROCESS_DEBUG_EVENT or not self._killed_in_action():
#if not self._killed_in_action():
# should we always _finish_debug_event even if process was killed ?
# rhaaa _killed_in_action is a REALLY bad name, it's not killed, it's detached
# TODO: FIXME
self._finish_debug_event(debug_event, dbg_continue_flag)
if not self.processes:
break
def add_bp(self, bp, addr=None, type=None, target=None):
    """Add a breakpoint, bp can be:

        * a :class:`Breakpoint` (addr and type must be ``None``)
        * any callable (addr and type must NOT be ``None``) (NON-TESTED)

    If the ``bp`` type is ``STANDARD_BP`` or ``MEMORY_BREAKPOINT``, target can be ``None`` (all targets) or a process.
    If the ``bp`` type is ``HARDWARE_EXEC_BP``, target can be ``None`` (all targets), a process or a thread.

    :raise ValueError: if ``addr``/``type`` are inconsistent with the kind of ``bp``.
    """
    if getattr(bp, "addr", None) is None:
        # Raw callable: addr and type are mandatory to build a breakpoint.
        # BUGFIX: replaced the unprofessional and unhelpful error message.
        if addr is None or type is None:
            raise ValueError("<addr> and <type> must be provided when <bp> is a raw callable")
        bp = ProxyBreakpoint(bp, addr, type)
    else:
        if addr is not None or type is not None:
            raise ValueError("Given <addr|type> by parameters but BP object have them")
    del addr
    del type
    if target is None:
        # Register as pending so every current and future target gets it.
        self.add_pending_breakpoint(bp, None)
    else:
        # CLEANUP: was a redundant `elif target is not None` after `if target is None`.
        # Check that targets are accepted
        if target not in self.processes.values() + self.threads.values():
            # Target not started yet (e.g. the original target): register as pending.
            return self.add_pending_breakpoint(bp, target)
    return self._setup_breakpoint(bp, target)
def del_bp(self, bp, targets=None):
    """Delete a breakpoint, if targets is ``None``: delete it from all targets"""
    remove = getattr(self, "_remove_breakpoint_" + bp.type)
    explicit_targets = targets is not None
    if not explicit_targets:
        # Process-scoped BP types are removed per-process, the others per-thread.
        if bp.type in [STANDARD_BP, MEMORY_BREAKPOINT]:  # TODO: better..
            targets = self.processes.values()
        else:
            targets = self.threads.values()
    for tgt in targets:
        remove(bp, tgt)
    if not explicit_targets:
        # Also forget the breakpoint for not-yet-started targets.
        return self.remove_pending_breakpoint(bp, None)
def single_step(self):
    """Make the ``current_thread`` ``single_step``. ``Debugger.on_single_step`` will be called after that"""
    context = self.current_thread.context
    # Set the x86 Trap Flag: the CPU raises a single-step exception after the next instruction.
    context.EEFlags.TF = 1
    self.current_thread.set_context(context)
## Memory Breakpoint helper
def get_memory_breakpoint_at(self, addr, process=None):
    """Get the memory breakpoint that handle ``addr``

    Return values are:

        * ``False`` if the page has no memory breakpoint (real fault)
        * ``None`` if the page as memBP but None handle ``addr``
        * ``bp`` the MemBP that handle ``addr``
    """
    if process is None:
        process = self.current_process
    page_base = (addr >> 12) << 12  # round addr down to its page base
    watched = self._watched_pages[process.pid]
    if page_base not in watched:
        return False
    matching = [bp for bp in watched[page_base].bps
                if bp._addr <= addr < bp._addr + bp.size]
    return matching[0] if matching else None
def disable_all_memory_breakpoints(self, target=None):
    """Restore all pages to their original access rights.
    If target is ``None``, use ``current_process``

    :return: a mapping of all disabled breakpoints that must be passed to :func:`restore_all_memory_breakpoints`"""
    if target is None:
        target = self.current_process
    res = {}
    cp_watch_page = self._watched_pages[target.pid]
    page_protection = DWORD()  # out-param reused for every virtual_protect call
    for page_addr, watched_page in cp_watch_page.items():
        try:
            target.virtual_protect(page_addr, PAGE_SIZE, watched_page.original_prot, page_protection)
        except WindowsError as e:
            # If page have been unmap, warn the concerned Breakpoints.
            for bp in watched_page.bps:
                # TODO: Document
                bp.on_error(self, page_addr)
        # NOTE(review): on the error path above, page_protection still holds
        # the previous iteration's value (or 0), so the recorded protection
        # for a failed page is stale -- confirm this is acceptable.
        res[page_addr] = page_protection.value
    return res
def restore_all_memory_breakpoints(self, data, target=None):
    """Re-setup all memory breakpoints, affecting pages access rights.
    If target is ``None``, use ``current_process``

    ``data`` is the result of the corresponding call to :func:`disable_all_memory_breakpoints`"""
    if target is None:
        target = self.current_process
    still_watched = self._watched_pages[target.pid]
    for page_addr, protection in data.items():
        # Prevent restoring deleted breakpoints: only re-protect pages still watched.
        if page_addr in still_watched:
            target.virtual_protect(page_addr, PAGE_SIZE, protection, None)
    return
@contextmanager
def DisabledMemoryBreakpoint(self, target=None):
    """A context-manager that disable all memory breakpoints and restore them on exit"""
    saved_protections = self.disable_all_memory_breakpoints(target)
    try:
        yield
    finally:
        # Do not touch the target's memory if we were detached while in the body.
        if not self._killed_in_action():
            self.restore_all_memory_breakpoints(saved_protections, target)
def get_exception_bitness(self, exc):
    """Return the bitness in which the exception occured.
    Useful when debugging a 32b process from a 64bits one

    :param exc: the exception debug info
    :return: :class:`int` -- 32 or 64"""
    # A 32-bit debugger only ever sees 32-bit exceptions.
    if windows.current_process.bitness == 32:
        return 32
    # STATUS_WX86_* codes flag exceptions raised in the wow64 (32-bit) world.
    if exc.ExceptionRecord.ExceptionCode in [STATUS_WX86_BREAKPOINT, STATUS_WX86_SINGLE_STEP]:
        return 32
    return 64
# Public callback
def on_exception(self, exception):
"""Called on exception event other that known breakpoint or requested single step. ``exception`` is one of the following type:
* :class:`windows.winobject.exception.EEXCEPTION_DEBUG_INFO32`
* :class:`windows.winobject.exception.EEXCEPTION_DEBUG_INFO64`
The default behaviour is to return ``DBG_CONTINUE`` for the known exception code
and ``DBG_EXCEPTION_NOT_HANDLED`` else
"""
dbgprint("Exception: {0} at ".format(exception.ExceptionRecord.ExceptionCode, exception.ExceptionRecord.ExceptionAddress), "DBG")
if not exception.ExceptionRecord.ExceptionCode in winexception.exception_name_by_value:
return DBG_EXCEPTION_NOT_HANDLED
return DBG_CONTINUE
def on_single_step(self, exception):
    """Called on requested single step ``exception`` is one of the following type:

        * :class:`windows.winobject.exception.EEXCEPTION_DEBUG_INFO32`
        * :class:`windows.winobject.exception.EEXCEPTION_DEBUG_INFO64`

    There is no default implementation, if you use ``Debugger.single_step()`` you should implement ``on_single_step``
    """
    message = "Debugger that explicitly single step should implement <on_single_step>"
    raise NotImplementedError(message)
def on_create_process(self, create_process):
    """Called on create_process event. No-op by default; override in a subclass.

    :param CREATE_PROCESS_DEBUG_INFO create_process:"""
    pass
def on_exit_process(self, exit_process):
    """Called on exit_process event. No-op by default; override in a subclass.

    :param EXIT_PROCESS_DEBUG_INFO exit_process:"""
    pass
def on_create_thread(self, create_thread):
    """Called on create_thread event. No-op by default; override in a subclass.

    :param CREATE_THREAD_DEBUG_INFO create_thread:"""
    pass
def on_exit_thread(self, exit_thread):
    """Called on exit_thread event. No-op by default; override in a subclass.

    :param EXIT_THREAD_DEBUG_INFO exit_thread:"""
    pass
def on_load_dll(self, load_dll):
    """Called on load_dll event. No-op by default; override in a subclass.

    :param LOAD_DLL_DEBUG_INFO load_dll:"""
    pass
def on_unload_dll(self, unload_dll):
    """Called on unload_dll event. No-op by default; override in a subclass.

    :param UNLOAD_DLL_DEBUG_INFO unload_dll:"""
    pass
def on_output_debug_string(self, debug_string):
    """Called on debug_string event. No-op by default; override in a subclass.

    :param OUTPUT_DEBUG_STRING_INFO debug_string:"""
    pass
def on_rip(self, rip_info):
    """Called on rip_info event. No-op by default; override in a subclass.

    :param RIP_INFO rip_info:"""
    pass
marpie/PythonForWindows | windows/crypto/helper.py | from windows.generated_def import CRYPT_DATA_BLOB, BYTE
class ECRYPT_DATA_BLOB(CRYPT_DATA_BLOB):
    """Extended CRYPT_DATA_BLOB with string <-> blob conversion helpers."""

    @classmethod
    def from_string(cls, buf):
        """Build a blob whose pbData/cbData describe a copy of ``buf``."""
        self = cls()
        self.cbData = len(buf)
        self.pbData = (BYTE * self.cbData)(*bytearray(buf))
        return self

    @property
    def data(self):
        """The blob content as a :class:`bytearray` (copies ``cbData`` bytes)."""
        return bytearray(self.pbData[:self.cbData])
marpie/PythonForWindows | ctypes_generation/extended_structs/_GUID.py | INITIAL_GUID = _GUID
class _GUID(INITIAL_GUID):
    """Extended GUID: adds an optional ``name``/``strid`` and string conversion
    helpers on top of the raw generated ctypes structure."""

    def __init__(self, Data1=None, Data2=None, Data3=None, Data4=None, name=None, strid=None):
        data_tuple = (Data1, Data2, Data3, Data4)
        self.name = name
        self.strid = strid
        # Either a fully zeroed GUID (no data at all) or a complete one.
        if all(data is None for data in data_tuple):
            return super(_GUID, self).__init__()
        if any(data is None for data in data_tuple):
            raise ValueError("All or none of (Data1, Data2, Data3, Data4) should be None")
        super(_GUID, self).__init__(Data1, Data2, Data3, Data4)

    def __repr__(self):
        notpresent = object()
        # Handle IID created without '__init__' (like ctypes-ptr deref)
        if getattr(self, "strid", notpresent) is notpresent:
            self.strid = self.to_string()
        if self.strid is None:
            return super(_GUID, self).__repr__()
        if getattr(self, "name", notpresent) is notpresent:
            self.name = None
        if self.name is None:
            return '<IID "{0}">'.format(self.strid.upper())
        return '<IID "{0}({1})">'.format(self.strid.upper(), self.name)

    def to_string(self):
        """Render as the canonical XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX form."""
        data4_format = "{0:02X}{1:02X}-" + "".join("{{{i}:02X}}".format(i=i + 2) for i in range(6))
        data4_str = data4_format.format(*self.Data4)
        return "{0:08X}-{1:04X}-{2:04X}-".format(self.Data1, self.Data2, self.Data3) + data4_str

    def update_strid(self):
        # Recompute the cached string form (e.g. after the raw fields were mutated).
        new_strid = self.to_string()
        self.strid = new_strid

    @classmethod
    def from_string(cls, iid):
        """Build a GUID from its 'XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX' string form."""
        part_iid = iid.split("-")
        datas = [int(x, 16) for x in part_iid[:3]]
        datas.append(int(part_iid[3][:2], 16))
        datas.append(int(part_iid[3][2:], 16))
        for i in range(6):
            datas.append(int(part_iid[4][i * 2:(i + 1) * 2], 16))
        return cls.from_raw(*datas, strid=iid)

    @classmethod
    def from_raw(cls, Data1, Data2, Data3, Data41, Data42, Data43, Data44, Data45, Data46, Data47, Data48, **kwargs):
        """Build a GUID from its 11 raw integer components."""
        return cls(Data1, Data2, Data3, (BYTE*8)(Data41, Data42, Data43, Data44, Data45, Data46, Data47, Data48), **kwargs)

    def __eq__(self, other):
        # NOTE(review): __eq__ without __hash__ makes instances unhashable on py3.
        if not isinstance(other, (_GUID, INITIAL_GUID)):
            return NotImplemented
        return (self.Data1, self.Data2, self.Data3, self.Data4[:]) == (other.Data1, other.Data2, other.Data3, other.Data4[:])
|
marpie/PythonForWindows | ctypes_generation/generate.py | import sys
import os
import os.path
import re
import glob
import textwrap
import StringIO
import shutil
import dummy_wintypes
import struct_parser
import func_parser
import def_parser
import com_parser
from simpleparser import ParsingError
pjoin = os.path.join
pexists = os.path.exists
dedent = textwrap.dedent
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
print(SCRIPT_DIR)
from_here = lambda path: pjoin(SCRIPT_DIR, path)
DEST_DIR = from_here(r"..\windows\generated_def")
to_dest = lambda path: pjoin(DEST_DIR, path)
class ParsedFile(object):
    """Base class for a parsed definition file.

    Subclasses provide ``PARSER`` (a class whose instances expose ``parse()``)
    and implement ``compute_imports_exports(data)`` to fill the ``imports`` /
    ``exports`` name sets from the parsed ``data``.
    """

    def __init__(self, filename):
        self.filename = filename
        try:
            self.data = self.PARSER(open(filename).read()).parse()
        except ParsingError as e:
            print(" !! Error while parsing file <{0}> !!".format(filename))
            print(e)
            raise
        self.exports = set()  # names this file defines
        self.imports = set()  # names this file needs from other files
        self.compute_imports_exports(self.data)

    def add_exports(self, *names):
        self.exports.update(names)

    def add_imports(self, *names):
        self.imports.update(names)

    def compute_imports_exports(self, data):
        # BUGFIX: the base signature was missing the ``data`` parameter that
        # __init__ passes and that every subclass override accepts; calling
        # the base used to die with a TypeError instead of NotImplementedError.
        raise NotImplementedError("compute_imports_exports")

    def __repr__(self):
        return '<{clsname} "{0}">'.format(self.filename, clsname=type(self).__name__)
class StructureParsedFile(ParsedFile):
    """Parsed structure/enum definition file."""
    PARSER = struct_parser.WinStructParser

    def compute_imports_exports(self, data):
        """Register every struct/enum (and their typedefs) as exports; struct
        field types and non-numeric array sizes become imports."""
        structs, enums = data
        for enum in enums:
            self.add_exports(enum.name)
            self.add_exports(*enum.typedef)
        for struct in structs:
            self.add_exports(struct.name)
            self.add_exports(*struct.typedef)
            for field_type, field_name, nb_rep in struct.fields:
                if field_type.name not in self.exports:
                    self.add_imports(field_type.name)
                try:
                    int(nb_rep)
                except (TypeError, ValueError):
                    # BUGFIX: was a bare `except:`. A non-literal repetition
                    # count is a named constant, hence a dependency.
                    self.add_imports(nb_rep)
class SimpleTypeParsedFile(ParsedFile):
    """Parsed simple-typedef file (``NAME = rvalue`` style definitions)."""
    PARSER = struct_parser.SimpleTypesParser

    def compute_imports_exports(self, data):
        # Each entry only exports its left-hand name.
        for simple_type in data:
            self.add_exports(simple_type.lvalue)  # No dependancy check on rvalue for now
class DefinitionParsedFile(ParsedFile):
    """Parsed windef constant-definition file."""
    PARSER = def_parser.WinDefParser

    def compute_imports_exports(self, data):
        # Each define only exports its name.
        for windef in data:
            self.add_exports(windef.name)  # No dependancy check on rvalue for now
class NtStatusParsedFile(ParsedFile):
    """Parsed NTSTATUS code file; data entries are (value, name, description)."""
    PARSER = def_parser.NtStatusParser

    def compute_imports_exports(self, data):
        for ntstatus in data:
            # ntstatus[1] is the status name.
            self.add_exports(ntstatus[1])
class FunctionParsedFile(ParsedFile):
    """Parsed function-prototype file: exports are the function names, imports
    are their return and parameter type names."""
    PARSER = func_parser.WinFuncParser

    def compute_imports_exports(self, data):
        for func in data:
            ret = func.return_type
            # A ("PTR", name) tuple wraps the pointed-to type name.
            if isinstance(ret, tuple) and ret[0] == "PTR":
                self.add_imports(ret[1])
            else:
                self.add_imports(ret)
            for param_type, _ in func.params:
                # Unwrap a textual "POINTER(X)" annotation down to X.
                if param_type.startswith("POINTER(") and param_type.endswith(")"):
                    param_type = param_type[len("POINTER("): -1]
                self.add_imports(param_type)
            self.add_exports(func.name)
class COMParsedFile(ParsedFile):
    """Parsed COM interface definition file."""
    PARSER = com_parser.WinComParser
    # Interfaces replaced by PVOID at generation time: no dependency tracked.
    IGNORED_INTERFACE = set(["ITypeInfo"])

    def compute_imports_exports(self, cominterface):
        self.add_exports(cominterface.name)
        if cominterface.typedefptr:
            self.add_exports(cominterface.typedefptr)
        for method in cominterface.methods:
            self.compute_method_imports_exports(cominterface, method)

    def compute_method_imports_exports(self, interface, method):
        """Record the return type and every argument type of ``method`` as imports."""
        self.add_imports(method.ret_type)
        for arg in method.args[1:]:  # First one is 'this'
            if arg.byreflevel > 0:
                if arg.type == "void":
                    continue  # PVOID DEP: don't care
                if arg.type in self.IGNORED_INTERFACE:
                    continue  # Will be replaced by "PVOID" at generation: ignore dep
                if arg.type == interface.name:
                    continue  # Do not add dependence to our own COM interface
            self.add_imports(arg.type)
class ParsedFileGraph(object):
    """Builds and flattens the dependency graph between parsed files.

    ``nodes`` are the files to order; ``depnodes`` are files whose exports may
    be used but which need no ordering themselves. ``missing_handler``, if
    given, is called as ``missing_handler(import_name, node)`` instead of
    raising when a dependency cannot be resolved.
    """

    def __init__(self, nodes, depnodes, missing_handler=None):
        self.nodes = nodes
        self.depnodes = depnodes
        self.exports_database = {}  # exported name -> providing node
        self.depandances_database = {node: set() for node in nodes}
        self.build_export_database(self.nodes)
        self.missing_handler = missing_handler
        self.build_depandance_database()

    def build_dependancy_graph(self):
        """Return ``nodes`` flattened in dependency order (dependencies first).

        :raise ValueError: if no next node can be chosen (cycle or unmet dep).
        """
        todo = set(self.nodes)
        if not todo:
            return []
        start = self.find_starting_node()
        print("Starting node is {0}".format(start))
        todo.remove(start)
        flatten = [start]
        depdone = set(flatten) | set(self.depnodes)
        while todo:
            # Pick any node whose dependencies are all already emitted.
            for node in todo:
                if self.depandances_database[node].issubset(depdone):
                    break
            else:
                raise ValueError("Could not find a next node for dep flattening")
            flatten.append(node)
            depdone.add(node)
            todo.remove(node)
            print("Next is <{0}>".format(node))
        return flatten

    def build_depandance_database(self):
        """Translate each node's imported names into the nodes providing them."""
        for node in self.nodes:
            for import_ in node.imports:
                try:
                    self.depandances_database[node].add(self.exports_database[import_])
                except KeyError:
                    self.on_missing_dependancy(import_, node)

    def on_missing_dependancy(self, import_, node):
        # Delegate to the user handler when one was provided; else it is fatal.
        if self.missing_handler is not None:
            return self.missing_handler(import_, node)
        raise ValueError("Missing dependancy <{0}> of {1}".format(import_, node))

    def build_export_database(self, nodes):
        """Map every exported name of ``nodes`` (plus ``depnodes``) to its node.

        :raise ValueError: if two nodes export the same name.
        """
        # BUGFIX: the ``nodes`` parameter used to be ignored (self.nodes was
        # hard-coded); it is now honored, preserving behavior for the
        # __init__ call which passes self.nodes.
        for node in nodes + self.depnodes:
            for export in node.exports:
                if export in self.exports_database:
                    # BUGFIX: the two nodes were swapped in the message; it now
                    # reads "<name> in <current node> but already exported by <first node>".
                    raise ValueError("{0} IN {1} but already exported by {2}".format(export, node, self.exports_database[export]))
                self.exports_database[export] = node

    def find_starting_node(self):
        """Return a node depending only on ``depnodes`` (no ordering constraint)."""
        for node in self.nodes:
            if self.depandances_database[node].issubset(set(self.depnodes)):
                return node
        raise ValueError("Could not find a starting NODE without dependancy")
class BasicTypeNodes(object):
    """Pseudo-node exposing the basic ctypes wintypes as exports for dependency resolution."""

    @property
    def exports(self):
        # Allow ourselves to redefine the bugged BYTE define & MAX_PATH (which is NOT a type!)
        return set(dummy_wintypes.names) - set(["BYTE", "MAX_PATH"])
class FakeExporter(object):
    """Minimal stand-in for a parsed file: only carries an ``exports`` set."""

    def __init__(self, exports):
        self.exports = exports
class ParsedDirectory(object):
    """Collects and parses every ``.txt`` file under ``src``.

    ``filetype`` is the ParsedFile subclass (or any callable) applied to each
    filename; the resulting objects land in ``self.nodes``. ``src`` may also
    be a single file or a glob pattern when ``recurse`` is False.
    """

    def __init__(self, filetype, src, recurse=False):
        if recurse:
            # Recursive search of .txt files.
            filenames = [os.path.join(dirpath, fname)
                         for (dirpath, _, fnames) in os.walk(src)
                         for fname in fnames
                         if fname.endswith(".txt")]
        else:
            pattern = os.path.join(src, "*.txt") if os.path.isdir(src) else src
            filenames = glob.glob(pattern)
        self.nodes = [filetype(fname) for fname in filenames]
### Generation Class ###
class CtypesGenerator(object):
def __init__(self, parsed_files, template):
self.files = parsed_files # Already in generation order
self.template = template # MAKE BETTER
self.result = StringIO.StringIO()
self.imported_name = set([])
def add_import_name(self, name):
self.imported_name.add(name)
def emit(self, str):
self.result.write(str)
def emitline(self, str):
self.emit(str)
self.emit("\n")
def before_emit_template(self):
pass
def after_emit_template(self):
pass
def emit_import_dependancies(self):
for name in self.imported_name:
self.emitline("from {0} import *".format(name))
def copy_template(self):
with open(self.template) as f:
self.emit(f.read())
def generate(self):
self.emit_import_dependancies()
self.before_emit_template()
self.copy_template()
self.after_emit_template()
self.generate_files(self.files)
def generate_files(self, files):
for file in files:
self.generate_for_file(file)
def generate_for_file(self, file):
pass
def generate_into(self, filename):
self.generate()
print("Writing generated code into {0}".format(filename))
with open(filename, "w") as f:
f.write(self.result.getvalue())
class NoTemplatedGenerator(CtypesGenerator):
    """Generator variant without any template file."""

    def __init__(self, parsed_files):
        self.files = parsed_files  # Already in generation order
        self.result = StringIO.StringIO()
        self.imported_name = set([])

    def copy_template(self):
        # No template: nothing to copy.
        pass
NTSTATUS_MODULE = "ntstatus"  # module name star-imported by generated define files (see DefineCtypesGenerator)
class DefineCtypesGenerator(CtypesGenerator):
    """Emits ctypes code for define-style constants."""

    def after_emit_template(self):
        # Generated defines rely on the ntstatus module being imported first.
        self.emitline("from {0} import *".format(NTSTATUS_MODULE))

    def generate_for_file(self, file):
        for define in file.data:
            self.emitline(define.generate_ctypes())
# TEST Documentation generator
class DefineDocGenerator(NoTemplatedGenerator):
    """Emits Sphinx ``autodata`` entries for each define."""

    def copy_template(self):
        # Sphinx page header instead of a template file.
        self.emitline(".. currentmodule:: windows.generated_def")
        self.emitline("")
        self.emitline("Windef")
        self.emitline("------")

    def generate_for_file(self, file):
        for define in file.data:
            self.emitline(".. autodata:: {define.name}".format(define=define))
class NtStatusCtypesGenerator(CtypesGenerator):
    """Emits ``NtStatusException.register_ntstatus`` lines for each NTSTATUS code."""

    def generate_for_file(self, file):
        for value, name, descr in file.data:
            value = "{:#x}".format(value)  # render the status code in hex
            line = '{1} = NtStatusException.register_ntstatus({0}, "{1}", "{2}")'.format(value, name, descr)
            self.emitline(line)
class NtStatusDocGenerator(NoTemplatedGenerator):
    """Emits Sphinx ``autodata`` entries for each NTSTATUS name."""

    def copy_template(self):
        # Sphinx page header instead of a template file.
        self.emitline(".. currentmodule:: windows.generated_def")
        self.emitline("")
        self.emitline("Ntstatus")
        self.emitline("--------")

    def generate_for_file(self, file):
        for value, name, descr in file.data:
            self.emitline(".. autodata:: {name}".format(name=name))
class COMCtypesGenerator(CtypesGenerator):
    """Emits ctypes COM interface classes: first every class with its IID, then
    every vtable function table."""
    IGNORED_INTERFACE = set(COMParsedFile.IGNORED_INTERFACE)

    def __init__(self, *args, **kwargs):
        super(COMCtypesGenerator, self).__init__(*args, **kwargs)
        self.iids_def = {}  # interface name -> (python IID args string, IID string)
        self.generated_interfaces_names = set(self.IGNORED_INTERFACE)
        for file in self.files:
            self.generated_interfaces_names.update(file.exports)

    def parse_iid_file(self, filename):
        """Load fallback IIDs from 'name|iid' lines of ``filename``."""
        data = open(filename).read()
        for line in data.split("\n"):
            name, iid = line.split("|")
            self.iids_def[name] = self.parse_iid(iid), iid

    def generate_files(self, files):
        # We generate COM interfaces in 2 steps:
        # 1) The class itself with the IID
        # 2) The function list after all classes were generated
        #    - This allows COM functions to refer to the interfaces in their def :)
        for file in files:
            self.generate_com_interface_class_iid(file.data)
        for file in files:
            self.generate_com_interface_functions(file.data)

    def generate_com_interface_class_iid(self, cominterface):
        """Emit ``class <name>(COMInterface)`` with its generated IID."""
        name = cominterface.name
        if cominterface.iid is not None:
            iid_str = cominterface.iid
            iid_python = self.parse_iid(iid_str)
        else:
            # No inline IID: fall back to the table loaded by parse_iid_file.
            print("Lookup of IID for <{0}>".format(cominterface.name))
            iid_python, iid_str = self.iids_def[cominterface.name]
        cls_format_param = {"name": name, "iid_python" : iid_python, "iid_str": iid_str}
        self.emitline("class {name}(COMInterface):".format(**cls_format_param))
        self.emitline(' IID = generate_IID({iid_python}, name="{name}", strid="{iid_str}")'.format(**cls_format_param))
        self.emitline('')

    def generate_com_interface_functions(self, cominterface):
        """Emit the ``<name>._functions_ = {...}`` vtable mapping."""
        name = cominterface.name
        self.emitline("{name}._functions_ = {{".format(name=name))
        self.emit_com_interface_functions(cominterface)
        self.emitline(' }')
        self.emitline('')

    def emit_com_interface_functions(self, cominterface):
        """Emit one ctypes prototype entry per vtable method."""
        indent = " " * 8
        for method_nb, method in enumerate(cominterface.methods):
            args_to_define = method.args[1:]  # ctypes does not need the This
            name = method.name
            params = ", ".join([arg.name +":"+ ("*"* arg.byreflevel) +arg.type for arg in args_to_define])
            self.emitline(indent + "# {name} -> {params}".format(name=name, params=params))
            str_args = []
            for arg in args_to_define:
                # A void* argument is emitted as a plain PVOID.
                if arg.type == "void" and arg.byreflevel > 0:
                    arg = type(arg)("PVOID", arg.byreflevel - 1, arg.name)
                atype = arg.type
                byreflevel = arg.byreflevel
                if atype in self.generated_interfaces_names:
                    # If the parameter is a COM interface, remove a *
                    # (as PFW ComInterface are PVOID)
                    byreflevel = arg.byreflevel - 1
                    if atype in self.IGNORED_INTERFACE:
                        # If the interface is ignored -> replace by a raw pointer
                        atype = "PVOID"
                for i in range(byreflevel):
                    atype = "POINTER({0})".format(atype)
                str_args.append(atype)
            params = ", ".join([method.ret_type] + str_args)
            # stdcall methods use WINFUNCTYPE, cdecl ones CFUNCTYPE.
            ctypes_functype = 'WINFUNCTYPE' if method.functype == 'stdcall' else 'CFUNCTYPE'
            self.emitline(indent + '"{0}": ctypes.{functype}({1})({2}, "{0}"),'.format(name, params, method_nb, functype=ctypes_functype))
        return

    def parse_iid(self, iid_str):
        """Convert 'XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX' into a '0x.., 0x..' args string."""
        part_iid = iid_str.split("-")
        str_iid = []
        str_iid.append("0x" + part_iid[0])
        str_iid.append("0x" + part_iid[1])
        str_iid.append("0x" + part_iid[2])
        str_iid.append("0x" + part_iid[3][:2])
        str_iid.append("0x" + part_iid[3][2:])
        for i in range(6): str_iid.append("0x" + part_iid[4][i * 2:(i + 1) * 2])
        return ", ".join(str_iid)
#TODO: subclass NOTEMPLATE
class FunctionCtypesGenerator(NoTemplatedGenerator):
    """Emits ctypes prototypes for parsed functions."""

    def generate_for_file(self, file):
        for item in file.data:
            self.emitline(item.generate_ctypes())
# Structures with hand-written extensions: extended_structs/<NAME>.py is
# inlined after the generated definition of <NAME> (see StructureCtypesGenerator).
EXTENDED_STRUCT_FILE = glob.glob(pjoin(SCRIPT_DIR, "extended_structs", "*.py"))
EXTENDED_STRUCT = [os.path.basename(filename)[:-len(".py")] for filename in EXTENDED_STRUCT_FILE]
class StructureCtypesGenerator(CtypesGenerator):
    """Emits ctypes definitions for structures, enums and simple typedefs."""

    def generate_for_simple_type_file(self, file):
        for simple_type in file.data:
            self.emitline(simple_type.generate_ctypes())

    def generate_for_file(self, file):
        if isinstance(file, SimpleTypeParsedFile):
            return self.generate_for_simple_type_file(file)
        structs, enums = file.data
        # Enums first, then structs (struct fields may use the enum types).
        for definition in [d for l in (enums, structs) for d in l]:
            self.emitline(definition.generate_ctypes())
            if definition.name in EXTENDED_STRUCT:
                print("Including extended definition for <{0}>".format(definition.name))
                extended_struct_filename = from_here(os.path.join("extended_structs", "{0}.py".format(definition.name)))
                with open(extended_struct_filename) as f:
                    self.emitline(f.read())
                # RE-generate the typedef to apply them to the extended definition
                self.emitline(definition.generate_typedef_ctypes())
class StructureDocGenerator(NoTemplatedGenerator):
    """Emits the Sphinx/RST documentation page for generated structures and enums."""

    # Underline character used for RST section titles.
    STRUCT_NAME_SEPARATOR = "'"

    def copy_template(self):
        # Page header: bind everything documented here to the winstructs module.
        self.emitline(".. module:: windows.generated_def.winstructs")
        self.emitline("")

    def generate(self):
        """Write the whole page: all structures first, then all enums."""
        self.copy_template()
        self.emitline("Winstructs")
        self.emitline("----------")
        for file in self.files:
            self.generate_structures_for_file(file)
        self.emitline("WinEnums")
        self.emitline("--------")
        for file in self.files:
            self.generate_enums_for_file(file)

    def generate_doc_simple_type_file(self, file):
        """Document the simple-type aliases (typedef-like lvalue/rvalue pairs)."""
        # TODO !
        self.emitline("Simple types")
        self.emitline(self.STRUCT_NAME_SEPARATOR * len("Simple types"))
        for simpledef in file.data:
            if simpledef.rvalue.startswith("POINTER("):
                # Pointer alias: extract the pointed-to name and document it by hand.
                rtype = simpledef.rvalue[len("POINTER("):-1]
                self.emitline(".. class:: {0}".format(simpledef.lvalue))
                self.emitline("")
                self.emitline(" Pointer to :class:`{0}`".format(rtype))
            else:
                # Plain alias: let Sphinx autodoc pick it up.
                self.emitline(".. autoclass:: {0}".format(simpledef.lvalue))
                self.emitline("")
        return

    def generate_structures_for_file(self, file):
        """Emit one RST section per structure of *file* (typedefs then fields)."""
        if isinstance(file, SimpleTypeParsedFile):
            return self.generate_doc_simple_type_file(file)
        structs, enums = file.data
        for struct in structs:
            self.emitline(struct.name)
            self.emitline(self.STRUCT_NAME_SEPARATOR * len(struct.name))
            # Emit typedef
            for name, type in struct.typedef.items():
                self.emitline(".. class:: {0}".format(name))
                self.emitline("")
                # A typedef object with a .type attribute is a pointer typedef.
                if hasattr(type, "type"):
                    self.emitline(" Pointer to :class:`{0}`".format(type.type.name))
                else:
                    self.emitline(" Alias for :class:`{0}`".format(type.name))
                self.emitline("")
            # Emit struct Definition
            self.emitline(".. class:: {0}".format(struct.name))
            for ftype, fname, nb in struct.fields:
                # nb > 1 means the field is a fixed-size array.
                array_str = " ``[{nb}]``".format(nb=nb) if nb > 1 else ""
                self.emitline("")
                self.emitline(" .. attribute:: {fname}".format(fname=fname))
                self.emitline("")
                self.emitline(" :class:`{ftype.name}`{array_str}".format(ftype=ftype, array_str=array_str))
            self.emitline("")

    def generate_enums_for_file(self, file):
        """Emit one RST section per enum of *file* (typedefs then values)."""
        if isinstance(file, SimpleTypeParsedFile):
            return
        structs, enums = file.data
        for enum in enums:
            self.emitline(enum.name)
            self.emitline(self.STRUCT_NAME_SEPARATOR * len(enum.name))
            # Emit typedef
            for name, type in enum.typedef.items():
                self.emitline(".. class:: {0}".format(name))
                self.emitline("")
                if hasattr(type, "type"):
                    self.emitline(" Pointer to :class:`{0}`\n\n".format(type.type.name))
                else:
                    self.emitline(" Alias for :class:`{0}`\n\n".format(type.name))
            # Emit enum Definition
            self.emitline(".. class:: {0}".format(enum.name))
            self.emitline("")
            for enum_value, enum_name in enum.fields:
                self.emitline("")
                self.emitline(" .. attribute:: {0}({1})".format(enum_name, enum_value))
                self.emitline("")
META_WALKER = """
def generate_walker(namelist, target_module):
def my_walker():
for name in namelist:
yield name, getattr(target_module, name)
return my_walker
"""
class MetaFileGenerator(NoTemplatedGenerator):
    """Builds the generated 'meta.py' module that lists every export."""

    def __init__(self):
        # Output buffer + list of (listname, modname, exports) registrations.
        self.result = StringIO.StringIO()
        self.modules = []

    def add_exportlist(self, name, modname, exports):
        """Register the export set *exports* under *name*, backed by module *modname*."""
        entry = (name, modname, exports)
        self.modules.append(entry)

    def add_export_module(self, module):
        """Register a ModuleGenerator's own exports under its own name."""
        self.add_exportlist(module.name, module.name, module.modules_exports())

    def generate(self):
        """Emit every export list, the walker helper, then one walker per list."""
        for listname, _, exports in self.modules:
            self.emitline("{0} = {1}".format(listname, exports))
        self.emitline(META_WALKER)
        for listname, module_name, _ in self.modules:
            self.emitline("import {0} as {0}_module".format(module_name))
            self.emitline("{0}_walker = generate_walker({0}, {1}_module)".format(listname, module_name))
class ModuleGenerator(object):
    """Drives the generation of one output module (e.g. winstructs.py).

    Ties together a parsed-file type, a ctypes generator and a doc generator
    for one source directory, resolves inter-module dependencies, and writes
    the final .py file (and optionally its RST documentation).
    """

    def __init__(self, name, filetype, ctypesgenerator, docgenerator, src):
        self.name = name
        self.filetype = filetype
        self.ctypesgenerator = ctypesgenerator
        self.docgenerator = docgenerator
        self.src = src
        self.parsed_dir = None
        self.nodes = []
        # Other ModuleGenerators whose exports this module imports.
        self.dependances_modules = set()

    def add_module_dependancy(self, module):
        """Declare that this module depends on another ModuleGenerator."""
        self.dependances_modules.add(module)

    def get_template_filename(self):
        """Return the template file path (overridden/monkey-patched by callers)."""
        return pjoin(self.src, "template.py")

    def parse_source_directory(self, recurse=False):
        """Parse every definition file under self.src into self.nodes."""
        self.nodes += ParsedDirectory(self.filetype, self.src, recurse=recurse).nodes

    def resolve_dependancies(self, depnodes=None):
        """Return self.nodes flattened in dependency order.

        Fix: the default was a shared mutable list (depnodes=[]); use None.
        """
        if depnodes is None:
            depnodes = []
        g = ParsedFileGraph(self.nodes, depnodes=depnodes)
        return g.build_dependancy_graph()

    def check_dependancies_without_flattening(self, depnodes):
        """Verify all dependencies exist without reordering nodes.

        Used for COM interfaces whose VTBLs are circularly dependent.
        Raises ValueError listing any missing interface; with --autocopy it
        first tries to copy missing definitions from the given directory.
        """
        self.missing_interfaces = []
        g = ParsedFileGraph(self.nodes, depnodes=depnodes, missing_handler=self.missing_com_interface) # init check for missing dependance
        if self.missing_interfaces:
            missing_names = [x[0] for x in self.missing_interfaces]
            if not args.autocopy:
                raise ValueError("Missing COM dependancy Names : {0}".format(missing_names))
            print("Missing COM interfaces are: {0}".format(missing_names))
            autocopied = []
            for name, node in self.missing_interfaces:
                filename = "{0}\\{1}.txt".format(args.autocopy, name)
                if os.path.exists(filename):
                    autocopied.append(name)
                    print("Auto-copy <{0}>".format(filename))
                    targetdir = os.path.dirname(node.filename)
                    print(filename, targetdir)
                    shutil.copy(filename, targetdir)
            if autocopied:
                raise ValueError("Auto-copyied Names : {0}".format(autocopied))
            raise ValueError("Missing COM dependancy Names : {0}".format(missing_names))
        return g.nodes

    def missing_com_interface(self, import_, node):
        """Callback invoked by ParsedFileGraph for each unresolved name."""
        print("Missing name <{0}> in file <{1}>".format(import_, node.filename))
        self.missing_interfaces.append((import_, node))

    def generate(self):
        """Parse, flatten and generate this module in one step."""
        self.parse_source_directory()
        # Flatten the graph
        flatten_nodes = self.resolve_dependancies()
        self.generate_from_nodelist(flatten_nodes)
        self.nodes = flatten_nodes

    def resolve_dep_and_generate(self, depnodes=None):
        """Generate using *depnodes* plus every registered module dependency.

        Fix: the default was a shared mutable list (depnodes=[]); use None.
        """
        depnodes = list(depnodes) if depnodes is not None else []
        # Add module dependancies nodes to the finals depnodes
        for moddep in self.dependances_modules:
            depnodes += moddep.nodes
        flatten_nodes = self.resolve_dependancies(depnodes=depnodes)
        self.generate_from_nodelist(flatten_nodes)

    def after_ctypes_generator_init(self, ctypesgen):
        """Hook called after the ctypes generator is built (default: no-op)."""
        pass

    def generate_from_nodelist(self, nodelist):
        """Instantiate the ctypes generator over *nodelist* and write the .py file."""
        template = self.get_template_filename()
        if template is not None:
            ctypesgen = self.ctypesgenerator(nodelist, template)
        else:
            ctypesgen = self.ctypesgenerator(nodelist)
        for moddep in self.dependances_modules:
            ctypesgen.add_import_name(moddep.name)
        self.after_ctypes_generator_init(ctypesgen)
        finalfilename = "{0}.py".format(self.name)
        ctypesgen.generate_into(to_dest(finalfilename)) # Need to handle dest != PythonForWindows

    def generate_doc(self, filename):
        """Write the RST documentation for the already-parsed nodes."""
        nodelist = self.nodes
        self.docgenerator(nodelist).generate_into(filename)

    def modules_exports(self):
        """Return the union of every node's exported names."""
        res = set()
        for node in self.nodes:
            res |= node.exports
        return res
import argparse

# Command-line interface of the generation script.
parser = argparse.ArgumentParser(prog=__file__)
parser.add_argument('--autocopy', help="[PRIVATE OPTION] A directory used to find missing COM interface")
args = parser.parse_args()

# Copy the hand-written Flag code verbatim into the destination package.
shutil.copy(from_here(r"definitions\flag.py"), DEST_DIR)

print("== Generating defines ==")
# Generate defines (windef.py) and their documentation page.
definemodulegenerator = ModuleGenerator("windef", DefinitionParsedFile, DefineCtypesGenerator, DefineDocGenerator, from_here(r"definitions\defines"))
definemodulegenerator.generate()
definemodulegenerator.generate_doc(from_here(r"..\docs\source\windef_generated.rst"))

print("== Generating NTSTATUS ==")
# Generate Ntstatus (ntstatus.py).
ntstatus_module_generator = ModuleGenerator("ntstatus", NtStatusParsedFile, NtStatusCtypesGenerator, NtStatusDocGenerator, from_here(r"definitions\ntstatus.txt"))
# Hardcoded template file (no dir for ntstatus) -- Need one dir ?
ntstatus_module_generator.get_template_filename = lambda : from_here(r"definitions\ntstatus_template.py")
ntstatus_module_generator.generate()
ntstatus_module_generator.generate_doc(from_here(r"..\docs\source\ntstatus_generated.rst"))

print("== Generating structures ==")
# Parse the simple type file
stfilename = from_here(r"definitions\simple_types.txt")
struct_parser.SimpleTypesParser(open(stfilename).read()).parse()
ss = SimpleTypeParsedFile(stfilename)
# Generate struct + simple types (winstructs.py); depends on the defines.
structure_module_generator = ModuleGenerator("winstructs", StructureParsedFile, StructureCtypesGenerator, StructureDocGenerator, from_here(r"definitions\structures"))
structure_module_generator.parse_source_directory()
# Add the simple type file to the known structures (for dep resolve + generation)
structure_module_generator.nodes.append(ss)
structure_module_generator.add_module_dependancy(definemodulegenerator)
structure_module_generator.resolve_dep_and_generate([BasicTypeNodes()])
structure_module_generator.generate_doc(from_here(r"..\docs\source\winstructs_generated.rst"))

print("== Generating COM interfaces ==")
# Generate COM interfaces (interfaces.py); depends on the structures.
com_module_generator = ModuleGenerator("interfaces", COMParsedFile, COMCtypesGenerator, None, from_here(r"definitions\com"))
# Load the interface_to_iid file needed by the 'COMCtypesGenerator'
com_module_generator.after_ctypes_generator_init = lambda cgen: cgen.parse_iid_file(from_here("definitions\\interface_to_iid.txt"))
com_module_generator.parse_source_directory(recurse=True)
com_module_generator.add_module_dependancy(structure_module_generator)
com_module_generator.resolve_dependancies = com_module_generator.check_dependancies_without_flattening # No real flattening as we have circular dep in Interfaces VTBL
com_module_generator.resolve_dep_and_generate([BasicTypeNodes()])

print("== Generating functions ==")
# Generate function prototypes (winfuncs.py); depends on structures + interfaces.
functions_module_generator = ModuleGenerator("winfuncs", FunctionParsedFile, FunctionCtypesGenerator, None, from_here(r"definitions\functions"))
# no template file
functions_module_generator.get_template_filename = lambda : None
functions_module_generator.parse_source_directory()
functions_module_generator.add_module_dependancy(structure_module_generator)
functions_module_generator.add_module_dependancy(com_module_generator)
functions_module_generator.resolve_dep_and_generate([BasicTypeNodes()])

print("== Generating META file ==")
# Meta-file generator: collects every exported name per generated module.
enums_exports = set()
structs_exports = set()
# Extract enums export & structures exports (both names and their typedefs).
for node in structure_module_generator.nodes:
    if isinstance(node, SimpleTypeParsedFile):
        continue # Generate META for simple type ?
    structs, enums = node.data
    for struct in structs:
        structs_exports.add(struct.name)
        structs_exports.update(struct.typedef)
    for enum in enums:
        enums_exports.add(enum.name)
        enums_exports.update(enum.typedef)
meta = MetaFileGenerator()
meta.add_exportlist("windef", definemodulegenerator.name, definemodulegenerator.modules_exports() | ntstatus_module_generator.modules_exports())
# Add structs / enums as 2 differents lists
meta.add_exportlist("structs", structure_module_generator.name, structs_exports)
meta.add_exportlist("enums", structure_module_generator.name, enums_exports)
meta.add_exportlist("functions", functions_module_generator.name, functions_module_generator.modules_exports())
meta.add_exportlist("interfaces", com_module_generator.name, com_module_generator.modules_exports())
meta.generate_into(to_dest("meta.py"))
print("DONE !")
marpie/PythonForWindows | windows/generated_def/winfuncs.py | from interfaces import *
from winstructs import *
#def ObjectFromLresult(lResult, riid, wParam, ppvObject):
# return ObjectFromLresult.ctypes_function(lResult, riid, wParam, ppvObject)
ObjectFromLresultPrototype = WINFUNCTYPE(HRESULT, LRESULT, REFIID, WPARAM, POINTER(PVOID))
ObjectFromLresultParams = ((1, 'lResult'), (1, 'riid'), (1, 'wParam'), (1, 'ppvObject'))
#def NtOpenKey(KeyHandle, DesiredAccess, ObjectAttributes):
# return NtOpenKey.ctypes_function(KeyHandle, DesiredAccess, ObjectAttributes)
NtOpenKeyPrototype = WINFUNCTYPE(NTSTATUS, PHANDLE, ACCESS_MASK, POBJECT_ATTRIBUTES)
NtOpenKeyParams = ((1, 'KeyHandle'), (1, 'DesiredAccess'), (1, 'ObjectAttributes'))
#def NtCreateKey(pKeyHandle, DesiredAccess, ObjectAttributes, TitleIndex, Class, CreateOptions, Disposition):
# return NtCreateKey.ctypes_function(pKeyHandle, DesiredAccess, ObjectAttributes, TitleIndex, Class, CreateOptions, Disposition)
NtCreateKeyPrototype = WINFUNCTYPE(NTSTATUS, PHANDLE, ACCESS_MASK, POBJECT_ATTRIBUTES, ULONG, PUNICODE_STRING, ULONG, PULONG)
NtCreateKeyParams = ((1, 'pKeyHandle'), (1, 'DesiredAccess'), (1, 'ObjectAttributes'), (1, 'TitleIndex'), (1, 'Class'), (1, 'CreateOptions'), (1, 'Disposition'))
#def NtSetValueKey(KeyHandle, ValueName, TitleIndex, Type, Data, DataSize):
# return NtSetValueKey.ctypes_function(KeyHandle, ValueName, TitleIndex, Type, Data, DataSize)
NtSetValueKeyPrototype = WINFUNCTYPE(NTSTATUS, HANDLE, PUNICODE_STRING, ULONG, ULONG, PVOID, ULONG)
NtSetValueKeyParams = ((1, 'KeyHandle'), (1, 'ValueName'), (1, 'TitleIndex'), (1, 'Type'), (1, 'Data'), (1, 'DataSize'))
#def NtQueryValueKey(KeyHandle, ValueName, KeyValueInformationClass, KeyValueInformation, Length, ResultLength):
# return NtQueryValueKey.ctypes_function(KeyHandle, ValueName, KeyValueInformationClass, KeyValueInformation, Length, ResultLength)
NtQueryValueKeyPrototype = WINFUNCTYPE(NTSTATUS, HANDLE, PUNICODE_STRING, KEY_VALUE_INFORMATION_CLASS, PVOID, ULONG, PULONG)
NtQueryValueKeyParams = ((1, 'KeyHandle'), (1, 'ValueName'), (1, 'KeyValueInformationClass'), (1, 'KeyValueInformation'), (1, 'Length'), (1, 'ResultLength'))
#def NtEnumerateValueKey(KeyHandle, Index, KeyValueInformationClass, KeyValueInformation, Length, ResultLength):
# return NtEnumerateValueKey.ctypes_function(KeyHandle, Index, KeyValueInformationClass, KeyValueInformation, Length, ResultLength)
NtEnumerateValueKeyPrototype = WINFUNCTYPE(NTSTATUS, HANDLE, ULONG, KEY_VALUE_INFORMATION_CLASS, PVOID, ULONG, PULONG)
NtEnumerateValueKeyParams = ((1, 'KeyHandle'), (1, 'Index'), (1, 'KeyValueInformationClass'), (1, 'KeyValueInformation'), (1, 'Length'), (1, 'ResultLength'))
#def CreateFileTransactedA(lpFileName, dwDesiredAccess, dwShareMode, lpSecurityAttributes, dwCreationDisposition, dwFlagsAndAttributes, hTemplateFile, hTransaction, pusMiniVersion, pExtendedParameter):
# return CreateFileTransactedA.ctypes_function(lpFileName, dwDesiredAccess, dwShareMode, lpSecurityAttributes, dwCreationDisposition, dwFlagsAndAttributes, hTemplateFile, hTransaction, pusMiniVersion, pExtendedParameter)
CreateFileTransactedAPrototype = WINFUNCTYPE(HANDLE, LPCSTR, DWORD, DWORD, LPSECURITY_ATTRIBUTES, DWORD, DWORD, HANDLE, HANDLE, PUSHORT, PVOID)
CreateFileTransactedAParams = ((1, 'lpFileName'), (1, 'dwDesiredAccess'), (1, 'dwShareMode'), (1, 'lpSecurityAttributes'), (1, 'dwCreationDisposition'), (1, 'dwFlagsAndAttributes'), (1, 'hTemplateFile'), (1, 'hTransaction'), (1, 'pusMiniVersion'), (1, 'pExtendedParameter'))
#def CreateFileTransactedW(lpFileName, dwDesiredAccess, dwShareMode, lpSecurityAttributes, dwCreationDisposition, dwFlagsAndAttributes, hTemplateFile, hTransaction, pusMiniVersion, pExtendedParameter):
# return CreateFileTransactedW.ctypes_function(lpFileName, dwDesiredAccess, dwShareMode, lpSecurityAttributes, dwCreationDisposition, dwFlagsAndAttributes, hTemplateFile, hTransaction, pusMiniVersion, pExtendedParameter)
CreateFileTransactedWPrototype = WINFUNCTYPE(HANDLE, LPWSTR, DWORD, DWORD, LPSECURITY_ATTRIBUTES, DWORD, DWORD, HANDLE, HANDLE, PUSHORT, PVOID)
CreateFileTransactedWParams = ((1, 'lpFileName'), (1, 'dwDesiredAccess'), (1, 'dwShareMode'), (1, 'lpSecurityAttributes'), (1, 'dwCreationDisposition'), (1, 'dwFlagsAndAttributes'), (1, 'hTemplateFile'), (1, 'hTransaction'), (1, 'pusMiniVersion'), (1, 'pExtendedParameter'))
#def CommitTransaction(TransactionHandle):
# return CommitTransaction.ctypes_function(TransactionHandle)
CommitTransactionPrototype = WINFUNCTYPE(BOOL, HANDLE)
CommitTransactionParams = ((1, 'TransactionHandle'),)
#def CreateTransaction(lpTransactionAttributes, UOW, CreateOptions, IsolationLevel, IsolationFlags, Timeout, Description):
# return CreateTransaction.ctypes_function(lpTransactionAttributes, UOW, CreateOptions, IsolationLevel, IsolationFlags, Timeout, Description)
CreateTransactionPrototype = WINFUNCTYPE(HANDLE, LPSECURITY_ATTRIBUTES, LPGUID, DWORD, DWORD, DWORD, DWORD, LPWSTR)
CreateTransactionParams = ((1, 'lpTransactionAttributes'), (1, 'UOW'), (1, 'CreateOptions'), (1, 'IsolationLevel'), (1, 'IsolationFlags'), (1, 'Timeout'), (1, 'Description'))
#def RollbackTransaction(TransactionHandle):
# return RollbackTransaction.ctypes_function(TransactionHandle)
RollbackTransactionPrototype = WINFUNCTYPE(BOOL, HANDLE)
RollbackTransactionParams = ((1, 'TransactionHandle'),)
#def OpenTransaction(dwDesiredAccess, TransactionId):
# return OpenTransaction.ctypes_function(dwDesiredAccess, TransactionId)
OpenTransactionPrototype = WINFUNCTYPE(HANDLE, DWORD, LPGUID)
OpenTransactionParams = ((1, 'dwDesiredAccess'), (1, 'TransactionId'))
#def TpCallbackSendAlpcMessageOnCompletion(TpHandle, PortHandle, Flags, SendMessage):
# return TpCallbackSendAlpcMessageOnCompletion.ctypes_function(TpHandle, PortHandle, Flags, SendMessage)
TpCallbackSendAlpcMessageOnCompletionPrototype = WINFUNCTYPE(NTSTATUS, HANDLE, HANDLE, ULONG, PPORT_MESSAGE)
TpCallbackSendAlpcMessageOnCompletionParams = ((1, 'TpHandle'), (1, 'PortHandle'), (1, 'Flags'), (1, 'SendMessage'))
#def NtQueryLicenseValue(Name, Type, Buffer, Length, DataLength):
# return NtQueryLicenseValue.ctypes_function(Name, Type, Buffer, Length, DataLength)
NtQueryLicenseValuePrototype = WINFUNCTYPE(NTSTATUS, PUNICODE_STRING, POINTER(ULONG), PVOID, ULONG, POINTER(ULONG))
NtQueryLicenseValueParams = ((1, 'Name'), (1, 'Type'), (1, 'Buffer'), (1, 'Length'), (1, 'DataLength'))
#def CoInitializeEx(pvReserved, dwCoInit):
# return CoInitializeEx.ctypes_function(pvReserved, dwCoInit)
CoInitializeExPrototype = WINFUNCTYPE(HRESULT, LPVOID, DWORD)
CoInitializeExParams = ((1, 'pvReserved'), (1, 'dwCoInit'))
#def CoInitializeSecurity(pSecDesc, cAuthSvc, asAuthSvc, pReserved1, dwAuthnLevel, dwImpLevel, pAuthList, dwCapabilities, pReserved3):
# return CoInitializeSecurity.ctypes_function(pSecDesc, cAuthSvc, asAuthSvc, pReserved1, dwAuthnLevel, dwImpLevel, pAuthList, dwCapabilities, pReserved3)
CoInitializeSecurityPrototype = WINFUNCTYPE(HRESULT, PSECURITY_DESCRIPTOR, LONG, POINTER(SOLE_AUTHENTICATION_SERVICE), PVOID, DWORD, DWORD, PVOID, DWORD, PVOID)
CoInitializeSecurityParams = ((1, 'pSecDesc'), (1, 'cAuthSvc'), (1, 'asAuthSvc'), (1, 'pReserved1'), (1, 'dwAuthnLevel'), (1, 'dwImpLevel'), (1, 'pAuthList'), (1, 'dwCapabilities'), (1, 'pReserved3'))
#def CoCreateInstance(rclsid, pUnkOuter, dwClsContext, riid, ppv):
# return CoCreateInstance.ctypes_function(rclsid, pUnkOuter, dwClsContext, riid, ppv)
CoCreateInstancePrototype = WINFUNCTYPE(HRESULT, REFCLSID, LPUNKNOWN, DWORD, REFIID, POINTER(LPVOID))
CoCreateInstanceParams = ((1, 'rclsid'), (1, 'pUnkOuter'), (1, 'dwClsContext'), (1, 'riid'), (1, 'ppv'))
#def CoCreateInstanceEx(rclsid, punkOuter, dwClsCtx, pServerInfo, dwCount, pResults):
# return CoCreateInstanceEx.ctypes_function(rclsid, punkOuter, dwClsCtx, pServerInfo, dwCount, pResults)
CoCreateInstanceExPrototype = WINFUNCTYPE(HRESULT, REFCLSID, POINTER(IUnknown), DWORD, POINTER(COSERVERINFO), DWORD, POINTER(MULTI_QI))
CoCreateInstanceExParams = ((1, 'rclsid'), (1, 'punkOuter'), (1, 'dwClsCtx'), (1, 'pServerInfo'), (1, 'dwCount'), (1, 'pResults'))
#def CoGetInterceptor(iidIntercepted, punkOuter, iid, ppv):
# return CoGetInterceptor.ctypes_function(iidIntercepted, punkOuter, iid, ppv)
CoGetInterceptorPrototype = WINFUNCTYPE(HRESULT, REFIID, POINTER(IUnknown), REFIID, POINTER(PVOID))
CoGetInterceptorParams = ((1, 'iidIntercepted'), (1, 'punkOuter'), (1, 'iid'), (1, 'ppv'))
#def CLSIDFromProgID(lpszProgID, lpclsid):
# return CLSIDFromProgID.ctypes_function(lpszProgID, lpclsid)
CLSIDFromProgIDPrototype = WINFUNCTYPE(HRESULT, LPCOLESTR, LPCLSID)
CLSIDFromProgIDParams = ((1, 'lpszProgID'), (1, 'lpclsid'))
#def NtAlpcCreatePort(PortHandle, ObjectAttributes, PortAttributes):
# return NtAlpcCreatePort.ctypes_function(PortHandle, ObjectAttributes, PortAttributes)
NtAlpcCreatePortPrototype = WINFUNCTYPE(NTSTATUS, PHANDLE, POBJECT_ATTRIBUTES, PALPC_PORT_ATTRIBUTES)
NtAlpcCreatePortParams = ((1, 'PortHandle'), (1, 'ObjectAttributes'), (1, 'PortAttributes'))
#def NtAlpcQueryInformation(PortHandle, PortInformationClass, PortInformation, Length, ReturnLength):
# return NtAlpcQueryInformation.ctypes_function(PortHandle, PortInformationClass, PortInformation, Length, ReturnLength)
NtAlpcQueryInformationPrototype = WINFUNCTYPE(NTSTATUS, HANDLE, ALPC_PORT_INFORMATION_CLASS, PVOID, ULONG, PULONG)
NtAlpcQueryInformationParams = ((1, 'PortHandle'), (1, 'PortInformationClass'), (1, 'PortInformation'), (1, 'Length'), (1, 'ReturnLength'))
#def NtAlpcQueryInformationMessage(PortHandle, PortMessage, MessageInformationClass, MessageInformation, Length, ReturnLength):
# return NtAlpcQueryInformationMessage.ctypes_function(PortHandle, PortMessage, MessageInformationClass, MessageInformation, Length, ReturnLength)
NtAlpcQueryInformationMessagePrototype = WINFUNCTYPE(NTSTATUS, HANDLE, PPORT_MESSAGE, ALPC_MESSAGE_INFORMATION_CLASS, PVOID, ULONG, PULONG)
NtAlpcQueryInformationMessageParams = ((1, 'PortHandle'), (1, 'PortMessage'), (1, 'MessageInformationClass'), (1, 'MessageInformation'), (1, 'Length'), (1, 'ReturnLength'))
#def NtAlpcConnectPort(PortHandle, PortName, ObjectAttributes, PortAttributes, Flags, RequiredServerSid, ConnectionMessage, BufferLength, OutMessageAttributes, InMessageAttributes, Timeout):
# return NtAlpcConnectPort.ctypes_function(PortHandle, PortName, ObjectAttributes, PortAttributes, Flags, RequiredServerSid, ConnectionMessage, BufferLength, OutMessageAttributes, InMessageAttributes, Timeout)
NtAlpcConnectPortPrototype = WINFUNCTYPE(NTSTATUS, PHANDLE, PUNICODE_STRING, POBJECT_ATTRIBUTES, PALPC_PORT_ATTRIBUTES, ULONG, PSID, PPORT_MESSAGE, PULONG, PALPC_MESSAGE_ATTRIBUTES, PALPC_MESSAGE_ATTRIBUTES, PLARGE_INTEGER)
NtAlpcConnectPortParams = ((1, 'PortHandle'), (1, 'PortName'), (1, 'ObjectAttributes'), (1, 'PortAttributes'), (1, 'Flags'), (1, 'RequiredServerSid'), (1, 'ConnectionMessage'), (1, 'BufferLength'), (1, 'OutMessageAttributes'), (1, 'InMessageAttributes'), (1, 'Timeout'))
#def NtAlpcConnectPortEx(PortHandle, ConnectionPortObjectAttributes, ClientPortObjectAttributes, PortAttributes, Flags, ServerSecurityRequirements, ConnectionMessage, BufferLength, OutMessageAttributes, InMessageAttributes, Timeout):
# return NtAlpcConnectPortEx.ctypes_function(PortHandle, ConnectionPortObjectAttributes, ClientPortObjectAttributes, PortAttributes, Flags, ServerSecurityRequirements, ConnectionMessage, BufferLength, OutMessageAttributes, InMessageAttributes, Timeout)
NtAlpcConnectPortExPrototype = WINFUNCTYPE(NTSTATUS, PHANDLE, POBJECT_ATTRIBUTES, POBJECT_ATTRIBUTES, PALPC_PORT_ATTRIBUTES, ULONG, PSECURITY_DESCRIPTOR, PPORT_MESSAGE, PSIZE_T, PALPC_MESSAGE_ATTRIBUTES, PALPC_MESSAGE_ATTRIBUTES, PLARGE_INTEGER)
NtAlpcConnectPortExParams = ((1, 'PortHandle'), (1, 'ConnectionPortObjectAttributes'), (1, 'ClientPortObjectAttributes'), (1, 'PortAttributes'), (1, 'Flags'), (1, 'ServerSecurityRequirements'), (1, 'ConnectionMessage'), (1, 'BufferLength'), (1, 'OutMessageAttributes'), (1, 'InMessageAttributes'), (1, 'Timeout'))
#def NtAlpcAcceptConnectPort(PortHandle, ConnectionPortHandle, Flags, ObjectAttributes, PortAttributes, PortContext, ConnectionRequest, ConnectionMessageAttributes, AcceptConnection):
# return NtAlpcAcceptConnectPort.ctypes_function(PortHandle, ConnectionPortHandle, Flags, ObjectAttributes, PortAttributes, PortContext, ConnectionRequest, ConnectionMessageAttributes, AcceptConnection)
NtAlpcAcceptConnectPortPrototype = WINFUNCTYPE(NTSTATUS, PHANDLE, HANDLE, ULONG, POBJECT_ATTRIBUTES, PALPC_PORT_ATTRIBUTES, PVOID, PPORT_MESSAGE, PALPC_MESSAGE_ATTRIBUTES, BOOLEAN)
NtAlpcAcceptConnectPortParams = ((1, 'PortHandle'), (1, 'ConnectionPortHandle'), (1, 'Flags'), (1, 'ObjectAttributes'), (1, 'PortAttributes'), (1, 'PortContext'), (1, 'ConnectionRequest'), (1, 'ConnectionMessageAttributes'), (1, 'AcceptConnection'))
#def AlpcInitializeMessageAttribute(AttributeFlags, Buffer, BufferSize, RequiredBufferSize):
# return AlpcInitializeMessageAttribute.ctypes_function(AttributeFlags, Buffer, BufferSize, RequiredBufferSize)
AlpcInitializeMessageAttributePrototype = WINFUNCTYPE(NTSTATUS, ULONG, PALPC_MESSAGE_ATTRIBUTES, ULONG, PULONG)
AlpcInitializeMessageAttributeParams = ((1, 'AttributeFlags'), (1, 'Buffer'), (1, 'BufferSize'), (1, 'RequiredBufferSize'))
#def AlpcGetMessageAttribute(Buffer, AttributeFlag):
# return AlpcGetMessageAttribute.ctypes_function(Buffer, AttributeFlag)
AlpcGetMessageAttributePrototype = WINFUNCTYPE(PVOID, PALPC_MESSAGE_ATTRIBUTES, ULONG)
AlpcGetMessageAttributeParams = ((1, 'Buffer'), (1, 'AttributeFlag'))
#def NtAlpcSendWaitReceivePort(PortHandle, Flags, SendMessage, SendMessageAttributes, ReceiveMessage, BufferLength, ReceiveMessageAttributes, Timeout):
# return NtAlpcSendWaitReceivePort.ctypes_function(PortHandle, Flags, SendMessage, SendMessageAttributes, ReceiveMessage, BufferLength, ReceiveMessageAttributes, Timeout)
NtAlpcSendWaitReceivePortPrototype = WINFUNCTYPE(NTSTATUS, HANDLE, ULONG, PPORT_MESSAGE, PALPC_MESSAGE_ATTRIBUTES, PPORT_MESSAGE, PSIZE_T, PALPC_MESSAGE_ATTRIBUTES, PLARGE_INTEGER)
NtAlpcSendWaitReceivePortParams = ((1, 'PortHandle'), (1, 'Flags'), (1, 'SendMessage'), (1, 'SendMessageAttributes'), (1, 'ReceiveMessage'), (1, 'BufferLength'), (1, 'ReceiveMessageAttributes'), (1, 'Timeout'))
#def NtAlpcDisconnectPort(PortHandle, Flags):
# return NtAlpcDisconnectPort.ctypes_function(PortHandle, Flags)
NtAlpcDisconnectPortPrototype = WINFUNCTYPE(NTSTATUS, HANDLE, ULONG)
NtAlpcDisconnectPortParams = ((1, 'PortHandle'), (1, 'Flags'))
#def NtAlpcCreatePortSection(PortHandle, Flags, SectionHandle, SectionSize, AlpcSectionHandle, ActualSectionSize):
# return NtAlpcCreatePortSection.ctypes_function(PortHandle, Flags, SectionHandle, SectionSize, AlpcSectionHandle, ActualSectionSize)
NtAlpcCreatePortSectionPrototype = WINFUNCTYPE(NTSTATUS, HANDLE, ULONG, HANDLE, SIZE_T, PALPC_HANDLE, PSIZE_T)
NtAlpcCreatePortSectionParams = ((1, 'PortHandle'), (1, 'Flags'), (1, 'SectionHandle'), (1, 'SectionSize'), (1, 'AlpcSectionHandle'), (1, 'ActualSectionSize'))
#def NtAlpcDeletePortSection(PortHandle, Flags, SectionHandle):
# return NtAlpcDeletePortSection.ctypes_function(PortHandle, Flags, SectionHandle)
NtAlpcDeletePortSectionPrototype = WINFUNCTYPE(NTSTATUS, HANDLE, ULONG, ALPC_HANDLE)
NtAlpcDeletePortSectionParams = ((1, 'PortHandle'), (1, 'Flags'), (1, 'SectionHandle'))
#def NtAlpcCreateResourceReserve(PortHandle, Flags, MessageSize, ResourceId):
# return NtAlpcCreateResourceReserve.ctypes_function(PortHandle, Flags, MessageSize, ResourceId)
NtAlpcCreateResourceReservePrototype = WINFUNCTYPE(NTSTATUS, HANDLE, ULONG, SIZE_T, PALPC_HANDLE)
NtAlpcCreateResourceReserveParams = ((1, 'PortHandle'), (1, 'Flags'), (1, 'MessageSize'), (1, 'ResourceId'))
#def NtAlpcDeleteResourceReserve(PortHandle, Flags, ResourceId):
# return NtAlpcDeleteResourceReserve.ctypes_function(PortHandle, Flags, ResourceId)
NtAlpcDeleteResourceReservePrototype = WINFUNCTYPE(NTSTATUS, HANDLE, ULONG, ALPC_HANDLE)
NtAlpcDeleteResourceReserveParams = ((1, 'PortHandle'), (1, 'Flags'), (1, 'ResourceId'))
#def NtAlpcCreateSectionView(PortHandle, Flags, ViewAttributes):
# return NtAlpcCreateSectionView.ctypes_function(PortHandle, Flags, ViewAttributes)
NtAlpcCreateSectionViewPrototype = WINFUNCTYPE(NTSTATUS, HANDLE, ULONG, PALPC_DATA_VIEW_ATTR)
NtAlpcCreateSectionViewParams = ((1, 'PortHandle'), (1, 'Flags'), (1, 'ViewAttributes'))
#def NtAlpcDeleteSectionView(PortHandle, Flags, ViewBase):
# return NtAlpcDeleteSectionView.ctypes_function(PortHandle, Flags, ViewBase)
NtAlpcDeleteSectionViewPrototype = WINFUNCTYPE(NTSTATUS, HANDLE, ULONG, PVOID)
NtAlpcDeleteSectionViewParams = ((1, 'PortHandle'), (1, 'Flags'), (1, 'ViewBase'))
#def NtAlpcCreateSecurityContext(PortHandle, Flags, SecurityAttribute):
# return NtAlpcCreateSecurityContext.ctypes_function(PortHandle, Flags, SecurityAttribute)
NtAlpcCreateSecurityContextPrototype = WINFUNCTYPE(NTSTATUS, HANDLE, ULONG, PALPC_SECURITY_ATTR)
NtAlpcCreateSecurityContextParams = ((1, 'PortHandle'), (1, 'Flags'), (1, 'SecurityAttribute'))
#def NtAlpcDeleteSecurityContext(PortHandle, Flags, ContextHandle):
# return NtAlpcDeleteSecurityContext.ctypes_function(PortHandle, Flags, ContextHandle)
NtAlpcDeleteSecurityContextPrototype = WINFUNCTYPE(NTSTATUS, HANDLE, ULONG, ALPC_HANDLE)
NtAlpcDeleteSecurityContextParams = ((1, 'PortHandle'), (1, 'Flags'), (1, 'ContextHandle'))
#def NtAlpcRevokeSecurityContext(PortHandle, Flags, ContextHandle):
# return NtAlpcRevokeSecurityContext.ctypes_function(PortHandle, Flags, ContextHandle)
NtAlpcRevokeSecurityContextPrototype = WINFUNCTYPE(NTSTATUS, HANDLE, ULONG, ALPC_HANDLE)
NtAlpcRevokeSecurityContextParams = ((1, 'PortHandle'), (1, 'Flags'), (1, 'ContextHandle'))
#def CreatePipe(hReadPipe, hWritePipe, lpPipeAttributes, nSize):
# return CreatePipe.ctypes_function(hReadPipe, hWritePipe, lpPipeAttributes, nSize)
CreatePipePrototype = WINFUNCTYPE(BOOL, PHANDLE, PHANDLE, LPSECURITY_ATTRIBUTES, DWORD)
CreatePipeParams = ((1, 'hReadPipe'), (1, 'hWritePipe'), (1, 'lpPipeAttributes'), (1, 'nSize'))
#def CreateNamedPipeA(lpName, dwOpenMode, dwPipeMode, nMaxInstances, nOutBufferSize, nInBufferSize, nDefaultTimeOut, lpSecurityAttributes):
# return CreateNamedPipeA.ctypes_function(lpName, dwOpenMode, dwPipeMode, nMaxInstances, nOutBufferSize, nInBufferSize, nDefaultTimeOut, lpSecurityAttributes)
CreateNamedPipeAPrototype = WINFUNCTYPE(HANDLE, LPCSTR, DWORD, DWORD, DWORD, DWORD, DWORD, DWORD, LPSECURITY_ATTRIBUTES)
CreateNamedPipeAParams = ((1, 'lpName'), (1, 'dwOpenMode'), (1, 'dwPipeMode'), (1, 'nMaxInstances'), (1, 'nOutBufferSize'), (1, 'nInBufferSize'), (1, 'nDefaultTimeOut'), (1, 'lpSecurityAttributes'))
#def CreateNamedPipeW(lpName, dwOpenMode, dwPipeMode, nMaxInstances, nOutBufferSize, nInBufferSize, nDefaultTimeOut, lpSecurityAttributes):
# return CreateNamedPipeW.ctypes_function(lpName, dwOpenMode, dwPipeMode, nMaxInstances, nOutBufferSize, nInBufferSize, nDefaultTimeOut, lpSecurityAttributes)
CreateNamedPipeWPrototype = WINFUNCTYPE(HANDLE, LPWSTR, DWORD, DWORD, DWORD, DWORD, DWORD, DWORD, LPSECURITY_ATTRIBUTES)
CreateNamedPipeWParams = ((1, 'lpName'), (1, 'dwOpenMode'), (1, 'dwPipeMode'), (1, 'nMaxInstances'), (1, 'nOutBufferSize'), (1, 'nInBufferSize'), (1, 'nDefaultTimeOut'), (1, 'lpSecurityAttributes'))
#def ConnectNamedPipe(hNamedPipe, lpOverlapped):
# return ConnectNamedPipe.ctypes_function(hNamedPipe, lpOverlapped)
ConnectNamedPipePrototype = WINFUNCTYPE(BOOL, HANDLE, LPOVERLAPPED)
ConnectNamedPipeParams = ((1, 'hNamedPipe'), (1, 'lpOverlapped'))
#def SetNamedPipeHandleState(hNamedPipe, lpMode, lpMaxCollectionCount, lpCollectDataTimeout):
# return SetNamedPipeHandleState.ctypes_function(hNamedPipe, lpMode, lpMaxCollectionCount, lpCollectDataTimeout)
SetNamedPipeHandleStatePrototype = WINFUNCTYPE(BOOL, HANDLE, LPDWORD, LPDWORD, LPDWORD)
SetNamedPipeHandleStateParams = ((1, 'hNamedPipe'), (1, 'lpMode'), (1, 'lpMaxCollectionCount'), (1, 'lpCollectDataTimeout'))
#def PeekNamedPipe(hNamedPipe, lpBuffer, nBufferSize, lpBytesRead, lpTotalBytesAvail, lpBytesLeftThisMessage):
# return PeekNamedPipe.ctypes_function(hNamedPipe, lpBuffer, nBufferSize, lpBytesRead, lpTotalBytesAvail, lpBytesLeftThisMessage)
PeekNamedPipePrototype = WINFUNCTYPE(BOOL, HANDLE, LPVOID, DWORD, LPDWORD, LPDWORD, LPDWORD)
PeekNamedPipeParams = ((1, 'hNamedPipe'), (1, 'lpBuffer'), (1, 'nBufferSize'), (1, 'lpBytesRead'), (1, 'lpTotalBytesAvail'), (1, 'lpBytesLeftThisMessage'))
# -- CryptCATAdmin* catalog-administration API declarations (hashing files,
# looking up and releasing catalog contexts).  Generated ctypes metadata: for
# each API <Name>, <Name>Prototype is the WINFUNCTYPE signature and
# <Name>Params the paramflags tuples (leading 1 = input parameter, per the
# ctypes paramflags convention).  The commented-out "def" above each pair
# records the intended python call shape.
#def CryptCATAdminCalcHashFromFileHandle(hFile, pcbHash, pbHash, dwFlags):
#    return CryptCATAdminCalcHashFromFileHandle.ctypes_function(hFile, pcbHash, pbHash, dwFlags)
CryptCATAdminCalcHashFromFileHandlePrototype = WINFUNCTYPE(BOOL, HANDLE, POINTER(DWORD), POINTER(BYTE), DWORD)
CryptCATAdminCalcHashFromFileHandleParams = ((1, 'hFile'), (1, 'pcbHash'), (1, 'pbHash'), (1, 'dwFlags'))
#def CryptCATAdminCalcHashFromFileHandle2(hCatAdmin, hFile, pcbHash, pbHash, dwFlags):
#    return CryptCATAdminCalcHashFromFileHandle2.ctypes_function(hCatAdmin, hFile, pcbHash, pbHash, dwFlags)
CryptCATAdminCalcHashFromFileHandle2Prototype = WINFUNCTYPE(BOOL, HCATADMIN, HANDLE, POINTER(DWORD), POINTER(BYTE), DWORD)
CryptCATAdminCalcHashFromFileHandle2Params = ((1, 'hCatAdmin'), (1, 'hFile'), (1, 'pcbHash'), (1, 'pbHash'), (1, 'dwFlags'))
#def CryptCATAdminEnumCatalogFromHash(hCatAdmin, pbHash, cbHash, dwFlags, phPrevCatInfo):
#    return CryptCATAdminEnumCatalogFromHash.ctypes_function(hCatAdmin, pbHash, cbHash, dwFlags, phPrevCatInfo)
CryptCATAdminEnumCatalogFromHashPrototype = WINFUNCTYPE(HCATINFO, HCATADMIN, POINTER(BYTE), DWORD, DWORD, POINTER(HCATINFO))
CryptCATAdminEnumCatalogFromHashParams = ((1, 'hCatAdmin'), (1, 'pbHash'), (1, 'cbHash'), (1, 'dwFlags'), (1, 'phPrevCatInfo'))
#def CryptCATAdminAcquireContext(phCatAdmin, pgSubsystem, dwFlags):
#    return CryptCATAdminAcquireContext.ctypes_function(phCatAdmin, pgSubsystem, dwFlags)
CryptCATAdminAcquireContextPrototype = WINFUNCTYPE(BOOL, POINTER(HCATADMIN), POINTER(GUID), DWORD)
CryptCATAdminAcquireContextParams = ((1, 'phCatAdmin'), (1, 'pgSubsystem'), (1, 'dwFlags'))
#def CryptCATAdminAcquireContext2(phCatAdmin, pgSubsystem, pwszHashAlgorithm, pStrongHashPolicy, dwFlags):
#    return CryptCATAdminAcquireContext2.ctypes_function(phCatAdmin, pgSubsystem, pwszHashAlgorithm, pStrongHashPolicy, dwFlags)
CryptCATAdminAcquireContext2Prototype = WINFUNCTYPE(BOOL, POINTER(HCATADMIN), POINTER(GUID), PCWSTR, PCCERT_STRONG_SIGN_PARA, DWORD)
CryptCATAdminAcquireContext2Params = ((1, 'phCatAdmin'), (1, 'pgSubsystem'), (1, 'pwszHashAlgorithm'), (1, 'pStrongHashPolicy'), (1, 'dwFlags'))
#def CryptCATCatalogInfoFromContext(hCatInfo, psCatInfo, dwFlags):
#    return CryptCATCatalogInfoFromContext.ctypes_function(hCatInfo, psCatInfo, dwFlags)
CryptCATCatalogInfoFromContextPrototype = WINFUNCTYPE(BOOL, HCATINFO, POINTER(CATALOG_INFO), DWORD)
CryptCATCatalogInfoFromContextParams = ((1, 'hCatInfo'), (1, 'psCatInfo'), (1, 'dwFlags'))
#def CryptCATAdminReleaseCatalogContext(hCatAdmin, hCatInfo, dwFlags):
#    return CryptCATAdminReleaseCatalogContext.ctypes_function(hCatAdmin, hCatInfo, dwFlags)
CryptCATAdminReleaseCatalogContextPrototype = WINFUNCTYPE(BOOL, HCATADMIN, HCATINFO, DWORD)
CryptCATAdminReleaseCatalogContextParams = ((1, 'hCatAdmin'), (1, 'hCatInfo'), (1, 'dwFlags'))
#def CryptCATAdminReleaseContext(hCatAdmin, dwFlags):
#    return CryptCATAdminReleaseContext.ctypes_function(hCatAdmin, dwFlags)
CryptCATAdminReleaseContextPrototype = WINFUNCTYPE(BOOL, HCATADMIN, DWORD)
CryptCATAdminReleaseContextParams = ((1, 'hCatAdmin'), (1, 'dwFlags'))
# -- Catalog member/attribute lookup declarations.  Generated ctypes
# metadata: for each API <Name>, <Name>Prototype is the WINFUNCTYPE signature
# and <Name>Params the paramflags tuples (leading 1 = input parameter, per
# the ctypes paramflags convention).  The commented-out "def" above each
# pair records the intended python call shape.
#
# FIX: the generated file declared CryptCATGetAttrInfo twice with
# byte-identical Prototype/Params; the redundant second copy (a no-op
# re-assignment of the same values) has been removed.
#def CryptCATGetAttrInfo(hCatalog, pCatMember, pwszReferenceTag):
#    return CryptCATGetAttrInfo.ctypes_function(hCatalog, pCatMember, pwszReferenceTag)
CryptCATGetAttrInfoPrototype = WINFUNCTYPE(POINTER(CRYPTCATATTRIBUTE), HANDLE, POINTER(CRYPTCATMEMBER), LPWSTR)
CryptCATGetAttrInfoParams = ((1, 'hCatalog'), (1, 'pCatMember'), (1, 'pwszReferenceTag'))
#def CryptCATGetMemberInfo(hCatalog, pwszReferenceTag):
#    return CryptCATGetMemberInfo.ctypes_function(hCatalog, pwszReferenceTag)
CryptCATGetMemberInfoPrototype = WINFUNCTYPE(POINTER(CRYPTCATMEMBER), HANDLE, LPWSTR)
CryptCATGetMemberInfoParams = ((1, 'hCatalog'), (1, 'pwszReferenceTag'))
# -- Catalog enumeration API declarations (walk attributes and members of an
# open catalog).  Generated ctypes metadata: <Name>Prototype is the
# WINFUNCTYPE signature, <Name>Params the paramflags tuples (leading 1 =
# input parameter).  Each enumerator takes the previous item and returns a
# pointer to the next, as shown by the signatures below.
#def CryptCATEnumerateCatAttr(hCatalog, pPrevAttr):
#    return CryptCATEnumerateCatAttr.ctypes_function(hCatalog, pPrevAttr)
CryptCATEnumerateCatAttrPrototype = WINFUNCTYPE(POINTER(CRYPTCATATTRIBUTE), HANDLE, POINTER(CRYPTCATATTRIBUTE))
CryptCATEnumerateCatAttrParams = ((1, 'hCatalog'), (1, 'pPrevAttr'))
#def CryptCATEnumerateAttr(hCatalog, pCatMember, pPrevAttr):
#    return CryptCATEnumerateAttr.ctypes_function(hCatalog, pCatMember, pPrevAttr)
CryptCATEnumerateAttrPrototype = WINFUNCTYPE(POINTER(CRYPTCATATTRIBUTE), HANDLE, POINTER(CRYPTCATMEMBER), POINTER(CRYPTCATATTRIBUTE))
CryptCATEnumerateAttrParams = ((1, 'hCatalog'), (1, 'pCatMember'), (1, 'pPrevAttr'))
#def CryptCATEnumerateMember(hCatalog, pPrevMember):
#    return CryptCATEnumerateMember.ctypes_function(hCatalog, pPrevMember)
CryptCATEnumerateMemberPrototype = WINFUNCTYPE(POINTER(CRYPTCATMEMBER), HANDLE, POINTER(CRYPTCATMEMBER))
CryptCATEnumerateMemberParams = ((1, 'hCatalog'), (1, 'pPrevMember'))
# -- Certificate / crypto-message API declarations (stores, contexts, PFX,
# CSP keys, message encrypt/decrypt, countersignatures).  Generated ctypes
# metadata: for each API <Name>, <Name>Prototype is the WINFUNCTYPE signature
# and <Name>Params the paramflags tuples (leading 1 = input parameter, per
# the ctypes paramflags convention).  The commented-out "def" above each
# pair records the intended python call shape.
#
# NOTE(review): CertGetNameStringA declares its name buffer as LPCSTR while
# the W variant uses the non-const LPWSTR; both resolve to ctypes char
# pointers here, but the const-ness is inconsistent between the A/W pair --
# confirm against the Win32 headers before changing either.
#def CryptQueryObject(dwObjectType, pvObject, dwExpectedContentTypeFlags, dwExpectedFormatTypeFlags, dwFlags, pdwMsgAndCertEncodingType, pdwContentType, pdwFormatType, phCertStore, phMsg, ppvContext):
#    return CryptQueryObject.ctypes_function(dwObjectType, pvObject, dwExpectedContentTypeFlags, dwExpectedFormatTypeFlags, dwFlags, pdwMsgAndCertEncodingType, pdwContentType, pdwFormatType, phCertStore, phMsg, ppvContext)
CryptQueryObjectPrototype = WINFUNCTYPE(BOOL, DWORD, PVOID, DWORD, DWORD, DWORD, POINTER(DWORD), POINTER(DWORD), POINTER(DWORD), POINTER(HCERTSTORE), POINTER(HCRYPTMSG), POINTER(PVOID))
CryptQueryObjectParams = ((1, 'dwObjectType'), (1, 'pvObject'), (1, 'dwExpectedContentTypeFlags'), (1, 'dwExpectedFormatTypeFlags'), (1, 'dwFlags'), (1, 'pdwMsgAndCertEncodingType'), (1, 'pdwContentType'), (1, 'pdwFormatType'), (1, 'phCertStore'), (1, 'phMsg'), (1, 'ppvContext'))
#def CryptMsgGetParam(hCryptMsg, dwParamType, dwIndex, pvData, pcbData):
#    return CryptMsgGetParam.ctypes_function(hCryptMsg, dwParamType, dwIndex, pvData, pcbData)
CryptMsgGetParamPrototype = WINFUNCTYPE(BOOL, HCRYPTMSG, DWORD, DWORD, PVOID, POINTER(DWORD))
CryptMsgGetParamParams = ((1, 'hCryptMsg'), (1, 'dwParamType'), (1, 'dwIndex'), (1, 'pvData'), (1, 'pcbData'))
#def CryptDecodeObject(dwCertEncodingType, lpszStructType, pbEncoded, cbEncoded, dwFlags, pvStructInfo, pcbStructInfo):
#    return CryptDecodeObject.ctypes_function(dwCertEncodingType, lpszStructType, pbEncoded, cbEncoded, dwFlags, pvStructInfo, pcbStructInfo)
CryptDecodeObjectPrototype = WINFUNCTYPE(BOOL, DWORD, LPCSTR, POINTER(BYTE), DWORD, DWORD, PVOID, POINTER(DWORD))
CryptDecodeObjectParams = ((1, 'dwCertEncodingType'), (1, 'lpszStructType'), (1, 'pbEncoded'), (1, 'cbEncoded'), (1, 'dwFlags'), (1, 'pvStructInfo'), (1, 'pcbStructInfo'))
#def CertFindCertificateInStore(hCertStore, dwCertEncodingType, dwFindFlags, dwFindType, pvFindPara, pPrevCertContext):
#    return CertFindCertificateInStore.ctypes_function(hCertStore, dwCertEncodingType, dwFindFlags, dwFindType, pvFindPara, pPrevCertContext)
CertFindCertificateInStorePrototype = WINFUNCTYPE(PCCERT_CONTEXT, HCERTSTORE, DWORD, DWORD, DWORD, PVOID, PCCERT_CONTEXT)
CertFindCertificateInStoreParams = ((1, 'hCertStore'), (1, 'dwCertEncodingType'), (1, 'dwFindFlags'), (1, 'dwFindType'), (1, 'pvFindPara'), (1, 'pPrevCertContext'))
#def CertGetNameStringA(pCertContext, dwType, dwFlags, pvTypePara, pszNameString, cchNameString):
#    return CertGetNameStringA.ctypes_function(pCertContext, dwType, dwFlags, pvTypePara, pszNameString, cchNameString)
CertGetNameStringAPrototype = WINFUNCTYPE(DWORD, PCCERT_CONTEXT, DWORD, DWORD, PVOID, LPCSTR, DWORD)
CertGetNameStringAParams = ((1, 'pCertContext'), (1, 'dwType'), (1, 'dwFlags'), (1, 'pvTypePara'), (1, 'pszNameString'), (1, 'cchNameString'))
#def CertGetNameStringW(pCertContext, dwType, dwFlags, pvTypePara, pszNameString, cchNameString):
#    return CertGetNameStringW.ctypes_function(pCertContext, dwType, dwFlags, pvTypePara, pszNameString, cchNameString)
CertGetNameStringWPrototype = WINFUNCTYPE(DWORD, PCCERT_CONTEXT, DWORD, DWORD, PVOID, LPWSTR, DWORD)
CertGetNameStringWParams = ((1, 'pCertContext'), (1, 'dwType'), (1, 'dwFlags'), (1, 'pvTypePara'), (1, 'pszNameString'), (1, 'cchNameString'))
#def CertGetCertificateChain(hChainEngine, pCertContext, pTime, hAdditionalStore, pChainPara, dwFlags, pvReserved, ppChainContext):
#    return CertGetCertificateChain.ctypes_function(hChainEngine, pCertContext, pTime, hAdditionalStore, pChainPara, dwFlags, pvReserved, ppChainContext)
CertGetCertificateChainPrototype = WINFUNCTYPE(BOOL, HCERTCHAINENGINE, PCCERT_CONTEXT, LPFILETIME, HCERTSTORE, PCERT_CHAIN_PARA, DWORD, LPVOID, POINTER(PCCERT_CHAIN_CONTEXT))
CertGetCertificateChainParams = ((1, 'hChainEngine'), (1, 'pCertContext'), (1, 'pTime'), (1, 'hAdditionalStore'), (1, 'pChainPara'), (1, 'dwFlags'), (1, 'pvReserved'), (1, 'ppChainContext'))
#def CertCreateSelfSignCertificate(hCryptProvOrNCryptKey, pSubjectIssuerBlob, dwFlags, pKeyProvInfo, pSignatureAlgorithm, pStartTime, pEndTime, pExtensions):
#    return CertCreateSelfSignCertificate.ctypes_function(hCryptProvOrNCryptKey, pSubjectIssuerBlob, dwFlags, pKeyProvInfo, pSignatureAlgorithm, pStartTime, pEndTime, pExtensions)
CertCreateSelfSignCertificatePrototype = WINFUNCTYPE(PCCERT_CONTEXT, HCRYPTPROV_OR_NCRYPT_KEY_HANDLE, PCERT_NAME_BLOB, DWORD, PCRYPT_KEY_PROV_INFO, PCRYPT_ALGORITHM_IDENTIFIER, PSYSTEMTIME, PSYSTEMTIME, PCERT_EXTENSIONS)
CertCreateSelfSignCertificateParams = ((1, 'hCryptProvOrNCryptKey'), (1, 'pSubjectIssuerBlob'), (1, 'dwFlags'), (1, 'pKeyProvInfo'), (1, 'pSignatureAlgorithm'), (1, 'pStartTime'), (1, 'pEndTime'), (1, 'pExtensions'))
#def CertStrToNameA(dwCertEncodingType, pszX500, dwStrType, pvReserved, pbEncoded, pcbEncoded, ppszError):
#    return CertStrToNameA.ctypes_function(dwCertEncodingType, pszX500, dwStrType, pvReserved, pbEncoded, pcbEncoded, ppszError)
CertStrToNameAPrototype = WINFUNCTYPE(BOOL, DWORD, LPCSTR, DWORD, PVOID, POINTER(BYTE), POINTER(DWORD), POINTER(LPCSTR))
CertStrToNameAParams = ((1, 'dwCertEncodingType'), (1, 'pszX500'), (1, 'dwStrType'), (1, 'pvReserved'), (1, 'pbEncoded'), (1, 'pcbEncoded'), (1, 'ppszError'))
#def CertStrToNameW(dwCertEncodingType, pszX500, dwStrType, pvReserved, pbEncoded, pcbEncoded, ppszError):
#    return CertStrToNameW.ctypes_function(dwCertEncodingType, pszX500, dwStrType, pvReserved, pbEncoded, pcbEncoded, ppszError)
CertStrToNameWPrototype = WINFUNCTYPE(BOOL, DWORD, LPWSTR, DWORD, PVOID, POINTER(BYTE), POINTER(DWORD), POINTER(LPWSTR))
CertStrToNameWParams = ((1, 'dwCertEncodingType'), (1, 'pszX500'), (1, 'dwStrType'), (1, 'pvReserved'), (1, 'pbEncoded'), (1, 'pcbEncoded'), (1, 'ppszError'))
#def CertOpenStore(lpszStoreProvider, dwMsgAndCertEncodingType, hCryptProv, dwFlags, pvPara):
#    return CertOpenStore.ctypes_function(lpszStoreProvider, dwMsgAndCertEncodingType, hCryptProv, dwFlags, pvPara)
CertOpenStorePrototype = WINFUNCTYPE(HCERTSTORE, LPCSTR, DWORD, HCRYPTPROV_LEGACY, DWORD, PVOID)
CertOpenStoreParams = ((1, 'lpszStoreProvider'), (1, 'dwMsgAndCertEncodingType'), (1, 'hCryptProv'), (1, 'dwFlags'), (1, 'pvPara'))
#def CertAddCertificateContextToStore(hCertStore, pCertContext, dwAddDisposition, ppStoreContext):
#    return CertAddCertificateContextToStore.ctypes_function(hCertStore, pCertContext, dwAddDisposition, ppStoreContext)
CertAddCertificateContextToStorePrototype = WINFUNCTYPE(BOOL, HCERTSTORE, PCCERT_CONTEXT, DWORD, POINTER(PCCERT_CONTEXT))
CertAddCertificateContextToStoreParams = ((1, 'hCertStore'), (1, 'pCertContext'), (1, 'dwAddDisposition'), (1, 'ppStoreContext'))
#def CertFreeCertificateContext(pCertContext):
#    return CertFreeCertificateContext.ctypes_function(pCertContext)
CertFreeCertificateContextPrototype = WINFUNCTYPE(BOOL, PCCERT_CONTEXT)
CertFreeCertificateContextParams = ((1, 'pCertContext'),)
#def PFXExportCertStoreEx(hStore, pPFX, szPassword, pvPara, dwFlags):
#    return PFXExportCertStoreEx.ctypes_function(hStore, pPFX, szPassword, pvPara, dwFlags)
PFXExportCertStoreExPrototype = WINFUNCTYPE(BOOL, HCERTSTORE, POINTER(CRYPT_DATA_BLOB), LPCWSTR, PVOID, DWORD)
PFXExportCertStoreExParams = ((1, 'hStore'), (1, 'pPFX'), (1, 'szPassword'), (1, 'pvPara'), (1, 'dwFlags'))
#def PFXImportCertStore(pPFX, szPassword, dwFlags):
#    return PFXImportCertStore.ctypes_function(pPFX, szPassword, dwFlags)
PFXImportCertStorePrototype = WINFUNCTYPE(HCERTSTORE, POINTER(CRYPT_DATA_BLOB), LPCWSTR, DWORD)
PFXImportCertStoreParams = ((1, 'pPFX'), (1, 'szPassword'), (1, 'dwFlags'))
#def CryptGenKey(hProv, Algid, dwFlags, phKey):
#    return CryptGenKey.ctypes_function(hProv, Algid, dwFlags, phKey)
CryptGenKeyPrototype = WINFUNCTYPE(BOOL, HCRYPTPROV, ALG_ID, DWORD, POINTER(HCRYPTKEY))
CryptGenKeyParams = ((1, 'hProv'), (1, 'Algid'), (1, 'dwFlags'), (1, 'phKey'))
#def CryptDestroyKey(hKey):
#    return CryptDestroyKey.ctypes_function(hKey)
CryptDestroyKeyPrototype = WINFUNCTYPE(BOOL, HCRYPTKEY)
CryptDestroyKeyParams = ((1, 'hKey'),)
#def CryptAcquireContextA(phProv, pszContainer, pszProvider, dwProvType, dwFlags):
#    return CryptAcquireContextA.ctypes_function(phProv, pszContainer, pszProvider, dwProvType, dwFlags)
CryptAcquireContextAPrototype = WINFUNCTYPE(BOOL, POINTER(HCRYPTPROV), LPCSTR, LPCSTR, DWORD, DWORD)
CryptAcquireContextAParams = ((1, 'phProv'), (1, 'pszContainer'), (1, 'pszProvider'), (1, 'dwProvType'), (1, 'dwFlags'))
#def CryptAcquireContextW(phProv, pszContainer, pszProvider, dwProvType, dwFlags):
#    return CryptAcquireContextW.ctypes_function(phProv, pszContainer, pszProvider, dwProvType, dwFlags)
CryptAcquireContextWPrototype = WINFUNCTYPE(BOOL, POINTER(HCRYPTPROV), LPWSTR, LPWSTR, DWORD, DWORD)
CryptAcquireContextWParams = ((1, 'phProv'), (1, 'pszContainer'), (1, 'pszProvider'), (1, 'dwProvType'), (1, 'dwFlags'))
#def CryptReleaseContext(hProv, dwFlags):
#    return CryptReleaseContext.ctypes_function(hProv, dwFlags)
CryptReleaseContextPrototype = WINFUNCTYPE(BOOL, HCRYPTPROV, DWORD)
CryptReleaseContextParams = ((1, 'hProv'), (1, 'dwFlags'))
#def CryptExportKey(hKey, hExpKey, dwBlobType, dwFlags, pbData, pdwDataLen):
#    return CryptExportKey.ctypes_function(hKey, hExpKey, dwBlobType, dwFlags, pbData, pdwDataLen)
CryptExportKeyPrototype = WINFUNCTYPE(BOOL, HCRYPTKEY, HCRYPTKEY, DWORD, DWORD, POINTER(BYTE), POINTER(DWORD))
CryptExportKeyParams = ((1, 'hKey'), (1, 'hExpKey'), (1, 'dwBlobType'), (1, 'dwFlags'), (1, 'pbData'), (1, 'pdwDataLen'))
#def CertGetCertificateContextProperty(pCertContext, dwPropId, pvData, pcbData):
#    return CertGetCertificateContextProperty.ctypes_function(pCertContext, dwPropId, pvData, pcbData)
CertGetCertificateContextPropertyPrototype = WINFUNCTYPE(BOOL, PCCERT_CONTEXT, DWORD, PVOID, POINTER(DWORD))
CertGetCertificateContextPropertyParams = ((1, 'pCertContext'), (1, 'dwPropId'), (1, 'pvData'), (1, 'pcbData'))
#def CertEnumCertificateContextProperties(pCertContext, dwPropId):
#    return CertEnumCertificateContextProperties.ctypes_function(pCertContext, dwPropId)
CertEnumCertificateContextPropertiesPrototype = WINFUNCTYPE(DWORD, PCCERT_CONTEXT, DWORD)
CertEnumCertificateContextPropertiesParams = ((1, 'pCertContext'), (1, 'dwPropId'))
#def CryptEncryptMessage(pEncryptPara, cRecipientCert, rgpRecipientCert, pbToBeEncrypted, cbToBeEncrypted, pbEncryptedBlob, pcbEncryptedBlob):
#    return CryptEncryptMessage.ctypes_function(pEncryptPara, cRecipientCert, rgpRecipientCert, pbToBeEncrypted, cbToBeEncrypted, pbEncryptedBlob, pcbEncryptedBlob)
CryptEncryptMessagePrototype = WINFUNCTYPE(BOOL, PCRYPT_ENCRYPT_MESSAGE_PARA, DWORD, POINTER(PCCERT_CONTEXT), POINTER(BYTE), DWORD, POINTER(BYTE), POINTER(DWORD))
CryptEncryptMessageParams = ((1, 'pEncryptPara'), (1, 'cRecipientCert'), (1, 'rgpRecipientCert'), (1, 'pbToBeEncrypted'), (1, 'cbToBeEncrypted'), (1, 'pbEncryptedBlob'), (1, 'pcbEncryptedBlob'))
#def CryptDecryptMessage(pDecryptPara, pbEncryptedBlob, cbEncryptedBlob, pbDecrypted, pcbDecrypted, ppXchgCert):
#    return CryptDecryptMessage.ctypes_function(pDecryptPara, pbEncryptedBlob, cbEncryptedBlob, pbDecrypted, pcbDecrypted, ppXchgCert)
CryptDecryptMessagePrototype = WINFUNCTYPE(BOOL, PCRYPT_DECRYPT_MESSAGE_PARA, POINTER(BYTE), DWORD, POINTER(BYTE), POINTER(DWORD), POINTER(PCCERT_CONTEXT))
CryptDecryptMessageParams = ((1, 'pDecryptPara'), (1, 'pbEncryptedBlob'), (1, 'cbEncryptedBlob'), (1, 'pbDecrypted'), (1, 'pcbDecrypted'), (1, 'ppXchgCert'))
#def CryptAcquireCertificatePrivateKey(pCert, dwFlags, pvParameters, phCryptProvOrNCryptKey, pdwKeySpec, pfCallerFreeProvOrNCryptKey):
#    return CryptAcquireCertificatePrivateKey.ctypes_function(pCert, dwFlags, pvParameters, phCryptProvOrNCryptKey, pdwKeySpec, pfCallerFreeProvOrNCryptKey)
CryptAcquireCertificatePrivateKeyPrototype = WINFUNCTYPE(BOOL, PCCERT_CONTEXT, DWORD, PVOID, POINTER(HCRYPTPROV_OR_NCRYPT_KEY_HANDLE), POINTER(DWORD), POINTER(BOOL))
CryptAcquireCertificatePrivateKeyParams = ((1, 'pCert'), (1, 'dwFlags'), (1, 'pvParameters'), (1, 'phCryptProvOrNCryptKey'), (1, 'pdwKeySpec'), (1, 'pfCallerFreeProvOrNCryptKey'))
#def CertDuplicateCertificateContext(pCertContext):
#    return CertDuplicateCertificateContext.ctypes_function(pCertContext)
CertDuplicateCertificateContextPrototype = WINFUNCTYPE(PCCERT_CONTEXT, PCCERT_CONTEXT)
CertDuplicateCertificateContextParams = ((1, 'pCertContext'),)
#def CertEnumCertificatesInStore(hCertStore, pPrevCertContext):
#    return CertEnumCertificatesInStore.ctypes_function(hCertStore, pPrevCertContext)
CertEnumCertificatesInStorePrototype = WINFUNCTYPE(PCCERT_CONTEXT, HCERTSTORE, PCCERT_CONTEXT)
CertEnumCertificatesInStoreParams = ((1, 'hCertStore'), (1, 'pPrevCertContext'))
#def CryptEncodeObjectEx(dwCertEncodingType, lpszStructType, pvStructInfo, dwFlags, pEncodePara, pvEncoded, pcbEncoded):
#    return CryptEncodeObjectEx.ctypes_function(dwCertEncodingType, lpszStructType, pvStructInfo, dwFlags, pEncodePara, pvEncoded, pcbEncoded)
CryptEncodeObjectExPrototype = WINFUNCTYPE(BOOL, DWORD, LPCSTR, PVOID, DWORD, PCRYPT_ENCODE_PARA, PVOID, POINTER(DWORD))
CryptEncodeObjectExParams = ((1, 'dwCertEncodingType'), (1, 'lpszStructType'), (1, 'pvStructInfo'), (1, 'dwFlags'), (1, 'pEncodePara'), (1, 'pvEncoded'), (1, 'pcbEncoded'))
#def CertCreateCertificateContext(dwCertEncodingType, pbCertEncoded, cbCertEncoded):
#    return CertCreateCertificateContext.ctypes_function(dwCertEncodingType, pbCertEncoded, cbCertEncoded)
CertCreateCertificateContextPrototype = WINFUNCTYPE(PCCERT_CONTEXT, DWORD, POINTER(BYTE), DWORD)
CertCreateCertificateContextParams = ((1, 'dwCertEncodingType'), (1, 'pbCertEncoded'), (1, 'cbCertEncoded'))
#def CertCompareCertificate(dwCertEncodingType, pCertId1, pCertId2):
#    return CertCompareCertificate.ctypes_function(dwCertEncodingType, pCertId1, pCertId2)
CertCompareCertificatePrototype = WINFUNCTYPE(BOOL, DWORD, PCERT_INFO, PCERT_INFO)
CertCompareCertificateParams = ((1, 'dwCertEncodingType'), (1, 'pCertId1'), (1, 'pCertId2'))
#def CertEnumCTLsInStore(hCertStore, pPrevCtlContext):
#    return CertEnumCTLsInStore.ctypes_function(hCertStore, pPrevCtlContext)
CertEnumCTLsInStorePrototype = WINFUNCTYPE(PCCTL_CONTEXT, HCERTSTORE, PCCTL_CONTEXT)
CertEnumCTLsInStoreParams = ((1, 'hCertStore'), (1, 'pPrevCtlContext'))
#def CertDuplicateCTLContext(pCtlContext):
#    return CertDuplicateCTLContext.ctypes_function(pCtlContext)
CertDuplicateCTLContextPrototype = WINFUNCTYPE(PCCTL_CONTEXT, PCCTL_CONTEXT)
CertDuplicateCTLContextParams = ((1, 'pCtlContext'),)
#def CertFreeCTLContext(pCtlContext):
#    return CertFreeCTLContext.ctypes_function(pCtlContext)
CertFreeCTLContextPrototype = WINFUNCTYPE(BOOL, PCCTL_CONTEXT)
CertFreeCTLContextParams = ((1, 'pCtlContext'),)
#def CryptUIDlgViewContext(dwContextType, pvContext, hwnd, pwszTitle, dwFlags, pvReserved):
#    return CryptUIDlgViewContext.ctypes_function(dwContextType, pvContext, hwnd, pwszTitle, dwFlags, pvReserved)
CryptUIDlgViewContextPrototype = WINFUNCTYPE(BOOL, DWORD, PVOID, HWND, LPCWSTR, DWORD, PVOID)
CryptUIDlgViewContextParams = ((1, 'dwContextType'), (1, 'pvContext'), (1, 'hwnd'), (1, 'pwszTitle'), (1, 'dwFlags'), (1, 'pvReserved'))
#def CryptMsgVerifyCountersignatureEncoded(hCryptProv, dwEncodingType, pbSignerInfo, cbSignerInfo, pbSignerInfoCountersignature, cbSignerInfoCountersignature, pciCountersigner):
#    return CryptMsgVerifyCountersignatureEncoded.ctypes_function(hCryptProv, dwEncodingType, pbSignerInfo, cbSignerInfo, pbSignerInfoCountersignature, cbSignerInfoCountersignature, pciCountersigner)
CryptMsgVerifyCountersignatureEncodedPrototype = WINFUNCTYPE(BOOL, HCRYPTPROV_LEGACY, DWORD, PBYTE, DWORD, PBYTE, DWORD, PCERT_INFO)
CryptMsgVerifyCountersignatureEncodedParams = ((1, 'hCryptProv'), (1, 'dwEncodingType'), (1, 'pbSignerInfo'), (1, 'cbSignerInfo'), (1, 'pbSignerInfoCountersignature'), (1, 'cbSignerInfoCountersignature'), (1, 'pciCountersigner'))
#def CryptMsgVerifyCountersignatureEncodedEx(hCryptProv, dwEncodingType, pbSignerInfo, cbSignerInfo, pbSignerInfoCountersignature, cbSignerInfoCountersignature, dwSignerType, pvSigner, dwFlags, pvExtra):
#    return CryptMsgVerifyCountersignatureEncodedEx.ctypes_function(hCryptProv, dwEncodingType, pbSignerInfo, cbSignerInfo, pbSignerInfoCountersignature, cbSignerInfoCountersignature, dwSignerType, pvSigner, dwFlags, pvExtra)
CryptMsgVerifyCountersignatureEncodedExPrototype = WINFUNCTYPE(BOOL, HCRYPTPROV_LEGACY, DWORD, PBYTE, DWORD, PBYTE, DWORD, DWORD, PVOID, DWORD, PVOID)
CryptMsgVerifyCountersignatureEncodedExParams = ((1, 'hCryptProv'), (1, 'dwEncodingType'), (1, 'pbSignerInfo'), (1, 'cbSignerInfo'), (1, 'pbSignerInfoCountersignature'), (1, 'cbSignerInfoCountersignature'), (1, 'dwSignerType'), (1, 'pvSigner'), (1, 'dwFlags'), (1, 'pvExtra'))
#def CryptHashCertificate(hCryptProv, Algid, dwFlags, pbEncoded, cbEncoded, pbComputedHash, pcbComputedHash):
#    return CryptHashCertificate.ctypes_function(hCryptProv, Algid, dwFlags, pbEncoded, cbEncoded, pbComputedHash, pcbComputedHash)
CryptHashCertificatePrototype = WINFUNCTYPE(BOOL, HCRYPTPROV_LEGACY, ALG_ID, DWORD, POINTER(BYTE), DWORD, POINTER(BYTE), POINTER(DWORD))
CryptHashCertificateParams = ((1, 'hCryptProv'), (1, 'Algid'), (1, 'dwFlags'), (1, 'pbEncoded'), (1, 'cbEncoded'), (1, 'pbComputedHash'), (1, 'pcbComputedHash'))
# -- String/OS helper declarations (StrStrIW/StrStrIA case-insensitive
# substring search, IsOS).  Generated ctypes metadata: <Name>Prototype is the
# WINFUNCTYPE signature, <Name>Params the paramflags tuples (leading 1 =
# input parameter, per the ctypes paramflags convention).
#def StrStrIW(pszFirst, pszSrch):
#    return StrStrIW.ctypes_function(pszFirst, pszSrch)
StrStrIWPrototype = WINFUNCTYPE(PWSTR, PWSTR, PWSTR)
StrStrIWParams = ((1, 'pszFirst'), (1, 'pszSrch'))
#def StrStrIA(pszFirst, pszSrch):
#    return StrStrIA.ctypes_function(pszFirst, pszSrch)
StrStrIAPrototype = WINFUNCTYPE(PCSTR, PCSTR, PCSTR)
StrStrIAParams = ((1, 'pszFirst'), (1, 'pszSrch'))
#def IsOS(dwOS):
#    return IsOS.ctypes_function(dwOS)
IsOSPrototype = WINFUNCTYPE(BOOL, DWORD)
IsOSParams = ((1, 'dwOS'),)
# -- Event-log API declarations: the classic OpenEventLog/ReadEventLog family
# followed by the Evt* family (sessions, queries, rendering, channel and
# publisher metadata).  Generated ctypes metadata: for each API <Name>,
# <Name>Prototype is the WINFUNCTYPE signature and <Name>Params the
# paramflags tuples (leading 1 = input parameter, per the ctypes paramflags
# convention).  The commented-out "def" above each pair records the intended
# python call shape.
#def OpenEventLogA(lpUNCServerName, lpSourceName):
#    return OpenEventLogA.ctypes_function(lpUNCServerName, lpSourceName)
OpenEventLogAPrototype = WINFUNCTYPE(HANDLE, LPCSTR, LPCSTR)
OpenEventLogAParams = ((1, 'lpUNCServerName'), (1, 'lpSourceName'))
#def OpenEventLogW(lpUNCServerName, lpSourceName):
#    return OpenEventLogW.ctypes_function(lpUNCServerName, lpSourceName)
OpenEventLogWPrototype = WINFUNCTYPE(HANDLE, LPWSTR, LPWSTR)
OpenEventLogWParams = ((1, 'lpUNCServerName'), (1, 'lpSourceName'))
#def OpenBackupEventLogA(lpUNCServerName, lpSourceName):
#    return OpenBackupEventLogA.ctypes_function(lpUNCServerName, lpSourceName)
OpenBackupEventLogAPrototype = WINFUNCTYPE(HANDLE, LPCSTR, LPCSTR)
OpenBackupEventLogAParams = ((1, 'lpUNCServerName'), (1, 'lpSourceName'))
#def OpenBackupEventLogW(lpUNCServerName, lpSourceName):
#    return OpenBackupEventLogW.ctypes_function(lpUNCServerName, lpSourceName)
OpenBackupEventLogWPrototype = WINFUNCTYPE(HANDLE, LPWSTR, LPWSTR)
OpenBackupEventLogWParams = ((1, 'lpUNCServerName'), (1, 'lpSourceName'))
#def ReadEventLogA(hEventLog, dwReadFlags, dwRecordOffset, lpBuffer, nNumberOfBytesToRead, pnBytesRead, pnMinNumberOfBytesNeeded):
#    return ReadEventLogA.ctypes_function(hEventLog, dwReadFlags, dwRecordOffset, lpBuffer, nNumberOfBytesToRead, pnBytesRead, pnMinNumberOfBytesNeeded)
ReadEventLogAPrototype = WINFUNCTYPE(BOOL, HANDLE, DWORD, DWORD, LPVOID, DWORD, POINTER(DWORD), POINTER(DWORD))
ReadEventLogAParams = ((1, 'hEventLog'), (1, 'dwReadFlags'), (1, 'dwRecordOffset'), (1, 'lpBuffer'), (1, 'nNumberOfBytesToRead'), (1, 'pnBytesRead'), (1, 'pnMinNumberOfBytesNeeded'))
#def ReadEventLogW(hEventLog, dwReadFlags, dwRecordOffset, lpBuffer, nNumberOfBytesToRead, pnBytesRead, pnMinNumberOfBytesNeeded):
#    return ReadEventLogW.ctypes_function(hEventLog, dwReadFlags, dwRecordOffset, lpBuffer, nNumberOfBytesToRead, pnBytesRead, pnMinNumberOfBytesNeeded)
ReadEventLogWPrototype = WINFUNCTYPE(BOOL, HANDLE, DWORD, DWORD, LPVOID, DWORD, POINTER(DWORD), POINTER(DWORD))
ReadEventLogWParams = ((1, 'hEventLog'), (1, 'dwReadFlags'), (1, 'dwRecordOffset'), (1, 'lpBuffer'), (1, 'nNumberOfBytesToRead'), (1, 'pnBytesRead'), (1, 'pnMinNumberOfBytesNeeded'))
#def GetEventLogInformation(hEventLog, dwInfoLevel, lpBuffer, cbBufSize, pcbBytesNeeded):
#    return GetEventLogInformation.ctypes_function(hEventLog, dwInfoLevel, lpBuffer, cbBufSize, pcbBytesNeeded)
GetEventLogInformationPrototype = WINFUNCTYPE(BOOL, HANDLE, DWORD, LPVOID, DWORD, LPDWORD)
GetEventLogInformationParams = ((1, 'hEventLog'), (1, 'dwInfoLevel'), (1, 'lpBuffer'), (1, 'cbBufSize'), (1, 'pcbBytesNeeded'))
#def GetNumberOfEventLogRecords(hEventLog, NumberOfRecords):
#    return GetNumberOfEventLogRecords.ctypes_function(hEventLog, NumberOfRecords)
GetNumberOfEventLogRecordsPrototype = WINFUNCTYPE(BOOL, HANDLE, PDWORD)
GetNumberOfEventLogRecordsParams = ((1, 'hEventLog'), (1, 'NumberOfRecords'))
#def CloseEventLog(hEventLog):
#    return CloseEventLog.ctypes_function(hEventLog)
CloseEventLogPrototype = WINFUNCTYPE(BOOL, HANDLE)
CloseEventLogParams = ((1, 'hEventLog'),)
# -- Evt* family: handles are EVT_HANDLE, properties are returned via
# EVT_VARIANT buffers.
#def EvtOpenLog(Session, Path, Flags):
#    return EvtOpenLog.ctypes_function(Session, Path, Flags)
EvtOpenLogPrototype = WINFUNCTYPE(EVT_HANDLE, EVT_HANDLE, LPCWSTR, DWORD)
EvtOpenLogParams = ((1, 'Session'), (1, 'Path'), (1, 'Flags'))
#def EvtQuery(Session, Path, Query, Flags):
#    return EvtQuery.ctypes_function(Session, Path, Query, Flags)
EvtQueryPrototype = WINFUNCTYPE(EVT_HANDLE, EVT_HANDLE, LPCWSTR, LPCWSTR, DWORD)
EvtQueryParams = ((1, 'Session'), (1, 'Path'), (1, 'Query'), (1, 'Flags'))
#def EvtNext(ResultSet, EventArraySize, EventArray, Timeout, Flags, Returned):
#    return EvtNext.ctypes_function(ResultSet, EventArraySize, EventArray, Timeout, Flags, Returned)
EvtNextPrototype = WINFUNCTYPE(BOOL, EVT_HANDLE, DWORD, POINTER(EVT_HANDLE), DWORD, DWORD, PDWORD)
EvtNextParams = ((1, 'ResultSet'), (1, 'EventArraySize'), (1, 'EventArray'), (1, 'Timeout'), (1, 'Flags'), (1, 'Returned'))
#def EvtCreateRenderContext(ValuePathsCount, ValuePaths, Flags):
#    return EvtCreateRenderContext.ctypes_function(ValuePathsCount, ValuePaths, Flags)
EvtCreateRenderContextPrototype = WINFUNCTYPE(EVT_HANDLE, DWORD, POINTER(LPCWSTR), DWORD)
EvtCreateRenderContextParams = ((1, 'ValuePathsCount'), (1, 'ValuePaths'), (1, 'Flags'))
#def EvtRender(Context, Fragment, Flags, BufferSize, Buffer, BufferUsed, PropertyCount):
#    return EvtRender.ctypes_function(Context, Fragment, Flags, BufferSize, Buffer, BufferUsed, PropertyCount)
EvtRenderPrototype = WINFUNCTYPE(BOOL, EVT_HANDLE, EVT_HANDLE, DWORD, DWORD, PVOID, PDWORD, PDWORD)
EvtRenderParams = ((1, 'Context'), (1, 'Fragment'), (1, 'Flags'), (1, 'BufferSize'), (1, 'Buffer'), (1, 'BufferUsed'), (1, 'PropertyCount'))
#def EvtClose(Object):
#    return EvtClose.ctypes_function(Object)
EvtClosePrototype = WINFUNCTYPE(BOOL, EVT_HANDLE)
EvtCloseParams = ((1, 'Object'),)
#def EvtOpenChannelEnum(Session, Flags):
#    return EvtOpenChannelEnum.ctypes_function(Session, Flags)
EvtOpenChannelEnumPrototype = WINFUNCTYPE(EVT_HANDLE, EVT_HANDLE, DWORD)
EvtOpenChannelEnumParams = ((1, 'Session'), (1, 'Flags'))
#def EvtNextChannelPath(ChannelEnum, ChannelPathBufferSize, ChannelPathBuffer, ChannelPathBufferUsed):
#    return EvtNextChannelPath.ctypes_function(ChannelEnum, ChannelPathBufferSize, ChannelPathBuffer, ChannelPathBufferUsed)
EvtNextChannelPathPrototype = WINFUNCTYPE(BOOL, EVT_HANDLE, DWORD, LPWSTR, PDWORD)
EvtNextChannelPathParams = ((1, 'ChannelEnum'), (1, 'ChannelPathBufferSize'), (1, 'ChannelPathBuffer'), (1, 'ChannelPathBufferUsed'))
#def EvtOpenPublisherEnum(Session, Flags):
#    return EvtOpenPublisherEnum.ctypes_function(Session, Flags)
EvtOpenPublisherEnumPrototype = WINFUNCTYPE(EVT_HANDLE, EVT_HANDLE, DWORD)
EvtOpenPublisherEnumParams = ((1, 'Session'), (1, 'Flags'))
#def EvtNextPublisherId(PublisherEnum, PublisherIdBufferSize, PublisherIdBuffer, PublisherIdBufferUsed):
#    return EvtNextPublisherId.ctypes_function(PublisherEnum, PublisherIdBufferSize, PublisherIdBuffer, PublisherIdBufferUsed)
EvtNextPublisherIdPrototype = WINFUNCTYPE(BOOL, EVT_HANDLE, DWORD, LPWSTR, PDWORD)
EvtNextPublisherIdParams = ((1, 'PublisherEnum'), (1, 'PublisherIdBufferSize'), (1, 'PublisherIdBuffer'), (1, 'PublisherIdBufferUsed'))
#def EvtGetLogInfo(Log, PropertyId, PropertyValueBufferSize, PropertyValueBuffer, PropertyValueBufferUsed):
#    return EvtGetLogInfo.ctypes_function(Log, PropertyId, PropertyValueBufferSize, PropertyValueBuffer, PropertyValueBufferUsed)
EvtGetLogInfoPrototype = WINFUNCTYPE(BOOL, EVT_HANDLE, EVT_LOG_PROPERTY_ID, DWORD, PEVT_VARIANT, PDWORD)
EvtGetLogInfoParams = ((1, 'Log'), (1, 'PropertyId'), (1, 'PropertyValueBufferSize'), (1, 'PropertyValueBuffer'), (1, 'PropertyValueBufferUsed'))
#def EvtOpenChannelConfig(Session, ChannelPath, Flags):
#    return EvtOpenChannelConfig.ctypes_function(Session, ChannelPath, Flags)
EvtOpenChannelConfigPrototype = WINFUNCTYPE(EVT_HANDLE, EVT_HANDLE, LPCWSTR, DWORD)
EvtOpenChannelConfigParams = ((1, 'Session'), (1, 'ChannelPath'), (1, 'Flags'))
#def EvtGetChannelConfigProperty(ChannelConfig, PropertyId, Flags, PropertyValueBufferSize, PropertyValueBuffer, PropertyValueBufferUsed):
#    return EvtGetChannelConfigProperty.ctypes_function(ChannelConfig, PropertyId, Flags, PropertyValueBufferSize, PropertyValueBuffer, PropertyValueBufferUsed)
EvtGetChannelConfigPropertyPrototype = WINFUNCTYPE(BOOL, EVT_HANDLE, EVT_CHANNEL_CONFIG_PROPERTY_ID, DWORD, DWORD, PEVT_VARIANT, PDWORD)
EvtGetChannelConfigPropertyParams = ((1, 'ChannelConfig'), (1, 'PropertyId'), (1, 'Flags'), (1, 'PropertyValueBufferSize'), (1, 'PropertyValueBuffer'), (1, 'PropertyValueBufferUsed'))
#def EvtOpenPublisherMetadata(Session, PublisherIdentity, LogFilePath, Locale, Flags):
#    return EvtOpenPublisherMetadata.ctypes_function(Session, PublisherIdentity, LogFilePath, Locale, Flags)
EvtOpenPublisherMetadataPrototype = WINFUNCTYPE(EVT_HANDLE, EVT_HANDLE, LPCWSTR, LPCWSTR, LCID, DWORD)
EvtOpenPublisherMetadataParams = ((1, 'Session'), (1, 'PublisherIdentity'), (1, 'LogFilePath'), (1, 'Locale'), (1, 'Flags'))
#def EvtOpenEventMetadataEnum(PublisherMetadata, Flags):
#    return EvtOpenEventMetadataEnum.ctypes_function(PublisherMetadata, Flags)
EvtOpenEventMetadataEnumPrototype = WINFUNCTYPE(EVT_HANDLE, EVT_HANDLE, DWORD)
EvtOpenEventMetadataEnumParams = ((1, 'PublisherMetadata'), (1, 'Flags'))
#def EvtNextEventMetadata(EventMetadataEnum, Flags):
#    return EvtNextEventMetadata.ctypes_function(EventMetadataEnum, Flags)
EvtNextEventMetadataPrototype = WINFUNCTYPE(EVT_HANDLE, EVT_HANDLE, DWORD)
EvtNextEventMetadataParams = ((1, 'EventMetadataEnum'), (1, 'Flags'))
#def EvtGetEventMetadataProperty(EventMetadata, PropertyId, Flags, EventMetadataPropertyBufferSize, EventMetadataPropertyBuffer, EventMetadataPropertyBufferUsed):
#    return EvtGetEventMetadataProperty.ctypes_function(EventMetadata, PropertyId, Flags, EventMetadataPropertyBufferSize, EventMetadataPropertyBuffer, EventMetadataPropertyBufferUsed)
EvtGetEventMetadataPropertyPrototype = WINFUNCTYPE(BOOL, EVT_HANDLE, EVT_EVENT_METADATA_PROPERTY_ID, DWORD, DWORD, PEVT_VARIANT, PDWORD)
EvtGetEventMetadataPropertyParams = ((1, 'EventMetadata'), (1, 'PropertyId'), (1, 'Flags'), (1, 'EventMetadataPropertyBufferSize'), (1, 'EventMetadataPropertyBuffer'), (1, 'EventMetadataPropertyBufferUsed'))
#def EvtGetPublisherMetadataProperty(PublisherMetadata, PropertyId, Flags, PublisherMetadataPropertyBufferSize, PublisherMetadataPropertyBuffer, PublisherMetadataPropertyBufferUsed):
#    return EvtGetPublisherMetadataProperty.ctypes_function(PublisherMetadata, PropertyId, Flags, PublisherMetadataPropertyBufferSize, PublisherMetadataPropertyBuffer, PublisherMetadataPropertyBufferUsed)
EvtGetPublisherMetadataPropertyPrototype = WINFUNCTYPE(BOOL, EVT_HANDLE, EVT_PUBLISHER_METADATA_PROPERTY_ID, DWORD, DWORD, PEVT_VARIANT, PDWORD)
EvtGetPublisherMetadataPropertyParams = ((1, 'PublisherMetadata'), (1, 'PropertyId'), (1, 'Flags'), (1, 'PublisherMetadataPropertyBufferSize'), (1, 'PublisherMetadataPropertyBuffer'), (1, 'PublisherMetadataPropertyBufferUsed'))
#def EvtGetObjectArraySize(ObjectArray, ObjectArraySize):
#    return EvtGetObjectArraySize.ctypes_function(ObjectArray, ObjectArraySize)
EvtGetObjectArraySizePrototype = WINFUNCTYPE(BOOL, EVT_OBJECT_ARRAY_PROPERTY_HANDLE, PDWORD)
EvtGetObjectArraySizeParams = ((1, 'ObjectArray'), (1, 'ObjectArraySize'))
#def EvtGetObjectArrayProperty(ObjectArray, PropertyId, ArrayIndex, Flags, PropertyValueBufferSize, PropertyValueBuffer, PropertyValueBufferUsed):
#    return EvtGetObjectArrayProperty.ctypes_function(ObjectArray, PropertyId, ArrayIndex, Flags, PropertyValueBufferSize, PropertyValueBuffer, PropertyValueBufferUsed)
EvtGetObjectArrayPropertyPrototype = WINFUNCTYPE(BOOL, EVT_OBJECT_ARRAY_PROPERTY_HANDLE, DWORD, DWORD, DWORD, DWORD, PEVT_VARIANT, PDWORD)
EvtGetObjectArrayPropertyParams = ((1, 'ObjectArray'), (1, 'PropertyId'), (1, 'ArrayIndex'), (1, 'Flags'), (1, 'PropertyValueBufferSize'), (1, 'PropertyValueBuffer'), (1, 'PropertyValueBufferUsed'))
#def EvtFormatMessage(PublisherMetadata, Event, MessageId, ValueCount, Values, Flags, BufferSize, Buffer, BufferUsed):
#    return EvtFormatMessage.ctypes_function(PublisherMetadata, Event, MessageId, ValueCount, Values, Flags, BufferSize, Buffer, BufferUsed)
EvtFormatMessagePrototype = WINFUNCTYPE(BOOL, EVT_HANDLE, EVT_HANDLE, DWORD, DWORD, PEVT_VARIANT, DWORD, DWORD, LPWSTR, PDWORD)
EvtFormatMessageParams = ((1, 'PublisherMetadata'), (1, 'Event'), (1, 'MessageId'), (1, 'ValueCount'), (1, 'Values'), (1, 'Flags'), (1, 'BufferSize'), (1, 'Buffer'), (1, 'BufferUsed'))
# -- user32 window / cursor prototypes ---------------------------------------
# GENERATED CODE: (XxxPrototype, XxxParams) pairs for cursor position,
# window enumeration, text/class queries and window manipulation APIs.
#def GetCursorPos(lpPoint):
#    return GetCursorPos.ctypes_function(lpPoint)
GetCursorPosPrototype = WINFUNCTYPE(BOOL, LPPOINT)
GetCursorPosParams = ((1, 'lpPoint'),)
#def WindowFromPoint(Point):
#    return WindowFromPoint.ctypes_function(Point)
WindowFromPointPrototype = WINFUNCTYPE(HWND, POINT)
WindowFromPointParams = ((1, 'Point'),)
#def GetWindowRect(hWnd, lpRect):
#    return GetWindowRect.ctypes_function(hWnd, lpRect)
GetWindowRectPrototype = WINFUNCTYPE(BOOL, HWND, LPRECT)
GetWindowRectParams = ((1, 'hWnd'), (1, 'lpRect'))
#def EnumWindows(lpEnumFunc, lParam):
#    return EnumWindows.ctypes_function(lpEnumFunc, lParam)
EnumWindowsPrototype = WINFUNCTYPE(BOOL, WNDENUMPROC, LPARAM)
EnumWindowsParams = ((1, 'lpEnumFunc'), (1, 'lParam'))
#def GetWindowTextA(hWnd, lpString, nMaxCount):
#    return GetWindowTextA.ctypes_function(hWnd, lpString, nMaxCount)
GetWindowTextAPrototype = WINFUNCTYPE(INT, HWND, LPSTR, INT)
GetWindowTextAParams = ((1, 'hWnd'), (1, 'lpString'), (1, 'nMaxCount'))
#def GetParent(hWnd):
#    return GetParent.ctypes_function(hWnd)
GetParentPrototype = WINFUNCTYPE(HWND, HWND)
GetParentParams = ((1, 'hWnd'),)
#def GetWindowTextW(hWnd, lpString, nMaxCount):
#    return GetWindowTextW.ctypes_function(hWnd, lpString, nMaxCount)
GetWindowTextWPrototype = WINFUNCTYPE(INT, HWND, LPWSTR, INT)
GetWindowTextWParams = ((1, 'hWnd'), (1, 'lpString'), (1, 'nMaxCount'))
#def GetWindowModuleFileNameA(hwnd, pszFileName, cchFileNameMax):
#    return GetWindowModuleFileNameA.ctypes_function(hwnd, pszFileName, cchFileNameMax)
GetWindowModuleFileNameAPrototype = WINFUNCTYPE(UINT, HWND, LPSTR, UINT)
GetWindowModuleFileNameAParams = ((1, 'hwnd'), (1, 'pszFileName'), (1, 'cchFileNameMax'))
#def GetWindowModuleFileNameW(hwnd, pszFileName, cchFileNameMax):
#    return GetWindowModuleFileNameW.ctypes_function(hwnd, pszFileName, cchFileNameMax)
GetWindowModuleFileNameWPrototype = WINFUNCTYPE(UINT, HWND, LPWSTR, UINT)
GetWindowModuleFileNameWParams = ((1, 'hwnd'), (1, 'pszFileName'), (1, 'cchFileNameMax'))
#def EnumChildWindows(hWndParent, lpEnumFunc, lParam):
#    return EnumChildWindows.ctypes_function(hWndParent, lpEnumFunc, lParam)
EnumChildWindowsPrototype = WINFUNCTYPE(BOOL, HWND, WNDENUMPROC, LPARAM)
EnumChildWindowsParams = ((1, 'hWndParent'), (1, 'lpEnumFunc'), (1, 'lParam'))
#def CloseWindow(hWnd):
#    return CloseWindow.ctypes_function(hWnd)
CloseWindowPrototype = WINFUNCTYPE(BOOL, HWND)
CloseWindowParams = ((1, 'hWnd'),)
#def GetDesktopWindow():
#    return GetDesktopWindow.ctypes_function()
GetDesktopWindowPrototype = WINFUNCTYPE(HWND)
GetDesktopWindowParams = ()
#def GetForegroundWindow():
#    return GetForegroundWindow.ctypes_function()
GetForegroundWindowPrototype = WINFUNCTYPE(HWND)
GetForegroundWindowParams = ()
#def BringWindowToTop(hWnd):
#    return BringWindowToTop.ctypes_function(hWnd)
BringWindowToTopPrototype = WINFUNCTYPE(BOOL, HWND)
BringWindowToTopParams = ((1, 'hWnd'),)
#def MoveWindow(hWnd, X, Y, nWidth, nHeight, bRepaint):
#    return MoveWindow.ctypes_function(hWnd, X, Y, nWidth, nHeight, bRepaint)
MoveWindowPrototype = WINFUNCTYPE(BOOL, HWND, INT, INT, INT, INT, BOOL)
MoveWindowParams = ((1, 'hWnd'), (1, 'X'), (1, 'Y'), (1, 'nWidth'), (1, 'nHeight'), (1, 'bRepaint'))
#def SetWindowPos(hWnd, hWndInsertAfter, X, Y, cx, cy, uFlags):
#    return SetWindowPos.ctypes_function(hWnd, hWndInsertAfter, X, Y, cx, cy, uFlags)
SetWindowPosPrototype = WINFUNCTYPE(BOOL, HWND, HWND, INT, INT, INT, INT, UINT)
SetWindowPosParams = ((1, 'hWnd'), (1, 'hWndInsertAfter'), (1, 'X'), (1, 'Y'), (1, 'cx'), (1, 'cy'), (1, 'uFlags'))
#def SetWindowTextA(hWnd, lpString):
#    return SetWindowTextA.ctypes_function(hWnd, lpString)
SetWindowTextAPrototype = WINFUNCTYPE(BOOL, HWND, LPCSTR)
SetWindowTextAParams = ((1, 'hWnd'), (1, 'lpString'))
#def SetWindowTextW(hWnd, lpString):
#    return SetWindowTextW.ctypes_function(hWnd, lpString)
SetWindowTextWPrototype = WINFUNCTYPE(BOOL, HWND, LPWSTR)
SetWindowTextWParams = ((1, 'hWnd'), (1, 'lpString'))
#def RealGetWindowClassA(hwnd, pszType, cchType):
#    return RealGetWindowClassA.ctypes_function(hwnd, pszType, cchType)
# NOTE(review): pszType / lpClassName below are output buffers; declaring them
# LPCSTR (const) still works under ctypes (LPCSTR is c_char_p, same as LPSTR)
# but LPSTR would better reflect the native API -- confirm against generator.
RealGetWindowClassAPrototype = WINFUNCTYPE(UINT, HWND, LPCSTR, UINT)
RealGetWindowClassAParams = ((1, 'hwnd'), (1, 'pszType'), (1, 'cchType'))
#def RealGetWindowClassW(hwnd, pszType, cchType):
#    return RealGetWindowClassW.ctypes_function(hwnd, pszType, cchType)
RealGetWindowClassWPrototype = WINFUNCTYPE(UINT, HWND, LPWSTR, UINT)
RealGetWindowClassWParams = ((1, 'hwnd'), (1, 'pszType'), (1, 'cchType'))
#def GetClassInfoExA(hinst, lpszClass, lpwcx):
#    return GetClassInfoExA.ctypes_function(hinst, lpszClass, lpwcx)
GetClassInfoExAPrototype = WINFUNCTYPE(BOOL, HINSTANCE, LPCSTR, LPWNDCLASSEXA)
GetClassInfoExAParams = ((1, 'hinst'), (1, 'lpszClass'), (1, 'lpwcx'))
#def GetClassInfoExW(hinst, lpszClass, lpwcx):
#    return GetClassInfoExW.ctypes_function(hinst, lpszClass, lpwcx)
GetClassInfoExWPrototype = WINFUNCTYPE(BOOL, HINSTANCE, LPCWSTR, LPWNDCLASSEXW)
GetClassInfoExWParams = ((1, 'hinst'), (1, 'lpszClass'), (1, 'lpwcx'))
#def GetClassNameA(hWnd, lpClassName, nMaxCount):
#    return GetClassNameA.ctypes_function(hWnd, lpClassName, nMaxCount)
GetClassNameAPrototype = WINFUNCTYPE(INT, HWND, LPCSTR, INT)
GetClassNameAParams = ((1, 'hWnd'), (1, 'lpClassName'), (1, 'nMaxCount'))
#def GetClassNameW(hWnd, lpClassName, nMaxCount):
#    return GetClassNameW.ctypes_function(hWnd, lpClassName, nMaxCount)
GetClassNameWPrototype = WINFUNCTYPE(INT, HWND, LPWSTR, INT)
GetClassNameWParams = ((1, 'hWnd'), (1, 'lpClassName'), (1, 'nMaxCount'))
#def GetWindowThreadProcessId(hWnd, lpdwProcessId):
#    return GetWindowThreadProcessId.ctypes_function(hWnd, lpdwProcessId)
GetWindowThreadProcessIdPrototype = WINFUNCTYPE(DWORD, HWND, LPDWORD)
GetWindowThreadProcessIdParams = ((1, 'hWnd'), (1, 'lpdwProcessId'))
# -- Process / thread lifecycle, file creation and NT query prototypes -------
# GENERATED CODE: (XxxPrototype, XxxParams) pairs covering process exit and
# termination, error retrieval, file creation (Win32 + native NtCreateFile),
# the Nt* information-query family, thread creation/query, exit codes and
# process priority classes.
#def ExitProcess(uExitCode):
#    return ExitProcess.ctypes_function(uExitCode)
ExitProcessPrototype = WINFUNCTYPE(VOID, UINT)
ExitProcessParams = ((1, 'uExitCode'),)
#def TerminateProcess(hProcess, uExitCode):
#    return TerminateProcess.ctypes_function(hProcess, uExitCode)
TerminateProcessPrototype = WINFUNCTYPE(BOOL, HANDLE, UINT)
TerminateProcessParams = ((1, 'hProcess'), (1, 'uExitCode'))
#def GetLastError():
#    return GetLastError.ctypes_function()
GetLastErrorPrototype = WINFUNCTYPE(DWORD)
GetLastErrorParams = ()
#def GetCurrentProcess():
#    return GetCurrentProcess.ctypes_function()
GetCurrentProcessPrototype = WINFUNCTYPE(HANDLE)
GetCurrentProcessParams = ()
#def CreateFileA(lpFileName, dwDesiredAccess, dwShareMode, lpSecurityAttributes, dwCreationDisposition, dwFlagsAndAttributes, hTemplateFile):
#    return CreateFileA.ctypes_function(lpFileName, dwDesiredAccess, dwShareMode, lpSecurityAttributes, dwCreationDisposition, dwFlagsAndAttributes, hTemplateFile)
CreateFileAPrototype = WINFUNCTYPE(HANDLE, LPCSTR, DWORD, DWORD, LPSECURITY_ATTRIBUTES, DWORD, DWORD, HANDLE)
CreateFileAParams = ((1, 'lpFileName'), (1, 'dwDesiredAccess'), (1, 'dwShareMode'), (1, 'lpSecurityAttributes'), (1, 'dwCreationDisposition'), (1, 'dwFlagsAndAttributes'), (1, 'hTemplateFile'))
#def CreateFileW(lpFileName, dwDesiredAccess, dwShareMode, lpSecurityAttributes, dwCreationDisposition, dwFlagsAndAttributes, hTemplateFile):
#    return CreateFileW.ctypes_function(lpFileName, dwDesiredAccess, dwShareMode, lpSecurityAttributes, dwCreationDisposition, dwFlagsAndAttributes, hTemplateFile)
CreateFileWPrototype = WINFUNCTYPE(HANDLE, LPCWSTR, DWORD, DWORD, LPSECURITY_ATTRIBUTES, DWORD, DWORD, HANDLE)
CreateFileWParams = ((1, 'lpFileName'), (1, 'dwDesiredAccess'), (1, 'dwShareMode'), (1, 'lpSecurityAttributes'), (1, 'dwCreationDisposition'), (1, 'dwFlagsAndAttributes'), (1, 'hTemplateFile'))
#def NtCreateFile(FileHandle, DesiredAccess, ObjectAttributes, IoStatusBlock, AllocationSize, FileAttributes, ShareAccess, CreateDisposition, CreateOptions, EaBuffer, EaLength):
#    return NtCreateFile.ctypes_function(FileHandle, DesiredAccess, ObjectAttributes, IoStatusBlock, AllocationSize, FileAttributes, ShareAccess, CreateDisposition, CreateOptions, EaBuffer, EaLength)
NtCreateFilePrototype = WINFUNCTYPE(NTSTATUS, PHANDLE, ACCESS_MASK, POBJECT_ATTRIBUTES, PIO_STATUS_BLOCK, PLARGE_INTEGER, ULONG, ULONG, ULONG, ULONG, PVOID, ULONG)
NtCreateFileParams = ((1, 'FileHandle'), (1, 'DesiredAccess'), (1, 'ObjectAttributes'), (1, 'IoStatusBlock'), (1, 'AllocationSize'), (1, 'FileAttributes'), (1, 'ShareAccess'), (1, 'CreateDisposition'), (1, 'CreateOptions'), (1, 'EaBuffer'), (1, 'EaLength'))
#def LdrLoadDll(PathToFile, Flags, ModuleFileName, ModuleHandle):
#    return LdrLoadDll.ctypes_function(PathToFile, Flags, ModuleFileName, ModuleHandle)
LdrLoadDllPrototype = WINFUNCTYPE(NTSTATUS, LPCWSTR, ULONG, PUNICODE_STRING, PHANDLE)
LdrLoadDllParams = ((1, 'PathToFile'), (1, 'Flags'), (1, 'ModuleFileName'), (1, 'ModuleHandle'))
#def NtQuerySystemInformation(SystemInformationClass, SystemInformation, SystemInformationLength, ReturnLength):
#    return NtQuerySystemInformation.ctypes_function(SystemInformationClass, SystemInformation, SystemInformationLength, ReturnLength)
NtQuerySystemInformationPrototype = WINFUNCTYPE(NTSTATUS, SYSTEM_INFORMATION_CLASS, PVOID, ULONG, PULONG)
NtQuerySystemInformationParams = ((1, 'SystemInformationClass'), (1, 'SystemInformation'), (1, 'SystemInformationLength'), (1, 'ReturnLength'))
#def NtQueryInformationProcess(ProcessHandle, ProcessInformationClass, ProcessInformation, ProcessInformationLength, ReturnLength):
#    return NtQueryInformationProcess.ctypes_function(ProcessHandle, ProcessInformationClass, ProcessInformation, ProcessInformationLength, ReturnLength)
NtQueryInformationProcessPrototype = WINFUNCTYPE(NTSTATUS, HANDLE, PROCESSINFOCLASS, PVOID, ULONG, PULONG)
NtQueryInformationProcessParams = ((1, 'ProcessHandle'), (1, 'ProcessInformationClass'), (1, 'ProcessInformation'), (1, 'ProcessInformationLength'), (1, 'ReturnLength'))
#def NtSetInformationProcess(ProcessHandle, ProcessInformationClass, ProcessInformation, ProcessInformationLength):
#    return NtSetInformationProcess.ctypes_function(ProcessHandle, ProcessInformationClass, ProcessInformation, ProcessInformationLength)
NtSetInformationProcessPrototype = WINFUNCTYPE(NTSTATUS, HANDLE, PROCESS_INFORMATION_CLASS, PVOID, ULONG)
NtSetInformationProcessParams = ((1, 'ProcessHandle'), (1, 'ProcessInformationClass'), (1, 'ProcessInformation'), (1, 'ProcessInformationLength'))
#def NtQueryVirtualMemory(ProcessHandle, BaseAddress, MemoryInformationClass, MemoryInformation, MemoryInformationLength, ReturnLength):
#    return NtQueryVirtualMemory.ctypes_function(ProcessHandle, BaseAddress, MemoryInformationClass, MemoryInformation, MemoryInformationLength, ReturnLength)
NtQueryVirtualMemoryPrototype = WINFUNCTYPE(NTSTATUS, HANDLE, PVOID, MEMORY_INFORMATION_CLASS, PVOID, SIZE_T, PSIZE_T)
NtQueryVirtualMemoryParams = ((1, 'ProcessHandle'), (1, 'BaseAddress'), (1, 'MemoryInformationClass'), (1, 'MemoryInformation'), (1, 'MemoryInformationLength'), (1, 'ReturnLength'))
#def NtQueryVolumeInformationFile(FileHandle, IoStatusBlock, FsInformation, Length, FsInformationClass):
#    return NtQueryVolumeInformationFile.ctypes_function(FileHandle, IoStatusBlock, FsInformation, Length, FsInformationClass)
NtQueryVolumeInformationFilePrototype = WINFUNCTYPE(NTSTATUS, HANDLE, PIO_STATUS_BLOCK, PVOID, ULONG, FS_INFORMATION_CLASS)
NtQueryVolumeInformationFileParams = ((1, 'FileHandle'), (1, 'IoStatusBlock'), (1, 'FsInformation'), (1, 'Length'), (1, 'FsInformationClass'))
#def NtCreateThreadEx(ThreadHandle, DesiredAccess, ObjectAttributes, ProcessHandle, lpStartAddress, lpParameter, CreateSuspended, dwStackSize, Unknown1, Unknown2, Unknown3):
#    return NtCreateThreadEx.ctypes_function(ThreadHandle, DesiredAccess, ObjectAttributes, ProcessHandle, lpStartAddress, lpParameter, CreateSuspended, dwStackSize, Unknown1, Unknown2, Unknown3)
# NtCreateThreadEx is undocumented; the trailing Unknown1-3 parameter names
# reflect that the exact semantics of those arguments are not established.
NtCreateThreadExPrototype = WINFUNCTYPE(NTSTATUS, PHANDLE, ACCESS_MASK, LPVOID, HANDLE, LPTHREAD_START_ROUTINE, LPVOID, BOOL, DWORD, DWORD, DWORD, LPVOID)
NtCreateThreadExParams = ((1, 'ThreadHandle'), (1, 'DesiredAccess'), (1, 'ObjectAttributes'), (1, 'ProcessHandle'), (1, 'lpStartAddress'), (1, 'lpParameter'), (1, 'CreateSuspended'), (1, 'dwStackSize'), (1, 'Unknown1'), (1, 'Unknown2'), (1, 'Unknown3'))
#def NtQueryInformationThread(ThreadHandle, ThreadInformationClass, ThreadInformation, ThreadInformationLength, ReturnLength):
#    return NtQueryInformationThread.ctypes_function(ThreadHandle, ThreadInformationClass, ThreadInformation, ThreadInformationLength, ReturnLength)
NtQueryInformationThreadPrototype = WINFUNCTYPE(NTSTATUS, HANDLE, THREAD_INFORMATION_CLASS, PVOID, ULONG, PULONG)
NtQueryInformationThreadParams = ((1, 'ThreadHandle'), (1, 'ThreadInformationClass'), (1, 'ThreadInformation'), (1, 'ThreadInformationLength'), (1, 'ReturnLength'))
#def GetExitCodeThread(hThread, lpExitCode):
#    return GetExitCodeThread.ctypes_function(hThread, lpExitCode)
GetExitCodeThreadPrototype = WINFUNCTYPE(BOOL, HANDLE, LPDWORD)
GetExitCodeThreadParams = ((1, 'hThread'), (1, 'lpExitCode'))
#def GetExitCodeProcess(hProcess, lpExitCode):
#    return GetExitCodeProcess.ctypes_function(hProcess, lpExitCode)
GetExitCodeProcessPrototype = WINFUNCTYPE(BOOL, HANDLE, LPDWORD)
GetExitCodeProcessParams = ((1, 'hProcess'), (1, 'lpExitCode'))
#def SetPriorityClass(hProcess, dwPriorityClass):
#    return SetPriorityClass.ctypes_function(hProcess, dwPriorityClass)
SetPriorityClassPrototype = WINFUNCTYPE(BOOL, HANDLE, DWORD)
SetPriorityClassParams = ((1, 'hProcess'), (1, 'dwPriorityClass'))
#def GetPriorityClass(hProcess):
#    return GetPriorityClass.ctypes_function(hProcess)
GetPriorityClassPrototype = WINFUNCTYPE(DWORD, HANDLE)
GetPriorityClassParams = ((1, 'hProcess'),)
# -- Virtual memory, working set, module name and thread-creation prototypes -
# GENERATED CODE: (XxxPrototype, XxxParams) pairs for VirtualAlloc/Free/
# Protect/Query (Win32 + native Nt* equivalents), working-set queries,
# module filename retrieval and (remote) thread creation.
#def VirtualAlloc(lpAddress, dwSize, flAllocationType, flProtect):
#    return VirtualAlloc.ctypes_function(lpAddress, dwSize, flAllocationType, flProtect)
VirtualAllocPrototype = WINFUNCTYPE(LPVOID, LPVOID, SIZE_T, DWORD, DWORD)
VirtualAllocParams = ((1, 'lpAddress'), (1, 'dwSize'), (1, 'flAllocationType'), (1, 'flProtect'))
#def VirtualAllocEx(hProcess, lpAddress, dwSize, flAllocationType, flProtect):
#    return VirtualAllocEx.ctypes_function(hProcess, lpAddress, dwSize, flAllocationType, flProtect)
VirtualAllocExPrototype = WINFUNCTYPE(LPVOID, HANDLE, LPVOID, SIZE_T, DWORD, DWORD)
VirtualAllocExParams = ((1, 'hProcess'), (1, 'lpAddress'), (1, 'dwSize'), (1, 'flAllocationType'), (1, 'flProtect'))
#def NtAllocateVirtualMemory(ProcessHandle, BaseAddress, ZeroBits, RegionSize, AllocationType, Protect):
#    return NtAllocateVirtualMemory.ctypes_function(ProcessHandle, BaseAddress, ZeroBits, RegionSize, AllocationType, Protect)
NtAllocateVirtualMemoryPrototype = WINFUNCTYPE(NTSTATUS, HANDLE, POINTER(PVOID), ULONG_PTR, PSIZE_T, ULONG, ULONG)
NtAllocateVirtualMemoryParams = ((1, 'ProcessHandle'), (1, 'BaseAddress'), (1, 'ZeroBits'), (1, 'RegionSize'), (1, 'AllocationType'), (1, 'Protect'))
#def NtProtectVirtualMemory(ProcessHandle, BaseAddress, NumberOfBytesToProtect, NewAccessProtection, OldAccessProtection):
#    return NtProtectVirtualMemory.ctypes_function(ProcessHandle, BaseAddress, NumberOfBytesToProtect, NewAccessProtection, OldAccessProtection)
# NOTE(review): NumberOfBytesToProtect is declared PULONG while
# NtAllocateVirtualMemory / NtFreeVirtualMemory use PSIZE_T for their size
# parameter; on 64-bit these widths differ -- confirm against the generator.
NtProtectVirtualMemoryPrototype = WINFUNCTYPE(NTSTATUS, HANDLE, POINTER(PVOID), PULONG, ULONG, PULONG)
NtProtectVirtualMemoryParams = ((1, 'ProcessHandle'), (1, 'BaseAddress'), (1, 'NumberOfBytesToProtect'), (1, 'NewAccessProtection'), (1, 'OldAccessProtection'))
#def VirtualFree(lpAddress, dwSize, dwFreeType):
#    return VirtualFree.ctypes_function(lpAddress, dwSize, dwFreeType)
VirtualFreePrototype = WINFUNCTYPE(BOOL, LPVOID, SIZE_T, DWORD)
VirtualFreeParams = ((1, 'lpAddress'), (1, 'dwSize'), (1, 'dwFreeType'))
#def VirtualFreeEx(hProcess, lpAddress, dwSize, dwFreeType):
#    return VirtualFreeEx.ctypes_function(hProcess, lpAddress, dwSize, dwFreeType)
VirtualFreeExPrototype = WINFUNCTYPE(BOOL, HANDLE, LPVOID, SIZE_T, DWORD)
VirtualFreeExParams = ((1, 'hProcess'), (1, 'lpAddress'), (1, 'dwSize'), (1, 'dwFreeType'))
#def NtFreeVirtualMemory(ProcessHandle, BaseAddress, RegionSize, FreeType):
#    return NtFreeVirtualMemory.ctypes_function(ProcessHandle, BaseAddress, RegionSize, FreeType)
NtFreeVirtualMemoryPrototype = WINFUNCTYPE(NTSTATUS, HANDLE, POINTER(PVOID), PSIZE_T, ULONG)
NtFreeVirtualMemoryParams = ((1, 'ProcessHandle'), (1, 'BaseAddress'), (1, 'RegionSize'), (1, 'FreeType'))
#def VirtualProtect(lpAddress, dwSize, flNewProtect, lpflOldProtect):
#    return VirtualProtect.ctypes_function(lpAddress, dwSize, flNewProtect, lpflOldProtect)
VirtualProtectPrototype = WINFUNCTYPE(BOOL, LPVOID, SIZE_T, DWORD, PDWORD)
VirtualProtectParams = ((1, 'lpAddress'), (1, 'dwSize'), (1, 'flNewProtect'), (1, 'lpflOldProtect'))
#def VirtualProtectEx(hProcess, lpAddress, dwSize, flNewProtect, lpflOldProtect):
#    return VirtualProtectEx.ctypes_function(hProcess, lpAddress, dwSize, flNewProtect, lpflOldProtect)
VirtualProtectExPrototype = WINFUNCTYPE(BOOL, HANDLE, LPVOID, SIZE_T, DWORD, PDWORD)
VirtualProtectExParams = ((1, 'hProcess'), (1, 'lpAddress'), (1, 'dwSize'), (1, 'flNewProtect'), (1, 'lpflOldProtect'))
#def VirtualQuery(lpAddress, lpBuffer, dwLength):
#    return VirtualQuery.ctypes_function(lpAddress, lpBuffer, dwLength)
VirtualQueryPrototype = WINFUNCTYPE(DWORD, LPCVOID, PMEMORY_BASIC_INFORMATION, DWORD)
VirtualQueryParams = ((1, 'lpAddress'), (1, 'lpBuffer'), (1, 'dwLength'))
#def VirtualQueryEx(hProcess, lpAddress, lpBuffer, dwLength):
#    return VirtualQueryEx.ctypes_function(hProcess, lpAddress, lpBuffer, dwLength)
VirtualQueryExPrototype = WINFUNCTYPE(SIZE_T, HANDLE, LPCVOID, PMEMORY_BASIC_INFORMATION, SIZE_T)
VirtualQueryExParams = ((1, 'hProcess'), (1, 'lpAddress'), (1, 'lpBuffer'), (1, 'dwLength'))
#def QueryWorkingSet(hProcess, pv, cb):
#    return QueryWorkingSet.ctypes_function(hProcess, pv, cb)
QueryWorkingSetPrototype = WINFUNCTYPE(BOOL, HANDLE, PVOID, DWORD)
QueryWorkingSetParams = ((1, 'hProcess'), (1, 'pv'), (1, 'cb'))
#def QueryWorkingSetEx(hProcess, pv, cb):
#    return QueryWorkingSetEx.ctypes_function(hProcess, pv, cb)
QueryWorkingSetExPrototype = WINFUNCTYPE(BOOL, HANDLE, PVOID, DWORD)
QueryWorkingSetExParams = ((1, 'hProcess'), (1, 'pv'), (1, 'cb'))
#def GetModuleFileNameA(hModule, lpFilename, nSize):
#    return GetModuleFileNameA.ctypes_function(hModule, lpFilename, nSize)
GetModuleFileNameAPrototype = WINFUNCTYPE(DWORD, HMODULE, LPSTR, DWORD)
GetModuleFileNameAParams = ((1, 'hModule'), (1, 'lpFilename'), (1, 'nSize'))
#def GetModuleFileNameW(hModule, lpFilename, nSize):
#    return GetModuleFileNameW.ctypes_function(hModule, lpFilename, nSize)
GetModuleFileNameWPrototype = WINFUNCTYPE(DWORD, HMODULE, LPWSTR, DWORD)
GetModuleFileNameWParams = ((1, 'hModule'), (1, 'lpFilename'), (1, 'nSize'))
#def CreateThread(lpThreadAttributes, dwStackSize, lpStartAddress, lpParameter, dwCreationFlags, lpThreadId):
#    return CreateThread.ctypes_function(lpThreadAttributes, dwStackSize, lpStartAddress, lpParameter, dwCreationFlags, lpThreadId)
CreateThreadPrototype = WINFUNCTYPE(HANDLE, LPSECURITY_ATTRIBUTES, SIZE_T, LPTHREAD_START_ROUTINE, LPVOID, DWORD, LPDWORD)
CreateThreadParams = ((1, 'lpThreadAttributes'), (1, 'dwStackSize'), (1, 'lpStartAddress'), (1, 'lpParameter'), (1, 'dwCreationFlags'), (1, 'lpThreadId'))
#def CreateRemoteThread(hProcess, lpThreadAttributes, dwStackSize, lpStartAddress, lpParameter, dwCreationFlags, lpThreadId):
#    return CreateRemoteThread.ctypes_function(hProcess, lpThreadAttributes, dwStackSize, lpStartAddress, lpParameter, dwCreationFlags, lpThreadId)
CreateRemoteThreadPrototype = WINFUNCTYPE(HANDLE, HANDLE, LPSECURITY_ATTRIBUTES, SIZE_T, LPTHREAD_START_ROUTINE, LPVOID, DWORD, LPDWORD)
CreateRemoteThreadParams = ((1, 'hProcess'), (1, 'lpThreadAttributes'), (1, 'dwStackSize'), (1, 'lpStartAddress'), (1, 'lpParameter'), (1, 'dwCreationFlags'), (1, 'lpThreadId'))
# NOTE: an exact duplicate of the VirtualProtect (Prototype, Params) pair was
# defined here; it was byte-for-byte identical to the definitions made earlier
# in this file (between NtFreeVirtualMemory and VirtualProtectEx) and has been
# removed. VirtualProtectPrototype / VirtualProtectParams remain defined by
# that earlier pair, so consumers (windows.winproxy) are unaffected. If this
# file is regenerated, the generator should be fixed to emit the pair once.
# -- Process creation, thread context and cross-process memory I/O -----------
# GENERATED CODE: (XxxPrototype, XxxParams) pairs for CreateProcess*,
# CreateProcessAsUser*, thread CONTEXT get/set (Win32 + Nt* variants),
# OpenThread/OpenProcess/CloseHandle, and remote memory read/write
# (Win32 Read/WriteProcessMemory plus Nt* and NtWow64* 64-bit variants).
#def CreateProcessA(lpApplicationName, lpCommandLine, lpProcessAttributes, lpThreadAttributes, bInheritHandles, dwCreationFlags, lpEnvironment, lpCurrentDirectory, lpStartupInfo, lpProcessInformation):
#    return CreateProcessA.ctypes_function(lpApplicationName, lpCommandLine, lpProcessAttributes, lpThreadAttributes, bInheritHandles, dwCreationFlags, lpEnvironment, lpCurrentDirectory, lpStartupInfo, lpProcessInformation)
CreateProcessAPrototype = WINFUNCTYPE(BOOL, LPCSTR, LPSTR, LPSECURITY_ATTRIBUTES, LPSECURITY_ATTRIBUTES, BOOL, DWORD, LPVOID, LPCSTR, LPSTARTUPINFOA, LPPROCESS_INFORMATION)
CreateProcessAParams = ((1, 'lpApplicationName'), (1, 'lpCommandLine'), (1, 'lpProcessAttributes'), (1, 'lpThreadAttributes'), (1, 'bInheritHandles'), (1, 'dwCreationFlags'), (1, 'lpEnvironment'), (1, 'lpCurrentDirectory'), (1, 'lpStartupInfo'), (1, 'lpProcessInformation'))
#def CreateProcessW(lpApplicationName, lpCommandLine, lpProcessAttributes, lpThreadAttributes, bInheritHandles, dwCreationFlags, lpEnvironment, lpCurrentDirectory, lpStartupInfo, lpProcessInformation):
#    return CreateProcessW.ctypes_function(lpApplicationName, lpCommandLine, lpProcessAttributes, lpThreadAttributes, bInheritHandles, dwCreationFlags, lpEnvironment, lpCurrentDirectory, lpStartupInfo, lpProcessInformation)
CreateProcessWPrototype = WINFUNCTYPE(BOOL, LPCWSTR, LPWSTR, LPSECURITY_ATTRIBUTES, LPSECURITY_ATTRIBUTES, BOOL, DWORD, LPVOID, LPCWSTR, LPSTARTUPINFOW, LPPROCESS_INFORMATION)
CreateProcessWParams = ((1, 'lpApplicationName'), (1, 'lpCommandLine'), (1, 'lpProcessAttributes'), (1, 'lpThreadAttributes'), (1, 'bInheritHandles'), (1, 'dwCreationFlags'), (1, 'lpEnvironment'), (1, 'lpCurrentDirectory'), (1, 'lpStartupInfo'), (1, 'lpProcessInformation'))
#def CreateProcessAsUserA(hToken, lpApplicationName, lpCommandLine, lpProcessAttributes, lpThreadAttributes, bInheritHandles, dwCreationFlags, lpEnvironment, lpCurrentDirectory, lpStartupInfo, lpProcessInformation):
#    return CreateProcessAsUserA.ctypes_function(hToken, lpApplicationName, lpCommandLine, lpProcessAttributes, lpThreadAttributes, bInheritHandles, dwCreationFlags, lpEnvironment, lpCurrentDirectory, lpStartupInfo, lpProcessInformation)
CreateProcessAsUserAPrototype = WINFUNCTYPE(BOOL, HANDLE, LPSTR, LPSTR, LPSECURITY_ATTRIBUTES, LPSECURITY_ATTRIBUTES, BOOL, DWORD, LPVOID, LPSTR, LPSTARTUPINFOA, LPPROCESS_INFORMATION)
CreateProcessAsUserAParams = ((1, 'hToken'), (1, 'lpApplicationName'), (1, 'lpCommandLine'), (1, 'lpProcessAttributes'), (1, 'lpThreadAttributes'), (1, 'bInheritHandles'), (1, 'dwCreationFlags'), (1, 'lpEnvironment'), (1, 'lpCurrentDirectory'), (1, 'lpStartupInfo'), (1, 'lpProcessInformation'))
#def CreateProcessAsUserW(hToken, lpApplicationName, lpCommandLine, lpProcessAttributes, lpThreadAttributes, bInheritHandles, dwCreationFlags, lpEnvironment, lpCurrentDirectory, lpStartupInfo, lpProcessInformation):
#    return CreateProcessAsUserW.ctypes_function(hToken, lpApplicationName, lpCommandLine, lpProcessAttributes, lpThreadAttributes, bInheritHandles, dwCreationFlags, lpEnvironment, lpCurrentDirectory, lpStartupInfo, lpProcessInformation)
CreateProcessAsUserWPrototype = WINFUNCTYPE(BOOL, HANDLE, LPWSTR, LPWSTR, LPSECURITY_ATTRIBUTES, LPSECURITY_ATTRIBUTES, BOOL, DWORD, LPVOID, LPWSTR, LPSTARTUPINFOW, LPPROCESS_INFORMATION)
CreateProcessAsUserWParams = ((1, 'hToken'), (1, 'lpApplicationName'), (1, 'lpCommandLine'), (1, 'lpProcessAttributes'), (1, 'lpThreadAttributes'), (1, 'bInheritHandles'), (1, 'dwCreationFlags'), (1, 'lpEnvironment'), (1, 'lpCurrentDirectory'), (1, 'lpStartupInfo'), (1, 'lpProcessInformation'))
#def GetThreadContext(hThread, lpContext):
#    return GetThreadContext.ctypes_function(hThread, lpContext)
GetThreadContextPrototype = WINFUNCTYPE(BOOL, HANDLE, LPCONTEXT)
GetThreadContextParams = ((1, 'hThread'), (1, 'lpContext'))
#def NtGetContextThread(hThread, lpContext):
#    return NtGetContextThread.ctypes_function(hThread, lpContext)
NtGetContextThreadPrototype = WINFUNCTYPE(NTSTATUS, HANDLE, LPCONTEXT)
NtGetContextThreadParams = ((1, 'hThread'), (1, 'lpContext'))
#def SetThreadContext(hThread, lpContext):
#    return SetThreadContext.ctypes_function(hThread, lpContext)
SetThreadContextPrototype = WINFUNCTYPE(BOOL, HANDLE, LPCONTEXT)
SetThreadContextParams = ((1, 'hThread'), (1, 'lpContext'))
#def NtSetContextThread(hThread, lpContext):
#    return NtSetContextThread.ctypes_function(hThread, lpContext)
NtSetContextThreadPrototype = WINFUNCTYPE(NTSTATUS, HANDLE, LPCONTEXT)
NtSetContextThreadParams = ((1, 'hThread'), (1, 'lpContext'))
#def OpenThread(dwDesiredAccess, bInheritHandle, dwThreadId):
#    return OpenThread.ctypes_function(dwDesiredAccess, bInheritHandle, dwThreadId)
OpenThreadPrototype = WINFUNCTYPE(HANDLE, DWORD, BOOL, DWORD)
OpenThreadParams = ((1, 'dwDesiredAccess'), (1, 'bInheritHandle'), (1, 'dwThreadId'))
#def OpenProcess(dwDesiredAccess, bInheritHandle, dwProcessId):
#    return OpenProcess.ctypes_function(dwDesiredAccess, bInheritHandle, dwProcessId)
OpenProcessPrototype = WINFUNCTYPE(HANDLE, DWORD, BOOL, DWORD)
OpenProcessParams = ((1, 'dwDesiredAccess'), (1, 'bInheritHandle'), (1, 'dwProcessId'))
#def CloseHandle(hObject):
#    return CloseHandle.ctypes_function(hObject)
CloseHandlePrototype = WINFUNCTYPE(BOOL, HANDLE)
CloseHandleParams = ((1, 'hObject'),)
#def ReadProcessMemory(hProcess, lpBaseAddress, lpBuffer, nSize, lpNumberOfBytesRead):
#    return ReadProcessMemory.ctypes_function(hProcess, lpBaseAddress, lpBuffer, nSize, lpNumberOfBytesRead)
ReadProcessMemoryPrototype = WINFUNCTYPE(BOOL, HANDLE, LPCVOID, LPVOID, SIZE_T, POINTER(SIZE_T))
ReadProcessMemoryParams = ((1, 'hProcess'), (1, 'lpBaseAddress'), (1, 'lpBuffer'), (1, 'nSize'), (1, 'lpNumberOfBytesRead'))
#def NtWow64ReadVirtualMemory64(hProcess, lpBaseAddress, lpBuffer, nSize, lpNumberOfBytesRead):
#    return NtWow64ReadVirtualMemory64.ctypes_function(hProcess, lpBaseAddress, lpBuffer, nSize, lpNumberOfBytesRead)
# NtWow64*VirtualMemory64 take 64-bit addresses/sizes (ULONG64), allowing a
# 32-bit (WOW64) process to access the full 64-bit address space of a target.
NtWow64ReadVirtualMemory64Prototype = WINFUNCTYPE(NTSTATUS, HANDLE, ULONG64, LPVOID, ULONG64, PULONG64)
NtWow64ReadVirtualMemory64Params = ((1, 'hProcess'), (1, 'lpBaseAddress'), (1, 'lpBuffer'), (1, 'nSize'), (1, 'lpNumberOfBytesRead'))
#def NtReadVirtualMemory(hProcess, lpBaseAddress, lpBuffer, nSize, lpNumberOfBytesRead):
#    return NtReadVirtualMemory.ctypes_function(hProcess, lpBaseAddress, lpBuffer, nSize, lpNumberOfBytesRead)
NtReadVirtualMemoryPrototype = WINFUNCTYPE(NTSTATUS, HANDLE, PVOID, LPVOID, ULONG, PULONG)
NtReadVirtualMemoryParams = ((1, 'hProcess'), (1, 'lpBaseAddress'), (1, 'lpBuffer'), (1, 'nSize'), (1, 'lpNumberOfBytesRead'))
#def WriteProcessMemory(hProcess, lpBaseAddress, lpBuffer, nSize, lpNumberOfBytesWritten):
#    return WriteProcessMemory.ctypes_function(hProcess, lpBaseAddress, lpBuffer, nSize, lpNumberOfBytesWritten)
WriteProcessMemoryPrototype = WINFUNCTYPE(BOOL, HANDLE, LPVOID, LPCVOID, SIZE_T, POINTER(SIZE_T))
WriteProcessMemoryParams = ((1, 'hProcess'), (1, 'lpBaseAddress'), (1, 'lpBuffer'), (1, 'nSize'), (1, 'lpNumberOfBytesWritten'))
#def NtWow64WriteVirtualMemory64(hProcess, lpBaseAddress, lpBuffer, nSize, lpNumberOfBytesWritten):
#    return NtWow64WriteVirtualMemory64.ctypes_function(hProcess, lpBaseAddress, lpBuffer, nSize, lpNumberOfBytesWritten)
NtWow64WriteVirtualMemory64Prototype = WINFUNCTYPE(NTSTATUS, HANDLE, ULONG64, LPVOID, ULONG64, PULONG64)
NtWow64WriteVirtualMemory64Params = ((1, 'hProcess'), (1, 'lpBaseAddress'), (1, 'lpBuffer'), (1, 'nSize'), (1, 'lpNumberOfBytesWritten'))
#def NtWriteVirtualMemory(ProcessHandle, BaseAddress, Buffer, NumberOfBytesToWrite, NumberOfBytesWritten):
#    return NtWriteVirtualMemory.ctypes_function(ProcessHandle, BaseAddress, Buffer, NumberOfBytesToWrite, NumberOfBytesWritten)
NtWriteVirtualMemoryPrototype = WINFUNCTYPE(NTSTATUS, HANDLE, PVOID, PVOID, ULONG, PULONG)
NtWriteVirtualMemoryParams = ((1, 'ProcessHandle'), (1, 'BaseAddress'), (1, 'Buffer'), (1, 'NumberOfBytesToWrite'), (1, 'NumberOfBytesWritten'))
# -- Toolhelp32 snapshot and dynamic library loading prototypes --------------
# GENERATED CODE: (XxxPrototype, XxxParams) pairs for process/thread
# enumeration via Toolhelp snapshots and for GetProcAddress/LoadLibrary.
#def CreateToolhelp32Snapshot(dwFlags, th32ProcessID):
#    return CreateToolhelp32Snapshot.ctypes_function(dwFlags, th32ProcessID)
CreateToolhelp32SnapshotPrototype = WINFUNCTYPE(HANDLE, DWORD, DWORD)
CreateToolhelp32SnapshotParams = ((1, 'dwFlags'), (1, 'th32ProcessID'))
#def Thread32First(hSnapshot, lpte):
#    return Thread32First.ctypes_function(hSnapshot, lpte)
Thread32FirstPrototype = WINFUNCTYPE(BOOL, HANDLE, LPTHREADENTRY32)
Thread32FirstParams = ((1, 'hSnapshot'), (1, 'lpte'))
#def Thread32Next(hSnapshot, lpte):
#    return Thread32Next.ctypes_function(hSnapshot, lpte)
Thread32NextPrototype = WINFUNCTYPE(BOOL, HANDLE, LPTHREADENTRY32)
Thread32NextParams = ((1, 'hSnapshot'), (1, 'lpte'))
#def Process32First(hSnapshot, lppe):
#    return Process32First.ctypes_function(hSnapshot, lppe)
Process32FirstPrototype = WINFUNCTYPE(BOOL, HANDLE, LPPROCESSENTRY32)
Process32FirstParams = ((1, 'hSnapshot'), (1, 'lppe'))
#def Process32Next(hSnapshot, lppe):
#    return Process32Next.ctypes_function(hSnapshot, lppe)
Process32NextPrototype = WINFUNCTYPE(BOOL, HANDLE, LPPROCESSENTRY32)
Process32NextParams = ((1, 'hSnapshot'), (1, 'lppe'))
#def Process32FirstW(hSnapshot, lppe):
#    return Process32FirstW.ctypes_function(hSnapshot, lppe)
Process32FirstWPrototype = WINFUNCTYPE(BOOL, HANDLE, LPPROCESSENTRY32W)
Process32FirstWParams = ((1, 'hSnapshot'), (1, 'lppe'))
#def Process32NextW(hSnapshot, lppe):
#    return Process32NextW.ctypes_function(hSnapshot, lppe)
Process32NextWPrototype = WINFUNCTYPE(BOOL, HANDLE, LPPROCESSENTRY32W)
Process32NextWParams = ((1, 'hSnapshot'), (1, 'lppe'))
#def GetProcAddress(hModule, lpProcName):
#    return GetProcAddress.ctypes_function(hModule, lpProcName)
GetProcAddressPrototype = WINFUNCTYPE(FARPROC, HMODULE, LPCSTR)
GetProcAddressParams = ((1, 'hModule'), (1, 'lpProcName'))
#def LoadLibraryA(lpFileName):
#    return LoadLibraryA.ctypes_function(lpFileName)
LoadLibraryAPrototype = WINFUNCTYPE(HMODULE, LPCSTR)
LoadLibraryAParams = ((1, 'lpFileName'),)
#def LoadLibraryW(lpFileName):
#    return LoadLibraryW.ctypes_function(lpFileName)
LoadLibraryWPrototype = WINFUNCTYPE(HMODULE, LPCWSTR)
LoadLibraryWParams = ((1, 'lpFileName'),)
# -- Access token and privilege prototypes -----------------------------------
# GENERATED CODE: (XxxPrototype, XxxParams) pairs for opening/duplicating
# process and thread tokens, thread impersonation, and privilege LUID lookup.
#def OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle):
#    return OpenProcessToken.ctypes_function(ProcessHandle, DesiredAccess, TokenHandle)
OpenProcessTokenPrototype = WINFUNCTYPE(BOOL, HANDLE, DWORD, PHANDLE)
OpenProcessTokenParams = ((1, 'ProcessHandle'), (1, 'DesiredAccess'), (1, 'TokenHandle'))
#def DuplicateToken(ExistingTokenHandle, ImpersonationLevel, DuplicateTokenHandle):
#    return DuplicateToken.ctypes_function(ExistingTokenHandle, ImpersonationLevel, DuplicateTokenHandle)
DuplicateTokenPrototype = WINFUNCTYPE(BOOL, HANDLE, SECURITY_IMPERSONATION_LEVEL, PHANDLE)
DuplicateTokenParams = ((1, 'ExistingTokenHandle'), (1, 'ImpersonationLevel'), (1, 'DuplicateTokenHandle'))
#def DuplicateTokenEx(hExistingToken, dwDesiredAccess, lpTokenAttributes, ImpersonationLevel, TokenType, phNewToken):
#    return DuplicateTokenEx.ctypes_function(hExistingToken, dwDesiredAccess, lpTokenAttributes, ImpersonationLevel, TokenType, phNewToken)
DuplicateTokenExPrototype = WINFUNCTYPE(BOOL, HANDLE, DWORD, LPSECURITY_ATTRIBUTES, SECURITY_IMPERSONATION_LEVEL, TOKEN_TYPE, PHANDLE)
DuplicateTokenExParams = ((1, 'hExistingToken'), (1, 'dwDesiredAccess'), (1, 'lpTokenAttributes'), (1, 'ImpersonationLevel'), (1, 'TokenType'), (1, 'phNewToken'))
#def OpenThreadToken(ThreadHandle, DesiredAccess, OpenAsSelf, TokenHandle):
#    return OpenThreadToken.ctypes_function(ThreadHandle, DesiredAccess, OpenAsSelf, TokenHandle)
OpenThreadTokenPrototype = WINFUNCTYPE(BOOL, HANDLE, DWORD, BOOL, PHANDLE)
OpenThreadTokenParams = ((1, 'ThreadHandle'), (1, 'DesiredAccess'), (1, 'OpenAsSelf'), (1, 'TokenHandle'))
#def SetThreadToken(Thread, Token):
#    return SetThreadToken.ctypes_function(Thread, Token)
SetThreadTokenPrototype = WINFUNCTYPE(BOOL, PHANDLE, HANDLE)
SetThreadTokenParams = ((1, 'Thread'), (1, 'Token'))
#def LookupPrivilegeValueA(lpSystemName, lpName, lpLuid):
#    return LookupPrivilegeValueA.ctypes_function(lpSystemName, lpName, lpLuid)
LookupPrivilegeValueAPrototype = WINFUNCTYPE(BOOL, LPCSTR, LPCSTR, PLUID)
LookupPrivilegeValueAParams = ((1, 'lpSystemName'), (1, 'lpName'), (1, 'lpLuid'))
#def LookupPrivilegeValueW(lpSystemName, lpName, lpLuid):
#    return LookupPrivilegeValueW.ctypes_function(lpSystemName, lpName, lpLuid)
LookupPrivilegeValueWPrototype = WINFUNCTYPE(BOOL, LPCWSTR, LPCWSTR, PLUID)
LookupPrivilegeValueWParams = ((1, 'lpSystemName'), (1, 'lpName'), (1, 'lpLuid'))
#def LookupPrivilegeNameA(lpSystemName, lpLuid, lpName, cchName):
# return LookupPrivilegeNameA.ctypes_function(lpSystemName, lpLuid, lpName, cchName)
LookupPrivilegeNameAPrototype = WINFUNCTYPE(BOOL, LPCSTR, PLUID, LPCSTR, LPDWORD)
LookupPrivilegeNameAParams = ((1, 'lpSystemName'), (1, 'lpLuid'), (1, 'lpName'), (1, 'cchName'))
#def LookupPrivilegeNameW(lpSystemName, lpLuid, lpName, cchName):
# return LookupPrivilegeNameW.ctypes_function(lpSystemName, lpLuid, lpName, cchName)
LookupPrivilegeNameWPrototype = WINFUNCTYPE(BOOL, LPCWSTR, PLUID, LPCWSTR, LPDWORD)
LookupPrivilegeNameWParams = ((1, 'lpSystemName'), (1, 'lpLuid'), (1, 'lpName'), (1, 'cchName'))
#def AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState, BufferLength, PreviousState, ReturnLength):
# return AdjustTokenPrivileges.ctypes_function(TokenHandle, DisableAllPrivileges, NewState, BufferLength, PreviousState, ReturnLength)
AdjustTokenPrivilegesPrototype = WINFUNCTYPE(BOOL, HANDLE, BOOL, PTOKEN_PRIVILEGES, DWORD, PTOKEN_PRIVILEGES, PDWORD)
AdjustTokenPrivilegesParams = ((1, 'TokenHandle'), (1, 'DisableAllPrivileges'), (1, 'NewState'), (1, 'BufferLength'), (1, 'PreviousState'), (1, 'ReturnLength'))
#def FindResourceA(hModule, lpName, lpType):
# return FindResourceA.ctypes_function(hModule, lpName, lpType)
FindResourceAPrototype = WINFUNCTYPE(HRSRC, HMODULE, LPCSTR, LPCSTR)
FindResourceAParams = ((1, 'hModule'), (1, 'lpName'), (1, 'lpType'))
#def FindResourceW(hModule, lpName, lpType):
# return FindResourceW.ctypes_function(hModule, lpName, lpType)
FindResourceWPrototype = WINFUNCTYPE(HRSRC, HMODULE, LPCWSTR, LPCWSTR)
FindResourceWParams = ((1, 'hModule'), (1, 'lpName'), (1, 'lpType'))
#def SizeofResource(hModule, hResInfo):
# return SizeofResource.ctypes_function(hModule, hResInfo)
SizeofResourcePrototype = WINFUNCTYPE(DWORD, HMODULE, HRSRC)
SizeofResourceParams = ((1, 'hModule'), (1, 'hResInfo'))
#def LoadResource(hModule, hResInfo):
# return LoadResource.ctypes_function(hModule, hResInfo)
LoadResourcePrototype = WINFUNCTYPE(HGLOBAL, HMODULE, HRSRC)
LoadResourceParams = ((1, 'hModule'), (1, 'hResInfo'))
#def LockResource(hResData):
# return LockResource.ctypes_function(hResData)
LockResourcePrototype = WINFUNCTYPE(LPVOID, HGLOBAL)
LockResourceParams = ((1, 'hResData'),)
#def GetVersionExA(lpVersionInformation):
# return GetVersionExA.ctypes_function(lpVersionInformation)
GetVersionExAPrototype = WINFUNCTYPE(BOOL, LPOSVERSIONINFOA)
GetVersionExAParams = ((1, 'lpVersionInformation'),)
#def GetVersionExW(lpVersionInformation):
# return GetVersionExW.ctypes_function(lpVersionInformation)
GetVersionExWPrototype = WINFUNCTYPE(BOOL, LPOSVERSIONINFOW)
GetVersionExWParams = ((1, 'lpVersionInformation'),)
#def GetVersion():
# return GetVersion.ctypes_function()
GetVersionPrototype = WINFUNCTYPE(DWORD)
GetVersionParams = ()
#def GetCurrentThread():
# return GetCurrentThread.ctypes_function()
GetCurrentThreadPrototype = WINFUNCTYPE(HANDLE)
GetCurrentThreadParams = ()
#def GetCurrentThreadId():
# return GetCurrentThreadId.ctypes_function()
GetCurrentThreadIdPrototype = WINFUNCTYPE(DWORD)
GetCurrentThreadIdParams = ()
#def GetCurrentProcessorNumber():
# return GetCurrentProcessorNumber.ctypes_function()
GetCurrentProcessorNumberPrototype = WINFUNCTYPE(DWORD)
GetCurrentProcessorNumberParams = ()
#def AllocConsole():
# return AllocConsole.ctypes_function()
AllocConsolePrototype = WINFUNCTYPE(BOOL)
AllocConsoleParams = ()
#def FreeConsole():
# return FreeConsole.ctypes_function()
FreeConsolePrototype = WINFUNCTYPE(BOOL)
FreeConsoleParams = ()
#def GetStdHandle(nStdHandle):
# return GetStdHandle.ctypes_function(nStdHandle)
GetStdHandlePrototype = WINFUNCTYPE(HANDLE, DWORD)
GetStdHandleParams = ((1, 'nStdHandle'),)
#def SetStdHandle(nStdHandle, hHandle):
# return SetStdHandle.ctypes_function(nStdHandle, hHandle)
SetStdHandlePrototype = WINFUNCTYPE(BOOL, DWORD, HANDLE)
SetStdHandleParams = ((1, 'nStdHandle'), (1, 'hHandle'))
#def SetThreadAffinityMask(hThread, dwThreadAffinityMask):
# return SetThreadAffinityMask.ctypes_function(hThread, dwThreadAffinityMask)
SetThreadAffinityMaskPrototype = WINFUNCTYPE(DWORD, HANDLE, DWORD)
SetThreadAffinityMaskParams = ((1, 'hThread'), (1, 'dwThreadAffinityMask'))
#def ReadFile(hFile, lpBuffer, nNumberOfBytesToRead, lpNumberOfBytesRead, lpOverlapped):
# return ReadFile.ctypes_function(hFile, lpBuffer, nNumberOfBytesToRead, lpNumberOfBytesRead, lpOverlapped)
ReadFilePrototype = WINFUNCTYPE(BOOL, HANDLE, LPVOID, DWORD, LPDWORD, LPOVERLAPPED)
ReadFileParams = ((1, 'hFile'), (1, 'lpBuffer'), (1, 'nNumberOfBytesToRead'), (1, 'lpNumberOfBytesRead'), (1, 'lpOverlapped'))
#def WriteFile(hFile, lpBuffer, nNumberOfBytesToWrite, lpNumberOfBytesWritten, lpOverlapped):
# return WriteFile.ctypes_function(hFile, lpBuffer, nNumberOfBytesToWrite, lpNumberOfBytesWritten, lpOverlapped)
WriteFilePrototype = WINFUNCTYPE(BOOL, HANDLE, LPCVOID, DWORD, LPDWORD, LPOVERLAPPED)
WriteFileParams = ((1, 'hFile'), (1, 'lpBuffer'), (1, 'nNumberOfBytesToWrite'), (1, 'lpNumberOfBytesWritten'), (1, 'lpOverlapped'))
#def GetExtendedTcpTable(pTcpTable, pdwSize, bOrder, ulAf, TableClass, Reserved):
# return GetExtendedTcpTable.ctypes_function(pTcpTable, pdwSize, bOrder, ulAf, TableClass, Reserved)
GetExtendedTcpTablePrototype = WINFUNCTYPE(DWORD, PVOID, PDWORD, BOOL, ULONG, TCP_TABLE_CLASS, ULONG)
GetExtendedTcpTableParams = ((1, 'pTcpTable'), (1, 'pdwSize'), (1, 'bOrder'), (1, 'ulAf'), (1, 'TableClass'), (1, 'Reserved'))
#def GetExtendedUdpTable(pUdpTable, pdwSize, bOrder, ulAf, TableClass, Reserved):
# return GetExtendedUdpTable.ctypes_function(pUdpTable, pdwSize, bOrder, ulAf, TableClass, Reserved)
GetExtendedUdpTablePrototype = WINFUNCTYPE(DWORD, PVOID, PDWORD, BOOL, ULONG, UDP_TABLE_CLASS, ULONG)
GetExtendedUdpTableParams = ((1, 'pUdpTable'), (1, 'pdwSize'), (1, 'bOrder'), (1, 'ulAf'), (1, 'TableClass'), (1, 'Reserved'))
#def SetTcpEntry(pTcpRow):
# return SetTcpEntry.ctypes_function(pTcpRow)
SetTcpEntryPrototype = WINFUNCTYPE(DWORD, PMIB_TCPROW)
SetTcpEntryParams = ((1, 'pTcpRow'),)
#def AddVectoredContinueHandler(FirstHandler, VectoredHandler):
# return AddVectoredContinueHandler.ctypes_function(FirstHandler, VectoredHandler)
AddVectoredContinueHandlerPrototype = WINFUNCTYPE(PVOID, ULONG, PVECTORED_EXCEPTION_HANDLER)
AddVectoredContinueHandlerParams = ((1, 'FirstHandler'), (1, 'VectoredHandler'))
#def AddVectoredExceptionHandler(FirstHandler, VectoredHandler):
# return AddVectoredExceptionHandler.ctypes_function(FirstHandler, VectoredHandler)
AddVectoredExceptionHandlerPrototype = WINFUNCTYPE(PVOID, ULONG, PVECTORED_EXCEPTION_HANDLER)
AddVectoredExceptionHandlerParams = ((1, 'FirstHandler'), (1, 'VectoredHandler'))
#def TerminateThread(hThread, dwExitCode):
# return TerminateThread.ctypes_function(hThread, dwExitCode)
TerminateThreadPrototype = WINFUNCTYPE(BOOL, HANDLE, DWORD)
TerminateThreadParams = ((1, 'hThread'), (1, 'dwExitCode'))
#def ExitThread(dwExitCode):
# return ExitThread.ctypes_function(dwExitCode)
ExitThreadPrototype = WINFUNCTYPE(VOID, DWORD)
ExitThreadParams = ((1, 'dwExitCode'),)
#def RemoveVectoredExceptionHandler(Handler):
# return RemoveVectoredExceptionHandler.ctypes_function(Handler)
RemoveVectoredExceptionHandlerPrototype = WINFUNCTYPE(ULONG, PVOID)
RemoveVectoredExceptionHandlerParams = ((1, 'Handler'),)
#def ResumeThread(hThread):
# return ResumeThread.ctypes_function(hThread)
ResumeThreadPrototype = WINFUNCTYPE(DWORD, HANDLE)
ResumeThreadParams = ((1, 'hThread'),)
#def SuspendThread(hThread):
# return SuspendThread.ctypes_function(hThread)
SuspendThreadPrototype = WINFUNCTYPE(DWORD, HANDLE)
SuspendThreadParams = ((1, 'hThread'),)
#def WaitForSingleObject(hHandle, dwMilliseconds):
# return WaitForSingleObject.ctypes_function(hHandle, dwMilliseconds)
WaitForSingleObjectPrototype = WINFUNCTYPE(DWORD, HANDLE, DWORD)
WaitForSingleObjectParams = ((1, 'hHandle'), (1, 'dwMilliseconds'))
#def GetThreadId(Thread):
# return GetThreadId.ctypes_function(Thread)
GetThreadIdPrototype = WINFUNCTYPE(DWORD, HANDLE)
GetThreadIdParams = ((1, 'Thread'),)
#def LoadLibraryExA(lpFileName, hFile, dwFlags):
# return LoadLibraryExA.ctypes_function(lpFileName, hFile, dwFlags)
LoadLibraryExAPrototype = WINFUNCTYPE(HMODULE, LPCSTR, HANDLE, DWORD)
LoadLibraryExAParams = ((1, 'lpFileName'), (1, 'hFile'), (1, 'dwFlags'))
#def LoadLibraryExW(lpFileName, hFile, dwFlags):
# return LoadLibraryExW.ctypes_function(lpFileName, hFile, dwFlags)
LoadLibraryExWPrototype = WINFUNCTYPE(HMODULE, LPCWSTR, HANDLE, DWORD)
LoadLibraryExWParams = ((1, 'lpFileName'), (1, 'hFile'), (1, 'dwFlags'))
#def SymInitialize(hProcess, UserSearchPath, fInvadeProcess):
# return SymInitialize.ctypes_function(hProcess, UserSearchPath, fInvadeProcess)
SymInitializePrototype = WINFUNCTYPE(BOOL, HANDLE, LPCSTR, BOOL)
SymInitializeParams = ((1, 'hProcess'), (1, 'UserSearchPath'), (1, 'fInvadeProcess'))
#def SymFromName(hProcess, Name, Symbol):
# return SymFromName.ctypes_function(hProcess, Name, Symbol)
SymFromNamePrototype = WINFUNCTYPE(BOOL, HANDLE, LPCSTR, PSYMBOL_INFO)
SymFromNameParams = ((1, 'hProcess'), (1, 'Name'), (1, 'Symbol'))
#def SymLoadModuleEx(hProcess, hFile, ImageName, ModuleName, BaseOfDll, DllSize, Data, Flags):
# return SymLoadModuleEx.ctypes_function(hProcess, hFile, ImageName, ModuleName, BaseOfDll, DllSize, Data, Flags)
SymLoadModuleExPrototype = WINFUNCTYPE(DWORD64, HANDLE, HANDLE, LPCSTR, LPCSTR, DWORD64, DWORD, PMODLOAD_DATA, DWORD)
SymLoadModuleExParams = ((1, 'hProcess'), (1, 'hFile'), (1, 'ImageName'), (1, 'ModuleName'), (1, 'BaseOfDll'), (1, 'DllSize'), (1, 'Data'), (1, 'Flags'))
#def SymSetOptions(SymOptions):
# return SymSetOptions.ctypes_function(SymOptions)
SymSetOptionsPrototype = WINFUNCTYPE(DWORD, DWORD)
SymSetOptionsParams = ((1, 'SymOptions'),)
#def SymGetTypeInfo(hProcess, ModBase, TypeId, GetType, pInfo):
# return SymGetTypeInfo.ctypes_function(hProcess, ModBase, TypeId, GetType, pInfo)
SymGetTypeInfoPrototype = WINFUNCTYPE(BOOL, HANDLE, DWORD64, ULONG, IMAGEHLP_SYMBOL_TYPE_INFO, PVOID)
SymGetTypeInfoParams = ((1, 'hProcess'), (1, 'ModBase'), (1, 'TypeId'), (1, 'GetType'), (1, 'pInfo'))
#def DeviceIoControl(hDevice, dwIoControlCode, lpInBuffer, nInBufferSize, lpOutBuffer, nOutBufferSize, lpBytesReturned, lpOverlapped):
# return DeviceIoControl.ctypes_function(hDevice, dwIoControlCode, lpInBuffer, nInBufferSize, lpOutBuffer, nOutBufferSize, lpBytesReturned, lpOverlapped)
DeviceIoControlPrototype = WINFUNCTYPE(BOOL, HANDLE, DWORD, LPVOID, DWORD, LPVOID, DWORD, LPDWORD, LPOVERLAPPED)
DeviceIoControlParams = ((1, 'hDevice'), (1, 'dwIoControlCode'), (1, 'lpInBuffer'), (1, 'nInBufferSize'), (1, 'lpOutBuffer'), (1, 'nOutBufferSize'), (1, 'lpBytesReturned'), (1, 'lpOverlapped'))
#def GetTokenInformation(TokenHandle, TokenInformationClass, TokenInformation, TokenInformationLength, ReturnLength):
# return GetTokenInformation.ctypes_function(TokenHandle, TokenInformationClass, TokenInformation, TokenInformationLength, ReturnLength)
GetTokenInformationPrototype = WINFUNCTYPE(BOOL, HANDLE, TOKEN_INFORMATION_CLASS, LPVOID, DWORD, PDWORD)
GetTokenInformationParams = ((1, 'TokenHandle'), (1, 'TokenInformationClass'), (1, 'TokenInformation'), (1, 'TokenInformationLength'), (1, 'ReturnLength'))
#def RegOpenKeyExA(hKey, lpSubKey, ulOptions, samDesired, phkResult):
# return RegOpenKeyExA.ctypes_function(hKey, lpSubKey, ulOptions, samDesired, phkResult)
RegOpenKeyExAPrototype = WINFUNCTYPE(LONG, HKEY, LPCSTR, DWORD, REGSAM, PHKEY)
RegOpenKeyExAParams = ((1, 'hKey'), (1, 'lpSubKey'), (1, 'ulOptions'), (1, 'samDesired'), (1, 'phkResult'))
#def RegOpenKeyExW(hKey, lpSubKey, ulOptions, samDesired, phkResult):
# return RegOpenKeyExW.ctypes_function(hKey, lpSubKey, ulOptions, samDesired, phkResult)
RegOpenKeyExWPrototype = WINFUNCTYPE(LONG, HKEY, LPWSTR, DWORD, REGSAM, PHKEY)
RegOpenKeyExWParams = ((1, 'hKey'), (1, 'lpSubKey'), (1, 'ulOptions'), (1, 'samDesired'), (1, 'phkResult'))
#def RegGetValueA(hkey, lpSubKey, lpValue, dwFlags, pdwType, pvData, pcbData):
# return RegGetValueA.ctypes_function(hkey, lpSubKey, lpValue, dwFlags, pdwType, pvData, pcbData)
RegGetValueAPrototype = WINFUNCTYPE(LONG, HKEY, LPCSTR, LPCSTR, DWORD, LPDWORD, PVOID, LPDWORD)
RegGetValueAParams = ((1, 'hkey'), (1, 'lpSubKey'), (1, 'lpValue'), (1, 'dwFlags'), (1, 'pdwType'), (1, 'pvData'), (1, 'pcbData'))
#def RegGetValueW(hkey, lpSubKey, lpValue, dwFlags, pdwType, pvData, pcbData):
# return RegGetValueW.ctypes_function(hkey, lpSubKey, lpValue, dwFlags, pdwType, pvData, pcbData)
RegGetValueWPrototype = WINFUNCTYPE(LONG, HKEY, LPWSTR, LPWSTR, DWORD, LPDWORD, PVOID, LPDWORD)
RegGetValueWParams = ((1, 'hkey'), (1, 'lpSubKey'), (1, 'lpValue'), (1, 'dwFlags'), (1, 'pdwType'), (1, 'pvData'), (1, 'pcbData'))
#def RegCloseKey(hKey):
# return RegCloseKey.ctypes_function(hKey)
RegCloseKeyPrototype = WINFUNCTYPE(LONG, HKEY)
RegCloseKeyParams = ((1, 'hKey'),)
#def Wow64DisableWow64FsRedirection(OldValue):
# return Wow64DisableWow64FsRedirection.ctypes_function(OldValue)
Wow64DisableWow64FsRedirectionPrototype = WINFUNCTYPE(BOOL, POINTER(PVOID))
Wow64DisableWow64FsRedirectionParams = ((1, 'OldValue'),)
#def Wow64RevertWow64FsRedirection(OldValue):
# return Wow64RevertWow64FsRedirection.ctypes_function(OldValue)
Wow64RevertWow64FsRedirectionPrototype = WINFUNCTYPE(BOOL, PVOID)
Wow64RevertWow64FsRedirectionParams = ((1, 'OldValue'),)
#def Wow64EnableWow64FsRedirection(Wow64FsEnableRedirection):
# return Wow64EnableWow64FsRedirection.ctypes_function(Wow64FsEnableRedirection)
Wow64EnableWow64FsRedirectionPrototype = WINFUNCTYPE(BOOLEAN, BOOLEAN)
Wow64EnableWow64FsRedirectionParams = ((1, 'Wow64FsEnableRedirection'),)
#def Wow64GetThreadContext(hThread, lpContext):
# return Wow64GetThreadContext.ctypes_function(hThread, lpContext)
Wow64GetThreadContextPrototype = WINFUNCTYPE(BOOL, HANDLE, PWOW64_CONTEXT)
Wow64GetThreadContextParams = ((1, 'hThread'), (1, 'lpContext'))
#def SetConsoleCtrlHandler(HandlerRoutine, Add):
# return SetConsoleCtrlHandler.ctypes_function(HandlerRoutine, Add)
SetConsoleCtrlHandlerPrototype = WINFUNCTYPE(BOOL, PHANDLER_ROUTINE, BOOL)
SetConsoleCtrlHandlerParams = ((1, 'HandlerRoutine'), (1, 'Add'))
#def WinVerifyTrust(hwnd, pgActionID, pWVTData):
# return WinVerifyTrust.ctypes_function(hwnd, pgActionID, pWVTData)
WinVerifyTrustPrototype = WINFUNCTYPE(LONG, HWND, POINTER(GUID), LPVOID)
WinVerifyTrustParams = ((1, 'hwnd'), (1, 'pgActionID'), (1, 'pWVTData'))
#def GlobalAlloc(uFlags, dwBytes):
# return GlobalAlloc.ctypes_function(uFlags, dwBytes)
GlobalAllocPrototype = WINFUNCTYPE(HGLOBAL, UINT, SIZE_T)
GlobalAllocParams = ((1, 'uFlags'), (1, 'dwBytes'))
#def GlobalFree(hMem):
# return GlobalFree.ctypes_function(hMem)
GlobalFreePrototype = WINFUNCTYPE(HGLOBAL, HGLOBAL)
GlobalFreeParams = ((1, 'hMem'),)
#def GlobalUnlock(hMem):
# return GlobalUnlock.ctypes_function(hMem)
GlobalUnlockPrototype = WINFUNCTYPE(BOOL, HGLOBAL)
GlobalUnlockParams = ((1, 'hMem'),)
#def GlobalLock(hMem):
# return GlobalLock.ctypes_function(hMem)
GlobalLockPrototype = WINFUNCTYPE(LPVOID, HGLOBAL)
GlobalLockParams = ((1, 'hMem'),)
#def OpenClipboard(hWndNewOwner):
# return OpenClipboard.ctypes_function(hWndNewOwner)
OpenClipboardPrototype = WINFUNCTYPE(BOOL, HWND)
OpenClipboardParams = ((1, 'hWndNewOwner'),)
#def EmptyClipboard():
# return EmptyClipboard.ctypes_function()
EmptyClipboardPrototype = WINFUNCTYPE(BOOL)
EmptyClipboardParams = ()
#def CloseClipboard():
# return CloseClipboard.ctypes_function()
CloseClipboardPrototype = WINFUNCTYPE(BOOL)
CloseClipboardParams = ()
#def SetClipboardData(uFormat, hMem):
# return SetClipboardData.ctypes_function(uFormat, hMem)
SetClipboardDataPrototype = WINFUNCTYPE(HANDLE, UINT, HANDLE)
SetClipboardDataParams = ((1, 'uFormat'), (1, 'hMem'))
#def GetClipboardData(uFormat):
# return GetClipboardData.ctypes_function(uFormat)
GetClipboardDataPrototype = WINFUNCTYPE(HANDLE, UINT)
GetClipboardDataParams = ((1, 'uFormat'),)
#def EnumClipboardFormats(format):
# return EnumClipboardFormats.ctypes_function(format)
EnumClipboardFormatsPrototype = WINFUNCTYPE(UINT, UINT)
EnumClipboardFormatsParams = ((1, 'format'),)
#def GetClipboardFormatNameA(format, lpszFormatName, cchMaxCount):
# return GetClipboardFormatNameA.ctypes_function(format, lpszFormatName, cchMaxCount)
GetClipboardFormatNameAPrototype = WINFUNCTYPE(INT, UINT, LPCSTR, INT)
GetClipboardFormatNameAParams = ((1, 'format'), (1, 'lpszFormatName'), (1, 'cchMaxCount'))
#def GetClipboardFormatNameW(format, lpszFormatName, cchMaxCount):
# return GetClipboardFormatNameW.ctypes_function(format, lpszFormatName, cchMaxCount)
GetClipboardFormatNameWPrototype = WINFUNCTYPE(INT, UINT, LPCWSTR, INT)
GetClipboardFormatNameWParams = ((1, 'format'), (1, 'lpszFormatName'), (1, 'cchMaxCount'))
#def WinVerifyTrust(hWnd, pgActionID, pWVTData):
# return WinVerifyTrust.ctypes_function(hWnd, pgActionID, pWVTData)
WinVerifyTrustPrototype = WINFUNCTYPE(LONG, HWND, POINTER(GUID), LPVOID)
WinVerifyTrustParams = ((1, 'hWnd'), (1, 'pgActionID'), (1, 'pWVTData'))
#def OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle):
# return OpenProcessToken.ctypes_function(ProcessHandle, DesiredAccess, TokenHandle)
OpenProcessTokenPrototype = WINFUNCTYPE(BOOL, HANDLE, DWORD, PHANDLE)
OpenProcessTokenParams = ((1, 'ProcessHandle'), (1, 'DesiredAccess'), (1, 'TokenHandle'))
#def OpenThreadToken(ThreadHandle, DesiredAccess, OpenAsSelf, TokenHandle):
# return OpenThreadToken.ctypes_function(ThreadHandle, DesiredAccess, OpenAsSelf, TokenHandle)
OpenThreadTokenPrototype = WINFUNCTYPE(BOOL, HANDLE, DWORD, BOOL, PHANDLE)
OpenThreadTokenParams = ((1, 'ThreadHandle'), (1, 'DesiredAccess'), (1, 'OpenAsSelf'), (1, 'TokenHandle'))
#def GetTokenInformation(TokenHandle, TokenInformationClass, TokenInformation, TokenInformationLength, ReturnLength):
# return GetTokenInformation.ctypes_function(TokenHandle, TokenInformationClass, TokenInformation, TokenInformationLength, ReturnLength)
GetTokenInformationPrototype = WINFUNCTYPE(BOOL, HANDLE, TOKEN_INFORMATION_CLASS, LPVOID, DWORD, PDWORD)
GetTokenInformationParams = ((1, 'TokenHandle'), (1, 'TokenInformationClass'), (1, 'TokenInformation'), (1, 'TokenInformationLength'), (1, 'ReturnLength'))
#def SetTokenInformation(TokenHandle, TokenInformationClass, TokenInformation, TokenInformationLength):
# return SetTokenInformation.ctypes_function(TokenHandle, TokenInformationClass, TokenInformation, TokenInformationLength)
SetTokenInformationPrototype = WINFUNCTYPE(BOOL, HANDLE, TOKEN_INFORMATION_CLASS, LPVOID, DWORD)
SetTokenInformationParams = ((1, 'TokenHandle'), (1, 'TokenInformationClass'), (1, 'TokenInformation'), (1, 'TokenInformationLength'))
#def GetSidIdentifierAuthority(pSid):
# return GetSidIdentifierAuthority.ctypes_function(pSid)
GetSidIdentifierAuthorityPrototype = WINFUNCTYPE(PSID_IDENTIFIER_AUTHORITY, PSID)
GetSidIdentifierAuthorityParams = ((1, 'pSid'),)
#def GetSidSubAuthority(pSid, nSubAuthority):
# return GetSidSubAuthority.ctypes_function(pSid, nSubAuthority)
GetSidSubAuthorityPrototype = WINFUNCTYPE(PDWORD, PSID, DWORD)
GetSidSubAuthorityParams = ((1, 'pSid'), (1, 'nSubAuthority'))
#def GetSidSubAuthorityCount(pSid):
# return GetSidSubAuthorityCount.ctypes_function(pSid)
GetSidSubAuthorityCountPrototype = WINFUNCTYPE(LPBYTE, PSID)
GetSidSubAuthorityCountParams = ((1, 'pSid'),)
#def GetLengthSid(pSid):
# return GetLengthSid.ctypes_function(pSid)
GetLengthSidPrototype = WINFUNCTYPE(DWORD, PSID)
GetLengthSidParams = ((1, 'pSid'),)
#def CreateWellKnownSid(WellKnownSidType, DomainSid, pSid, cbSid):
# return CreateWellKnownSid.ctypes_function(WellKnownSidType, DomainSid, pSid, cbSid)
CreateWellKnownSidPrototype = WINFUNCTYPE(BOOL, WELL_KNOWN_SID_TYPE, PSID, PSID, POINTER(DWORD))
CreateWellKnownSidParams = ((1, 'WellKnownSidType'), (1, 'DomainSid'), (1, 'pSid'), (1, 'cbSid'))
#def DebugBreak():
# return DebugBreak.ctypes_function()
DebugBreakPrototype = WINFUNCTYPE(VOID)
DebugBreakParams = ()
#def WaitForDebugEvent(lpDebugEvent, dwMilliseconds):
# return WaitForDebugEvent.ctypes_function(lpDebugEvent, dwMilliseconds)
WaitForDebugEventPrototype = WINFUNCTYPE(BOOL, LPDEBUG_EVENT, DWORD)
WaitForDebugEventParams = ((1, 'lpDebugEvent'), (1, 'dwMilliseconds'))
#def ContinueDebugEvent(dwProcessId, dwThreadId, dwContinueStatus):
# return ContinueDebugEvent.ctypes_function(dwProcessId, dwThreadId, dwContinueStatus)
ContinueDebugEventPrototype = WINFUNCTYPE(BOOL, DWORD, DWORD, DWORD)
ContinueDebugEventParams = ((1, 'dwProcessId'), (1, 'dwThreadId'), (1, 'dwContinueStatus'))
#def DebugActiveProcess(dwProcessId):
# return DebugActiveProcess.ctypes_function(dwProcessId)
DebugActiveProcessPrototype = WINFUNCTYPE(BOOL, DWORD)
DebugActiveProcessParams = ((1, 'dwProcessId'),)
#def DebugActiveProcessStop(dwProcessId):
# return DebugActiveProcessStop.ctypes_function(dwProcessId)
DebugActiveProcessStopPrototype = WINFUNCTYPE(BOOL, DWORD)
DebugActiveProcessStopParams = ((1, 'dwProcessId'),)
#def DebugSetProcessKillOnExit(KillOnExit):
# return DebugSetProcessKillOnExit.ctypes_function(KillOnExit)
DebugSetProcessKillOnExitPrototype = WINFUNCTYPE(BOOL, BOOL)
DebugSetProcessKillOnExitParams = ((1, 'KillOnExit'),)
#def DebugBreakProcess(Process):
# return DebugBreakProcess.ctypes_function(Process)
DebugBreakProcessPrototype = WINFUNCTYPE(BOOL, HANDLE)
DebugBreakProcessParams = ((1, 'Process'),)
#def GetProcessId(Process):
# return GetProcessId.ctypes_function(Process)
GetProcessIdPrototype = WINFUNCTYPE(DWORD, HANDLE)
GetProcessIdParams = ((1, 'Process'),)
#def Wow64SetThreadContext(hThread, lpContext):
# return Wow64SetThreadContext.ctypes_function(hThread, lpContext)
Wow64SetThreadContextPrototype = WINFUNCTYPE(BOOL, HANDLE, POINTER(WOW64_CONTEXT))
Wow64SetThreadContextParams = ((1, 'hThread'), (1, 'lpContext'))
#def GetMappedFileNameW(hProcess, lpv, lpFilename, nSize):
# return GetMappedFileNameW.ctypes_function(hProcess, lpv, lpFilename, nSize)
GetMappedFileNameWPrototype = WINFUNCTYPE(DWORD, HANDLE, LPVOID, PVOID, DWORD)
GetMappedFileNameWParams = ((1, 'hProcess'), (1, 'lpv'), (1, 'lpFilename'), (1, 'nSize'))
#def GetMappedFileNameA(hProcess, lpv, lpFilename, nSize):
# return GetMappedFileNameA.ctypes_function(hProcess, lpv, lpFilename, nSize)
GetMappedFileNameAPrototype = WINFUNCTYPE(DWORD, HANDLE, LPVOID, PVOID, DWORD)
GetMappedFileNameAParams = ((1, 'hProcess'), (1, 'lpv'), (1, 'lpFilename'), (1, 'nSize'))
#def RtlInitString(DestinationString, SourceString):
# return RtlInitString.ctypes_function(DestinationString, SourceString)
RtlInitStringPrototype = WINFUNCTYPE(VOID, PSTRING, LPCSTR)
RtlInitStringParams = ((1, 'DestinationString'), (1, 'SourceString'))
#def RtlInitUnicodeString(DestinationString, SourceString):
# return RtlInitUnicodeString.ctypes_function(DestinationString, SourceString)
RtlInitUnicodeStringPrototype = WINFUNCTYPE(VOID, PUNICODE_STRING, LPCWSTR)
RtlInitUnicodeStringParams = ((1, 'DestinationString'), (1, 'SourceString'))
#def RtlAnsiStringToUnicodeString(DestinationString, SourceString, AllocateDestinationString):
# return RtlAnsiStringToUnicodeString.ctypes_function(DestinationString, SourceString, AllocateDestinationString)
RtlAnsiStringToUnicodeStringPrototype = WINFUNCTYPE(NTSTATUS, PUNICODE_STRING, PCANSI_STRING, BOOLEAN)
RtlAnsiStringToUnicodeStringParams = ((1, 'DestinationString'), (1, 'SourceString'), (1, 'AllocateDestinationString'))
#def RtlDecompressBuffer(CompressionFormat, UncompressedBuffer, UncompressedBufferSize, CompressedBuffer, CompressedBufferSize, FinalUncompressedSize):
# return RtlDecompressBuffer.ctypes_function(CompressionFormat, UncompressedBuffer, UncompressedBufferSize, CompressedBuffer, CompressedBufferSize, FinalUncompressedSize)
RtlDecompressBufferPrototype = WINFUNCTYPE(NTSTATUS, USHORT, PUCHAR, ULONG, PUCHAR, ULONG, PULONG)
RtlDecompressBufferParams = ((1, 'CompressionFormat'), (1, 'UncompressedBuffer'), (1, 'UncompressedBufferSize'), (1, 'CompressedBuffer'), (1, 'CompressedBufferSize'), (1, 'FinalUncompressedSize'))
#def RtlDecompressBufferEx(CompressionFormat, UncompressedBuffer, UncompressedBufferSize, CompressedBuffer, CompressedBufferSize, FinalUncompressedSize, WorkSpace):
# return RtlDecompressBufferEx.ctypes_function(CompressionFormat, UncompressedBuffer, UncompressedBufferSize, CompressedBuffer, CompressedBufferSize, FinalUncompressedSize, WorkSpace)
RtlDecompressBufferExPrototype = WINFUNCTYPE(NTSTATUS, USHORT, PUCHAR, ULONG, PUCHAR, ULONG, PULONG, PVOID)
RtlDecompressBufferExParams = ((1, 'CompressionFormat'), (1, 'UncompressedBuffer'), (1, 'UncompressedBufferSize'), (1, 'CompressedBuffer'), (1, 'CompressedBufferSize'), (1, 'FinalUncompressedSize'), (1, 'WorkSpace'))
#def RtlGetCompressionWorkSpaceSize(CompressionFormatAndEngine, CompressBufferWorkSpaceSize, CompressFragmentWorkSpaceSize):
# return RtlGetCompressionWorkSpaceSize.ctypes_function(CompressionFormatAndEngine, CompressBufferWorkSpaceSize, CompressFragmentWorkSpaceSize)
RtlGetCompressionWorkSpaceSizePrototype = WINFUNCTYPE(NTSTATUS, USHORT, PULONG, PULONG)
RtlGetCompressionWorkSpaceSizeParams = ((1, 'CompressionFormatAndEngine'), (1, 'CompressBufferWorkSpaceSize'), (1, 'CompressFragmentWorkSpaceSize'))
#def NtCreateSection(SectionHandle, DesiredAccess, ObjectAttributes, MaximumSize, SectionPageProtection, AllocationAttributes, FileHandle):
# return NtCreateSection.ctypes_function(SectionHandle, DesiredAccess, ObjectAttributes, MaximumSize, SectionPageProtection, AllocationAttributes, FileHandle)
NtCreateSectionPrototype = WINFUNCTYPE(NTSTATUS, PHANDLE, ACCESS_MASK, POBJECT_ATTRIBUTES, PLARGE_INTEGER, ULONG, ULONG, HANDLE)
NtCreateSectionParams = ((1, 'SectionHandle'), (1, 'DesiredAccess'), (1, 'ObjectAttributes'), (1, 'MaximumSize'), (1, 'SectionPageProtection'), (1, 'AllocationAttributes'), (1, 'FileHandle'))
#def NtOpenSection(SectionHandle, DesiredAccess, ObjectAttributes):
# return NtOpenSection.ctypes_function(SectionHandle, DesiredAccess, ObjectAttributes)
NtOpenSectionPrototype = WINFUNCTYPE(NTSTATUS, PHANDLE, ACCESS_MASK, POBJECT_ATTRIBUTES)
NtOpenSectionParams = ((1, 'SectionHandle'), (1, 'DesiredAccess'), (1, 'ObjectAttributes'))
#def NtMapViewOfSection(SectionHandle, ProcessHandle, BaseAddress, ZeroBits, CommitSize, SectionOffset, ViewSize, InheritDisposition, AllocationType, Win32Protect):
# return NtMapViewOfSection.ctypes_function(SectionHandle, ProcessHandle, BaseAddress, ZeroBits, CommitSize, SectionOffset, ViewSize, InheritDisposition, AllocationType, Win32Protect)
NtMapViewOfSectionPrototype = WINFUNCTYPE(NTSTATUS, HANDLE, HANDLE, POINTER(PVOID), ULONG_PTR, SIZE_T, PLARGE_INTEGER, PSIZE_T, SECTION_INHERIT, ULONG, ULONG)
NtMapViewOfSectionParams = ((1, 'SectionHandle'), (1, 'ProcessHandle'), (1, 'BaseAddress'), (1, 'ZeroBits'), (1, 'CommitSize'), (1, 'SectionOffset'), (1, 'ViewSize'), (1, 'InheritDisposition'), (1, 'AllocationType'), (1, 'Win32Protect'))
#def NtUnmapViewOfSection(ProcessHandle, BaseAddress):
# return NtUnmapViewOfSection.ctypes_function(ProcessHandle, BaseAddress)
NtUnmapViewOfSectionPrototype = WINFUNCTYPE(NTSTATUS, HANDLE, PVOID)
NtUnmapViewOfSectionParams = ((1, 'ProcessHandle'), (1, 'BaseAddress'))
#def OpenEventA(dwDesiredAccess, bInheritHandle, lpName):
# return OpenEventA.ctypes_function(dwDesiredAccess, bInheritHandle, lpName)
OpenEventAPrototype = WINFUNCTYPE(HANDLE, DWORD, BOOL, LPCSTR)
OpenEventAParams = ((1, 'dwDesiredAccess'), (1, 'bInheritHandle'), (1, 'lpName'))
#def OpenEventW(dwDesiredAccess, bInheritHandle, lpName):
# return OpenEventW.ctypes_function(dwDesiredAccess, bInheritHandle, lpName)
OpenEventWPrototype = WINFUNCTYPE(HANDLE, DWORD, BOOL, LPCWSTR)
OpenEventWParams = ((1, 'dwDesiredAccess'), (1, 'bInheritHandle'), (1, 'lpName'))
#def NtOpenEvent(EventHandle, DesiredAccess, ObjectAttributes):
# return NtOpenEvent.ctypes_function(EventHandle, DesiredAccess, ObjectAttributes)
NtOpenEventPrototype = WINFUNCTYPE(NTSTATUS, PHANDLE, ACCESS_MASK, POBJECT_ATTRIBUTES)
NtOpenEventParams = ((1, 'EventHandle'), (1, 'DesiredAccess'), (1, 'ObjectAttributes'))
#def lstrcmpA(lpString1, lpString2):
# return lstrcmpA.ctypes_function(lpString1, lpString2)
lstrcmpAPrototype = WINFUNCTYPE(INT, LPCSTR, LPCSTR)
lstrcmpAParams = ((1, 'lpString1'), (1, 'lpString2'))
#def lstrcmpW(lpString1, lpString2):
# return lstrcmpW.ctypes_function(lpString1, lpString2)
lstrcmpWPrototype = WINFUNCTYPE(INT, LPCWSTR, LPCWSTR)
lstrcmpWParams = ((1, 'lpString1'), (1, 'lpString2'))
#def CreateFileMappingA(hFile, lpFileMappingAttributes, flProtect, dwMaximumSizeHigh, dwMaximumSizeLow, lpName):
# return CreateFileMappingA.ctypes_function(hFile, lpFileMappingAttributes, flProtect, dwMaximumSizeHigh, dwMaximumSizeLow, lpName)
CreateFileMappingAPrototype = WINFUNCTYPE(HANDLE, HANDLE, LPSECURITY_ATTRIBUTES, DWORD, DWORD, DWORD, LPCSTR)
CreateFileMappingAParams = ((1, 'hFile'), (1, 'lpFileMappingAttributes'), (1, 'flProtect'), (1, 'dwMaximumSizeHigh'), (1, 'dwMaximumSizeLow'), (1, 'lpName'))
#def CreateFileMappingW(hFile, lpFileMappingAttributes, flProtect, dwMaximumSizeHigh, dwMaximumSizeLow, lpName):
# return CreateFileMappingW.ctypes_function(hFile, lpFileMappingAttributes, flProtect, dwMaximumSizeHigh, dwMaximumSizeLow, lpName)
CreateFileMappingWPrototype = WINFUNCTYPE(HANDLE, HANDLE, LPSECURITY_ATTRIBUTES, DWORD, DWORD, DWORD, LPCWSTR)
CreateFileMappingWParams = ((1, 'hFile'), (1, 'lpFileMappingAttributes'), (1, 'flProtect'), (1, 'dwMaximumSizeHigh'), (1, 'dwMaximumSizeLow'), (1, 'lpName'))
#def MapViewOfFile(hFileMappingObject, dwDesiredAccess, dwFileOffsetHigh, dwFileOffsetLow, dwNumberOfBytesToMap):
# return MapViewOfFile.ctypes_function(hFileMappingObject, dwDesiredAccess, dwFileOffsetHigh, dwFileOffsetLow, dwNumberOfBytesToMap)
MapViewOfFilePrototype = WINFUNCTYPE(LPVOID, HANDLE, DWORD, DWORD, DWORD, SIZE_T)
MapViewOfFileParams = ((1, 'hFileMappingObject'), (1, 'dwDesiredAccess'), (1, 'dwFileOffsetHigh'), (1, 'dwFileOffsetLow'), (1, 'dwNumberOfBytesToMap'))
# --- Generated ctypes prototypes: Service Control Manager (advapi32) ---
#def OpenSCManagerA(lpMachineName, lpDatabaseName, dwDesiredAccess):
#    return OpenSCManagerA.ctypes_function(lpMachineName, lpDatabaseName, dwDesiredAccess)
OpenSCManagerAPrototype = WINFUNCTYPE(SC_HANDLE, LPCSTR, LPCSTR, DWORD)
OpenSCManagerAParams = ((1, 'lpMachineName'), (1, 'lpDatabaseName'), (1, 'dwDesiredAccess'))
#def OpenSCManagerW(lpMachineName, lpDatabaseName, dwDesiredAccess):
#    return OpenSCManagerW.ctypes_function(lpMachineName, lpDatabaseName, dwDesiredAccess)
OpenSCManagerWPrototype = WINFUNCTYPE(SC_HANDLE, LPCWSTR, LPCWSTR, DWORD)
OpenSCManagerWParams = ((1, 'lpMachineName'), (1, 'lpDatabaseName'), (1, 'dwDesiredAccess'))
#def CloseServiceHandle(hSCObject):
#    return CloseServiceHandle.ctypes_function(hSCObject)
CloseServiceHandlePrototype = WINFUNCTYPE(BOOL, SC_HANDLE)
CloseServiceHandleParams = ((1, 'hSCObject'),)
#def EnumServicesStatusExA(hSCManager, InfoLevel, dwServiceType, dwServiceState, lpServices, cbBufSize, pcbBytesNeeded, lpServicesReturned, lpResumeHandle, pszGroupName):
#    return EnumServicesStatusExA.ctypes_function(hSCManager, InfoLevel, dwServiceType, dwServiceState, lpServices, cbBufSize, pcbBytesNeeded, lpServicesReturned, lpResumeHandle, pszGroupName)
EnumServicesStatusExAPrototype = WINFUNCTYPE(BOOL, SC_HANDLE, SC_ENUM_TYPE, DWORD, DWORD, LPBYTE, DWORD, LPDWORD, LPDWORD, LPDWORD, LPCSTR)
EnumServicesStatusExAParams = ((1, 'hSCManager'), (1, 'InfoLevel'), (1, 'dwServiceType'), (1, 'dwServiceState'), (1, 'lpServices'), (1, 'cbBufSize'), (1, 'pcbBytesNeeded'), (1, 'lpServicesReturned'), (1, 'lpResumeHandle'), (1, 'pszGroupName'))
#def EnumServicesStatusExW(hSCManager, InfoLevel, dwServiceType, dwServiceState, lpServices, cbBufSize, pcbBytesNeeded, lpServicesReturned, lpResumeHandle, pszGroupName):
#    return EnumServicesStatusExW.ctypes_function(hSCManager, InfoLevel, dwServiceType, dwServiceState, lpServices, cbBufSize, pcbBytesNeeded, lpServicesReturned, lpResumeHandle, pszGroupName)
EnumServicesStatusExWPrototype = WINFUNCTYPE(BOOL, SC_HANDLE, SC_ENUM_TYPE, DWORD, DWORD, LPBYTE, DWORD, LPDWORD, LPDWORD, LPDWORD, LPCWSTR)
EnumServicesStatusExWParams = ((1, 'hSCManager'), (1, 'InfoLevel'), (1, 'dwServiceType'), (1, 'dwServiceState'), (1, 'lpServices'), (1, 'cbBufSize'), (1, 'pcbBytesNeeded'), (1, 'lpServicesReturned'), (1, 'lpResumeHandle'), (1, 'pszGroupName'))
#def StartServiceA(hService, dwNumServiceArgs, lpServiceArgVectors):
#    return StartServiceA.ctypes_function(hService, dwNumServiceArgs, lpServiceArgVectors)
StartServiceAPrototype = WINFUNCTYPE(BOOL, SC_HANDLE, DWORD, POINTER(LPCSTR))
StartServiceAParams = ((1, 'hService'), (1, 'dwNumServiceArgs'), (1, 'lpServiceArgVectors'))
#def StartServiceW(hService, dwNumServiceArgs, lpServiceArgVectors):
#    return StartServiceW.ctypes_function(hService, dwNumServiceArgs, lpServiceArgVectors)
StartServiceWPrototype = WINFUNCTYPE(BOOL, SC_HANDLE, DWORD, POINTER(LPCWSTR))
StartServiceWParams = ((1, 'hService'), (1, 'dwNumServiceArgs'), (1, 'lpServiceArgVectors'))
#def OpenServiceA(hSCManager, lpServiceName, dwDesiredAccess):
#    return OpenServiceA.ctypes_function(hSCManager, lpServiceName, dwDesiredAccess)
OpenServiceAPrototype = WINFUNCTYPE(SC_HANDLE, SC_HANDLE, LPCSTR, DWORD)
OpenServiceAParams = ((1, 'hSCManager'), (1, 'lpServiceName'), (1, 'dwDesiredAccess'))
#def OpenServiceW(hSCManager, lpServiceName, dwDesiredAccess):
#    return OpenServiceW.ctypes_function(hSCManager, lpServiceName, dwDesiredAccess)
OpenServiceWPrototype = WINFUNCTYPE(SC_HANDLE, SC_HANDLE, LPCWSTR, DWORD)
OpenServiceWParams = ((1, 'hSCManager'), (1, 'lpServiceName'), (1, 'dwDesiredAccess'))
# --- Generated ctypes prototypes: logical drives, volumes, DOS devices ---
# NOTE(review): several out-buffers below (e.g. lpBuffer, lpszVolumeName,
# lpTargetPath) are typed LPCSTR although the native API takes LPSTR. In
# ctypes both map to c_char_p, so this is harmless — TODO confirm against the
# prototype generator before "fixing".
#def GetLogicalDriveStringsA(nBufferLength, lpBuffer):
#    return GetLogicalDriveStringsA.ctypes_function(nBufferLength, lpBuffer)
GetLogicalDriveStringsAPrototype = WINFUNCTYPE(DWORD, DWORD, LPCSTR)
GetLogicalDriveStringsAParams = ((1, 'nBufferLength'), (1, 'lpBuffer'))
#def GetLogicalDriveStringsW(nBufferLength, lpBuffer):
#    return GetLogicalDriveStringsW.ctypes_function(nBufferLength, lpBuffer)
GetLogicalDriveStringsWPrototype = WINFUNCTYPE(DWORD, DWORD, LPWSTR)
GetLogicalDriveStringsWParams = ((1, 'nBufferLength'), (1, 'lpBuffer'))
#def GetVolumeInformationA(lpRootPathName, lpVolumeNameBuffer, nVolumeNameSize, lpVolumeSerialNumber, lpMaximumComponentLength, lpFileSystemFlags, lpFileSystemNameBuffer, nFileSystemNameSize):
#    return GetVolumeInformationA.ctypes_function(lpRootPathName, lpVolumeNameBuffer, nVolumeNameSize, lpVolumeSerialNumber, lpMaximumComponentLength, lpFileSystemFlags, lpFileSystemNameBuffer, nFileSystemNameSize)
GetVolumeInformationAPrototype = WINFUNCTYPE(BOOL, LPCSTR, LPSTR, DWORD, LPDWORD, LPDWORD, LPDWORD, LPSTR, DWORD)
GetVolumeInformationAParams = ((1, 'lpRootPathName'), (1, 'lpVolumeNameBuffer'), (1, 'nVolumeNameSize'), (1, 'lpVolumeSerialNumber'), (1, 'lpMaximumComponentLength'), (1, 'lpFileSystemFlags'), (1, 'lpFileSystemNameBuffer'), (1, 'nFileSystemNameSize'))
#def GetVolumeInformationW(lpRootPathName, lpVolumeNameBuffer, nVolumeNameSize, lpVolumeSerialNumber, lpMaximumComponentLength, lpFileSystemFlags, lpFileSystemNameBuffer, nFileSystemNameSize):
#    return GetVolumeInformationW.ctypes_function(lpRootPathName, lpVolumeNameBuffer, nVolumeNameSize, lpVolumeSerialNumber, lpMaximumComponentLength, lpFileSystemFlags, lpFileSystemNameBuffer, nFileSystemNameSize)
GetVolumeInformationWPrototype = WINFUNCTYPE(BOOL, LPWSTR, LPWSTR, DWORD, LPDWORD, LPDWORD, LPDWORD, LPWSTR, DWORD)
GetVolumeInformationWParams = ((1, 'lpRootPathName'), (1, 'lpVolumeNameBuffer'), (1, 'nVolumeNameSize'), (1, 'lpVolumeSerialNumber'), (1, 'lpMaximumComponentLength'), (1, 'lpFileSystemFlags'), (1, 'lpFileSystemNameBuffer'), (1, 'nFileSystemNameSize'))
#def GetVolumeNameForVolumeMountPointA(lpszVolumeMountPoint, lpszVolumeName, cchBufferLength):
#    return GetVolumeNameForVolumeMountPointA.ctypes_function(lpszVolumeMountPoint, lpszVolumeName, cchBufferLength)
GetVolumeNameForVolumeMountPointAPrototype = WINFUNCTYPE(BOOL, LPCSTR, LPCSTR, DWORD)
GetVolumeNameForVolumeMountPointAParams = ((1, 'lpszVolumeMountPoint'), (1, 'lpszVolumeName'), (1, 'cchBufferLength'))
#def GetVolumeNameForVolumeMountPointW(lpszVolumeMountPoint, lpszVolumeName, cchBufferLength):
#    return GetVolumeNameForVolumeMountPointW.ctypes_function(lpszVolumeMountPoint, lpszVolumeName, cchBufferLength)
GetVolumeNameForVolumeMountPointWPrototype = WINFUNCTYPE(BOOL, LPWSTR, LPWSTR, DWORD)
GetVolumeNameForVolumeMountPointWParams = ((1, 'lpszVolumeMountPoint'), (1, 'lpszVolumeName'), (1, 'cchBufferLength'))
#def GetDriveTypeA(lpRootPathName):
#    return GetDriveTypeA.ctypes_function(lpRootPathName)
GetDriveTypeAPrototype = WINFUNCTYPE(UINT, LPCSTR)
GetDriveTypeAParams = ((1, 'lpRootPathName'),)
#def GetDriveTypeW(lpRootPathName):
#    return GetDriveTypeW.ctypes_function(lpRootPathName)
GetDriveTypeWPrototype = WINFUNCTYPE(UINT, LPWSTR)
GetDriveTypeWParams = ((1, 'lpRootPathName'),)
#def QueryDosDeviceA(lpDeviceName, lpTargetPath, ucchMax):
#    return QueryDosDeviceA.ctypes_function(lpDeviceName, lpTargetPath, ucchMax)
QueryDosDeviceAPrototype = WINFUNCTYPE(DWORD, LPCSTR, LPCSTR, DWORD)
QueryDosDeviceAParams = ((1, 'lpDeviceName'), (1, 'lpTargetPath'), (1, 'ucchMax'))
#def QueryDosDeviceW(lpDeviceName, lpTargetPath, ucchMax):
#    return QueryDosDeviceW.ctypes_function(lpDeviceName, lpTargetPath, ucchMax)
QueryDosDeviceWPrototype = WINFUNCTYPE(DWORD, LPWSTR, LPWSTR, DWORD)
QueryDosDeviceWParams = ((1, 'lpDeviceName'), (1, 'lpTargetPath'), (1, 'ucchMax'))
#def FindFirstVolumeA(lpszVolumeName, cchBufferLength):
#    return FindFirstVolumeA.ctypes_function(lpszVolumeName, cchBufferLength)
FindFirstVolumeAPrototype = WINFUNCTYPE(HANDLE, LPCSTR, DWORD)
FindFirstVolumeAParams = ((1, 'lpszVolumeName'), (1, 'cchBufferLength'))
#def FindFirstVolumeW(lpszVolumeName, cchBufferLength):
#    return FindFirstVolumeW.ctypes_function(lpszVolumeName, cchBufferLength)
FindFirstVolumeWPrototype = WINFUNCTYPE(HANDLE, LPWSTR, DWORD)
FindFirstVolumeWParams = ((1, 'lpszVolumeName'), (1, 'cchBufferLength'))
#def FindNextVolumeA(hFindVolume, lpszVolumeName, cchBufferLength):
#    return FindNextVolumeA.ctypes_function(hFindVolume, lpszVolumeName, cchBufferLength)
FindNextVolumeAPrototype = WINFUNCTYPE(BOOL, HANDLE, LPCSTR, DWORD)
FindNextVolumeAParams = ((1, 'hFindVolume'), (1, 'lpszVolumeName'), (1, 'cchBufferLength'))
#def FindNextVolumeW(hFindVolume, lpszVolumeName, cchBufferLength):
#    return FindNextVolumeW.ctypes_function(hFindVolume, lpszVolumeName, cchBufferLength)
FindNextVolumeWPrototype = WINFUNCTYPE(BOOL, HANDLE, LPWSTR, DWORD)
FindNextVolumeWParams = ((1, 'hFindVolume'), (1, 'lpszVolumeName'), (1, 'cchBufferLength'))
# --- Generated ctypes prototypes: handle/object query & duplication, psapi names ---
#def NtQueryObject(Handle, ObjectInformationClass, ObjectInformation, ObjectInformationLength, ReturnLength):
#    return NtQueryObject.ctypes_function(Handle, ObjectInformationClass, ObjectInformation, ObjectInformationLength, ReturnLength)
NtQueryObjectPrototype = WINFUNCTYPE(NTSTATUS, HANDLE, OBJECT_INFORMATION_CLASS, PVOID, ULONG, PULONG)
NtQueryObjectParams = ((1, 'Handle'), (1, 'ObjectInformationClass'), (1, 'ObjectInformation'), (1, 'ObjectInformationLength'), (1, 'ReturnLength'))
#def DuplicateHandle(hSourceProcessHandle, hSourceHandle, hTargetProcessHandle, lpTargetHandle, dwDesiredAccess, bInheritHandle, dwOptions):
#    return DuplicateHandle.ctypes_function(hSourceProcessHandle, hSourceHandle, hTargetProcessHandle, lpTargetHandle, dwDesiredAccess, bInheritHandle, dwOptions)
DuplicateHandlePrototype = WINFUNCTYPE(BOOL, HANDLE, HANDLE, HANDLE, LPHANDLE, DWORD, BOOL, DWORD)
DuplicateHandleParams = ((1, 'hSourceProcessHandle'), (1, 'hSourceHandle'), (1, 'hTargetProcessHandle'), (1, 'lpTargetHandle'), (1, 'dwDesiredAccess'), (1, 'bInheritHandle'), (1, 'dwOptions'))
#def ZwDuplicateObject(SourceProcessHandle, SourceHandle, TargetProcessHandle, TargetHandle, DesiredAccess, HandleAttributes, Options):
#    return ZwDuplicateObject.ctypes_function(SourceProcessHandle, SourceHandle, TargetProcessHandle, TargetHandle, DesiredAccess, HandleAttributes, Options)
ZwDuplicateObjectPrototype = WINFUNCTYPE(NTSTATUS, HANDLE, HANDLE, HANDLE, PHANDLE, ACCESS_MASK, ULONG, ULONG)
ZwDuplicateObjectParams = ((1, 'SourceProcessHandle'), (1, 'SourceHandle'), (1, 'TargetProcessHandle'), (1, 'TargetHandle'), (1, 'DesiredAccess'), (1, 'HandleAttributes'), (1, 'Options'))
#def GetModuleBaseNameA(hProcess, hModule, lpBaseName, nSize):
#    return GetModuleBaseNameA.ctypes_function(hProcess, hModule, lpBaseName, nSize)
GetModuleBaseNameAPrototype = WINFUNCTYPE(DWORD, HANDLE, HMODULE, LPCSTR, DWORD)
GetModuleBaseNameAParams = ((1, 'hProcess'), (1, 'hModule'), (1, 'lpBaseName'), (1, 'nSize'))
#def GetModuleBaseNameW(hProcess, hModule, lpBaseName, nSize):
#    return GetModuleBaseNameW.ctypes_function(hProcess, hModule, lpBaseName, nSize)
GetModuleBaseNameWPrototype = WINFUNCTYPE(DWORD, HANDLE, HMODULE, LPWSTR, DWORD)
GetModuleBaseNameWParams = ((1, 'hProcess'), (1, 'hModule'), (1, 'lpBaseName'), (1, 'nSize'))
#def GetProcessImageFileNameA(hProcess, lpImageFileName, nSize):
#    return GetProcessImageFileNameA.ctypes_function(hProcess, lpImageFileName, nSize)
GetProcessImageFileNameAPrototype = WINFUNCTYPE(DWORD, HANDLE, LPCSTR, DWORD)
GetProcessImageFileNameAParams = ((1, 'hProcess'), (1, 'lpImageFileName'), (1, 'nSize'))
#def GetProcessImageFileNameW(hProcess, lpImageFileName, nSize):
#    return GetProcessImageFileNameW.ctypes_function(hProcess, lpImageFileName, nSize)
GetProcessImageFileNameWPrototype = WINFUNCTYPE(DWORD, HANDLE, LPWSTR, DWORD)
GetProcessImageFileNameWParams = ((1, 'hProcess'), (1, 'lpImageFileName'), (1, 'nSize'))
# --- Generated ctypes prototypes: version-info (version.dll) and GetSystemMetrics ---
#def GetFileVersionInfoA(lptstrFilename, dwHandle, dwLen, lpData):
#    return GetFileVersionInfoA.ctypes_function(lptstrFilename, dwHandle, dwLen, lpData)
GetFileVersionInfoAPrototype = WINFUNCTYPE(BOOL, LPCSTR, DWORD, DWORD, LPVOID)
GetFileVersionInfoAParams = ((1, 'lptstrFilename'), (1, 'dwHandle'), (1, 'dwLen'), (1, 'lpData'))
#def GetFileVersionInfoW(lptstrFilename, dwHandle, dwLen, lpData):
#    return GetFileVersionInfoW.ctypes_function(lptstrFilename, dwHandle, dwLen, lpData)
GetFileVersionInfoWPrototype = WINFUNCTYPE(BOOL, LPWSTR, DWORD, DWORD, LPVOID)
GetFileVersionInfoWParams = ((1, 'lptstrFilename'), (1, 'dwHandle'), (1, 'dwLen'), (1, 'lpData'))
#def GetFileVersionInfoSizeA(lptstrFilename, lpdwHandle):
#    return GetFileVersionInfoSizeA.ctypes_function(lptstrFilename, lpdwHandle)
GetFileVersionInfoSizeAPrototype = WINFUNCTYPE(DWORD, LPCSTR, LPDWORD)
GetFileVersionInfoSizeAParams = ((1, 'lptstrFilename'), (1, 'lpdwHandle'))
#def GetFileVersionInfoSizeW(lptstrFilename, lpdwHandle):
#    return GetFileVersionInfoSizeW.ctypes_function(lptstrFilename, lpdwHandle)
GetFileVersionInfoSizeWPrototype = WINFUNCTYPE(DWORD, LPWSTR, LPDWORD)
GetFileVersionInfoSizeWParams = ((1, 'lptstrFilename'), (1, 'lpdwHandle'))
#def VerQueryValueA(pBlock, lpSubBlock, lplpBuffer, puLen):
#    return VerQueryValueA.ctypes_function(pBlock, lpSubBlock, lplpBuffer, puLen)
VerQueryValueAPrototype = WINFUNCTYPE(BOOL, LPCVOID, LPCSTR, POINTER(LPVOID), PUINT)
VerQueryValueAParams = ((1, 'pBlock'), (1, 'lpSubBlock'), (1, 'lplpBuffer'), (1, 'puLen'))
#def VerQueryValueW(pBlock, lpSubBlock, lplpBuffer, puLen):
#    return VerQueryValueW.ctypes_function(pBlock, lpSubBlock, lplpBuffer, puLen)
VerQueryValueWPrototype = WINFUNCTYPE(BOOL, LPCVOID, LPWSTR, POINTER(LPVOID), PUINT)
VerQueryValueWParams = ((1, 'pBlock'), (1, 'lpSubBlock'), (1, 'lplpBuffer'), (1, 'puLen'))
#def GetSystemMetrics(nIndex):
#    return GetSystemMetrics.ctypes_function(nIndex)
GetSystemMetricsPrototype = WINFUNCTYPE(INT, INT)
GetSystemMetricsParams = ((1, 'nIndex'),)
# --- Generated ctypes prototypes: computer name, SID account lookup, iphlpapi tables ---
#def GetComputerNameA(lpBuffer, lpnSize):
#    return GetComputerNameA.ctypes_function(lpBuffer, lpnSize)
GetComputerNameAPrototype = WINFUNCTYPE(BOOL, LPCSTR, LPDWORD)
GetComputerNameAParams = ((1, 'lpBuffer'), (1, 'lpnSize'))
#def GetComputerNameW(lpBuffer, lpnSize):
#    return GetComputerNameW.ctypes_function(lpBuffer, lpnSize)
GetComputerNameWPrototype = WINFUNCTYPE(BOOL, LPWSTR, LPDWORD)
GetComputerNameWParams = ((1, 'lpBuffer'), (1, 'lpnSize'))
#def LookupAccountSidA(lpSystemName, lpSid, lpName, cchName, lpReferencedDomainName, cchReferencedDomainName, peUse):
#    return LookupAccountSidA.ctypes_function(lpSystemName, lpSid, lpName, cchName, lpReferencedDomainName, cchReferencedDomainName, peUse)
LookupAccountSidAPrototype = WINFUNCTYPE(BOOL, LPCSTR, PSID, LPCSTR, LPDWORD, LPCSTR, LPDWORD, PSID_NAME_USE)
LookupAccountSidAParams = ((1, 'lpSystemName'), (1, 'lpSid'), (1, 'lpName'), (1, 'cchName'), (1, 'lpReferencedDomainName'), (1, 'cchReferencedDomainName'), (1, 'peUse'))
#def LookupAccountSidW(lpSystemName, lpSid, lpName, cchName, lpReferencedDomainName, cchReferencedDomainName, peUse):
#    return LookupAccountSidW.ctypes_function(lpSystemName, lpSid, lpName, cchName, lpReferencedDomainName, cchReferencedDomainName, peUse)
LookupAccountSidWPrototype = WINFUNCTYPE(BOOL, LPWSTR, PSID, LPWSTR, LPDWORD, LPWSTR, LPDWORD, PSID_NAME_USE)
LookupAccountSidWParams = ((1, 'lpSystemName'), (1, 'lpSid'), (1, 'lpName'), (1, 'cchName'), (1, 'lpReferencedDomainName'), (1, 'cchReferencedDomainName'), (1, 'peUse'))
#def GetInterfaceInfo(pIfTable, dwOutBufLen):
#    return GetInterfaceInfo.ctypes_function(pIfTable, dwOutBufLen)
GetInterfaceInfoPrototype = WINFUNCTYPE(DWORD, PIP_INTERFACE_INFO, PULONG)
GetInterfaceInfoParams = ((1, 'pIfTable'), (1, 'dwOutBufLen'))
#def GetIfTable(pIfTable, pdwSize, bOrder):
#    return GetIfTable.ctypes_function(pIfTable, pdwSize, bOrder)
GetIfTablePrototype = WINFUNCTYPE(DWORD, PMIB_IFTABLE, PULONG, BOOL)
GetIfTableParams = ((1, 'pIfTable'), (1, 'pdwSize'), (1, 'bOrder'))
#def GetIpAddrTable(pIpAddrTable, pdwSize, bOrder):
#    return GetIpAddrTable.ctypes_function(pIpAddrTable, pdwSize, bOrder)
GetIpAddrTablePrototype = WINFUNCTYPE(DWORD, PMIB_IPADDRTABLE, PULONG, BOOL)
GetIpAddrTableParams = ((1, 'pIpAddrTable'), (1, 'pdwSize'), (1, 'bOrder'))
# --- Generated ctypes prototypes: NT object-directory / symbolic-link objects, process times ---
#def NtOpenDirectoryObject(DirectoryHandle, DesiredAccess, ObjectAttributes):
#    return NtOpenDirectoryObject.ctypes_function(DirectoryHandle, DesiredAccess, ObjectAttributes)
NtOpenDirectoryObjectPrototype = WINFUNCTYPE(NTSTATUS, PHANDLE, ACCESS_MASK, POBJECT_ATTRIBUTES)
NtOpenDirectoryObjectParams = ((1, 'DirectoryHandle'), (1, 'DesiredAccess'), (1, 'ObjectAttributes'))
#def NtQueryDirectoryObject(DirectoryHandle, Buffer, Length, ReturnSingleEntry, RestartScan, Context, ReturnLength):
#    return NtQueryDirectoryObject.ctypes_function(DirectoryHandle, Buffer, Length, ReturnSingleEntry, RestartScan, Context, ReturnLength)
NtQueryDirectoryObjectPrototype = WINFUNCTYPE(NTSTATUS, HANDLE, PVOID, ULONG, BOOLEAN, BOOLEAN, PULONG, PULONG)
NtQueryDirectoryObjectParams = ((1, 'DirectoryHandle'), (1, 'Buffer'), (1, 'Length'), (1, 'ReturnSingleEntry'), (1, 'RestartScan'), (1, 'Context'), (1, 'ReturnLength'))
#def NtQuerySymbolicLinkObject(LinkHandle, LinkTarget, ReturnedLength):
#    return NtQuerySymbolicLinkObject.ctypes_function(LinkHandle, LinkTarget, ReturnedLength)
NtQuerySymbolicLinkObjectPrototype = WINFUNCTYPE(NTSTATUS, HANDLE, PUNICODE_STRING, PULONG)
NtQuerySymbolicLinkObjectParams = ((1, 'LinkHandle'), (1, 'LinkTarget'), (1, 'ReturnedLength'))
#def NtOpenSymbolicLinkObject(LinkHandle, DesiredAccess, ObjectAttributes):
#    return NtOpenSymbolicLinkObject.ctypes_function(LinkHandle, DesiredAccess, ObjectAttributes)
NtOpenSymbolicLinkObjectPrototype = WINFUNCTYPE(NTSTATUS, PHANDLE, ACCESS_MASK, POBJECT_ATTRIBUTES)
NtOpenSymbolicLinkObjectParams = ((1, 'LinkHandle'), (1, 'DesiredAccess'), (1, 'ObjectAttributes'))
#def GetProcessTimes(hProcess, lpCreationTime, lpExitTime, lpKernelTime, lpUserTime):
#    return GetProcessTimes.ctypes_function(hProcess, lpCreationTime, lpExitTime, lpKernelTime, lpUserTime)
GetProcessTimesPrototype = WINFUNCTYPE(BOOL, HANDLE, LPFILETIME, LPFILETIME, LPFILETIME, LPFILETIME)
GetProcessTimesParams = ((1, 'hProcess'), (1, 'lpCreationTime'), (1, 'lpExitTime'), (1, 'lpKernelTime'), (1, 'lpUserTime'))
# --- Generated ctypes prototypes: short/long path conversion, DEP policy ---
#def GetShortPathNameA(lpszLongPath, lpszShortPath, cchBuffer):
#    return GetShortPathNameA.ctypes_function(lpszLongPath, lpszShortPath, cchBuffer)
GetShortPathNameAPrototype = WINFUNCTYPE(DWORD, LPCSTR, LPCSTR, DWORD)
GetShortPathNameAParams = ((1, 'lpszLongPath'), (1, 'lpszShortPath'), (1, 'cchBuffer'))
#def GetShortPathNameW(lpszLongPath, lpszShortPath, cchBuffer):
#    return GetShortPathNameW.ctypes_function(lpszLongPath, lpszShortPath, cchBuffer)
GetShortPathNameWPrototype = WINFUNCTYPE(DWORD, LPWSTR, LPWSTR, DWORD)
GetShortPathNameWParams = ((1, 'lpszLongPath'), (1, 'lpszShortPath'), (1, 'cchBuffer'))
#def GetLongPathNameA(lpszShortPath, lpszLongPath, cchBuffer):
#    return GetLongPathNameA.ctypes_function(lpszShortPath, lpszLongPath, cchBuffer)
GetLongPathNameAPrototype = WINFUNCTYPE(DWORD, LPCSTR, LPCSTR, DWORD)
GetLongPathNameAParams = ((1, 'lpszShortPath'), (1, 'lpszLongPath'), (1, 'cchBuffer'))
#def GetLongPathNameW(lpszShortPath, lpszLongPath, cchBuffer):
#    return GetLongPathNameW.ctypes_function(lpszShortPath, lpszLongPath, cchBuffer)
GetLongPathNameWPrototype = WINFUNCTYPE(DWORD, LPWSTR, LPWSTR, DWORD)
GetLongPathNameWParams = ((1, 'lpszShortPath'), (1, 'lpszLongPath'), (1, 'cchBuffer'))
#def GetProcessDEPPolicy(hProcess, lpFlags, lpPermanent):
#    return GetProcessDEPPolicy.ctypes_function(hProcess, lpFlags, lpPermanent)
GetProcessDEPPolicyPrototype = WINFUNCTYPE(BOOL, HANDLE, LPDWORD, PBOOL)
GetProcessDEPPolicyParams = ((1, 'hProcess'), (1, 'lpFlags'), (1, 'lpPermanent'))
# --- Generated ctypes prototypes: security descriptors, SID <-> string conversion, LocalFree ---
#def GetNamedSecurityInfoA(pObjectName, ObjectType, SecurityInfo, ppsidOwner, ppsidGroup, ppDacl, ppSacl, ppSecurityDescriptor):
#    return GetNamedSecurityInfoA.ctypes_function(pObjectName, ObjectType, SecurityInfo, ppsidOwner, ppsidGroup, ppDacl, ppSacl, ppSecurityDescriptor)
GetNamedSecurityInfoAPrototype = WINFUNCTYPE(DWORD, LPCSTR, SE_OBJECT_TYPE, SECURITY_INFORMATION, POINTER(PSID), POINTER(PSID), POINTER(PACL), POINTER(PACL), POINTER(PSECURITY_DESCRIPTOR))
GetNamedSecurityInfoAParams = ((1, 'pObjectName'), (1, 'ObjectType'), (1, 'SecurityInfo'), (1, 'ppsidOwner'), (1, 'ppsidGroup'), (1, 'ppDacl'), (1, 'ppSacl'), (1, 'ppSecurityDescriptor'))
#def GetNamedSecurityInfoW(pObjectName, ObjectType, SecurityInfo, ppsidOwner, ppsidGroup, ppDacl, ppSacl, ppSecurityDescriptor):
#    return GetNamedSecurityInfoW.ctypes_function(pObjectName, ObjectType, SecurityInfo, ppsidOwner, ppsidGroup, ppDacl, ppSacl, ppSecurityDescriptor)
GetNamedSecurityInfoWPrototype = WINFUNCTYPE(DWORD, LPWSTR, SE_OBJECT_TYPE, SECURITY_INFORMATION, POINTER(PSID), POINTER(PSID), POINTER(PACL), POINTER(PACL), POINTER(PSECURITY_DESCRIPTOR))
GetNamedSecurityInfoWParams = ((1, 'pObjectName'), (1, 'ObjectType'), (1, 'SecurityInfo'), (1, 'ppsidOwner'), (1, 'ppsidGroup'), (1, 'ppDacl'), (1, 'ppSacl'), (1, 'ppSecurityDescriptor'))
#def GetSecurityInfo(handle, ObjectType, SecurityInfo, ppsidOwner, ppsidGroup, ppDacl, ppSacl, ppSecurityDescriptor):
#    return GetSecurityInfo.ctypes_function(handle, ObjectType, SecurityInfo, ppsidOwner, ppsidGroup, ppDacl, ppSacl, ppSecurityDescriptor)
GetSecurityInfoPrototype = WINFUNCTYPE(DWORD, HANDLE, SE_OBJECT_TYPE, SECURITY_INFORMATION, POINTER(PSID), POINTER(PSID), POINTER(PACL), POINTER(PACL), POINTER(PSECURITY_DESCRIPTOR))
GetSecurityInfoParams = ((1, 'handle'), (1, 'ObjectType'), (1, 'SecurityInfo'), (1, 'ppsidOwner'), (1, 'ppsidGroup'), (1, 'ppDacl'), (1, 'ppSacl'), (1, 'ppSecurityDescriptor'))
#def ConvertStringSidToSidA(StringSid, Sid):
#    return ConvertStringSidToSidA.ctypes_function(StringSid, Sid)
ConvertStringSidToSidAPrototype = WINFUNCTYPE(BOOL, LPCSTR, POINTER(PSID))
ConvertStringSidToSidAParams = ((1, 'StringSid'), (1, 'Sid'))
#def ConvertStringSidToSidW(StringSid, Sid):
#    return ConvertStringSidToSidW.ctypes_function(StringSid, Sid)
ConvertStringSidToSidWPrototype = WINFUNCTYPE(BOOL, LPWSTR, POINTER(PSID))
ConvertStringSidToSidWParams = ((1, 'StringSid'), (1, 'Sid'))
#def ConvertSidToStringSidA(Sid, StringSid):
#    return ConvertSidToStringSidA.ctypes_function(Sid, StringSid)
ConvertSidToStringSidAPrototype = WINFUNCTYPE(BOOL, PSID, POINTER(LPCSTR))
ConvertSidToStringSidAParams = ((1, 'Sid'), (1, 'StringSid'))
#def ConvertSidToStringSidW(Sid, StringSid):
#    return ConvertSidToStringSidW.ctypes_function(Sid, StringSid)
ConvertSidToStringSidWPrototype = WINFUNCTYPE(BOOL, PSID, POINTER(LPWSTR))
ConvertSidToStringSidWParams = ((1, 'Sid'), (1, 'StringSid'))
# LocalFree is declared here because ConvertSidToStringSid* return buffers the
# caller must release with it.
#def LocalFree(hMem):
#    return LocalFree.ctypes_function(hMem)
LocalFreePrototype = WINFUNCTYPE(HLOCAL, HLOCAL)
LocalFreeParams = ((1, 'hMem'),)
# --- Generated ctypes prototypes: registry query, ShellExecute, proc-thread attribute lists ---
#def RegQueryValueExA(hKey, lpValueName, lpReserved, lpType, lpData, lpcbData):
#    return RegQueryValueExA.ctypes_function(hKey, lpValueName, lpReserved, lpType, lpData, lpcbData)
RegQueryValueExAPrototype = WINFUNCTYPE(LONG, HKEY, LPCSTR, LPDWORD, LPDWORD, LPBYTE, LPDWORD)
RegQueryValueExAParams = ((1, 'hKey'), (1, 'lpValueName'), (1, 'lpReserved'), (1, 'lpType'), (1, 'lpData'), (1, 'lpcbData'))
#def RegQueryValueExW(hKey, lpValueName, lpReserved, lpType, lpData, lpcbData):
#    return RegQueryValueExW.ctypes_function(hKey, lpValueName, lpReserved, lpType, lpData, lpcbData)
RegQueryValueExWPrototype = WINFUNCTYPE(LONG, HKEY, LPWSTR, LPDWORD, LPDWORD, LPBYTE, LPDWORD)
RegQueryValueExWParams = ((1, 'hKey'), (1, 'lpValueName'), (1, 'lpReserved'), (1, 'lpType'), (1, 'lpData'), (1, 'lpcbData'))
#def ShellExecuteA(hwnd, lpOperation, lpFile, lpParameters, lpDirectory, nShowCmd):
#    return ShellExecuteA.ctypes_function(hwnd, lpOperation, lpFile, lpParameters, lpDirectory, nShowCmd)
ShellExecuteAPrototype = WINFUNCTYPE(HINSTANCE, HWND, LPCSTR, LPCSTR, LPCSTR, LPCSTR, INT)
ShellExecuteAParams = ((1, 'hwnd'), (1, 'lpOperation'), (1, 'lpFile'), (1, 'lpParameters'), (1, 'lpDirectory'), (1, 'nShowCmd'))
#def ShellExecuteW(hwnd, lpOperation, lpFile, lpParameters, lpDirectory, nShowCmd):
#    return ShellExecuteW.ctypes_function(hwnd, lpOperation, lpFile, lpParameters, lpDirectory, nShowCmd)
ShellExecuteWPrototype = WINFUNCTYPE(HINSTANCE, HWND, LPWSTR, LPWSTR, LPWSTR, LPWSTR, INT)
ShellExecuteWParams = ((1, 'hwnd'), (1, 'lpOperation'), (1, 'lpFile'), (1, 'lpParameters'), (1, 'lpDirectory'), (1, 'nShowCmd'))
#def InitializeProcThreadAttributeList(lpAttributeList, dwAttributeCount, dwFlags, lpSize):
#    return InitializeProcThreadAttributeList.ctypes_function(lpAttributeList, dwAttributeCount, dwFlags, lpSize)
InitializeProcThreadAttributeListPrototype = WINFUNCTYPE(BOOL, LPPROC_THREAD_ATTRIBUTE_LIST, DWORD, DWORD, PSIZE_T)
InitializeProcThreadAttributeListParams = ((1, 'lpAttributeList'), (1, 'dwAttributeCount'), (1, 'dwFlags'), (1, 'lpSize'))
#def UpdateProcThreadAttribute(lpAttributeList, dwFlags, Attribute, lpValue, cbSize, lpPreviousValue, lpReturnSize):
#    return UpdateProcThreadAttribute.ctypes_function(lpAttributeList, dwFlags, Attribute, lpValue, cbSize, lpPreviousValue, lpReturnSize)
UpdateProcThreadAttributePrototype = WINFUNCTYPE(BOOL, LPPROC_THREAD_ATTRIBUTE_LIST, DWORD, DWORD_PTR, PVOID, SIZE_T, PVOID, PSIZE_T)
UpdateProcThreadAttributeParams = ((1, 'lpAttributeList'), (1, 'dwFlags'), (1, 'Attribute'), (1, 'lpValue'), (1, 'cbSize'), (1, 'lpPreviousValue'), (1, 'lpReturnSize'))
#def DeleteProcThreadAttributeList(lpAttributeList):
#    return DeleteProcThreadAttributeList.ctypes_function(lpAttributeList)
DeleteProcThreadAttributeListPrototype = WINFUNCTYPE(VOID, LPPROC_THREAD_ATTRIBUTE_LIST)
DeleteProcThreadAttributeListParams = ((1, 'lpAttributeList'),)
# --- Generated ctypes prototypes: MessageBox, Windows directory, ntdll path/unload-trace helpers ---
#def MessageBoxA(hWnd, lpText, lpCaption, uType):
#    return MessageBoxA.ctypes_function(hWnd, lpText, lpCaption, uType)
MessageBoxAPrototype = WINFUNCTYPE(INT, HWND, LPCSTR, LPCSTR, UINT)
MessageBoxAParams = ((1, 'hWnd'), (1, 'lpText'), (1, 'lpCaption'), (1, 'uType'))
#def MessageBoxW(hWnd, lpText, lpCaption, uType):
#    return MessageBoxW.ctypes_function(hWnd, lpText, lpCaption, uType)
MessageBoxWPrototype = WINFUNCTYPE(INT, HWND, LPWSTR, LPWSTR, UINT)
MessageBoxWParams = ((1, 'hWnd'), (1, 'lpText'), (1, 'lpCaption'), (1, 'uType'))
#def GetWindowsDirectoryA(lpBuffer, uSize):
#    return GetWindowsDirectoryA.ctypes_function(lpBuffer, uSize)
GetWindowsDirectoryAPrototype = WINFUNCTYPE(UINT, LPCSTR, UINT)
GetWindowsDirectoryAParams = ((1, 'lpBuffer'), (1, 'uSize'))
#def GetWindowsDirectoryW(lpBuffer, uSize):
#    return GetWindowsDirectoryW.ctypes_function(lpBuffer, uSize)
GetWindowsDirectoryWPrototype = WINFUNCTYPE(UINT, LPWSTR, UINT)
GetWindowsDirectoryWParams = ((1, 'lpBuffer'), (1, 'uSize'))
#def RtlGetUnloadEventTraceEx(ElementSize, ElementCount, EventTrace):
#    return RtlGetUnloadEventTraceEx.ctypes_function(ElementSize, ElementCount, EventTrace)
RtlGetUnloadEventTraceExPrototype = WINFUNCTYPE(VOID, POINTER(PULONG), POINTER(PULONG), POINTER(PVOID))
RtlGetUnloadEventTraceExParams = ((1, 'ElementSize'), (1, 'ElementCount'), (1, 'EventTrace'))
#def NtQueryInformationFile(FileHandle, IoStatusBlock, FileInformation, Length, FileInformationClass):
#    return NtQueryInformationFile.ctypes_function(FileHandle, IoStatusBlock, FileInformation, Length, FileInformationClass)
NtQueryInformationFilePrototype = WINFUNCTYPE(NTSTATUS, HANDLE, PIO_STATUS_BLOCK, PVOID, ULONG, FILE_INFORMATION_CLASS)
NtQueryInformationFileParams = ((1, 'FileHandle'), (1, 'IoStatusBlock'), (1, 'FileInformation'), (1, 'Length'), (1, 'FileInformationClass'))
#def NtQueryDirectoryFile(FileHandle, Event, ApcRoutine, ApcContext, IoStatusBlock, FileInformation, Length, FileInformationClass, ReturnSingleEntry, FileName, RestartScan):
#    return NtQueryDirectoryFile.ctypes_function(FileHandle, Event, ApcRoutine, ApcContext, IoStatusBlock, FileInformation, Length, FileInformationClass, ReturnSingleEntry, FileName, RestartScan)
NtQueryDirectoryFilePrototype = WINFUNCTYPE(NTSTATUS, HANDLE, HANDLE, PIO_APC_ROUTINE, PVOID, PIO_STATUS_BLOCK, PVOID, ULONG, FILE_INFORMATION_CLASS, BOOLEAN, PUNICODE_STRING, BOOLEAN)
NtQueryDirectoryFileParams = ((1, 'FileHandle'), (1, 'Event'), (1, 'ApcRoutine'), (1, 'ApcContext'), (1, 'IoStatusBlock'), (1, 'FileInformation'), (1, 'Length'), (1, 'FileInformationClass'), (1, 'ReturnSingleEntry'), (1, 'FileName'), (1, 'RestartScan'))
#def RtlDosPathNameToNtPathName_U(DosName, NtName, PartName, RelativeName):
#    return RtlDosPathNameToNtPathName_U.ctypes_function(DosName, NtName, PartName, RelativeName)
RtlDosPathNameToNtPathName_UPrototype = WINFUNCTYPE(BOOLEAN, PCWSTR, PUNICODE_STRING, POINTER(PCWSTR), PRTL_RELATIVE_NAME_U)
RtlDosPathNameToNtPathName_UParams = ((1, 'DosName'), (1, 'NtName'), (1, 'PartName'), (1, 'RelativeName'))
#def ApiSetResolveToHost(Schema, FileNameIn, ParentName, Resolved, HostBinary):
#    return ApiSetResolveToHost.ctypes_function(Schema, FileNameIn, ParentName, Resolved, HostBinary)
ApiSetResolveToHostPrototype = WINFUNCTYPE(NTSTATUS, PVOID, PUNICODE_STRING, PUNICODE_STRING, PBOOLEAN, PUNICODE_STRING)
ApiSetResolveToHostParams = ((1, 'Schema'), (1, 'FileNameIn'), (1, 'ParentName'), (1, 'Resolved'), (1, 'HostBinary'))
# --- Generated ctypes prototypes: Sleep, mitigation policy, product info, NtSetInformationFile ---
#def Sleep(dwMilliseconds):
#    return Sleep.ctypes_function(dwMilliseconds)
SleepPrototype = WINFUNCTYPE(VOID, DWORD)
SleepParams = ((1, 'dwMilliseconds'),)
#def SleepEx(dwMilliseconds, bAlertable):
#    return SleepEx.ctypes_function(dwMilliseconds, bAlertable)
SleepExPrototype = WINFUNCTYPE(DWORD, DWORD, BOOL)
SleepExParams = ((1, 'dwMilliseconds'), (1, 'bAlertable'))
#def GetProcessMitigationPolicy(hProcess, MitigationPolicy, lpBuffer, dwLength):
#    return GetProcessMitigationPolicy.ctypes_function(hProcess, MitigationPolicy, lpBuffer, dwLength)
GetProcessMitigationPolicyPrototype = WINFUNCTYPE(BOOL, HANDLE, PROCESS_MITIGATION_POLICY, PVOID, SIZE_T)
GetProcessMitigationPolicyParams = ((1, 'hProcess'), (1, 'MitigationPolicy'), (1, 'lpBuffer'), (1, 'dwLength'))
#def SetProcessMitigationPolicy(MitigationPolicy, lpBuffer, dwLength):
#    return SetProcessMitigationPolicy.ctypes_function(MitigationPolicy, lpBuffer, dwLength)
SetProcessMitigationPolicyPrototype = WINFUNCTYPE(BOOL, PROCESS_MITIGATION_POLICY, PVOID, SIZE_T)
SetProcessMitigationPolicyParams = ((1, 'MitigationPolicy'), (1, 'lpBuffer'), (1, 'dwLength'))
#def GetProductInfo(dwOSMajorVersion, dwOSMinorVersion, dwSpMajorVersion, dwSpMinorVersion, pdwReturnedProductType):
#    return GetProductInfo.ctypes_function(dwOSMajorVersion, dwOSMinorVersion, dwSpMajorVersion, dwSpMinorVersion, pdwReturnedProductType)
GetProductInfoPrototype = WINFUNCTYPE(BOOL, DWORD, DWORD, DWORD, DWORD, PDWORD)
GetProductInfoParams = ((1, 'dwOSMajorVersion'), (1, 'dwOSMinorVersion'), (1, 'dwSpMajorVersion'), (1, 'dwSpMinorVersion'), (1, 'pdwReturnedProductType'))
#def NtSetInformationFile(FileHandle, IoStatusBlock, FileInformation, Length, FileInformationClass):
#    return NtSetInformationFile.ctypes_function(FileHandle, IoStatusBlock, FileInformation, Length, FileInformationClass)
NtSetInformationFilePrototype = WINFUNCTYPE(NTSTATUS, HANDLE, PIO_STATUS_BLOCK, PVOID, ULONG, FILE_INFORMATION_CLASS)
NtSetInformationFileParams = ((1, 'FileHandle'), (1, 'IoStatusBlock'), (1, 'FileInformation'), (1, 'Length'), (1, 'FileInformationClass'))
|
marpie/PythonForWindows | windows/winobject/wmi.py | <reponame>marpie/PythonForWindows
import windows
import ctypes
import struct
import functools
from ctypes.wintypes import *
import windows.com
from windows.generated_def.winstructs import *
from windows.generated_def.interfaces import IWbemLocator, IWbemServices, IEnumWbemClassObject, IWbemClassObject
# import windows.generated_def as gdef
class WmiRequester(object):
    r"""An object to perform wmi request to ``root\cimv2``"""
    # Shared singleton slot; None until a requester is cached (set elsewhere —
    # not assigned in this chunk. TODO confirm against the rest of the module).
    INSTANCE = None
def __init__(self, target="root\\cimv2", user=None, password=None):
    """Connect to the WMI namespace ``target`` (optionally as ``user``/``password``)
    and keep the resulting ``IWbemServices`` proxy in ``self.service``.
    """
    self.namespace = target
    wbem_locator = IWbemLocator()
    wbem_service = IWbemServices()
    #CLSID_WbemAdministrativeLocator_IID = windows.com.IID.from_string('CB8555CC-9128-11D1-AD9B-00C04FD8FDFF')
    locator_clsid = windows.com.IID.from_string('4590F811-1D3A-11D0-891F-00AA004B2E24')
    windows.com.init()
    windows.com.create_instance(locator_clsid, wbem_locator)
    # 0x80 == WBEM_FLAG_CONNECT_USE_MAX_WAIT
    wbem_locator.ConnectServer(target, user, password , None, 0x80, None, None, ctypes.byref(wbem_service))
    self.service = wbem_service
def select(self, frm, attrs="*", **kwargs):
"""Select ``attrs`` from ``frm``
:rtype: list of dict
"""
return list(self.gen_select(frm, attrs, **kwargs))
def gen_select(self, frm, attrs="*", **kwargs):
"""Select ``attrs`` from ``frm`` in a generator (like :func:`gen_query`)
:rtype: generator
"""
return self.gen_query("select * from {0}".format(frm), attrs, **kwargs)
@property
def classes(self):
"""The list of class available
:rtype: list of str
"""
return [x["__CLASS"] for x in self.query('SELECT * FROM meta_class', attrs=["__CLASS"])]
def query(self, query, attrs="*", timeout=WBEM_INFINITE):
"""Execute WMI ``query`` and return the attributes ``attrs``
Timeout is not applied for the full query time but the time to retrieve one object each time.
:rtype: list of dict
"""
return list(self.gen_query(query, attrs, timeout))
def gen_query(self, query, attrs="*", timeout=WBEM_INFINITE):
"""Execute WMI ``query`` and return a generator that will yield the ``attrs`` for one object each time.
Each iteration is susceptible to raise.
:rtype: generator
"""
enumerator = self._exec_query(query, WBEM_FLAG_FORWARD_ONLY | WBEM_FLAG_RETURN_IMMEDIATELY)
try:
for obj, retval in self._enumerator_values_generator(enumerator, timeout=timeout):
if obj is None:
raise WindowsError(retval, WBEMSTATUS(retval & 0xffffffff).value)
yield self._iwbemclassobject_to_dict(obj, attrs)
finally:
enumerator.Release()
def _exec_query(self, query, flags, ctx=None):
enumerator = IEnumWbemClassObject()
try:
self.service.ExecQuery("WQL", query, flags, ctx, ctypes.byref(enumerator))
except WindowsError as e:
if (e.winerror & 0xffffffff) == WBEM_E_INVALID_CLASS:
raise WindowsError(e.winerror, 'WBEM_E_INVALID_CLASS <Invalid WMI class "{0}">'.format(query))
elif (e.winerror & 0xffffffff) in WBEMSTATUS.values:
raise WindowsError(e.winerror, WBEMSTATUS(e.winerror & 0xffffffff).value)
raise
return enumerator
def _enumerator_values_generator(self, enumerator, timeout=WBEM_INFINITE):
count = ULONG(0)
processor = IWbemClassObject()
result = 0
while result != WBEM_S_FALSE:
try:
result = enumerator.Next(timeout, 1, ctypes.byref(processor), ctypes.byref(count))
except WindowsError as e:
if (e.winerror & 0xffffffff) == WBEM_E_INVALID_CLASS:
raise WindowsError(e.winerror, 'WBEM_E_INVALID_CLASS <Invalid WMI class>')
if (e.winerror & 0xffffffff) in WBEMSTATUS.values:
raise WindowsError(e.winerror, WBEMSTATUS(e.winerror & 0xffffffff).value)
raise
procres = processor if count else None
if result != WBEM_S_FALSE:
yield procres, result
def _iwbemclassobject_to_dict(self, wbemclassobj, attrs):
if attrs == "*":
attrs = [x for x in self.get_names(wbemclassobj) if not x.startswith("__")]
obj_as_dict = {}
variant_res = windows.com.ImprovedVariant()
for name in attrs:
try:
wbemclassobj.Get(name, 0, ctypes.byref(variant_res), None, None)
except WindowsError as e:
if (e.winerror & 0xffffffff) == WBEM_E_NOT_FOUND:
raise WindowsError(e.winerror, 'WBEM_E_NOT_FOUND <Invalid Attribute "{0}">'.format(name))
if (e.winerror & 0xffffffff) in WBEMSTATUS.values:
raise WindowsError(e.winerror, WBEMSTATUS(e.winerror & 0xffffffff).value)
raise
try:
obj_as_dict[name] = variant_res.to_pyobject()
except NotImplementedError as e:
print("[WMI-ERROR] Field <{0}> ignored: {1}".format(name, e))
return obj_as_dict
def get_names(self, processor):
res = POINTER(SAFEARRAY)()
processor.GetNames(None, 0, None, byref(res))
safe_array = ctypes.cast(res, POINTER(windows.com.ImprovedSAFEARRAY))[0]
safe_array.elt_type = BSTR
return safe_array.to_list()
def __repr__(self):
return """<{0} namespace="{1}">""".format(type(self).__name__, self.namespace)
class WmiManager(dict):
    """Entry point for WMI access. Behaves as a dict mapping a namespace name
    to its :class:`WmiRequester`; requesters are created lazily on first
    access.
    Example:
    >>> windows.system.wmi["root\\SecurityCenter2"]
    <WmiRequester namespace="root\\SecurityCenter2">
    """
    # Namespace used by the :func:`select` / :func:`query` shortcuts.
    DEFAULT_NAMESPACE = "root\\cimv2"

    def __init__(self):
        self.wmi_requester_by_namespace = {}

    @property
    def select(self):
        r""":func:`WmiRequester.select` bound to the default WMI namespace 'root\\cimv2'"""
        return self[self.DEFAULT_NAMESPACE].select

    @property
    def query(self):
        r""":func:`WmiRequester.query` bound to the default WMI namespace 'root\\cimv2'"""
        return self[self.DEFAULT_NAMESPACE].query

    def get_subnamespaces(self, root="root"):
        """Names of the WMI namespaces directly under ``root``."""
        return [ns["Name"] for ns in self[root].select("__NameSpace", ["Name"])]

    namespaces = property(get_subnamespaces)
    """The list of available WMI namespaces"""

    def _open_wmi_requester(self, namespace):
        # Factory for per-namespace requesters.
        return WmiRequester(namespace)

    def __missing__(self, key):
        # dict hook: lazily open and cache a requester for an unknown namespace.
        requester = self._open_wmi_requester(key)
        self[key] = requester
        return requester

    def __repr__(self):
        # Bypass dict.__repr__: dumping every cached requester is not useful.
        return object.__repr__(self)
marpie/PythonForWindows | windows/utils/__init__.py | <filename>windows/utils/__init__.py
# Re-export every helper from the sibling modules (Python 2 implicit
# relative imports) so callers can simply use ``windows.utils.<helper>``.
from pythonutils import *
from winutils import *
|
marpie/PythonForWindows | windows/winobject/service.py | <gh_stars>1-10
import ctypes
import windows
from collections import namedtuple
from contextlib import contextmanager
from windows import utils
import windows.generated_def as gdef
from windows.generated_def import *
SERVICE_TYPE = gdef.FlagMapper(SERVICE_KERNEL_DRIVER, SERVICE_FILE_SYSTEM_DRIVER, SERVICE_WIN32_OWN_PROCESS, SERVICE_WIN32_SHARE_PROCESS, SERVICE_INTERACTIVE_PROCESS)
SERVICE_STATE = gdef.FlagMapper(SERVICE_STOPPED, SERVICE_START_PENDING, SERVICE_STOP_PENDING, SERVICE_RUNNING, SERVICE_CONTINUE_PENDING, SERVICE_PAUSE_PENDING, SERVICE_PAUSED)
SERVICE_CONTROLE_ACCEPTED = gdef.FlagMapper()
SERVICE_FLAGS = gdef.FlagMapper(SERVICE_RUNS_IN_SYSTEM_PROCESS)
ServiceStatus = namedtuple("ServiceStatus", ["type", "state", "control_accepted", "flags"])
"""
``type`` might be one of:
* ``SERVICE_KERNEL_DRIVER(0x1L)``
* ``SERVICE_FILE_SYSTEM_DRIVER(0x2L)``
* ``SERVICE_WIN32_OWN_PROCESS(0x10L)``
* ``SERVICE_WIN32_SHARE_PROCESS(0x20L)``
* ``SERVICE_INTERACTIVE_PROCESS(0x100L)``
``state`` might be one of:
* ``SERVICE_STOPPED(0x1L)``
* ``SERVICE_START_PENDING(0x2L)``
* ``SERVICE_STOP_PENDING(0x3L)``
* ``SERVICE_RUNNING(0x4L)``
* ``SERVICE_CONTINUE_PENDING(0x5L)``
* ``SERVICE_PAUSE_PENDING(0x6L)``
* ``SERVICE_PAUSED(0x7L)``
``flags`` might be one of:
* ``0``
* ``SERVICE_RUNS_IN_SYSTEM_PROCESS(0x1L)``
"""
class Service(object):
    """Base object describing a single Windows service entry."""
    # Handle on the service; never opened by this class itself.
    handle = None

    def __repr__(self):
        return '<{0} "{1}" {2}>'.format(type(self).__name__, self.name, self.status.state)

    @utils.fixedpropety
    def name(self):
        """The name of the service
        :type: :class:`str`
        """
        return self.lpServiceName

    @utils.fixedpropety
    def description(self):
        """The description of the service
        :type: :class:`str`
        """
        return self.lpDisplayName

    @utils.fixedpropety
    def status(self):
        """The status of the service
        :type: :class:`ServiceStatus`
        """
        raw_status = self.ServiceStatusProcess
        return ServiceStatus(
            SERVICE_TYPE[raw_status.dwServiceType],
            SERVICE_STATE[raw_status.dwCurrentState],
            raw_status.dwControlsAccepted,
            SERVICE_FLAGS[raw_status.dwServiceFlags])

    @utils.fixedpropety
    def process(self):
        """The process running the service (if any)
        :type: :class:`WinProcess <windows.winobject.process.WinProcess>` or ``None``
        """
        pid = self.ServiceStatusProcess.dwProcessId
        return windows.WinProcess(pid=pid) if pid else None
class ServiceA(Service, ENUM_SERVICE_STATUS_PROCESSA):
    """A Service object with ascii data"""
    def start(self, args=None):
        """Start the service (without arguments).

        :param args: starting the service with arguments is not implemented;
            must be ``None``.
        :raises NotImplementedError: if ``args`` is not ``None``.
        """
        if args is not None:
            raise NotImplementedError("Start service with args != None")
        with scmanagera(SC_MANAGER_CONNECT) as scm:
            servh = windows.winproxy.OpenServiceA(scm, self.name, SERVICE_START)
            try:
                windows.winproxy.StartServiceA(servh, 0, None)
            finally:
                # Always release the service handle, even when StartServiceA
                # raises (the original code leaked it on failure).
                windows.winproxy.CloseServiceHandle(servh)
@contextmanager
def scmanagera(access):
    """Context manager yielding an ANSI Service Control Manager handle opened
    with ``access``; the handle is closed on exit whatever happens."""
    handle = windows.winproxy.OpenSCManagerA(dwDesiredAccess=access)
    try:
        yield handle
    finally:
        windows.winproxy.CloseServiceHandle(handle)
def enumerate_services():
    """Enumerate every service (all types, all states) registered in the SCM.

    :returns: list of :class:`ServiceA`
    """
    with scmanagera(SC_MANAGER_ENUMERATE_SERVICE) as scm:
        size_needed = DWORD()
        nb_services = DWORD()
        counter = DWORD()
        # First call with an empty buffer just to retrieve the required size.
        try:
            windows.winproxy.EnumServicesStatusExA(scm, SC_ENUM_PROCESS_INFO, SERVICE_TYPE_ALL, SERVICE_STATE_ALL, None, 0, ctypes.byref(size_needed), ctypes.byref(nb_services), ctypes.byref(counter), None)
        except WindowsError:
            pass  # expected: the call fails but fills in size_needed
        while True:
            size = size_needed.value
            buf = (BYTE * size)()
            try:
                windows.winproxy.EnumServicesStatusExA(scm, SC_ENUM_PROCESS_INFO, SERVICE_TYPE_ALL, SERVICE_STATE_ALL, buf, size, ctypes.byref(size_needed), ctypes.byref(nb_services), ctypes.byref(counter), None)
            except WindowsError as e:
                # Retry only when the buffer turned out too small (a service
                # may have appeared between the two calls). The original code
                # retried on ANY error, which could loop forever.
                if (e.winerror & 0xffffffff) == gdef.ERROR_MORE_DATA:
                    continue
                raise
            return_type = (ServiceA * nb_services.value)
            return list(return_type.from_buffer(buf))
marpie/PythonForWindows | samples/com/icallinterceptor.py | import windows
import windows.generated_def as gdef
from windows import winproxy
# POC of ICallInterceptor
# Based on works by <NAME>
# http://blogs.microsoft.co.il/pavely/2018/02/28/intercepting-com-objects-with-cogetinterceptor/
windows.com.init()
# Create an interceptor for the firewall (INetFwPolicy2)
interceptor = gdef.ICallInterceptor()
# CoGetInterceptor builds an object implementing ICallInterceptor; it is later
# QueryInterface'd to INetFwPolicy2 so calls on it can be intercepted.
winproxy.CoGetInterceptor(gdef.INetFwPolicy2.IID, None, interceptor.IID, interceptor)
# The PythonForWindows firewall object is a real/valid INetFwPolicy2
# used for demos of ICallFrameEvents.Invoke
real_firewall = windows.system.network.firewall
# Custom Python ICallFrameEvents implementation
class MySink(windows.com.COMImplementation):
    """Python implementation of ICallFrameEvents used as interceptor sink."""
    # The COM interface this python object implements.
    IMPLEMENT = gdef.ICallFrameEvents
    def OnCall(self, this, frame):
        # Invoked by the interceptor for every call made on the intercepted
        # interface; ``frame`` (ICallFrame) describes the in-flight call.
        ifname = gdef.PWSTR()
        methodname = gdef.PWSTR()
        print("Hello from python sink !")
        frame.GetNames(ifname, methodname)
        print("Catching call to <{0}.{1}>".format(ifname.value, methodname.value))
        param0info = gdef.CALLFRAMEPARAMINFO()
        param0 = windows.com.ImprovedVariant()
        frame.GetParamInfo(0, param0info)
        frame.GetParam(0, param0)
        print("Info about parameters 0:")
        windows.utils.sprint(param0info, name=" * param0info")
        print("param0 value = {0}".format(param0.aslong))
        # Forward the intercepted call to the real firewall object...
        frame.Invoke(real_firewall)
        # ...then override the HRESULT the original caller will see.
        frame.SetReturnValue(1234)
        print("Leaving the sink !")
        # OnCall itself must return an HRESULT: 0 == S_OK.
        return 0
# Create and register our ICallFrameEvents sink
xsink = MySink()
interceptor.RegisterSink(xsink)
# Create the INetFwPolicy2 interceptor interface
fakefirewall = gdef.INetFwPolicy2()
interceptor.QueryInterface(fakefirewall.IID, fakefirewall)
# Calling one of the INetFwPolicy2 function for testing
# Testing on https://msdn.microsoft.com/en-us/library/windows/desktop/aa365316(v=vs.85).aspx
enabled = gdef.VARIANT_BOOL()
# This call goes through the interceptor and is routed to MySink.OnCall.
res = fakefirewall.get_FirewallEnabled(2, enabled)
print("return value = {0}".format(res))
print("firewall enabled = {0}".format(enabled))
# Test a function taking a POINTER(ICallFrameEvents) (PTR to interface)
print("Testing a function taking a PTR to a COM interface")
sink2 = gdef.ICallFrameEvents()
print("Before call: {0}".format((sink2, sink2.value)))
interceptor.GetRegisteredSink(sink2)
print("After call: {0}".format((sink2, sink2.value)))
# (cmd) python samples\com\icallinterceptor.py
# Hello from python sink !
# Catching call to <INetFwPolicy2.FirewallEnabled>
# Info about parameters 0:
# * param0info.fIn -> 0x1
# * param0info.fOut -> 0x0
# * param0info.stackOffset -> 0x4L
# * param0info.cbParam -> 0x4L
# param0 value = 2
# Leaving the sink !
# return value = 1234
# firewall enabled = VARIANT_BOOL(True)
# Testing a function taking a PTR to a COM interface
# Before call: (<ICallFrameEvents object at 0x066EF3F0>, None)
# After call: (<ICallFrameEvents object at 0x066EF3F0>, 107934504) |
marpie/PythonForWindows | tests/test_winproxy.py | <reponame>marpie/PythonForWindows<gh_stars>1-10
import pytest
import windows
import windows.generated_def as gdef
from pfwtest import *
pytestmark = pytest.mark.usefixtures('check_for_gc_garbage')
def test_createfileA_fail():
    """CreateFileA on a non-existing file must raise a WindowsError."""
    # The `as ar` binding was unused; pytest.raises alone is enough.
    with pytest.raises(WindowsError):
        windows.winproxy.CreateFileA("NONEXISTFILE.FILE")
import sys
import os.path
# Make the repository root importable when running from the docs/ directory.
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "\\..")
print(sys.path[-1])
import pydoc
import re
import windows
winprox = windows.winproxy
# Split winproxy's attributes into transparent proxies vs real wrappers.
all_in_module = [getattr(winprox, x) for x in dir(winprox)]
transp = [f for f in all_in_module if isinstance(f, winprox.TransparentApiProxy)]
functions = [f for f in all_in_module if hasattr(f, "prototype") and f not in transp]
print ("Transparent proxies:")
print("")
for f in transp:
    print("* {0}({1})".format(f.target_func, ", ".join([x[1] for x in f.params])))
print ("Functions:")
print("")
for f in functions:
    doc = pydoc.text.document(f)
    # \x08 is backspace: strip pydoc's bold/underline overstrike sequences.
    doc = re.sub("\x08." , "", doc)
    print("* {0}::\n\n {1}".format(f.func_name, doc))
#print("* {0}::\n\n {1}".format(f.func_name, pydoc.plain(pydoc.render_doc(f))))
marpie/PythonForWindows | samples/rpc/lsass.py | <filename>samples/rpc/lsass.py
import windows.rpc
from windows.rpc import ndr
# Ndr stuff
class NdrContext(ndr.NdrStructure):
    # RPC server context handle, marshalled as 5 32-bit values (20 bytes).
    MEMBERS = [ndr.NdrLong, ndr.NdrLong, ndr.NdrLong, ndr.NdrLong, ndr.NdrLong]
class PLSAPR_OBJECT_ATTRIBUTES(ndr.NdrStructure):
    # NDR layout of LSAPR_OBJECT_ATTRIBUTES; only the wire layout matters
    # here since every pointer field is passed as None by the caller below.
    MEMBERS = [ndr.NdrLong,
        ndr.NdrUniquePTR(ndr.NdrWString),
        ndr.NdrUniquePTR(ndr.NdrLong), # We dont care if the subtype as we will pass None
        ndr.NdrLong,
        ndr.NdrUniquePTR(ndr.NdrLong), # We dont care if the subtype as we will pass None
        ndr.NdrUniquePTR(ndr.NdrLong)] # We dont care if the subtype as we will pass None
class LsarOpenPolicy2Parameter(ndr.NdrParameters):
    # LsarOpenPolicy2 parameters (per MS-LSAD): SystemName,
    # ObjectAttributes, DesiredAccess.
    MEMBERS = [ndr.NdrUniquePTR(ndr.NdrWString),
        PLSAPR_OBJECT_ATTRIBUTES,
        ndr.NdrLong]
class LsarEnumeratePrivilegesParameter(ndr.NdrParameters):
    # LsarEnumeratePrivileges parameters (per MS-LSAD): PolicyHandle,
    # EnumerationContext, PreferedMaximumLength.
    MEMBERS = [NdrContext,
        ndr.NdrLong,
        ndr.NdrLong]
class LSAPR_POLICY_PRIVILEGE_DEF(object):
    @classmethod
    def unpack(cls, stream):
        """Consume one privilege entry from ``stream``; return ``(ptr, luid)``.
        Every field must be read in wire order even when its value is unused.
        """
        size1 = ndr.NdrShort.unpack(stream)  # name string length field (unused)
        ptr = ndr.NdrShort.unpack(stream)    # non-zero -> a deferred name string follows later
        size2 = ndr.NdrLong.unpack(stream)   # name string max-size field (unused)
        luid = ndr.NdrHyper.unpack(stream)   # the privilege LUID (64-bit)
        return ptr, luid
class LSAPR_PRIVILEGE_ENUM_BUFFER(object):
    @classmethod
    def unpack(cls, stream):
        """Unpack the LsarEnumeratePrivileges result buffer and return a list
        of ``(luid, name)`` tuples."""
        entries = ndr.NdrLong.unpack(stream)
        array_size = ndr.NdrLong.unpack(stream)
        array_ptr = ndr.NdrLong.unpack(stream)
        # Unpack pointed array
        array_size2 = ndr.NdrLong.unpack(stream)
        assert array_size == array_size2
        x = []
        # unpack each elements LSAPR_POLICY_PRIVILEGE_DEF
        for i in range(array_size2):
            ptr, luid = LSAPR_POLICY_PRIVILEGE_DEF.unpack(stream)
            if ptr:
                x.append(luid)
        # unpack pointed strings
        # NDR deferred pointers: the name strings referenced by the array
        # entries are marshalled after the whole array, in entry order.
        result = []
        for luid in x:
            name = ndr.NdrWcharConformantVaryingArrays.unpack(stream)
            result.append((luid, name))
        return result
# Actual code
## LSASS alpc endpoints is fixed, no need for the epmapper
client = windows.rpc.RPCClient(r"\RPC Control\lsasspirpc")
## Bind to the desired interface
# 12345778-1234-abcd-ef00-0123456789ab is the well-known lsarpc interface UUID.
iid = client.bind('12345778-1234-abcd-ef00-0123456789ab', version=(0,0))
## Craft parameters and call 'LsarOpenPolicy2'
# SystemName=None, empty OBJECT_ATTRIBUTES, DesiredAccess=0x20000000
params = LsarOpenPolicy2Parameter.pack([None, (0, None, None, 0, None, None), 0x20000000])
res = client.call(iid, 44, params)  # opnum 44 == LsarOpenPolicy2
## Unpack the resulting handle
handle = NdrContext.unpack(ndr.NdrStream(res))
## Craft parameters and call 'LsarEnumeratePrivileges'
x = LsarEnumeratePrivilegesParameter.pack([handle, 0, 10000]);
res = client.call(iid, 2, x)  # opnum 2 == LsarEnumeratePrivileges
## Unpack the resulting 'LSAPR_PRIVILEGE_ENUM_BUFFER'
priviledges = LSAPR_PRIVILEGE_ENUM_BUFFER.unpack(ndr.NdrStream(res))
for priv in priviledges:
    print priv  # NOTE: Python 2 print statement
marpie/PythonForWindows | tests/test_system.py | import pytest
import windows
from pfwtest import *
@check_for_gc_garbage
class TestSystemWithCheckGarbage(object):
    # Smoke-tests: each windows.system property below must be retrievable
    # without raising (garbage checking enforced by the class decorator).
    def test_version(self):
        return windows.system.version

    def test_version_name(self):
        return windows.system.version_name

    def test_computer_name(self):
        return windows.system.computer_name

    def test_services(self):
        return windows.system.services

    def test_logicaldrives(self):
        return windows.system.logicaldrives

    def test_wmi(self):
        # Well, pytest initialize COM with its own parameters
        # It might make our own com.init() in WMI fail and therefore not call
        # CoInitializeSecurity. But looks like pytest/default COM-security parameters
        # does not allow to perform the request we want..
        # So we try & do it ourself here.
        # Do co-reinit in conftest.py ?
        try:
            if windows.com.init(): # if init fail. Call CoInitializeSecurity ourself
                windows.com.initsecurity()
        except Exception as e:
            pass
        return windows.system.wmi.select("Win32_Process", "*")

    def test_handles(self):
        return windows.system.handles

    def test_handle_process(self):
        # Resolving a handle's owner must give back a process with the same pid.
        handle_with_process = [h for h in windows.system.handles if h.dwProcessId]
        handle = handle_with_process[-1]
        proc = handle.process
        assert proc.pid == handle.dwProcessId

    def test_system_modules_ntosk(self):
        # The first system module is expected to be the kernel image.
        assert windows.system.modules[0].name.endswith("ntoskrnl.exe")
@check_for_gc_garbage
class TestSystemWithCheckGarbageAndHandleLeak(object):
    def test_threads(self):
        return windows.system.threads

    def test_processes(self):
        # The enumeration must at least contain the current process.
        procs = windows.system.processes
        assert windows.current_process.pid in [p.pid for p in procs]

    def test_system_modules(self):
        return windows.system.modules
koaleksa/tk-config-alpha | tk-config-alpha/config/hooks/tk-hiero-export/hiero_customize_export_ui.py | # Copyright (c) 2018 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
import sgtk
from sgtk.platform.qt import QtGui
HookBaseClass = sgtk.get_hook_baseclass()
#class HieroCustomizeExportUI(HookBaseClass):
# """
# This class defines methods that can be used to customize the UI of the various
# Shotgun-related exporters. Each processor has its own set of create/get/set
# methods, allowing for customizable UI elements for each type of export.
# """
# # For detailed documentation of the methods available for this hook, see
# # the documentation at http://developer.shotgunsoftware.com/tk-hiero-export/
# pass
class HieroCustomizeExportUI(HookBaseClass):
    """
    Customize the UI of the Shotgun-related Hiero exporters.

    Every exporter follows the same pattern: a "Crater Properties" group box
    (create_* methods), a description of the custom properties (get_*
    methods) and the wiring of the created property editors into the group
    box's form layout (set_* methods).
    """

    def _build_properties_group(self, parent_widget):
        # Shared look for every custom widget: a group box with a form layout.
        group = QtGui.QGroupBox("Crater Properties", parent_widget)
        group.setLayout(QtGui.QFormLayout())
        return group

    def _populate_form(self, widget, properties):
        # Add one (label, editor) row per property to the widget's form layout.
        form = widget.layout()
        for caption, editor in properties.iteritems():
            form.addRow(caption, editor)

    # ---- shot processor ---------------------------------------------------

    def create_shot_processor_widget(self, parent_widget):
        """Build the custom widget shown in the shot processor export UI."""
        return self._build_properties_group(parent_widget)

    def get_shot_processor_ui_properties(self):
        """Describe the custom properties of the shot processor."""
        return [
            {
                "label": "Create Cut:",
                "name": "custom_create_cut_bool_property",
                "value": True,
                "tooltip": "Create a Cut and CutItems in Shotgun...",
            },
            {
                "label": "Transcode:",
                "name": "custom_transcode_bool_property",
                "value": True,
                "tooltip": "Transcode and update shots",
            },
        ]

    def set_shot_processor_ui_properties(self, widget, properties):
        """Lay out the shot processor property editors in the custom widget."""
        self._populate_form(widget, properties)

    # ---- transcode exporter -----------------------------------------------

    def create_transcode_exporter_widget(self, parent_widget):
        """Build the custom widget shown in the transcode exporter UI."""
        return self._build_properties_group(parent_widget)

    def get_transcode_exporter_ui_properties(self):
        """Describe the custom properties of the transcode exporter."""
        return [
            {
                "label": "Custom two:",
                "name": "custom_two",
                "value": True,
                "tooltip": "Custom two tooltip",
            },
        ]

    def set_transcode_exporter_ui_properties(self, widget, properties):
        """Lay out the transcode exporter property editors in the custom widget."""
        self._populate_form(widget, properties)

    # ---- audio exporter ---------------------------------------------------

    def create_audio_exporter_widget(self, parent_widget):
        """Build the custom widget shown in the audio exporter UI."""
        return self._build_properties_group(parent_widget)

    def get_audio_exporter_ui_properties(self):
        """Describe the custom properties of the audio exporter."""
        return [
            {
                "label": "Custom three:",
                "name": "custom_three",
                "value": True,
                "tooltip": "Custom three tooltip",
            },
        ]

    def set_audio_exporter_ui_properties(self, widget, properties):
        """Lay out the audio exporter property editors in the custom widget."""
        self._populate_form(widget, properties)

    # ---- nuke shot exporter -----------------------------------------------

    def create_nuke_shot_exporter_widget(self, parent_widget):
        """Build the custom widget shown in the Nuke shot exporter UI."""
        return self._build_properties_group(parent_widget)

    def get_nuke_shot_exporter_ui_properties(self):
        """Describe the custom properties of the Nuke shot exporter."""
        return [
            {
                "label": "Custom four:",
                "name": "custom_four",
                "value": True,
                "tooltip": "Custom four tooltip",
            },
        ]

    def set_nuke_shot_exporter_ui_properties(self, widget, properties):
        """Lay out the Nuke shot exporter property editors in the custom widget."""
        self._populate_form(widget, properties)
koaleksa/tk-config-alpha | tk-config-alpha/config/hooks/tk-multi-publish2/maya/basic/publish_session_geometry.py | # Copyright (c) 2017 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
import os
import pprint
import maya.cmds as cmds
import maya.mel as mel
import sgtk
HookBaseClass = sgtk.get_hook_baseclass()
class MayaSessionGeometryPublishPlugin(HookBaseClass):
    """
    Plugin for publishing an open maya session.
    This hook relies on functionality found in the base file publisher hook in
    the publish2 app and should inherit from it in the configuration. The hook
    setting for this plugin should look something like this::
        hook: "{self}/publish_file.py:{engine}/tk-multi-publish2/basic/publish_session.py"
    """

    # NOTE: The plugin icon and name are defined by the base file plugin.

    @property
    def description(self):
        """
        Verbose, multi-line description of what the plugin does. This can
        contain simple html for formatting.
        """
        return """
        <p>This plugin publishes session geometry for the current session. Any
        session geometry will be exported to the path defined by this plugin's
        configured "Publish Template" setting. The plugin will fail to validate
        if the "AbcExport" plugin is not enabled or cannot be found.</p>
        """

    @property
    def settings(self):
        """
        Dictionary defining the settings that this plugin expects to receive
        through the settings parameter in the accept, validate, publish and
        finalize methods.
        A dictionary on the following form::
            {
                "Settings Name": {
                    "type": "settings_type",
                    "default": "default_value",
                    "description": "One line description of the setting"
                }
            }
        The type string should be one of the data types that toolkit accepts as
        part of its environment configuration.
        """
        # inherit the settings from the base publish plugin
        base_settings = super(MayaSessionGeometryPublishPlugin, self).settings or {}
        # settings specific to this class
        maya_publish_settings = {
            "Publish Template": {
                "type": "template",
                "default": None,
                "description": "Template path for published work files. Should"
                               "correspond to a template defined in "
                               "templates.yml.",
            }
        }
        # update the base settings
        base_settings.update(maya_publish_settings)
        return base_settings

    @property
    def item_filters(self):
        """
        List of item types that this plugin is interested in.
        Only items matching entries in this list will be presented to the
        accept() method. Strings can contain glob patterns such as *, for example
        ["maya.*", "file.maya"]
        """
        return ["maya.session.geometry"]

    def accept(self, settings, item):
        """
        Method called by the publisher to determine if an item is of any
        interest to this plugin. Only items matching the filters defined via the
        item_filters property will be presented to this method.
        A publish task will be generated for each item accepted here. Returns a
        dictionary with the following booleans:
            - accepted: Indicates if the plugin is interested in this value at
                all. Required.
            - enabled: If True, the plugin will be enabled in the UI, otherwise
                it will be disabled. Optional, True by default.
            - visible: If True, the plugin will be visible in the UI, otherwise
                it will be hidden. Optional, True by default.
            - checked: If True, the plugin will be checked in the UI, otherwise
                it will be unchecked. Optional, True by default.
        :param settings: Dictionary of Settings. The keys are strings, matching
            the keys returned in the settings property. The values are `Setting`
            instances.
        :param item: Item to process
        :returns: dictionary with boolean keys accepted, required and enabled
        """
        accepted = True
        publisher = self.parent
        template_name = settings["Publish Template"].value

        # ensure a work file template is available on the parent item
        work_template = item.parent.properties.get("work_template")
        if not work_template:
            self.logger.debug(
                "A work template is required for the session item in order to "
                "publish session geometry. Not accepting session geom item."
            )
            accepted = False

        # ensure the publish template is defined and valid and that we also have
        publish_template = publisher.get_template_by_name(template_name)
        if not publish_template:
            self.logger.debug(
                "The valid publish template could not be determined for the "
                "session geometry item. Not accepting the item."
            )
            accepted = False

        # we've validated the publish template. add it to the item properties
        # for use in subsequent methods
        item.properties["publish_template"] = publish_template

        # check that the AbcExport command is available!
        if not mel.eval("exists \"AbcExport\""):
            self.logger.debug(
                "Item not accepted because alembic export command 'AbcExport' "
                "is not available. Perhaps the plugin is not enabled?"
            )
            accepted = False

        # because a publish template is configured, disable context change. This
        # is a temporary measure until the publisher handles context switching
        # natively.
        item.context_change_allowed = False

        # checked=False: the task is offered in the UI but unticked by default.
        return {
            "accepted": accepted,
            "checked": False
        }

    def validate(self, settings, item):
        """
        Validates the given item to check that it is ok to publish. Returns a
        boolean to indicate validity.
        :param settings: Dictionary of Settings. The keys are strings, matching
            the keys returned in the settings property. The values are `Setting`
            instances.
        :param item: Item to process
        :returns: True if item is valid, False otherwise.
        """
        path = _session_path()

        # ---- ensure the session has been saved
        if not path:
            # the session still requires saving. provide a save button.
            # validation fails.
            error_msg = "The Maya session has not been saved."
            self.logger.error(
                error_msg,
                extra=_get_save_as_action()
            )
            raise Exception(error_msg)

        # get the normalized path
        path = sgtk.util.ShotgunPath.normalize(path)

        # check that there is still geometry in the scene:
        if not cmds.ls(geometry=True, noIntermediate=True):
            error_msg = (
                "Validation failed because there is no geometry in the scene "
                "to be exported. You can uncheck this plugin or create "
                "geometry to export to avoid this error."
            )
            self.logger.error(error_msg)
            raise Exception(error_msg)

        # get the configured work file template
        work_template = item.parent.properties.get("work_template")
        publish_template = item.properties.get("publish_template")

        # get the current scene path and extract fields from it using the work
        # template:
        work_fields = work_template.get_fields(path)

        # ensure the fields work for the publish template
        missing_keys = publish_template.missing_keys(work_fields)
        if missing_keys:
            error_msg = "Work file '%s' missing keys required for the " \
                        "publish template: %s" % (path, missing_keys)
            self.logger.error(error_msg)
            raise Exception(error_msg)

        # create the publish path by applying the fields. store it in the item's
        # properties. This is the path we'll create and then publish in the base
        # publish plugin. Also set the publish_path to be explicit.
        item.properties["path"] = publish_template.apply_fields(work_fields)
        item.properties["publish_path"] = item.properties["path"]

        # use the work file's version number when publishing
        if "version" in work_fields:
            item.properties["publish_version"] = work_fields["version"]

        # run the base class validation
        return super(MayaSessionGeometryPublishPlugin, self).validate(
            settings, item)

    def publish(self, settings, item):
        """
        Executes the publish logic for the given item and settings.
        :param settings: Dictionary of Settings. The keys are strings, matching
            the keys returned in the settings property. The values are `Setting`
            instances.
        :param item: Item to process
        """
        publisher = self.parent

        # get the path to create and publish
        publish_path = item.properties["path"]

        # ensure the publish folder exists:
        publish_folder = os.path.dirname(publish_path)
        self.parent.ensure_folder_exists(publish_folder)

        # set the alembic args that make the most sense when working with Mari.
        # These flags will ensure the export of an Alembic file that contains
        # all visible geometry from the current scene together with UV's and
        # face sets for use in Mari.
        alembic_args = [
            # only renderable objects (visible and not templated)
            "-renderableOnly",
            # write shading group set assignments (Maya 2015+)
            "-writeFaceSets",
            # write uv's (only the current uv set gets written)
            "-uvWrite"
        ]

        # find the animated frame range to use:
        start_frame, end_frame = _find_scene_animation_range()
        if start_frame and end_frame:
            alembic_args.append("-fr %d %d" % (start_frame, end_frame))

        # Set the output path:
        # Note: The AbcExport command expects forward slashes!
        alembic_args.append("-file %s" % publish_path.replace("\\", "/"))

        # build the export command. Note, use AbcExport -help in Maya for
        # more detailed Alembic export help
        abc_export_cmd = ("AbcExport -j \"%s\"" % " ".join(alembic_args))

        # ...and execute it:
        try:
            self.parent.log_debug("Executing command: %s" % abc_export_cmd)
            mel.eval(abc_export_cmd)
        # NOTE: Python 2-only "except ..., e" syntax (this hook targets a
        # py2-based DCC interpreter -- see the `unicode` check in _session_path).
        except Exception, e:
            # Export failure aborts this plugin without registering a publish.
            self.logger.error("Failed to export Geometry: %s" % e)
            return

        # Now that the path has been generated, hand it off to the
        super(MayaSessionGeometryPublishPlugin, self).publish(settings, item)
def _find_scene_animation_range():
    """
    Find the animation range from the current scene.
    """
    # No animation curves at all -> degenerate single-frame range.
    if not cmds.ls(typ="animCurve"):
        return 1, 1
    # Something is animated: report the current playback timeline. This could
    # be extended if needed to compute the exact range of the animated curves.
    first = int(cmds.playbackOptions(q=True, min=True))
    last = int(cmds.playbackOptions(q=True, max=True))
    return first, last
def _session_path():
    """
    Return the path to the current session
    :return:
    """
    scene_name = cmds.file(query=True, sn=True)
    # Normalize to a utf-8 byte string (Python 2: Maya may return unicode).
    if isinstance(scene_name, unicode):
        scene_name = scene_name.encode("utf-8")
    return scene_name
def _get_save_as_action():
    """
    Simple helper for returning a log action dict for saving the session
    """
    engine = sgtk.platform.current_engine()
    # Maya's builtin save is the fallback callback.
    callback = cmds.SaveScene
    # Prefer the workfiles2 save-as dialog when the app is configured.
    if "tk-multi-workfiles2" in engine.apps:
        workfiles_app = engine.apps["tk-multi-workfiles2"]
        if hasattr(workfiles_app, "show_file_save_dlg"):
            callback = workfiles_app.show_file_save_dlg
    return {
        "action_button": {
            "label": "Save As...",
            "tooltip": "Save the current session",
            "callback": callback
        }
    }
koaleksa/tk-config-alpha | tk-config-alpha/config/hooks/tk-hiero-export/hiero_get_quicktime_settings.py | <reponame>koaleksa/tk-config-alpha<gh_stars>1-10
# Copyright (c) 2013 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
import sys
from tank import Hook
class HieroGetQuicktimeSettings(Hook):
    """
    Hook allowing customization of the encoding settings used for any
    Quicktime written out by the export process.
    """
    def execute(self, for_shotgun, **kwargs):
        """
        Gets encoding settings for Quicktimes generated by the export process.
        :param bool for_shotgun: Whether the settings are being gathered for
            Quicktime output intended for use within the Shotgun web app.
        :returns: A tuple, where the first item is the file_type of a Nuke
            write node, and the second item is a dictionary of knob names and
            values.
        :rtype: tuple
        """
        import nuke
        # Older Nuke builds on Linux use the mov64 encoder knobs; everything
        # else gets the default encoder with H.264 settings.
        on_old_linux_nuke = (
            sys.platform.startswith("linux") and nuke.NUKE_VERSION_MAJOR < 11
        )
        if on_old_linux_nuke:
            knobs = {
                "encoder": "mov64",
                "format": "MOV format (mov)",
                "bitrate": 2000000,
            }
        else:
            knobs = {
                "encoder": self.parent.get_default_encoder_name(),
                "codec": "avc1\tH.264",
                "quality": 3,
                "settingsString": "H.264, High Quality",
                "keyframerate": 1,
            }
        # The write node file type is "mov" in both cases.
        return ("mov", knobs)
|
koaleksa/tk-config-alpha | tk-config-alpha/config/hooks/tk-multi-launchapp/before_register_command.py | <reponame>koaleksa/tk-config-alpha
# Copyright (c) 2017 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
import sgtk
HookBaseClass = sgtk.get_hook_baseclass()
class BeforeRegisterCommand(HookBaseClass):
    """
    Before Register Command Hook.

    Runs prior to launchapp registering launcher commands with the parent
    engine. Note: this hook is only run for Software entity launchers.
    """

    def determine_engine_instance_name(self, software_version, engine_instance_name):
        """
        Intercept SoftwareLauncher and engine instance name data prior to
        launcher command registration and alter the engine instance name
        should that be required.

        :param software_version: The software version instance constructed
            when the scan software routine was run.
        :type: :class:`sgtk.platform.SoftwareVersion`
        :param str engine_instance_name: The name of the engine instance that
            will be used when SGTK is bootstrapped during launch.

        :returns: The desired engine instance name.
        :rtype: str
        """
        # Nuke Studio's SoftwareVersion would otherwise route to the tk-nuke
        # engine instance; redirect it to tk-nukestudio instead.
        if software_version.product == "NukeStudio":
            return "tk-nukestudio"
        return engine_instance_name
|
koaleksa/tk-config-alpha | tk-config-alpha/config/hooks/tk-hiero-export/hiero_upload_thumbnail.py | <reponame>koaleksa/tk-config-alpha
# Copyright (c) 2013 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
import os
import sys
import math
import shutil
import tempfile
import traceback
import time
from tank.platform.qt import QtCore
from tank import Hook
import tank.templatekey
class HieroUploadThumbnail(Hook):
    """
    This class implements a hook that's responsible for uploading a thumbnail
    to a given Shotgun entity for a given Hiero source item.
    """
    def execute(self, entity, source, item, **kwargs):
        """
        Uploads a thumbnail to the given entity in Shotgun.

        :param dict entity: The entity dictionary that will receive the new
            thumbnail image.
        :param source: The Hiero source sequence object being exported.
        :param item: The Hiero task item being processed.
        :param task: The Hiero task being processed.
        """
        thumbdir = tempfile.mkdtemp(prefix='hiero_process_shot')
        try:
            path = "%s.png" % os.path.join(thumbdir, source.name())
            task = kwargs.get('task', None)
            if item is None:
                # No timeline info, use the poster frame of the source item
                frame = source.posterFrame()
                thumb_qimage = source.thumbnail(frame)
            else:
                if (task is not None) and task.isCollated():
                    # collated shot, use middle frame from task sequence (all collated items)
                    max_frame = 0
                    min_frame = sys.maxint
                    for track in task._sequence.videoTracks():
                        for i in track.items():
                            min_frame = min(i.timelineIn(), min_frame)
                            max_frame = max(i.timelineOut(), max_frame)
                    frame = int(math.ceil((min_frame + max_frame) / 2.0))
                    thumb_qimage = task._sequence.thumbnail(frame)
                else:
                    # Simple item, just use middle frame
                    frame = int(math.ceil((item.sourceIn() + item.sourceOut()) / 2.0))
                    thumb_qimage = source.thumbnail(frame)
            # scale it down to 600px wide
            thumb_qimage_scaled = thumb_qimage.scaledToWidth(
                600, QtCore.Qt.SmoothTransformation)
            thumb_qimage_scaled.save(path)
            self.parent.log_debug(
                "Uploading thumbnail for %s %s..." % (entity['type'], entity['id']))
            self.parent.shotgun.upload_thumbnail(entity['type'], entity['id'], path)
        except Exception:
            # Was a bare "except:", which would also swallow SystemExit and
            # KeyboardInterrupt. Thumbnail upload is best-effort, so log and
            # continue, but let process-level exceptions propagate.
            self.parent.log_info(
                "Thumbnail for %s was not refreshed in Shotgun." % source)
            tb = traceback.format_exc()
            self.parent.log_debug(tb)
        finally:
            # Sometimes Windows holds on to the temporary thumbnail file longer
            # than expected which can cause an exception here. If we wait a
            # second and try again, this usually solves the issue.
            try:
                shutil.rmtree(thumbdir)
            except Exception:
                self.parent.log_error(
                    "Error removing temporary thumbnail file, trying again.")
                time.sleep(1.0)
                shutil.rmtree(thumbdir)
|
koaleksa/tk-config-alpha | tk-config-alpha/config/hooks/tk-multi-publish2/collector.py | # Copyright (c) 2017 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
import mimetypes
import os
import sgtk
HookBaseClass = sgtk.get_hook_baseclass()
# This is a dictionary of file type info that allows the basic collector to
# identify common production file types and associate them with a display name,
# item type, and config icon.
# Keys are human-readable type display names; each value supplies:
#   extensions - lowercase file extensions (no leading dot) mapped to the type
#   icon       - icon file name, resolved via BasicSceneCollector._get_icon_path
#   item_type  - publish item type string of the form "file.<type>"
COMMON_FILE_INFO = {
    "Alembic Cache": {
        "extensions": ["abc"],
        "icon": "alembic.png",
        "item_type": "file.alembic",
    },
    "3dsmax Scene": {
        "extensions": ["max"],
        "icon": "3dsmax.png",
        "item_type": "file.3dsmax",
    },
    "Hiero Project": {
        "extensions": ["hrox"],
        "icon": "hiero.png",
        "item_type": "file.hiero",
    },
    "Houdini Scene": {
        "extensions": ["hip", "hipnc"],
        "icon": "houdini.png",
        "item_type": "file.houdini",
    },
    "Maya Scene": {
        "extensions": ["ma", "mb"],
        "icon": "maya.png",
        "item_type": "file.maya",
    },
    "Motion Builder FBX": {
        "extensions": ["fbx"],
        "icon": "motionbuilder.png",
        "item_type": "file.motionbuilder",
    },
    "Nuke Script": {
        "extensions": ["nk"],
        "icon": "nuke.png",
        "item_type": "file.nuke",
    },
    "Photoshop Image": {
        "extensions": ["psd", "psb"],
        "icon": "photoshop.png",
        "item_type": "file.photoshop",
    },
    "Rendered Image": {
        "extensions": ["dpx", "exr"],
        "icon": "image_sequence.png",
        "item_type": "file.image",
    },
    "Texture Image": {
        "extensions": ["tif", "tiff", "tx", "tga", "dds", "rat"],
        "icon": "texture.png",
        "item_type": "file.texture",
    },
}
class BasicSceneCollector(HookBaseClass):
    """
    A basic collector that handles files and general objects.

    This collector hook is used to collect individual files that are browsed or
    dragged and dropped into the Publish2 UI. It can also be subclassed by other
    collectors responsible for creating items for a file to be published such as
    the current Maya session file.

    This plugin centralizes the logic for collecting a file, including
    determining how to display the file for publishing (based on the file
    extension).

    In addition to creating an item to publish, this hook will set the following
    properties on the item::

        path - The path to the file to publish. This could be a path
            representing a sequence of files (including a frame specifier).

        sequence_paths - If the item represents a collection of files, the
            plugin will populate this property with a list of files matching
            "path".
    """

    @property
    def settings(self):
        """
        Dictionary defining the settings that this collector expects to receive
        through the settings parameter in the process_current_session and
        process_file methods.

        A dictionary on the following form::

            {
                "Settings Name": {
                    "type": "settings_type",
                    "default": "default_value",
                    "description": "One line description of the setting"
                }
            }

        The type string should be one of the data types that toolkit accepts as
        part of its environment configuration.
        """
        return {}

    def process_current_session(self, settings, parent_item):
        """
        Analyzes the current scene open in a DCC and parents a subtree of items
        under the parent_item passed in.

        :param dict settings: Configured settings for this collector
        :param parent_item: Root item instance
        """
        # default implementation does not do anything
        pass

    def process_file(self, settings, parent_item, path):
        """
        Analyzes the given file and creates one or more items
        to represent it.

        :param dict settings: Configured settings for this collector
        :param parent_item: Root item instance
        :param path: Path to analyze

        :returns: The main item that was created, or None if no item was created
            for the supplied path
        """
        # handle files and folders differently
        if os.path.isdir(path):
            self._collect_folder(parent_item, path)
            return None
        else:
            return self._collect_file(parent_item, path)

    def _collect_file(self, parent_item, path, frame_sequence=False):
        """
        Process the supplied file path.

        :param parent_item: parent item instance
        :param path: Path to analyze
        :param frame_sequence: Treat the path as a part of a sequence

        :returns: The item that was created
        """
        # make sure the path is normalized. no trailing separator, separators
        # are appropriate for the current os, no double separators, etc.
        path = sgtk.util.ShotgunPath.normalize(path)

        publisher = self.parent

        # get info for the extension
        item_info = self._get_item_info(path)
        item_type = item_info["item_type"]
        type_display = item_info["type_display"]
        evaluated_path = path
        is_sequence = False

        if frame_sequence:
            # replace the frame number with frame spec
            seq_path = publisher.util.get_frame_sequence_path(path)
            if seq_path:
                evaluated_path = seq_path
                type_display = "%s Sequence" % (type_display,)
                item_type = "%s.%s" % (item_type, "sequence")
                is_sequence = True

        # Crater addition =================================================================
        # use the raw file name rather than the evaluated publish name for display
        #display_name = publisher.util.get_publish_name(path, sequence=is_sequence)
        display_name = publisher.util.get_file_path_components(path)['filename']
        #==================================================================================

        # create and populate the item
        file_item = parent_item.create_item(
            item_type, type_display, display_name)
        file_item.set_icon_from_path(item_info["icon_path"])

        # if the supplied path is an image, use the path as the thumbnail.
        if (item_type.startswith("file.image") or
                item_type.startswith("file.texture")):
            file_item.set_thumbnail_from_path(path)

            # disable thumbnail creation since we get it for free
            file_item.thumbnail_enabled = False

        # all we know about the file is its path. set the path in its
        # properties for the plugins to use for processing.
        file_item.properties["path"] = evaluated_path

        if is_sequence:
            # include an indicator that this is an image sequence and the known
            # file that belongs to this sequence
            file_item.properties["sequence_paths"] = [path]

        self.logger.info("Collected file: %s" % (evaluated_path,))

        return file_item

    def _collect_folder(self, parent_item, folder, is_playblast=False):
        """
        Process the supplied folder path, collecting any image sequences
        found inside it.

        :param parent_item: parent item instance
        :param folder: Path to analyze
        :param is_playblast: If True, tag each collected sequence as a
            playblast in its display name (Crater addition)

        :returns: The list of items that were created
        """
        # make sure the path is normalized. no trailing separator, separators
        # are appropriate for the current os, no double separators, etc.
        folder = sgtk.util.ShotgunPath.normalize(folder)
        publisher = self.parent
        img_sequences = publisher.util.get_frame_sequences(
            folder, IMAGE_EXTENSIONS_LIST)

        file_items = []

        for (image_seq_path, img_seq_files) in img_sequences:

            # get info for the extension
            item_info = self._get_item_info(image_seq_path)
            item_type = item_info["item_type"]
            type_display = item_info["type_display"]

            # the supplied image path is part of a sequence. alter the
            # type info to account for this.
            type_display = "%s Sequence" % (type_display,)
            item_type = "%s.%s" % (item_type, "sequence")
            icon_name = "image_sequence.png"

            # get the first frame of the sequence. we'll use this for the
            # thumbnail and to generate the display name
            img_seq_files.sort()
            first_frame_file = img_seq_files[0]
            display_name = publisher.util.get_publish_name(
                first_frame_file, sequence=True)

            # Crater addition: flag playblast sequences in the display name
            if is_playblast:
                display_name = display_name + (' (playblast)')

            # create and populate the item
            file_item = parent_item.create_item(
                item_type,
                type_display,
                display_name
            )
            icon_path = self._get_icon_path(icon_name)
            file_item.set_icon_from_path(icon_path)

            # use the first frame of the seq as the thumbnail
            file_item.set_thumbnail_from_path(first_frame_file)

            # disable thumbnail creation since we get it for free
            file_item.thumbnail_enabled = False

            # all we know about the file is its path. set the path in its
            # properties for the plugins to use for processing.
            file_item.properties["path"] = image_seq_path
            file_item.properties["sequence_paths"] = img_seq_files

            self.logger.info("Collected file: %s" % (image_seq_path,))

            file_items.append(file_item)

        if not file_items:
            # use warning(); logger.warn is a deprecated alias
            self.logger.warning("No image sequences found in: %s" % (folder,))

        return file_items

    def _get_item_info(self, path):
        """
        Return a tuple of display name, item type, and icon path for the given
        filename.

        The method will try to identify the file as a common file type. If not,
        it will use the mimetype category. If the file still cannot be
        identified, it will fallback to a generic file type.

        :param path: The file path to identify type info for

        :return: A dictionary of information about the item to create::

            # path = "/path/to/some/file.0001.exr"
            {
                "item_type": "file.image.sequence",
                "type_display": "Rendered Image Sequence",
                "icon_path": "/path/to/some/icons/folder/image_sequence.png",
                "path": "/path/to/some/file.%04d.exr"
            }

        The item type will be of the form `file.<type>` where type is a specific
        common type or a generic classification of the file.
        """
        publisher = self.parent

        # extract the components of the supplied path
        file_info = publisher.util.get_file_path_components(path)
        extension = file_info["extension"]
        filename = file_info["filename"]

        # default values used if no specific type can be determined
        type_display = "File"
        item_type = "file.unknown"
        icon_name = "file.png"

        # keep track if a common type was identified for the extension
        common_type_found = False

        # look for the extension in the common file type info dict
        for display in COMMON_FILE_INFO:
            type_info = COMMON_FILE_INFO[display]

            if extension in type_info["extensions"]:
                # found the extension in the common types lookup. extract the
                # item type, icon name.
                type_display = display
                item_type = type_info["item_type"]
                icon_name = type_info["icon"]
                common_type_found = True
                break

        if not common_type_found:
            # no common type match. try to use the mimetype category. this will
            # be a value like "image/jpeg" or "video/mp4". we'll extract the
            # portion before the "/" and use that for display.
            (category_type, _) = mimetypes.guess_type(filename)

            if category_type:
                # mimetypes.guess_type can return unicode strings depending on
                # the system's default encoding. Ensure the value ends up
                # utf-8 encoded to avoid issues with toolkit, which expects
                # utf-8. Testing against str (rather than the Python 2-only
                # "unicode" name) keeps this working on both Python 2 and 3.
                if not isinstance(category_type, str):
                    category_type = category_type.encode("utf-8")

                # the category portion of the mimetype
                category = category_type.split("/")[0]

                type_display = "%s File" % (category.title(),)
                item_type = "file.%s" % (category,)
                icon_name = "%s.png" % (category,)

        # construct a full path to the icon given the name defined above
        icon_path = self._get_icon_path(icon_name)

        # everything should be populated. return the dictionary
        return dict(
            item_type=item_type,
            type_display=type_display,
            icon_path=icon_path,
        )

    def _get_icon_path(self, icon_name):
        """
        Helper to get the full path to an icon from the app's resources folder.

        If the supplied icon_name doesn't exist there, fall back to the file.png
        icon.
        """
        icon_path = os.path.join(
            self.disk_location,
            "icons",
            icon_name
        )

        # supplied file name doesn't exist. return the default file.png image
        if not os.path.exists(icon_path):
            icon_path = os.path.join(
                self.disk_location,
                "icons",
                "file.png"
            )

        return icon_path
def _build_image_extensions_list():
    """
    Build a list of all known image file extensions.

    Combines the extensions declared for image-like types in
    COMMON_FILE_INFO with every extension the mimetypes registry maps
    to an "image/*" mimetype.

    :return: List of extensions (lowercase, without the leading dot).
    """
    image_file_types = ["Photoshop Image", "Rendered Image", "Texture Image"]
    image_extensions = set()

    for image_file_type in image_file_types:
        image_extensions.update(COMMON_FILE_INFO[image_file_type]["extensions"])

    # get all the image mime type image extensions as well. Use .items()
    # rather than the Python 2-only .iteritems() so this also runs under
    # Python 3.
    mimetypes.init()
    types_map = mimetypes.types_map
    for (ext, mimetype) in types_map.items():
        if mimetype.startswith("image/"):
            image_extensions.add(ext.lstrip("."))

    return list(image_extensions)


IMAGE_EXTENSIONS_LIST = _build_image_extensions_list()
|
koaleksa/tk-config-alpha | tk-config-alpha/config/hooks/tk-hiero-export/hiero_update_version_data.py | # Copyright (c) 2013 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
from tank import Hook
class HieroUpdateVersionData(Hook):
    """
    Hook for customizing the data dictionary of a Version entity that the
    Hiero export process is about to create in Shotgun.
    """

    def execute(self, version_data, task, **kwargs):
        """
        Modify *version_data* in place to change what gets written to the
        new Version entity. Updating the given dictionary in place ensures
        your customizations are used when the entity is created. The default
        implementation leaves the data untouched.

        :param dict version_data: The data dictionary that will be used by
            the export process to create a new Version entity in Shotgun.
        :param task: The Hiero export task being processed.
        """
        pass
|
koaleksa/tk-config-alpha | tk-config-alpha/config/hooks/tk-multi-publish2/publish_file.py | # Copyright (c) 2017 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
import os
import pprint
import traceback
import sgtk
from sgtk.util.filesystem import copy_file, ensure_folder_exists
HookBaseClass = sgtk.get_hook_baseclass()
class BasicFilePublishPlugin(HookBaseClass):
"""
Plugin for creating generic publishes in Shotgun.
This plugin is typically configured to act upon files that are dragged and
dropped into the publisher UI. It can also be used as a base class for
other file-based publish plugins as it contains standard operations for
validating and registering publishes with Shotgun.
Once attached to a publish item, the plugin will key off of properties that
are set on the item. These properties can be set via the collector or
by subclasses prior to calling methods on this class.
The only property that is required for the plugin to operate is the ``path``
property. All of the properties understood by the plugin are documented
below::
Path properties
-------------
path - The path to the file to be published.
sequence_paths - If set, implies the "path" property represents a
sequence of files (typically using a frame identifier such as %04d).
This property should be a list of files on disk matching the "path".
If a work template is provided, and corresponds to the listed
frames, fields will be extracted and applied to the publish template
(if set) and copied to that publish location.
Template properties
-------------------
work_template - If set in the item properties dictionary, is used
to validate "path" and extract fields for further processing and
contextual discovery. For example, if configured and a version key
can be extracted, it will be used as the publish version to be
registered in Shotgun.
publish_template - If set in the item properties dictionary, used to
determine where "path" should be copied prior to publishing. If
not specified, "path" will be published in place.
Publish properties
------------------
publish_type - If set in the item properties dictionary, will be
supplied to SG as the publish type when registering "path" as a new
publish. If not set, will be determined via the plugin's "File Type"
setting.
publish_path - If set in the item properties dictionary, will be
supplied to SG as the publish path when registering the new publish.
If not set, will be determined by the "published_file" property if
available, falling back to publishing "path" in place.
publish_name - If set in the item properties dictionary, will be
supplied to SG as the publish name when registering the new publish.
If not available, will be determined by the "work_template"
property if available, falling back to the ``path_info`` hook
logic.
publish_version - If set in the item properties dictionary, will be
supplied to SG as the publish version when registering the new
publish. If not available, will be determined by the
"work_template" property if available, falling back to the
``path_info`` hook logic.
publish_dependencies - A list of files to include as dependencies when
registering the publish. If the item's parent has been published,
it's path will be appended to this list.
This plugin will also set the properties on the item which may be useful for
child items.
sg_publish_data - The dictionary of publish information returned from
the tk-core register_publish method.
"""
@property
def icon(self):
"""
Path to an png icon on disk
"""
# look for icon one level up from this hook's folder in "icons" folder
return os.path.join(
self.disk_location,
"icons",
"publish.png"
)
@property
def name(self):
"""
One line display name describing the plugin
"""
return "Publish to Shotgun"
@property
def description(self):
"""
Verbose, multi-line description of what the plugin does. This can
contain simple html for formatting.
"""
loader_url = "https://support.shotgunsoftware.com/hc/en-us/articles/219033078"
return """
Publishes the file to Shotgun. A <b>Publish</b> entry will be
created in Shotgun which will include a reference to the file's current
path on disk. Other users will be able to access the published file via
the <b><a href='%s'>Loader</a></b> so long as they have access to
the file's location on disk.
<h3>File versioning</h3>
The <code>version</code> field of the resulting <b>Publish</b> in
Shotgun will also reflect the version number identified in the filename.
The basic worklfow recognizes the following version formats by default:
<ul>
<li><code>filename.v###.ext</code></li>
<li><code>filename_v###.ext</code></li>
<li><code>filename-v###.ext</code></li>
</ul>
<br><br><i>NOTE: any amount of version number padding is supported.</i>
<h3>Overwriting an existing publish</h3>
A file can be published multiple times however only the most recent
publish will be available to other users. Warnings will be provided
during validation if there are previous publishes.
""" % (loader_url,)
@property
def settings(self):
"""
Dictionary defining the settings that this plugin expects to recieve
through the settings parameter in the accept, validate, publish and
finalize methods.
A dictionary on the following form::
{
"Settings Name": {
"type": "settings_type",
"default": "default_value",
"description": "One line description of the setting"
}
The type string should be one of the data types that toolkit accepts
as part of its environment configuration.
"""
return {
"File Types": {
"type": "list",
"default": [
["Alembic Cache", "abc"],
["3dsmax Scene", "max"],
["NukeStudio Project", "hrox"],
["Houdini Scene", "hip", "hipnc"],
["Maya Scene", "ma", "mb"],
["Motion Builder FBX", "fbx"],
["Nuke Script", "nk"],
["Photoshop Image", "psd", "psb"],
["Rendered Image", "dpx", "exr"],
["Texture", "tiff", "tx", "tga", "dds"],
["Image", "jpeg", "jpg", "png"],
["Movie", "mov", "mp4"],
],
"description": (
"List of file types to include. Each entry in the list "
"is a list in which the first entry is the Shotgun "
"published file type and subsequent entries are file "
"extensions that should be associated."
)
},
}
@property
def item_filters(self):
"""
List of item types that this plugin is interested in.
Only items matching entries in this list will be presented to the
accept() method. Strings can contain glob patters such as *, for example
["maya.*", "file.maya"]
"""
return ["file.*"]
def accept(self, settings, item):
"""
Method called by the publisher to determine if an item is of any
interest to this plugin. Only items matching the filters defined via the
item_filters property will be presented to this method.
A publish task will be generated for each item accepted here. Returns a
dictionary with the following booleans:
- accepted: Indicates if the plugin is interested in this value at
all. Required.
- enabled: If True, the plugin will be enabled in the UI, otherwise
it will be disabled. Optional, True by default.
- visible: If True, the plugin will be visible in the UI, otherwise
it will be hidden. Optional, True by default.
- checked: If True, the plugin will be checked in the UI, otherwise
it will be unchecked. Optional, True by default.
:param settings: Dictionary of Settings. The keys are strings, matching
the keys returned in the settings property. The values are `Setting`
instances.
:param item: Item to process
:returns: dictionary with boolean keys accepted, required and enabled
"""
path = item.properties["path"]
# log the accepted file and display a button to reveal it in the fs
self.logger.info(
"File publisher plugin accepted: %s" % (path,),
extra={
"action_show_folder": {
"path": path
}
}
)
# return the accepted info
return {"accepted": True,
"checked": False}
def validate(self, settings, item):
"""
Validates the given item to check that it is ok to publish.
Returns a boolean to indicate validity.
:param settings: Dictionary of Settings. The keys are strings, matching
the keys returned in the settings property. The values are `Setting`
instances.
:param item: Item to process
:returns: True if item is valid, False otherwise.
"""
publisher = self.parent
path = item.properties.get("path")
# ---- determine the information required to validate
# We allow the information to be pre-populated by the collector or a
# base class plugin. They may have more information than is available
# here such as custom type or template settings.
publish_path = item.properties.get("publish_path") or \
self._get_publish_path(settings, item)
publish_name = item.properties.get("publish_name") or \
self._get_publish_name(settings, item)
# ---- check for conflicting publishes of this path with a status
# Note the name, context, and path *must* match the values supplied to
# register_publish in the publish phase in order for this to return an
# accurate list of previous publishes of this file.
publishes = publisher.util.get_conflicting_publishes(
item.context,
publish_path,
publish_name,
filters=["sg_status_list", "is_not", None]
)
if publishes:
self.logger.debug(
"Conflicting publishes: %s" % (pprint.pformat(publishes),))
if ("work_template" in item.properties or
"publish_template" in item.properties):
# templates are in play and there is already a publish in SG
# for this file path. We will raise here to prevent this from
# happening.
error_msg = (
"Can not validate file path. There is already a publish in "
"Shotgun that matches this path. Please uncheck this "
"plugin or save the file to a different path."
)
self.logger.error(error_msg)
raise Exception(error_msg)
else:
conflict_info = (
"If you continue, these conflicting publishes will no "
"longer be available to other users via the loader:<br>"
"<pre>%s</pre>" % (pprint.pformat(publishes),)
)
self.logger.warn(
"Found %s conflicting publishes in Shotgun" %
(len(publishes),),
extra={
"action_show_more_info": {
"label": "Show Conflicts",
"tooltip": "Show conflicting publishes in Shotgun",
"text": conflict_info
}
}
)
self.logger.info("A Publish will be created in Shotgun and linked to:")
self.logger.info(" %s" % (path,))
return True
def publish(self, settings, item):
"""
Executes the publish logic for the given item and settings.
:param settings: Dictionary of Settings. The keys are strings, matching
the keys returned in the settings property. The values are `Setting`
instances.
:param item: Item to process
"""
publisher = self.parent
# ---- determine the information required to publish
# We allow the information to be pre-populated by the collector or a
# base class plugin. They may have more information than is available
# here such as custom type or template settings.
publish_type = item.properties.get("publish_type") or \
self._get_publish_type(settings, item)
publish_name = item.properties.get("publish_name") or \
self._get_publish_name(settings, item)
publish_version = item.properties.get("publish_version") or \
self._get_publish_version(settings, item)
publish_path = item.properties.get("publish_path") or \
self._get_publish_path(settings, item)
# if the parent item has a publish path, include it in the list of
# dependencies
dependency_paths = item.properties.get("publish_dependencies", [])
if "sg_publish_path" in item.parent.properties:
dependency_paths.append(item.parent.properties["sg_publish_path"])
# handle copying of work to publish if templates are in play
self._copy_work_to_publish(settings, item)
# arguments for publish registration
self.logger.info("Registering publish...")
publish_data= {
"tk": publisher.sgtk,
"context": item.context,
"comment": item.description,
"path": publish_path,
"name": publish_name,
"version_number": publish_version,
"thumbnail_path": item.get_thumbnail_as_path(),
"published_file_type": publish_type,
"dependency_paths": dependency_paths
}
# log the publish data for debugging
self.logger.debug(
"Populated Publish data...",
extra={
"action_show_more_info": {
"label": "Publish Data",
"tooltip": "Show the complete Publish data dictionary",
"text": "<pre>%s</pre>" % (pprint.pformat(publish_data),)
}
}
)
# create the publish and stash it in the item properties for other
# plugins to use.
item.properties["sg_publish_data"] = sgtk.util.register_publish(
**publish_data)
self.logger.info("Publish registered!")
def finalize(self, settings, item):
"""
Execute the finalization pass. This pass executes once
all the publish tasks have completed, and can for example
be used to version up files.
:param settings: Dictionary of Settings. The keys are strings, matching
the keys returned in the settings property. The values are `Setting`
instances.
:param item: Item to process
"""
publisher = self.parent
# get the data for the publish that was just created in SG
publish_data = item.properties["sg_publish_data"]
# ensure conflicting publishes have their status cleared
publisher.util.clear_status_for_conflicting_publishes(
item.context, publish_data)
self.logger.info(
"Cleared the status of all previous, conflicting publishes")
path = item.properties["path"]
self.logger.info(
"Publish created for file: %s" % (path,),
extra={
"action_show_in_shotgun": {
"label": "Show Publish",
"tooltip": "Open the Publish in Shotgun.",
"entity": publish_data
}
}
)
############################################################################
# protected methods
def _copy_work_to_publish(self, settings, item):
"""
This method handles copying work file path(s) to a designated publish
location.
This method requires a "work_template" and a "publish_template" be set
on the supplied item.
The method will handle copying the "path" property to the corresponding
publish location assuming the path corresponds to the "work_template"
and the fields extracted from the "work_template" are sufficient to
satisfy the "publish_template".
The method will not attempt to copy files if any of the above
requirements are not met. If the requirements are met, the file will
ensure the publish path folder exists and then copy the file to that
location.
If the item has "sequence_paths" set, it will attempt to copy all paths
assuming they meet the required criteria with respect to the templates.
"""
# ---- ensure templates are available
work_template = item.properties.get("work_template")
if not work_template:
self.logger.debug(
"No work template set on the item. "
"Skipping copy file to publish location."
)
return
publish_template = item.properties.get("publish_template")
if not publish_template:
self.logger.debug(
"No publish template set on the item. "
"Skipping copying file to publish location."
)
return
# ---- get a list of files to be copied
# by default, the path that was collected for publishing
work_files = [item.properties["path"]]
# if this is a sequence, get the attached files
if "sequence_paths" in item.properties:
work_files = item.properties.get("sequence_paths", [])
if not work_files:
self.logger.warning(
"Sequence publish without a list of files. Publishing "
"the sequence path in place: %s" % (item.properties["path"])
)
return
# ---- copy the work files to the publish location
for work_file in work_files:
if not work_template.validate(work_file):
self.logger.warning(
"Work file '%s' did not match work template '%s'. "
"Publishing in place." % (work_file, work_template)
)
return
work_fields = work_template.get_fields(work_file)
missing_keys = publish_template.missing_keys(work_fields)
if missing_keys:
self.logger.warning(
"Work file '%s' missing keys required for the publish "
"template: %s" % (work_file, missing_keys)
)
return
publish_file = publish_template.apply_fields(work_fields)
# copy the file
try:
publish_folder = os.path.dirname(publish_file)
ensure_folder_exists(publish_folder)
copy_file(work_file, publish_file)
except Exception, e:
raise Exception(
"Failed to copy work file from '%s' to '%s'.\n%s" %
(work_file, publish_file, traceback.format_exc())
)
self.logger.debug(
"Copied work file '%s' to publish file '%s'." %
(work_file, publish_file)
)
def _get_publish_type(self, settings, item):
"""
Get a publish type for the supplied settings and item.
:param settings: The publish settings defining the publish types
:param item: The item to determine the publish type for
:return: A publish type or None if one could not be found.
"""
publisher = self.parent
path = item.properties["path"]
# get the publish path components
path_info = publisher.util.get_file_path_components(path)
# determine the publish type
extension = path_info["extension"]
# ensure lowercase and no dot
if extension:
extension = extension.lstrip(".").lower()
for type_def in settings["File Types"].value:
publish_type = type_def[0]
file_extensions = type_def[1:]
if extension in file_extensions:
# found a matching type in settings. use it!
return publish_type
# --- no pre-defined publish type found...
if extension:
# publish type is based on extension
publish_type = "%s File" % extension.capitalize()
else:
# no extension, assume it is a folder
publish_type = "Folder"
return publish_type
def _get_publish_path(self, settings, item):
"""
Get a publish path for the supplied settings and item.
:param settings: The publish settings defining the publish types
:param item: The item to determine the publish type for
:return: A string representing the output path to supply when
registering a publish for the supplied item
Extracts the publish path via the configured work and publish templates
if possible.
"""
path = item.properties["path"]
work_template = item.properties.get("work_template")
publish_template = item.properties.get("publish_template")
work_fields = []
publish_path = None
# We need both work and publish template to be defined for template support to be enabled.
if work_template and publish_template:
if work_template.validate(path):
work_fields = work_template.get_fields(path)
missing_keys = publish_template.missing_keys(work_fields)
if missing_keys:
self.logger.warning(
"Not enough keys to apply work fields (%s) to "
"publish template (%s)" % (work_fields, publish_template))
else:
publish_path = publish_template.apply_fields(work_fields)
self.logger.debug(
"Used publish template to determine the publish path: %s" %
(publish_path,)
)
else:
self.logger.debug("publish_template: %s" % publish_template)
self.logger.debug("work_template: %s" % work_template)
if not publish_path:
publish_path = path
self.logger.debug(
"Could not validate a publish template. Publishing in place.")
return publish_path
def _get_publish_version(self, settings, item):
"""
Get the publish version for the supplied settings and item.
:param settings: The publish settings defining the publish types
:param item: The item to determine the publish version for
Extracts the publish version via the configured work template if
possible. Will fall back to using the path info hook.
"""
publisher = self.parent
path = item.properties["path"]
work_template = item.properties.get("work_template")
work_fields = None
publish_version = None
if work_template:
if work_template.validate(path):
self.logger.debug(
"Work file template configured and matches file.")
work_fields = work_template.get_fields(path)
if work_fields:
# if version number is one of the fields, use it to populate the
# publish information
if "version" in work_fields:
publish_version = work_fields.get("version")
self.logger.debug(
"Retrieved version number via work file template.")
else:
self.logger.debug("Using path info hook to determine publish version.")
publish_version = publisher.util.get_version_number(path)
if publish_version is None:
publish_version = 1
return publish_version
def _get_publish_name(self, settings, item):
"""
Get the publish name for the supplied settings and item.
:param settings: The publish settings defining the publish types
:param item: The item to determine the publish version for
Uses the path info hook to retrieve the publish name.
"""
publisher = self.parent
path = item.properties["path"]
if "sequence_paths" in item.properties:
# generate the name from one of the actual files in the sequence
name_path = item.properties["sequence_paths"][0]
is_sequence = True
else:
name_path = path
is_sequence = False
return publisher.util.get_publish_name(
name_path,
sequence=is_sequence
)
    def _get_next_version_info(self, path, item):
        """
        Return the next version of the supplied path.

        If templates are configured, use template logic. Otherwise, fall back to
        the zero configuration, path_info hook logic.

        :param str path: A path with a version number.
        :param item: The current item being published
        :return: A tuple of the form::

            # the first item is the supplied path with the version bumped by 1
            # the second item is the new version number
            (next_version_path, version)
        """
        if not path:
            # nothing to work with -- signal "unknown" to the caller
            self.logger.debug("Path is None. Can not determine version info.")
            return None, None
        publisher = self.parent
        # if the item has a known work file template, see if the path
        # matches. if not, warn the user and provide a way to save the file to
        # a different path
        work_template = item.properties.get("work_template")
        work_fields = None
        if work_template:
            if work_template.validate(path):
                work_fields = work_template.get_fields(path)
        # if we have template and fields, use them to determine the version info
        if work_fields and "version" in work_fields:
            # template matched. bump version number and re-apply to the template
            work_fields["version"] += 1
            next_version_path = work_template.apply_fields(work_fields)
            version = work_fields["version"]
        # fall back to the "zero config" logic
        else:
            next_version_path = publisher.util.get_next_version_path(path)
            cur_version = publisher.util.get_version_number(path)
            if cur_version is not None:
                version = cur_version + 1
            else:
                # no version could be detected in the path
                version = None
        return next_version_path, version
    def _save_to_next_version(self, path, item, save_callback):
        """
        Save the supplied path to the next version on disk.

        :param path: The current path with a version number
        :param item: The current item being published
        :param save_callback: A callback to use to save the file
        :return: The path saved to, or None when the version bump was skipped.

        Relies on the _get_next_version_info() method to retrieve the next
        available version on disk. If a version can not be detected in the path,
        the method does nothing.

        If the next version path already exists, logs a warning and does
        nothing.

        This method is typically used by subclasses that bump the current
        working/session file after publishing.
        """
        (next_version_path, version) = self._get_next_version_info(path, item)
        if version is None:
            # no version in the path -- nothing to bump
            self.logger.debug(
                "No version number detected in the publish path. "
                "Skipping the bump file version step."
            )
            return None
        self.logger.info("Incrementing file version number...")
        # nothing to do if the next version path can't be determined or if it
        # already exists.
        if not next_version_path:
            self.logger.warning("Could not determine the next version path.")
            return None
        elif os.path.exists(next_version_path):
            # never overwrite an existing version on disk
            self.logger.warning(
                "The next version of the path already exists",
                extra={
                    "action_show_folder": {
                        "path": next_version_path
                    }
                }
            )
            return None
        # save the file to the new path
        save_callback(next_version_path)
        self.logger.info("File saved as: %s" % (next_version_path,))
        return next_version_path
|
dotJobs/ghost-client | tests/test_posts.py | <gh_stars>10-100
import math
try:
from .unittest_helper import GhostTestCase, GhostException
except:
from unittest_helper import GhostTestCase, GhostException
class PostTests(GhostTestCase):
    """Integration tests for the posts endpoint (CRUD, publish, pagination)."""

    def _setup_client(self):
        # all tests in this class run against an authenticated client
        return self.new_logged_in_client()

    def test_create_post(self):
        """A created post shows up in the all-statuses post listing."""
        post = self.create_post(title='Sample Post Title')
        self.assertIn({'id': post.id}, self.ghost.posts.list(fields='id', status='all'))

    def test_edit_post(self):
        """Updating slug and markdown persists and renders into the HTML."""
        post = self.create_post(title='Sample Post Update')
        updated = self.ghost.posts.update(post.id, slug='update-sample',
                                          markdown='Intro\n\n## Section\n\nBody content')
        self.assertEqual(updated.id, post.id)
        self.assertIn({'id': post.id}, self.ghost.posts.list(fields='id', status='all'))
        posts = self.ghost.posts.list(filter='id:%s' % post.id,
                                      status='all')
        self.assertEqual(len(posts), 1)
        stored = posts[0]
        # every markdown fragment must appear in the rendered HTML
        for text in ('Intro', 'Section', 'Body content'):
            self.assertIn(text, stored.html)

    def test_delete_post(self):
        """A deleted post disappears from the all-statuses listing."""
        post = self.ghost.posts.create(title='Sample Post to Delete')
        self.assertIn({'id': post.id}, self.ghost.posts.list(fields='id', status='all'))
        self.ghost.posts.delete(post.id)
        self.assertNotIn({'id': post.id}, self.ghost.posts.list(fields='id', status='all'))

    def test_publish(self):
        """A draft is absent from the default listing until published."""
        post = self.create_post(title='Sample to publish')
        posts = self.ghost.posts.list(fields='id')
        # default listing only contains published posts
        self.assertNotIn({'id': post.id}, posts)
        post.slug = 'publish-test'
        post.markdown = 'Content'
        self.ghost.posts.update(**post)
        self.ghost.posts.update(post.id, status='published')
        posts = self.ghost.posts.list(
            filter='id:%s' % post.id,
            fields=('id', 'title', 'slug', 'html')
        )
        self.assertEqual(len(posts), 1)
        stored = posts[0]
        self.assertEqual(stored.id, post.id)
        self.assertEqual(stored.title, post.title)
        self.assertEqual(stored.slug, post.slug)
        self.assertIn('<p>Content</p>', stored.html)

    def test_get_post(self):
        """Posts are retrievable by positional id, keyword id and slug."""
        post = self.create_post(title='For testing', status='published')
        by_id = self.ghost.posts.get(post.id)
        self.assertEqual(post.id, by_id.id)
        by_id = self.ghost.posts.get(id=post.id)
        self.assertEqual(post.id, by_id.id)
        by_slug = self.ghost.posts.get(slug=post.slug)
        self.assertEqual(post.id, by_slug.id)

    def test_markdown(self):
        """The markdown property round-trips through the mobiledoc format."""
        self.create_post(title='Sample for Markdown', markdown='Line 1\n\n## Section 2\n\nBody 3')
        posts = self.ghost.posts.list(formats='mobiledoc', status='all')
        self.assertTrue(any(
            post.title == 'Sample for Markdown' and
            post.markdown == 'Line 1\n\n## Section 2\n\nBody 3'
            for post in posts
        ))

    def test_list_without_login(self):
        """Published posts are listable anonymously via the public API."""
        self.create_post(title='Test Post A', status='published')
        self.create_post(title='Test Post B', status='published')
        self.enable_public_api()
        posts = self.new_client().posts.list(fields='title')
        self.assertIn({'title': 'Test Post A'}, posts)
        self.assertIn({'title': 'Test Post B'}, posts)

    def test_pagination(self):
        """Paging metadata and next/prev navigation cover all draft posts."""
        preexisting = len(self.ghost.posts.list(status='draft'))
        for idx in range(10):
            self.create_post(title='Testing post #%d' % idx)
        posts = self.ghost.posts.list(status='draft', limit=3)
        self.assertEqual(posts.total, 10 + preexisting)
        self.assertEqual(posts.pages, math.ceil((10 + preexisting) / 3.0))
        self.assertEqual(posts.limit, 3)
        # the first page has no previous page
        self.assertIsNone(posts.prev_page())
        last = None
        for _ in range(posts.pages):
            last, posts = posts, posts.next_page()
            if not posts:
                break
            # every page after the first has a previous page
            self.assertIsNotNone(posts.prev_page())
        # the final page has no next page
        self.assertIsNone(last.next_page())

    def test_filter_by_authors(self):
        """Posts created with multiple authors are found per-author."""
        if self.ghost.version < '1.22.0':
            self.skipTest(
                'Multiple authors are not supported on version %s (< 1.22.0)' %
                self.ghost.version
            )
        users = self.ghost.users.list()
        self.assertGreater(len(users), 1)
        created = self.create_post(title='Multiple authors', authors=users)
        for user in users:
            posts = self.ghost.posts.list(
                filter='authors:[%s]' % user.slug, status='draft'
            )
            self.assertGreater(len(posts), 0)
            self.assertIn(created.id, list(p.id for p in posts))

    def test_invalid_post(self):
        """Creating a post with a server-managed field is rejected."""
        self.assertRaises(GhostException, self.ghost.posts.create, uuid='xyz')

    def test_invalid_get(self):
        """get() without an id or slug raises a GhostException."""
        self.assertRaises(GhostException, self.ghost.posts.get, title='Without ID or Slug')
|
dotJobs/ghost-client | ghost_client/models.py | import json
from .errors import GhostException
class Model(dict):
    """
    Dictionary subclass that mirrors attribute access onto keys.

    Reading a missing attribute yields ``None`` (via ``dict.get``), and
    setting an attribute stores the value as a regular dictionary item.
    """

    def __getattr__(self, name):
        # attribute reads fall back to the underlying mapping
        return self.get(name)

    def __setattr__(self, name, value):
        # attribute writes become ordinary dictionary entries
        self[name] = value
class Post(Model):
    """
    Model for posts.

    Allows getting Markdown content through the `markdown` property
    (both on v0.+ and v1.+ servers), and wraps `tags` entries and the
    `author` field in `Model` instances when present.
    """

    def __getattr__(self, item):
        if item == 'markdown':
            return self._get_markdown()
        elif item == 'tags' and 'tags' in self:
            return list(map(Model, self['tags']))
        elif item == 'author' and 'author' in self:
            # FIX: guard on key presence (as the 'tags' branch above does)
            # so accessing .author on a post without author data returns
            # None via the Model fallback instead of raising KeyError.
            return Model(self['author'])
        else:
            return super(Post, self).__getattr__(item)

    def _get_markdown(self):
        # v0.x servers return the markdown field directly
        if 'markdown' in self:
            return self['markdown']
        # v1.x servers embed it in the first card of the mobiledoc document
        if self.mobiledoc:
            doc = json.loads(self.mobiledoc)
            return doc['cards'][0][1]['markdown']
class ModelList(list):
    """
    Wrapper around lists returned by the API.
    Exposes methods related to pagination and
    wraps each item in their respective model type.
    """

    def __init__(self, data, type_name, controller, list_kwargs, model_type=Model):
        """
        Enhances a regular list.

        :param data: The original iterable (the decoded API response,
            containing the items under `type_name` and pagination metadata
            under `meta.pagination`)
        :param type_name: The name of the type as the API knows it
        :param controller: The controller that returned the list
        :param list_kwargs: Parameters to use when fetching pages from the API
        :param model_type: The model type of the items
        """
        super(ModelList, self).__init__(map(model_type, data[type_name]))
        # pagination metadata (total, pages, limit, next, prev)
        self.meta = data['meta']['pagination']
        self._controller = controller
        self._list_kwargs = list_kwargs

    @property
    def total(self):
        """
        :return: The total number of results available for the query
        """
        return self.meta['total']

    @property
    def pages(self):
        """
        :return: The number of pages available for the query
        """
        return self.meta['pages']

    @property
    def limit(self):
        """
        :return: The limit used for queries
        """
        return self.meta['limit']

    def next_page(self):
        """
        :return: The next page fetched from the API for the query,
            or None when this is the last page
        """
        return self.get_page(self.meta['next'])

    def prev_page(self):
        """
        :return: The previous page fetched from the API for the query,
            or None when this is the first page
        """
        return self.get_page(self.meta['prev'])

    def get_page(self, page_number):
        """
        :param page_number: The page number to fetch (1-indexed)
        :return: The requested page fetched from the API for the query,
            or None when `page_number` is falsy (no such page)
        """
        if page_number:
            # re-issue the original query with the page/limit overridden
            kwargs = dict(self._list_kwargs)
            kwargs['limit'] = self.limit
            kwargs['page'] = page_number
            return self._controller.list(**kwargs)
class Controller(object):
    """
    The API controller dealing with requests for a single resource type.
    """

    def __init__(self, ghost, type_name, model_type=Model):
        """
        Create a new controller.

        :param ghost: An instance of the API client
        :param type_name: The resource type name as the API knows it
        :param model_type: The model class used to wrap response items
        """
        self.ghost = ghost
        self._type_name = type_name
        self._model_type = model_type

    def list(self, **kwargs):
        """
        Fetch a list of resources from the API.

        :param kwargs: Parameters for the request
            (see from and below https://api.ghost.org/docs/limit)
        :return: The items returned by the API, wrapped as model
            objects in a paginating `ModelList`
        """
        response = self.ghost.execute_get('%s/' % self._type_name, **kwargs)
        return ModelList(
            response, self._type_name, self, kwargs,
            model_type=self._model_type
        )

    def get(self, id=None, slug=None, **kwargs):
        """
        Fetch a single resource from the API.
        Either the `id` or the `slug` has to be present.

        :param id: The ID of the resource
        :param slug: The slug of the resource
        :param kwargs: Parameters for the request
            (see from and below https://api.ghost.org/docs/limit)
        :return: The item returned by the API, wrapped as a model object
        """
        if id:
            resource = '%s/%s/' % (self._type_name, id)
        elif slug:
            resource = '%s/slug/%s/' % (self._type_name, slug)
        else:
            raise GhostException(
                500, 'Either the ID or the Slug of the resource needs to be specified'
            )
        items = self.ghost.execute_get(resource, **kwargs)
        # the API returns a list under the type name; the item is its head
        return self._model_type(items[self._type_name][0])

    def create(self, **kwargs):
        """
        Create a new resource.

        :param kwargs: The properties of the resource
        :return: The created item returned by the API, wrapped as a model object
        """
        payload = {self._type_name: [kwargs]}
        response = self.ghost.execute_post('%s/' % self._type_name, json=payload)
        return self._model_type(response.get(self._type_name)[0])

    def update(self, id, **kwargs):
        """
        Update an existing resource.

        :param id: The ID of the resource
        :param kwargs: The properties of the resource to change
        :return: The updated item returned by the API, wrapped as a model object
        """
        payload = {self._type_name: [kwargs]}
        response = self.ghost.execute_put(
            '%s/%s/' % (self._type_name, id), json=payload)
        return self._model_type(response.get(self._type_name)[0])

    def delete(self, id):
        """
        Delete an existing resource.
        Does not return anything but raises an exception when failed.

        :param id: The ID of the resource
        """
        self.ghost.execute_delete('%s/%s/' % (self._type_name, id))
class PostController(Controller):
    """
    Controller extension for managing posts.
    """

    def __init__(self, ghost):
        """
        Initialize a new controller for posts.

        :param ghost: An instance of the API client
        """
        super(PostController, self).__init__(ghost, 'posts', model_type=Post)

    def create(self, **kwargs):
        """
        Creates a new post.
        When the `markdown` property is present, it will be
        automatically converted to `mobiledoc` on v1.+ of the server.

        :param kwargs: The properties of the post
        :return: The created `Post` object
        """
        return super(PostController, self).create(**self._with_markdown(kwargs))

    def update(self, id, **kwargs):
        """
        Updates an existing post.
        When the `markdown` property is present, it will be
        automatically converted to `mobiledoc` on v1.+ of the server.

        :param id: The ID of the existing post
        :param kwargs: The properties of the post to change
        :return: The updated `Post` object
        """
        return super(PostController, self).update(id, **self._with_markdown(kwargs))

    def _with_markdown(self, kwargs):
        # Translate a "markdown" kwarg into what the target server expects:
        # v0.x takes markdown directly, v1.+ needs a mobiledoc document.
        markdown = kwargs.pop('markdown', None)
        if markdown:
            if self.ghost.version.startswith('0'):
                # put it back as is for version 0.x
                kwargs['markdown'] = markdown
            else:
                # build a minimal mobiledoc wrapping the markdown in a
                # single card-markdown card (leaves the input dict untouched)
                updated = dict(kwargs)
                updated['mobiledoc'] = json.dumps({
                    "version": "0.3.1", "markups": [], "atoms": [],
                    "cards": [["card-markdown", {"cardName": "card-markdown", "markdown": markdown}]],
                    "sections": [[10, 0]]})
                return updated
        return kwargs
|
class GhostException(Exception):
    """
    Exception type raised by the client when an API request fails.
    """

    def __init__(self, code, errors):
        """
        Create the exception.

        :param code: The HTTP status code returned by the API
        :param errors: The `errors` field returned in the response JSON
        """
        super(GhostException, self).__init__(code, errors)
        # keep the details accessible for callers inspecting the failure
        self.code = code
        self.errors = errors
|
dotJobs/ghost-client | ghost_client/__init__.py | from .api import Ghost, GhostException
|
dotJobs/ghost-client | tests/test_uploads.py | <gh_stars>10-100
import os
try:
from .unittest_helper import GhostTestCase, GhostException
except:
from unittest_helper import GhostTestCase, GhostException
class UploadTests(GhostTestCase):
    """Integration tests for image uploads (file object, path and raw data)."""

    def __init__(self, *args, **kwargs):
        super(UploadTests, self).__init__(*args, **kwargs)
        if not hasattr(self, 'assertRegex'):
            # Python 2 unittest compatibility: alias the old method name
            self.assertRegex = self.assertRegexpMatches

    def _setup_client(self):
        # uploads require an authenticated client
        return self.new_logged_in_client()

    def test_upload_from_file_obj(self):
        """Uploading an open file object keeps the original file name."""
        file_path = os.path.join(os.path.dirname(__file__), 'images/ghost-icon-1.png')
        with open(file_path, 'rb') as image_file:
            response = self.ghost.upload(file_obj=image_file)
        self.assertRegex(response, '/content/images/[0-9]{4}/[0-9]{2}/ghost-icon-1.png')

    def test_upload_from_file_path(self):
        """Uploading by path keeps the original file name."""
        file_path = os.path.join(os.path.dirname(__file__), 'images/ghost-icon-2.png')
        response = self.ghost.upload(file_path=file_path)
        self.assertRegex(response, '/content/images/[0-9]{4}/[0-9]{2}/ghost-icon-2.png')

    def test_upload_from_data(self):
        """Uploading raw bytes uses the explicitly supplied name."""
        file_path = os.path.join(os.path.dirname(__file__), 'images/ghost-icon-1.png')
        with open(file_path, 'rb') as image_file:
            data = image_file.read()
        response = self.ghost.upload(name='custom-name.png', data=data)
        self.assertRegex(response, '/content/images/[0-9]{4}/[0-9]{2}/custom-name.png')

    def test_invalid_arguments(self):
        """upload() without any source argument raises a GhostException."""
        self.assertRaises(GhostException, self.ghost.upload)
|
dotJobs/ghost-client | tests/test_tags.py | <gh_stars>10-100
import math
try:
from .unittest_helper import GhostTestCase, GhostException
except:
from unittest_helper import GhostTestCase, GhostException
class TagTests(GhostTestCase):
    """Integration tests for the tags endpoint (CRUD, listing, pagination)."""

    def _setup_client(self):
        # all tests in this class run against an authenticated client
        return self.new_logged_in_client()

    def test_create_tag(self):
        """A created tag shows up in the tag listing."""
        tag = self.create_tag(name='Example tag')
        self.assertIn({'id': tag.id}, self.ghost.tags.list(fields='id'))

    def test_edit_tag(self):
        """Updating name, slug and visibility persists."""
        tag = self.create_tag(name='Tag to edit')
        updated = self.ghost.tags.update(tag.id, name='Tag after update',
                                         slug='updated-tag', visibility='internal')
        self.assertEqual(updated.id, tag.id)
        self.assertIn({'id': tag.id}, self.ghost.tags.list(fields='id'))
        tags = self.ghost.tags.list(filter='id:%s' % tag.id)
        self.assertEqual(len(tags), 1)
        stored = tags[0]
        self.assertEqual(stored.name, 'Tag after update')
        self.assertEqual(stored.slug, 'updated-tag')
        self.assertEqual(stored.visibility, 'internal')

    def test_delete_tag(self):
        """A deleted tag disappears from the listing."""
        tag = self.ghost.tags.create(name='Tag to delete')
        self.assertIn({'id': tag.id}, self.ghost.tags.list(fields='id'))
        self.ghost.tags.delete(tag.id)
        self.assertNotIn({'id': tag.id}, self.ghost.tags.list(fields='id'))

    def test_create_post_with_tag(self):
        """A post created with a tag returns that tag when included."""
        tag = self.create_tag(name='Tag for post')
        post = self.create_post(title='Tagged post', tags=[tag])
        stored = self.ghost.posts.get(post.id, include='tags', status='all')
        self.assertEqual(stored.id, post.id)
        self.assertEqual(stored.title, 'Tagged post')
        self.assertEqual(len(stored.tags), 1)
        self.assertEqual(stored.tags[0].name, 'Tag for post')

    def test_list_without_login(self):
        """Tags are listable anonymously via the public API."""
        self.create_tag(name='Test Tag A')
        self.create_tag(name='Test Tag B')
        self.enable_public_api()
        tags = self.new_client().tags.list(fields='name')
        self.assertIn({'name': 'Test Tag A'}, tags)
        self.assertIn({'name': 'Test Tag B'}, tags)

    def test_get_tag(self):
        """Tags are retrievable by positional id, keyword id and slug."""
        tag = self.create_tag(name='For testing', slug='test-tag')
        by_id = self.ghost.tags.get(tag.id)
        self.assertEqual(by_id.id, tag.id)
        by_id = self.ghost.tags.get(id=tag.id)
        self.assertEqual(by_id.id, tag.id)
        by_slug = self.ghost.tags.get(slug=tag.slug)
        self.assertEqual(by_slug.id, tag.id)

    def test_pagination(self):
        """Paging metadata and next/prev navigation cover all tags."""
        preexisting = len(self.ghost.tags.list())
        for idx in range(10):
            self.create_tag(name='Testing tag #%d' % idx)
        tags = self.ghost.tags.list(limit=3)
        self.assertEqual(tags.total, 10 + preexisting)
        self.assertEqual(tags.pages, math.ceil((10 + preexisting) / 3.0))
        self.assertEqual(tags.limit, 3)
        # the first page has no previous page
        self.assertIsNone(tags.prev_page())
        last = None
        for _ in range(tags.pages):
            last, tags = tags, tags.next_page()
            if not tags:
                break
            # every page after the first has a previous page
            self.assertIsNotNone(tags.prev_page())
        # the final page has no next page
        self.assertIsNone(last.next_page())

    def test_invalid_tag(self):
        """Creating a tag with server-managed fields is rejected."""
        self.assertRaises(GhostException, self.ghost.tags.create, uuid='xyz', created_at='xyz', name='Invalid Tag')
|
dotJobs/ghost-client | tests/test_users.py | <reponame>dotJobs/ghost-client
try:
from .unittest_helper import GhostTestCase, GhostException
except:
from unittest_helper import GhostTestCase, GhostException
class UserTests(GhostTestCase):
    """Integration tests for the users endpoint (lookup, listing, pagination)."""

    def _setup_client(self):
        # all tests in this class run against an authenticated client
        return self.new_logged_in_client()

    def test_create_post_with_user(self):
        """A post created with an author_id returns that author when included."""
        user = next(iter(self.ghost.users.list()))
        post = self.create_post(title='Post with author', author_id=user.id)
        stored = self.ghost.posts.get(post.id, include='author', status='all')
        self.assertEqual(stored.id, post.id)
        self.assertEqual(stored.title, 'Post with author')
        self.assertIsNotNone(stored.author)
        self.assertEqual(stored.author.name, user.name)

    def test_list_without_login(self):
        """Users are listable anonymously via the public API."""
        all_users = self.ghost.users.list()
        self.enable_public_api()
        users = self.new_client().users.list(fields='name')
        for user in all_users:
            self.assertIn({'name': user.name}, users)

    def test_get_user(self):
        """Users are retrievable by positional id, keyword id and slug."""
        user = next(iter(self.ghost.users.list()))
        by_id = self.ghost.users.get(user.id)
        self.assertEqual(by_id.id, user.id)
        by_id = self.ghost.users.get(id=user.id)
        self.assertEqual(by_id.id, user.id)
        by_slug = self.ghost.users.get(slug=user.slug)
        self.assertEqual(by_slug.id, user.id)

    def test_pagination(self):
        """With limit=1 there is exactly one page per user."""
        all_users = self.ghost.users.list()
        users = self.ghost.users.list(limit=1)
        self.assertEqual(users.total, len(all_users))
        self.assertEqual(users.pages, len(all_users))
        self.assertEqual(users.limit, 1)
        # the first page has no previous page
        self.assertIsNone(users.prev_page())
        last = None
        for _ in range(users.pages):
            last, users = users, users.next_page()
            if not users:
                break
            # every page after the first has a previous page
            self.assertIsNotNone(users.prev_page())
        # the final page has no next page
        self.assertIsNone(last.next_page())

    def test_invalid_user(self):
        """Creating a user with server-managed fields is rejected."""
        self.assertRaises(GhostException, self.ghost.users.create, created_at='xyz', name='Invalid User')
|
dotJobs/ghost-client | tests/unittest_helper.py | import os
import unittest
from ghost_client import Ghost, GhostException
class GhostTestCase(unittest.TestCase):
    """
    Shared base class for the integration tests.

    Provides client construction, login helpers and automatic cleanup of
    any posts/tags/users created through the create_* helpers.
    Connection details are configurable through environment variables.
    """

    GHOST_BASE_URL = os.environ.get('GHOST_BASE_URL', 'http://localhost:12368')
    GHOST_VERSION = os.environ.get('GHOST_VERSION', 'auto')
    GHOST_USERNAME = os.environ.get('GHOST_USERNAME', '<EMAIL>')
    GHOST_PASSWORD = os.environ.get('GHOST_PASSWORD', '<PASSWORD>')

    def setUp(self):
        # subclasses decide whether the client is anonymous or logged in
        self.ghost = self._setup_client()
        # resources registered here are removed again in tearDown
        self._posts_to_delete = list()
        self._tags_to_delete = list()
        self._users_to_delete = list()

    def tearDown(self):
        # delete everything created via the create_* helpers, then log out
        for post_id in self._posts_to_delete:
            self.ghost.posts.delete(post_id)
        for tag_id in self._tags_to_delete:
            self.ghost.tags.delete(tag_id)
        for user_id in self._users_to_delete:
            self.ghost.users.delete(user_id)
        self.ghost.logout()

    def _setup_client(self):
        # must be overridden by each test class
        raise NotImplementedError(
            'No "client" was set up (use "new_client" or "new_logged_in_client")'
        )

    def new_client(self, version=GHOST_VERSION):
        # client credentials are read from the test server's SQLite database
        return Ghost.from_sqlite(
            self._find_database(), self.GHOST_BASE_URL, version=version
        )

    def new_logged_in_client(self, version=GHOST_VERSION):
        client = self.new_client(version=version)
        self.login(client)
        return client

    def login(self, client=None, username=None, password=None):
        # all arguments default to the instance/environment configuration
        if client is None:
            client = self.ghost
        if username is None:
            username = self.GHOST_USERNAME
        if password is None:
            password = self.GHOST_PASSWORD
        return client.login(username, password)

    def enable_public_api(self, client=None):
        # flip the "publicAPI" labs flag so anonymous reads are allowed
        if client is None:
            client = self.ghost
        return client.execute_put('settings/', json={
            "settings": [{"key": "labs", "value": "{\"publicAPI\":true}"}]
        })

    @staticmethod
    def _find_database():
        # locate the first *.db file in the tests/ghost-db directory
        for filename in os.listdir(os.path.join(os.path.dirname(__file__), 'ghost-db')):
            if os.path.splitext(filename)[1] == '.db':
                return os.path.join(os.path.dirname(__file__), 'ghost-db/%s' % filename)

    def create_post(self, client=None, **kwargs):
        # create a post and register it for deletion in tearDown
        if client is None:
            client = self.ghost
        post = client.posts.create(**kwargs)
        self._posts_to_delete.append(post.id)
        return post

    def create_tag(self, client=None, **kwargs):
        # create a tag and register it for deletion in tearDown
        if client is None:
            client = self.ghost
        tag = client.tags.create(**kwargs)
        self._tags_to_delete.append(tag.id)
        return tag

    def create_user(self, client=None, **kwargs):
        # create a user and register it for deletion in tearDown
        if client is None:
            client = self.ghost
        user = client.users.create(**kwargs)
        self._users_to_delete.append(user.id)
        return user
|
dotJobs/ghost-client | ghost_client/helpers.py | <gh_stars>10-100
import functools
def refresh_session_if_necessary(f):
    """
    Decorator to use on methods that are allowed
    to retry the request after reauthenticating the client.

    If the wrapped call raises an exception carrying a ``code`` attribute
    of 401 or 403, ``self.refresh_session()`` is invoked and the call is
    retried exactly once. Any other exception propagates unchanged.

    :param f: The original function
    :return: The decorated function
    """
    @functools.wraps(f)
    def wrapped(self, *args, **kwargs):
        try:
            return f(self, *args, **kwargs)
        except Exception as ex:
            if getattr(ex, 'code', None) in (401, 403):
                self.refresh_session()
                # retry now that the session has been refreshed
                return f(self, *args, **kwargs)
            # FIX: bare "raise" re-raises with the original traceback intact
            # ("raise ex" resets the traceback on Python 2)
            raise
    return wrapped
|
dotJobs/ghost-client | tests/test_session.py | try:
from .unittest_helper import GhostTestCase, Ghost, GhostException
except:
from unittest_helper import GhostTestCase, Ghost, GhostException
class SessionTest(GhostTestCase):
def _setup_client(self):
return self.new_client()
def test_login(self):
self.assertNotEqual(len(self.login()), 0)
def test_invalid_login(self):
self.assertRaises(GhostException, self.login, username='fake', password='<PASSWORD>')
def test_no_client_id_or_secret(self):
self.assertRaises(GhostException, Ghost, self.GHOST_BASE_URL, client_id=None, client_secret=None)
self.assertRaises(GhostException, Ghost, self.GHOST_BASE_URL, client_id='x', client_secret=None)
self.assertRaises(GhostException, Ghost, self.GHOST_BASE_URL, client_id=None, client_secret='y')
def test_invalid_client_id(self):
self.assertRaises(
GhostException, Ghost.from_sqlite, self._find_database(), self.GHOST_BASE_URL, client_id='fake'
)
def test_change_without_login(self):
logged_in = self.new_logged_in_client()
post = None
try:
post = self.create_post(logged_in, title='Existing')
self.assertRaises(GhostException, self.ghost.posts.create, title='Failing')
self.assertRaises(GhostException, self.ghost.posts.update, id=post.id, title='Failing')
self.assertRaises(GhostException, self.ghost.posts.delete, id=post.id)
finally:
logged_in.posts.delete(post.id)
self._posts_to_delete.remove(post.id)
logged_in.logout()
def test_logout(self):
client = self.new_logged_in_client()
self.assertGreater(len(client.posts.list(status='all')), 0)
client.logout()
self.assertRaises(GhostException, client.posts.list, status='all')
def test_refresh_token(self):
client = self.new_logged_in_client()
self.assertGreater(len(client.posts.list(status='all')), 0)
client.revoke_access_token()
self.assertGreater(len(client.posts.list(status='all')), 0)
def test_reauthenticate(self):
client = self.new_logged_in_client()
self.assertGreater(len(client.posts.list(status='all')), 0)
client.revoke_refresh_token()
client.revoke_access_token()
self.assertGreater(len(client.posts.list(status='all')), 0)
def test_version(self):
client = self.new_client(version='auto')
try:
self.assertEqual('1', client.version)
self.login(client)
self.assertNotEqual('1', client.version)
finally:
client.logout()
def test_version_caching(self):
    """The version endpoint is hit once per access pre-login, once total after."""
    client = self.new_client(version='auto')
    try:
        call_counts = {}
        original_get = client.execute_get

        def tracked_get(resource, *args, **kwargs):
            # Count every GET per resource, then delegate to the real client.
            call_counts[resource] = call_counts.get(resource, 0) + 1
            return original_get(resource, *args, **kwargs)

        client.execute_get = tracked_get
        for _ in range(3):
            self.assertEqual('1', client.version)
        self.assertEqual(call_counts['configuration/about/'], 3)
        call_counts.clear()
        self.login(client)
        for _ in range(3):
            self.assertNotEqual('1', client.version)
        self.assertEqual(call_counts['configuration/about/'], 1)
    finally:
        client.logout()
|
teamo1996/Point-cloud-process-shenlan | 09-Registration/code/Registration.py | <reponame>teamo1996/Point-cloud-process-shenlan
import os
import argparse
import progressbar
import numpy as np
import open3d as o3d
import pandas as pd
import copy
import Utils
import Detector
import Decriptor
import RANSAC
def get_args():
    """Parse command-line options for the registration pipeline."""
    parser = argparse.ArgumentParser("Registration")
    parser.add_argument("--pointcloud_dir", type=str,
                        default="/workspace/Data/registration_dataset")
    parser.add_argument("--radius", type=float, default=0.5)
    return parser.parse_args()
if __name__ == "__main__":
    args = get_args()
    datasets_dir = args.pointcloud_dir
    radius = args.radius
    # Progress bar over all registration pairs.
    progress = progressbar.ProgressBar()
    # Read the ground-truth pairing file as a pandas DataFrame.
    registration_results = Utils.Homework.read_registration_results(os.path.join(datasets_dir,"reg_result.txt"))
    # Initialise the column-oriented output structure.
    df_output = Utils.Homework.init_output()
    # Each row of reg_result names one pair of clouds to register.
    for index,row in progress(list(registration_results.iterrows())):
        idx_source = int(row["idx2"])
        idx_target = int(row["idx1"])
        # Load both clouds (returned in Open3D format).
        pcd_source = Utils.pointcloud.read_point_cloud_bin(os.path.join(datasets_dir,"point_clouds",f"{idx_source}.bin"))
        pcd_target = Utils.pointcloud.read_point_cloud_bin(os.path.join(datasets_dir,"point_clouds",f"{idx_target}.bin"))
        # Remove outliers that have too few neighbours inside *radius*.
        pcd_source, ind = pcd_source.remove_radius_outlier(nb_points=4, radius=radius)
        pcd_target, ind = pcd_target.remove_radius_outlier(nb_points=4, radius=radius)
        # ISS keypoint detection on both clouds.
        source_detector = Detector.ISS_detector()
        source_detector.set_pointcloud(pcd_source)
        source_detector.detect()
        keypoints_source = source_detector.get_feature_points()
        target_detector = Detector.ISS_detector()
        target_detector.set_pointcloud(pcd_target)
        target_detector.detect()
        keypoints_target = target_detector.get_feature_points()
        # FPFH descriptors at the detected keypoints.
        source_descriptor = Decriptor.FPFH_decriptor()
        source_descriptor.set_pointclouds(pcd_source)
        source_descriptor.set_keypoints(keypoints_source)
        source_descriptor.describe()
        source_fpfh = source_descriptor.get_descriptors()
        target_descriptor = Decriptor.FPFH_decriptor()
        target_descriptor.set_pointclouds(pcd_target)
        target_descriptor.set_keypoints(keypoints_target)
        target_descriptor.describe()
        target_fpfh = target_descriptor.get_descriptors()
        # Reduce each cloud to its keypoints only.
        pcd_source = pcd_source.select_by_index(list(source_detector.get_feature_index()))
        pcd_target = pcd_target.select_by_index(list(target_detector.get_feature_index()))
        # Pose estimator (RANSAC followed by ICP refinement).
        ransac_icp = RANSAC.RANSAC_ICP.Builder().set_max_iteration(10000).build()
        # Print the estimator's configuration.
        print(ransac_icp)
        # Feed the inputs.
        ransac_icp.set_source_pointscloud(pcd_source)
        ransac_icp.set_target_pointscloud(pcd_target)
        ransac_icp.set_source_features(source_fpfh.T)
        ransac_icp.set_target_features(target_fpfh.T)
        # Run the matching.
        result = ransac_icp.ransac_match()
        # Record this pair's estimated transform.
        Utils.Homework.add_to_output(df_output, idx_target, idx_source, result.transformation)
    # Write all accumulated results to disk.
    Utils.Homework.write_output(
        os.path.join(datasets_dir, 'reg_result_teamo1998_test.txt'),
        df_output
    )
teamo1996/Point-cloud-process-shenlan | 09-Registration/code/Utils.py | <filename>09-Registration/code/Utils.py
from ast import Num
from os import stat
import pandas as pd
import pandas
import open3d as o3d
import open3d
import numpy as np
import numpy
import copy
from scipy.spatial.transform import Rotation
class pointcloud:
    """Namespace-style collection of point-cloud loading helpers."""

    def __init__(self):
        pass

    @staticmethod
    def read_pointcloud(file_name: str) -> open3d.geometry.PointCloud:
        """Load a headerless CSV of x,y,z,nx,ny,nz rows into an Open3D cloud."""
        frame = pd.read_csv(file_name, header=None)
        frame.columns = ["x", "y", "z",
                         "nx", "ny", "nz"]
        cloud = o3d.geometry.PointCloud()
        cloud.points = o3d.utility.Vector3dVector(frame[["x", "y", "z"]].values)
        cloud.normals = o3d.utility.Vector3dVector(frame[["nx", "ny", "nz"]].values)
        return cloud

    @staticmethod
    def read_point_cloud_bin(bin_path: str) -> open3d.geometry.PointCloud:
        """Load a float32 binary file of N*(xyz + normal) values into Open3D."""
        raw = np.fromfile(bin_path, dtype=np.float32)
        # Six floats per point: x, y, z, nx, ny, nz.
        rows = np.reshape(raw, (raw.shape[0] // 6, 6))
        cloud = o3d.geometry.PointCloud()
        cloud.points = o3d.utility.Vector3dVector(rows[:, 0:3])
        cloud.normals = o3d.utility.Vector3dVector(rows[:, 3:6])
        return cloud
class Homework:
    """Helpers for reading/writing the registration homework's result files."""

    def __init__(self) -> None:
        pass

    @staticmethod
    def read_registration_results(results_path: str) -> pandas.DataFrame:
        """Read the ground-truth pairing file (CSV) into a pandas DataFrame."""
        df_results = pd.read_csv(
            results_path
        )
        return df_results

    @staticmethod
    def init_output() -> dict:
        """Create the empty column-oriented output structure.

        Uses the same column layout as the input file: pair indices,
        translation t_* and quaternion q_* components.
        """
        df_output = {
            'idx1': [],
            'idx2': [],
            't_x': [],
            't_y': [],
            't_z': [],
            'q_w': [],
            'q_x': [],
            'q_y': [],
            'q_z': []
        }
        return df_output

    @staticmethod
    def draw_registration_result(source: open3d.geometry.PointCloud,
                                 target: open3d.geometry.PointCloud,
                                 transformation: numpy.matrix) -> None:
        """Visualise *source* aligned onto *target* by *transformation*.

        The inputs are deep-copied so the caller's clouds are neither
        recoloured nor moved.
        """
        source_temp = copy.deepcopy(source)
        target_temp = copy.deepcopy(target)
        source_temp.paint_uniform_color([1, 0.706, 0])
        target_temp.paint_uniform_color([0, 0.651, 0.929])
        source_temp.transform(transformation)
        o3d.visualization.draw_geometries([source_temp, target_temp],
                                          zoom=0.4559,
                                          front=[0.6452, -0.3036, -0.7011],
                                          lookat=[1.9892, 2.0208, 1.8945],
                                          up=[-0.2779, -0.9482, 0.1556])

    @staticmethod
    def add_to_output(df_output: dict, idx1: str, idx2: str, T: numpy.matrix) -> None:
        """Append one registration record (translation + quaternion) to *df_output*.

        T is a 4x4 homogeneous transform. The rotation is converted with
        scipy, whose as_quat() returns [x, y, z, w] (scalar last).
        """
        def format_transform_matrix(T):
            # Split T into translation vector and quaternion.
            r = Rotation.from_matrix(T[:3, :3])
            q = r.as_quat()
            t = T[:3, 3]
            return (t, q)

        df_output['idx1'].append(idx1)
        df_output['idx2'].append(idx2)
        (t, q) = format_transform_matrix(T)
        # translation:
        df_output['t_x'].append(t[0])
        df_output['t_y'].append(t[1])
        df_output['t_z'].append(t[2])
        # rotation: scipy stores the scalar part last, so q[3] is w.
        df_output['q_w'].append(q[3])
        df_output['q_x'].append(q[0])
        df_output['q_y'].append(q[1])
        df_output['q_z'].append(q[2])

    @staticmethod
    def write_output(filename: str, df_output: dict) -> None:
        """Write the accumulated records to *filename* as CSV, fixed column order."""
        df_output = pd.DataFrame.from_dict(
            df_output
        )
        # BUG FIX: the message previously contained a literal placeholder
        # ("(unknown)") instead of interpolating the destination path.
        print(f'write output to {filename}')
        df_output[
            [
                'idx1', 'idx2',
                't_x', 't_y', 't_z',
                'q_w', 'q_x', 'q_y', 'q_z'
            ]
        ].to_csv(filename, index=False)
teamo1996/Point-cloud-process-shenlan | 02-Nearest Neighbor Problem/code/kdtree.py | <gh_stars>1-10
# kdtree的具体实现,包括构建和查找
import random
import math
import numpy as np
from result_set import KNNResultSet, RadiusNNResultSet
# A kd-tree node: interior nodes carry a split value, leaves carry points.
class Node:
    def __init__(self, axis, value, left, right, point_indices):
        self.axis = axis                    # split dimension
        self.value = value                  # split threshold (None for leaves)
        self.left = left                    # subtree with coords <= value
        self.right = right                  # subtree with coords > value
        self.point_indices = point_indices  # indices of points under this node

    def is_leaf(self):
        """A node is a leaf iff it never received a split value."""
        return self.value is None

    def __str__(self):
        parts = ['axis %d, ' % self.axis]
        if self.value is None:
            parts.append('split value: leaf, ')
        else:
            parts.append('split value: %.2f, ' % self.value)
        parts.append('point_indices: ')
        parts.append(str(self.point_indices.tolist()))
        return ''.join(parts)
def sort_key_by_vale(key, value):
    """Sort *value* ascending and reorder *key* with the same permutation.

    (Function name kept as-is for compatibility with existing callers.)

    Returns the pair (key_sorted, value_sorted).
    """
    assert key.shape == value.shape
    assert len(key.shape) == 1
    order = np.argsort(value)
    return key[order], value[order]
def axis_round_robin(axis, dim):
    """Return the next split axis, wrapping from dim-1 back to 0."""
    return 0 if axis == dim - 1 else axis + 1
def kdtree_recursive_build(root, db, point_indices, axis, leaf_size):
    """Recursively build a kd-tree over db[point_indices], splitting on *axis*.

    A node is split while it holds more than *leaf_size* points; the split
    value is the midpoint between the two points straddling the median, and
    each child re-chooses its split axis by the largest-variance dimension.
    Returns the (possibly newly created) subtree root.
    """
    if root is None:
        root = Node(axis, None, None, None, point_indices)
    if len(point_indices) > leaf_size:
        # Order the points along the current axis to find the median split.
        sorted_indices, _ = sort_key_by_vale(point_indices, db[point_indices, axis])
        # Last point on the left of the split plane ...
        left_pos = math.ceil(sorted_indices.shape[0] / 2) - 1
        left_value = db[sorted_indices[left_pos], axis]
        # ... and first point on the right of it.
        right_pos = left_pos + 1
        right_value = db[sorted_indices[right_pos], axis]
        # Node value = midpoint of the two points nearest the split.
        root.value = (left_value + right_value) * 0.5
        # Recurse; each child picks the axis of largest variance.
        left_part = sorted_indices[0:right_pos]
        right_part = sorted_indices[right_pos:]
        root.left = kdtree_recursive_build(root.left, db, left_part,
                                           axis_chosed_by_cov(db[left_part]), leaf_size)
        root.right = kdtree_recursive_build(root.right, db, right_part,
                                            axis_chosed_by_cov(db[right_part]), leaf_size)
    return root
def axis_chosed_by_cov(data):
    """Pick the split axis as the dimension of *data* with the largest variance.

    data: (N, D) array of points. Returns the index of the dimension whose
    sample variance is largest; ties (and all-zero variance) fall back to 0.
    """
    axis = 0
    max_cov = 0
    for i in range(data.shape[1]):
        # PERF: compute the per-dimension variance once (the original
        # evaluated np.cov twice per winning axis).
        cov = np.cov(data[:, i])
        if cov > max_cov:
            max_cov = cov
            axis = i
    return axis
def traverse_kdtree(root: Node, depth, max_depth):
    """Depth-first walk of the tree, printing every leaf.

    depth and max_depth are single-element lists used as mutable counters;
    max_depth[0] ends up holding the deepest level reached.
    """
    depth[0] += 1
    max_depth[0] = max(max_depth[0], depth[0])
    if root.is_leaf():
        print(root)
    else:
        traverse_kdtree(root.left, depth, max_depth)
        traverse_kdtree(root.right, depth, max_depth)
    depth[0] -= 1
def kdtree_construction(db_np, leaf_size):
    """Public entry point: build a kd-tree over the (N, dim) array db_np."""
    num_points = db_np.shape[0]
    # Start recursion from an empty root, splitting on axis 0 first.
    return kdtree_recursive_build(None,
                                  db_np,
                                  np.arange(num_points),
                                  axis=0,
                                  leaf_size=leaf_size)
def kdtree_knn_search(root: Node, db: np.ndarray, result_set: KNNResultSet, query: np.ndarray):
    """k-NN search in the kd-tree, accumulating candidates into result_set.

    Always returns False; the answer lives in result_set.
    """
    if root is None:
        return False
    if root.is_leaf():
        # Brute-force compare against every point stored in this leaf.
        leaf_points = db[root.point_indices, :]
        dists = np.linalg.norm(np.expand_dims(query, 0) - leaf_points, axis=1)
        for dist, point_idx in zip(dists, root.point_indices):
            result_set.add_point(dist, point_idx)
        return False
    # Descend first into the side of the split containing the query; only
    # cross the split plane if the current worst distance still reaches it.
    if query[root.axis] <= root.value:
        near, far = root.left, root.right
    else:
        near, far = root.right, root.left
    kdtree_knn_search(near, db, result_set, query)
    if math.fabs(query[root.axis] - root.value) < result_set.worstDist():
        kdtree_knn_search(far, db, result_set, query)
    return False
def kdtree_radius_search(root: Node, db: np.ndarray, result_set: RadiusNNResultSet, query: np.ndarray):
    """Fixed-radius neighbour search in the kd-tree.

    Hits are accumulated into result_set; always returns False.
    """
    if root is None:
        return False
    if root.is_leaf():
        # Brute-force compare against every point stored in this leaf.
        leaf_points = db[root.point_indices, :]
        dists = np.linalg.norm(np.expand_dims(query, 0) - leaf_points, axis=1)
        for dist, point_idx in zip(dists, root.point_indices):
            result_set.add_point(dist, point_idx)
        return False
    # Search the near side first; cross the split plane only when the
    # worst acceptable distance still reaches it.
    if query[root.axis] <= root.value:
        near, far = root.left, root.right
    else:
        near, far = root.right, root.left
    kdtree_radius_search(near, db, result_set, query)
    if math.fabs(query[root.axis] - root.value) < result_set.worstDist():
        kdtree_radius_search(far, db, result_set, query)
    return False
def main():
    """Smoke-test the kd-tree: build, traverse, then compare kNN and radius
    search results against brute force on random data."""
    # Test configuration.
    db_size = 64    # number of points in the data set
    dim = 3         # dimensionality of each point
    leaf_size = 4   # maximum number of points per leaf
    k = 1           # number of neighbours to search for

    db_np = np.random.rand(db_size, dim)  # random N x 3 points

    root = kdtree_construction(db_np, leaf_size=leaf_size)  # build the tree

    # Walk the tree, printing every leaf and recording the maximum depth.
    depth = [0]
    max_depth = [0]
    traverse_kdtree(root, depth, max_depth)
    print("tree max depth: %d" % max_depth[0])

    # kNN search test.
    query = np.asarray([0, 0, 0])
    result_set = KNNResultSet(capacity=k)
    kdtree_knn_search(root, db_np, result_set, query)
    print(result_set)

    # Brute-force reference for comparison.
    diff = np.linalg.norm(np.expand_dims(query, 0) - db_np, axis=1)
    nn_idx = np.argsort(diff)
    nn_dist = diff[nn_idx]
    print(nn_idx[0:k])
    print(nn_dist[0:k])

    # Radius-NN search test.
    print("Radius search:")
    query = np.asarray([0, 0, 0])
    result_set = RadiusNNResultSet(radius = 0.5)
    kdtree_radius_search(root, db_np, result_set, query)
    print(result_set)


if __name__ == '__main__':
    main()
teamo1996/Point-cloud-process-shenlan | 07-Feature Detection/code/Utils.py | import pandas as pd
import open3d as o3d
import open3d
class pointcloud:
    """Point-cloud loading helper (CSV with coordinates and normals)."""

    def __init__(self):
        pass

    @staticmethod
    def read_pointcloud(file_name: str) -> open3d.geometry.PointCloud:
        """Load a headerless x,y,z,nx,ny,nz CSV into an Open3D point cloud."""
        frame = pd.read_csv(file_name, header=None)
        frame.columns = ["x", "y", "z",
                         "nx", "ny", "nz"]
        cloud = o3d.geometry.PointCloud()
        cloud.points = o3d.utility.Vector3dVector(frame[["x", "y", "z"]].values)
        cloud.normals = o3d.utility.Vector3dVector(frame[["nx", "ny", "nz"]].values)
        return cloud
|
teamo1996/Point-cloud-process-shenlan | 03-Clustering/code/GMM.py | # 文件功能:实现 GMM 算法
import numpy as np
from numpy import *
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
from scipy.stats import multivariate_normal
plt.style.use('seaborn')
class GMM(object):
    """Gaussian Mixture Model fitted with plain EM.

    Public attributes keep the original layout: Mu (k x d means), Var (list
    of k full d x d covariances), Pi (k mixture weights), W (n x k
    responsibilities r(z_nk)), loglh (log-likelihood per iteration).
    """

    def __init__(self, n_clusters, max_iter=50):
        self.n_clusters = n_clusters  # number of mixture components
        self.max_iter = max_iter      # maximum number of EM iterations
        self.Mu = None                # component means
        self.Var = None               # component covariances
        self.Pi = None                # component weights
        self.W = None                 # responsibilities r(z_nk)
        self.data = None              # training data
        self.n_points = None          # number of training points
        self.loglh = None             # log-likelihood history

    def initialize(self, data):
        """Initialise all EM parameters from the data."""
        self.n_points = data.shape[0]
        self.data = data
        self.Mu = np.empty((0, data.shape[1]))
        self.Var = []
        # Pick k random data points as initial means; start every component
        # with a large spherical covariance.
        for index in np.random.choice(self.n_points, self.n_clusters):
            self.Mu = np.append(self.Mu, [data[index, :]], axis=0)
            # FIX: was np.diag([1, 1]), which only worked for 2-D data.
            # np.eye(d) is identical for d == 2 and valid for any d.
            self.Var.append(10 * np.eye(data.shape[1]))
        # Uniform responsibilities and uniform weights to start with.
        self.W = np.ones((self.n_points, self.n_clusters)) / self.n_clusters
        self.Pi = [1 / self.n_clusters] * self.n_clusters
        self.loglh = []

    def update_W(self):
        """E-step: recompute the responsibilities r(z_nk)."""
        pdfs = np.zeros(((self.n_points, self.n_clusters)))
        for i in range(self.n_clusters):
            pdfs[:, i] = self.Pi[i] * multivariate_normal.pdf(self.data, self.Mu[i], np.asarray(self.Var[i]))
        # Normalise so each point's responsibilities sum to one.
        self.W = pdfs / pdfs.sum(axis=1).reshape(-1, 1)
        return self.W

    def update_Mu(self):
        """M-step: means are responsibility-weighted averages of the data."""
        self.Mu = np.zeros((self.n_clusters, self.data.shape[1]))
        for i in range(self.n_clusters):
            self.Mu[i] = np.average(self.data, axis=0, weights=self.W[:, i])

    def update_Var(self):
        """M-step: full covariance per component, responsibility-weighted."""
        self.Var = []
        for i in range(self.n_clusters):
            self.Var.append(np.cov(self.data - self.Mu[i], rowvar=0, aweights=self.W[:, i]))

    def update_Pi(self):
        """M-step: weights are the mean responsibilities per component."""
        self.Pi = self.W.sum(axis=0) / self.n_points
        return self.Pi

    def logLH(self):
        """Log-likelihood of the data under the current parameters."""
        pdfs = np.zeros(((self.n_points, self.n_clusters)))  # n x k
        for i in range(self.n_clusters):
            pdfs[:, i] = self.Pi[i] * multivariate_normal.pdf(self.data, self.Mu[i], self.Var[i])
        # Sum over components inside the log, then over points.
        return np.sum(np.log(pdfs.sum(axis=1)), axis=0)

    def fit(self, data):
        """Run EM for up to max_iter iterations, recording the likelihood."""
        self.initialize(data)
        num_iter = 0
        self.loglh.append(self.logLH())
        while num_iter < self.max_iter:
            self.update_W()
            self.update_Pi()
            self.update_Mu()
            self.update_Var()
            self.loglh.append(self.logLH())
            num_iter += 1

    def predict(self, data):
        """Assign each point of *data* to its most likely component."""
        pdfs = np.zeros((data.shape[0], self.n_clusters))
        for i in range(self.n_clusters):
            # FIX: use the full fitted covariance, consistent with update_W
            # and logLH. The original called np.diag(self.Var[i]) here,
            # silently discarding all off-diagonal correlations at
            # prediction time.
            pdfs[:, i] = self.Pi[i] * multivariate_normal.pdf(data, self.Mu[i], np.asarray(self.Var[i]))
        W = pdfs / pdfs.sum(axis=1).reshape(-1, 1)
        # Component with the highest responsibility wins.
        return np.argmax(W, axis=1)
def generate_X(true_Mu, true_Var):
    """Sample three diagonal-covariance Gaussian clusters (400/600/1000
    points), scatter-plot them, and return the stacked samples."""
    sizes = (400, 600, 1000)
    clusters = [
        np.random.multivariate_normal(mu, np.diag(var), num)
        for num, mu, var in zip(sizes, true_Mu, true_Var)
    ]
    X = np.vstack(clusters)
    # Visualise the raw samples before clustering.
    plt.figure(figsize=(10, 8))
    plt.axis([-10, 15, -5, 15])
    for cluster in clusters:
        plt.scatter(cluster[:, 0], cluster[:, 1], s=5)
    plt.show()
    return X
if __name__ == '__main__':
    # Generate three synthetic Gaussian clusters.
    true_Mu = [[0.5, 0.5], [5.5, 2.5], [1, 7]]
    true_Var = [[1, 3], [2, 2], [6, 2]]
    X = generate_X(true_Mu, true_Var)

    # Fit a 3-component GMM and colour the points by predicted component.
    gmm = GMM(n_clusters=3)
    gmm.fit(X)
    cat = gmm.predict(X)
    print(cat)

    plt.figure(figsize=(10, 8))
    plt.axis([-10, 15, -5, 15])
    plt.scatter(X[:, 0], X[:, 1], s=5, c=cat)
    plt.show()
|
teamo1996/Point-cloud-process-shenlan | 09-Registration/code/Detector.py | '''
Author: teamo1998
Date: 2021-05-03 15:12:57
LastEditTime: 2021-05-03 21:10:05
LastEditors: Please set LastEditors
Description: 本代码是为了完成ISS特征点检测
FilePath: /Point-cloud-process/第7章:3D Feature Detection/ISS.py
'''
import pandas as pd
import open3d as o3d
import argparse
import numpy as np
import heapq
import Utils
import open3d
import numpy
import pandas
def get_args():
    """Parse ISS-detector command-line options (all with defaults)."""
    parser = argparse.ArgumentParser(description="arg parser")
    parser.add_argument("--file_name", type=str,
                        default="/workspace/Data/modelnet40_normal_resampled/airplane/airplane_0002.txt")
    parser.add_argument("--radius", type=float, default=0.1)
    parser.add_argument("--gamma32", type=float, default=1.2)
    parser.add_argument("--gamma21", type=float, default=1.2)
    parser.add_argument("--l3_min", type=float, default=1.0)
    parser.add_argument("--k_min", type=int, default=1)
    return parser.parse_args()
class ISS_detector:
    """ISS (Intrinsic Shape Signatures) keypoint detector for Open3D clouds.

    Usage: set_pointcloud(), optionally set_attribute(), then detect();
    read results via get_feature_index() / get_feature_points().
    """
    def __init__(self) -> None:
        self.__point_cloud = None    # input Open3D point cloud
        self.__radius = 1            # neighbourhood radius for the weighted covariance
        self.__gamma_32 = 1.2        # required ratio l2 / l3
        self.__gamma_21 = 1.2        # required ratio l1 / l2
        self.__l3_min = 0.0001       # minimum smallest eigenvalue
        self.__k_min = 3             # minimum number of neighbours
        self.__feature_index = None  # indices of the detected keypoints
        # Min-heap keyed on -l3, used for non-maximum suppression.
        self.__pq = []
        # Indices of points suppressed by a stronger neighbour.
        self.__suppressed = set()
        # Neighbour list of every point, cached for the suppression pass.
        self.__neighbors_every_point = []
    def set_pointcloud(self,pointcloud:open3d.geometry.PointCloud) -> None:
        """Attach the cloud and build the KD-tree used for radius queries."""
        self.__point_cloud = pointcloud
        # KD-tree over the cloud for radius searches.
        self.__search_tree = o3d.geometry.KDTreeFlann(self.__point_cloud)
    def set_attribute(self,radius:float,gamma_32:float,gamma_21:float,l3_min:float,k_min:int) -> None:
        """Override the detection thresholds."""
        self.__radius = radius
        self.__gamma_32 = gamma_32
        self.__gamma_21 = gamma_21
        self.__l3_min = l3_min
        self.__k_min = k_min
    def __get_cov_matrix(self,center,pointclouds,neigbor_idx,weight):
        # Weighted covariance of the neighbourhood around *center*; each
        # neighbour is weighted by the inverse of its own neighbour count.
        # Fetch the neighbour coordinates.
        neighbors = pointclouds[neigbor_idx]
        # Offsets from the centre point.
        distance = neighbors - center
        # Inverse-count weights.
        weight = np.asarray(weight)
        weight = 1.0/weight
        weight = np.reshape(weight,(-1,))
        # Normalised weighted covariance matrix.
        cov = 1.0/weight.sum()*np.dot(distance.T,np.dot(np.diag(weight),distance))
        return cov
    def __non_maximum_suppression(self) -> None:
        # Pop points in decreasing l3 order and suppress their neighbours.
        while(self.__pq):
            # Point with the largest l3 (heap stores -l3).
            _,idx_centor = heapq.heappop(self.__pq)
            # Skip points that have themselves been suppressed already.
            if not idx_centor in self.__suppressed:
                # Neighbours of the current point.
                neighbor = self.__neighbors_every_point[idx_centor]
                # Drop the first entry (the point itself).
                neighbor = neighbor[1:]
                # Suppress every remaining neighbour.
                for _i in neighbor:
                    self.__suppressed.add(_i)
            else:
                continue
    def __filter_by_param(self,df_data:dict) -> None:
        # Apply suppression and the ISS eigenvalue criteria.
        df_data = pd.DataFrame.from_dict(df_data)
        # Drop points removed by non-maximum suppression.
        df_data = df_data.loc[df_data["id"].apply(lambda id:not id in self.__suppressed),df_data.columns]
        # Keep only points satisfying l1 > gamma21*l2, l2 > gamma32*l3,
        # and l3 above the minimum.
        df_data = df_data.loc[( df_data["l1"] > df_data["l2"]*self.__gamma_21) \
            & ( df_data["l2"] > df_data["l3"]*self.__gamma_32) \
                & (df_data["l3"] > self.__l3_min) ,df_data.columns]
        self.__feature_index = df_data["id"].values
    def __eig_and_sort(self,cov:numpy.ndarray) -> numpy.ndarray:
        # Eigen-decompose the covariance matrix ...
        eigen_values,_ = np.linalg.eig(cov)
        # ... and sort the eigenvalues in descending order.
        eigen_values = eigen_values[np.argsort(eigen_values)[::-1]]
        return eigen_values
    def detect(self) -> None:
        """Run ISS detection; results are stored on the instance."""
        # Per-point record: id and sorted eigenvalues l1 >= l2 >= l3.
        df_data = {
            "id":[],
            "l1":[],
            "l2":[],
            "l3":[],
        }
        pointclouds = np.asarray(self.__point_cloud.points)
        # Cache of each point's neighbour count, to avoid repeated queries.
        num_neighbor_cach = np.zeros((pointclouds.shape[0],1))
        # Visit every point.
        for idx_centor, center in enumerate(pointclouds):
            # Neighbours within the configured radius.
            [k,idx,_] = self.__search_tree.search_radius_vector_3d(center,self.__radius)
            self.__neighbors_every_point.append(idx)
            # Points with too few neighbours get zero eigenvalues (they can
            # never pass the filter) but still join the suppression heap.
            if k < self.__k_min:
                # Heap is keyed on -l3 so the largest l3 pops first.
                heapq.heappush(self.__pq,(-0.0,idx_centor))
                df_data["id"].append(idx_centor)
                df_data["l1"].append(0.0)
                df_data["l2"].append(0.0)
                df_data["l3"].append(0.0)
                continue
            # Weight list: one entry per neighbour.
            w = []
            # Look up (and cache) each neighbour's own neighbour count.
            for index in idx:
                # Query only if not cached yet.
                if(num_neighbor_cach[index] == 0):
                    [k_,_,_] = self.__search_tree.search_radius_vector_3d(pointclouds[index],self.__radius)
                    num_neighbor_cach[index] = k_
                # Neighbour counts feed the inverse-count weight matrix.
                w.append(num_neighbor_cach[index])
            # Weighted covariance of the neighbourhood.
            cov = self.__get_cov_matrix(center,pointclouds,idx,w)
            # Eigenvalues sorted descending.
            eigen_values = self.__eig_and_sort(cov)
            # Heap is keyed on -l3 so the largest l3 pops first.
            heapq.heappush(self.__pq,(-eigen_values[2],idx_centor))
            df_data["id"].append(idx_centor)
            df_data["l1"].append(eigen_values[0])
            df_data["l2"].append(eigen_values[1])
            df_data["l3"].append(eigen_values[2])
        # Non-maximum suppression over the whole cloud.
        self.__non_maximum_suppression()
        # Apply the eigenvalue criteria and store the surviving indices.
        self.__filter_by_param(df_data)
    def get_feature_index(self) -> numpy.ndarray:
        # Indices of the detected keypoints within the original cloud.
        return self.__feature_index
    def get_feature_points(self) -> numpy.ndarray:
        # Coordinates of the detected keypoints.
        return np.asarray(self.__point_cloud.points)[self.__feature_index,:]
if __name__ == "__main__":
    # Parse command-line options.
    args = get_args()
    file_name = args.file_name
    # Load the point cloud.
    pcd = Utils.pointcloud.read_pointcloud(file_name)
    # Build the detector and run keypoint detection.
    detector = ISS_detector()
    detector.set_pointcloud(pcd)
    detector.detect()
    feature_index = detector.get_feature_index()
    print(detector.get_feature_points())
    # Visualise: grey cloud with the keypoints highlighted in red.
    pcd.paint_uniform_color([0.95,0.95,0.95])
    np.asarray(pcd.colors)[feature_index,:] = [1.0,0.0,0.0]
    o3d.visualization.draw_geometries([pcd])
|
teamo1996/Point-cloud-process-shenlan | 09-Registration/code/Registration_o3d.py | <gh_stars>1-10
import os
import argparse
import progressbar
import numpy as np
import open3d as o3d
import pandas as pd
import copy
import Utils
def get_args():
    """Parse command-line options for the Open3D registration pipeline."""
    parser = argparse.ArgumentParser("Registration")
    parser.add_argument("--pointcloud_dir", type=str,
                        default="/workspace/Data/registration_dataset")
    parser.add_argument("--voxel_size", type=float, default=1)
    return parser.parse_args()
if __name__ == "__main__":
    args = get_args()
    # Dataset directory.
    datasets_dir = args.pointcloud_dir
    # Progress bar over all registration pairs.
    progress = progressbar.ProgressBar()
    # Read the ground-truth pairing file as a pandas DataFrame.
    registration_results = Utils.Homework.read_registration_results(os.path.join(datasets_dir,"reg_result.txt"))
    # Initialise the column-oriented output structure.
    df_output = Utils.Homework.init_output()
    # Voxel size used for down-sampling.
    voxel_size = args.voxel_size
    # Each row of reg_result names one pair of clouds to register.
    for index,row in progress(list(registration_results.iterrows())):
        # File ids of the pair to register.
        idx_target = int(row["idx1"])
        idx_source = int(row["idx2"])
        # Load both clouds (Open3D format).
        pcd_source = Utils.pointcloud.read_point_cloud_bin(os.path.join(datasets_dir,"point_clouds",f"{idx_source}.bin"))
        pcd_target = Utils.pointcloud.read_point_cloud_bin(os.path.join(datasets_dir,"point_clouds",f"{idx_target}.bin"))
        # Optional visualisation before registration:
        # Utils.Homework.draw_registration_result(pcd_source,pcd_target,np.identity(4))
        # Down-sample with a voxel grid.
        pcd_down_source = pcd_source.voxel_down_sample(voxel_size)
        pcd_down_target = pcd_target.voxel_down_sample(voxel_size)
        # Optional visualisation of the down-sampled clouds:
        # Utils.Homework.draw_registration_result(pcd_down_source,pcd_down_target,np.identity(4))
        # Radius for the FPFH neighbourhood search.
        radius_feature = voxel_size * 5
        # Compute FPFH descriptors on both down-sampled clouds.
        pcd_fpfh_source = o3d.pipelines.registration.compute_fpfh_feature(
            pcd_down_source,
            o3d.geometry.KDTreeSearchParamHybrid(radius=radius_feature, max_nn=100))
        pcd_fpfh_target = o3d.pipelines.registration.compute_fpfh_feature(
            pcd_down_target,
            o3d.geometry.KDTreeSearchParamHybrid(radius=radius_feature, max_nn=100))
        # Correspondence distance threshold.
        distance_threshold = voxel_size * 0.5
        # Global registration via Open3D's feature-matching RANSAC pipeline.
        init_result = o3d.pipelines.registration.registration_ransac_based_on_feature_matching(
            pcd_down_source, pcd_down_target, pcd_fpfh_source, pcd_fpfh_target, True,
            distance_threshold,
            o3d.pipelines.registration.TransformationEstimationPointToPoint(False),
            3, [
                # Pruning checks that decide whether a RANSAC hypothesis is valid:
                o3d.pipelines.registration.CorrespondenceCheckerBasedOnEdgeLength( # geometric (edge-length) consistency of the aligned clouds
                    0.95),
                o3d.pipelines.registration.CorrespondenceCheckerBasedOnDistance( # proximity of aligned correspondences
                    distance_threshold),
                o3d.pipelines.registration.CorrespondenceCheckerBasedOnNormal(10*2*3.14/180) # similarity of normal directions after alignment
            ], o3d.pipelines.registration.RANSACConvergenceCriteria(100000, 0.999)) # max RANSAC iterations and confidence
        # Optional visualisation of the global-registration result:
        # Utils.Homework.draw_registration_result(pcd_down_source, pcd_down_target, init_result.transformation)
        distance_threshold = 0.05 * 0.4
        # Refine with point-to-plane ICP on the full-resolution clouds,
        # starting from the RANSAC initial guess.
        final_result = o3d.pipelines.registration.registration_icp(
            pcd_source, pcd_target, distance_threshold, init_result.transformation,
            o3d.pipelines.registration.TransformationEstimationPointToPlane())
        # Optional visualisation of the final result:
        # Utils.Homework.draw_registration_result(pcd_source, pcd_target, final_result.transformation)
        # Record this pair's estimated transform.
        Utils.Homework.add_to_output(df_output, idx_target, idx_source, final_result.transformation)
    # Write all accumulated results to file.
    Utils.Homework.write_output(
        os.path.join(datasets_dir, 'reg_result_teamo.txt'),
        df_output)
teamo1996/Point-cloud-process-shenlan | 03-Clustering/code/KMeans.py | # 文件功能: 实现 K-Means 算法
import numpy as np
import math
class K_Means(object):
    """Plain k-means clustering (Lloyd's algorithm) over an (n, d) array.

    n_clusters: number of clusters k; tolerance: centre-movement threshold
    for convergence; max_iter: iteration cap.
    """

    def __init__(self, n_clusters=2, tolerance=0.0001, max_iter=300):
        self.k_ = n_clusters
        self.tolerance_ = tolerance
        self.max_iter_ = max_iter
        self.centers = None  # (k, d) cluster centres, set by fit()

    def fit(self, data):
        """Fit the cluster centres to *data* (seeded for reproducibility)."""
        np.random.seed(0)
        # Step 1: pick k random points (with replacement, as before) as the
        # initial centres.
        self.centers = np.empty((0, data.shape[1]))
        for i in np.random.choice(data.shape[0], self.k_):
            self.centers = np.append(self.centers, [data[i]], axis=0)
        # Step 2: alternate assignment and centre-update steps.
        num_iter = 0
        tolerance_achive = False
        # Distance from every point to every centre (n x k), reused per pass.
        center_dist = np.zeros((data.shape[0], self.k_))
        while num_iter < self.max_iter_ and (not tolerance_achive):
            # Assignment step: Euclidean distance to each centre.
            for center_index in range(self.k_):
                diff = data - self.centers[center_index, :]
                center_dist[:, center_index] = np.sqrt(np.sum(np.square(diff), axis=1))
            # PERF: the original filled an (n, 2) assignment array point by
            # point in a Python loop and never used the distance column;
            # only the argmin labels matter.
            labels = np.argmin(center_dist, axis=1)
            # Update step: move each centre to the mean of its members.
            tolerance_achive = True
            for center_index in range(self.k_):
                members = data[labels == center_index]
                # Skip empty clusters (centre stays where it is).
                if len(members) != 0:
                    new_mean = np.mean(members, axis=0)
                    # Converged only when every centre moves less than the
                    # tolerance (squared comparison avoids a sqrt).
                    if np.sum(np.square(self.centers[center_index, :] - new_mean)) > np.square(self.tolerance_):
                        tolerance_achive = False
                    self.centers[center_index, :] = new_mean
            num_iter += 1

    def predict(self, p_datas):
        """Return the index of the nearest fitted centre for each point."""
        center_dist = np.zeros((p_datas.shape[0], self.k_))
        for center_index in range(self.k_):
            diff = p_datas - self.centers[center_index, :]
            center_dist[:, center_index] = np.sqrt(np.sum(np.square(diff), axis=1))
        return np.argmin(center_dist, axis=1)
if __name__ == '__main__':
    # Quick sanity check on six 2-D points forming two obvious clusters.
    x = np.array([[1, 2], [1.5, 1.8], [5, 8], [8, 8], [1, 0.6], [9, 11]])
    k_means = K_Means(n_clusters=2)
    k_means.fit(x)
    cat = k_means.predict(x)
    print(cat)
|
teamo1996/Point-cloud-process-shenlan | 01-Introduction and Basic Algorithm/code/voxel_filter.py | # 实现voxel滤波,并加载数据集中的文件进行验证
import open3d as o3d
import os
import numpy as np
import pandas as pd
import math
from pyntcloud import PyntCloud
import random
import argparse
def get_args():
    """Parse command-line options for the voxel-filter demo."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--filename", type=str,
                        default="../../Data/modelnet40_normal_resampled/airplane/airplane_0001.txt")
    parser.add_argument("--leaf_size", type=float, default=0.08)
    parser.add_argument("--mode", type=int, default=0)
    return parser.parse_args()
def voxel_filter(point_cloud, leaf_size, mode):
    """Down-sample a point cloud with a voxel grid.

    point_cloud: (N, 3) array-like of points.
    leaf_size:   voxel edge length.
    mode:        0 -> keep the centroid of each voxel,
                 otherwise -> keep one random point per voxel.
    Returns an (M, 3) float64 array with one point per non-empty voxel.
    """
    point_cloud = np.array(point_cloud, dtype=np.float64)

    # Bounding box of the cloud.
    mins = point_cloud.min(axis=0)
    maxs = point_cloud.max(axis=0)

    # Grid dimensions along x and y (z never enters the hash product).
    D_x = math.ceil((maxs[0] - mins[0]) / leaf_size)
    D_y = math.ceil((maxs[1] - mins[1]) / leaf_size)

    # PERF: voxel hash of every point in one vectorised pass; the original
    # grew h_array with np.append inside a Python loop (quadratic).
    cell = np.floor((point_cloud - mins) / leaf_size)
    h_array = cell[:, 0] + cell[:, 1] * D_x + cell[:, 2] * D_x * D_y

    # Group points by sorting on the voxel hash.
    sort = h_array.argsort()
    point_cloud = point_cloud[sort]
    # PERF: hoisted h_array[sort] out of the loop; the original re-applied
    # the fancy index on every iteration (also quadratic).
    h_sorted = h_array[sort]

    # Walk the sorted points; whenever the hash changes, emit one point for
    # the voxel that just ended.
    filtered_points = []
    start = 0
    for i in range(len(point_cloud)):
        if i < len(point_cloud) - 1 and h_sorted[i] == h_sorted[i + 1]:
            continue
        if mode == 0:
            # Centroid of the voxel's points.
            choice_point = np.mean(point_cloud[start:i + 1, :], axis=0)
        else:
            # Random representative (same random.randint call as before).
            random_index = random.randint(0, i - start)
            choice_point = point_cloud[start + random_index, :]
        filtered_points.append(choice_point)
        start = i + 1

    # Return the filtered cloud as a plain ndarray.
    filtered_points = np.array(filtered_points, dtype=np.float64)
    print(filtered_points.shape)
    return filtered_points
def main():
    """Load a cloud, show it, voxel-filter it, and show the result."""
    # Command-line options.
    args = get_args()
    filename = args.filename
    leaf_size = args.leaf_size
    mode = args.mode

    # Load the raw txt point cloud.
    point_cloud_pd = pd.read_csv(filename)
    point_cloud_pd.columns = ["x","y","z","nx","ny","nz"]
    point_cloud_pynt = PyntCloud(point_cloud_pd)
    point_cloud_o3d = point_cloud_pynt.to_instance("open3d", mesh=False)
    o3d.visualization.draw_geometries([point_cloud_o3d])  # show the raw cloud

    # Apply the voxel filter.
    filtered_cloud = voxel_filter(point_cloud_o3d.points, leaf_size,mode)
    point_cloud_o3d.points = o3d.utility.Vector3dVector(filtered_cloud)
    # Show the filtered cloud.
    o3d.visualization.draw_geometries([point_cloud_o3d])


if __name__ == '__main__':
    main()
|
teamo1996/Point-cloud-process-shenlan | 01-Introduction and Basic Algorithm/code/pca_normal.py | # 实现PCA分析和法向量计算,并加载数据集中的文件进行验证
import open3d as o3d
import numpy as np
import pandas as pd
from pyntcloud import PyntCloud
import argparse
def get_args():
    """Parse command-line options for the PCA/normal-estimation demo."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--filename", type=str,
                        default="../../Data/modelnet40_normal_resampled/airplane/airplane_0001.txt")
    parser.add_argument("--number_nearest", type=int, default=50)
    return parser.parse_args()
def PCA(data, correlation=False, sort=True):
    """Eigen-decompose the scatter matrix of *data* (rows are 3-D points).

    Returns (eigenvalues, eigenvectors), sorted in descending eigenvalue
    order when sort=True. NOTE: the *correlation* flag is accepted for API
    compatibility but is not used by this implementation.
    """
    # Centre the data (columns are points after the transpose).
    centered = data.T - np.mean(data.T, axis=1).reshape(3, 1)
    # Scatter matrix H = X_c X_c^T.
    scatter = centered.dot(centered.T)
    eigenvalues, eigenvectors = np.linalg.eig(scatter)
    if sort:
        order = eigenvalues.argsort()[::-1]
        eigenvalues = eigenvalues[order]
        eigenvectors = eigenvectors[:, order]
    return eigenvalues, eigenvectors
def main():
    """Entry point: load a cloud, show its PCA axes, then estimate and show per-point normals."""
    # Parse command-line arguments.
    args = get_args()
    filename = args.filename
    number_nearest = args.number_nearest
    point_cloud_o3d = o3d.geometry.PointCloud()
    point_cloud_o3d.points = o3d.utility.Vector3dVector(np.genfromtxt(filename,delimiter=',')[:,0:3])
    # Alternative loader kept for reference (pandas -> PyntCloud -> Open3D):
    # point_cloud_pd = pd.read_csv(filename)
    # point_cloud_pd.columns = ["x","y","z","nx","ny","nz"]
    # point_cloud_pynt = PyntCloud(point_cloud_pd)
    # point_cloud_o3d = point_cloud_pynt.to_instance("open3d", mesh=False)
    # Show the original point cloud.
    o3d.visualization.draw_geometries([point_cloud_o3d])
    # Work on the bare xyz coordinates only.
    points = np.asarray(point_cloud_o3d.points)
    print('total points number is:', points.shape[0])
    # PCA over the whole cloud to find its principal directions.
    w, v = PCA(points)
    point_cloud_vector = v[:, 0] # vector of the cloud's main direction
    print('the main orientation of this pointcloud is: ', point_cloud_vector)
    # Draw the three principal axes as line segments rooted at the centroid
    # (blue = weakest component, green = middle, red = strongest).
    line_set = o3d.geometry.LineSet()
    line_set.points = o3d.utility.Vector3dVector([np.mean(points, axis=0), np.mean(points, axis=0) + v[:, 2], np.mean(points, axis=0) + v[:, 1], np.mean(points, axis=0) + v[:, 0]])
    line_set.lines = o3d.utility.Vector2iVector([[0, 1], [0, 2], [0, 3]])
    line_set.colors = o3d.utility.Vector3dVector([[0, 0, 1], [0, 1, 0], [1, 0, 0]])
    o3d.visualization.draw_geometries([point_cloud_o3d, line_set])
    # Estimate one normal per point: PCA over its k nearest neighbors,
    # taking the eigenvector of the smallest eigenvalue (v[:, 2]).
    pcd_tree = o3d.geometry.KDTreeFlann(point_cloud_o3d)
    normals = []
    for point in point_cloud_o3d.points:
        [k, idx, _] = pcd_tree.search_knn_vector_3d(point, knn=number_nearest)
        w,v = PCA(points[idx,:])
        normals.append(v[:,2])
    normals = np.array(normals, dtype=np.float64)
    # Attach the normals to the cloud and visualize again.
    point_cloud_o3d.normals = o3d.utility.Vector3dVector(normals)
    o3d.visualization.draw_geometries([point_cloud_o3d])
if __name__ == '__main__':
    main()
|
teamo1996/Point-cloud-process-shenlan | 04-Model Fitting/code/clustering.py | <gh_stars>1-10
# 文件功能:
# 1. 从数据集中加载点云数据
# 2. 从点云数据中滤除地面点云
# 3. 从剩余的点云中提取聚类
import numpy as np
import os
import struct
from numpy.core.fromnumeric import argmin
from sklearn import cluster, datasets, mixture
from itertools import cycle, islice
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import neighbors
from sklearn.neighbors import KDTree
from sklearn.cluster import DBSCAN
from pyntcloud import PyntCloud
import pandas as pd
import open3d as o3d
# 功能:从kitti的.bin格式点云文件中读取点云
# 输入:
# path: 文件路径
# 输出:
# 点云数组
def read_velodyne_bin(path):
    """Load an (N, 3) float32 xyz array from a KITTI-style .bin point cloud file.

    Each record in the file is four float32 values (x, y, z, intensity);
    the intensity channel is dropped.

    Args:
        path: path to the .bin file.

    Returns:
        (N, 3) writable float32 array of point coordinates.

    Raises:
        ValueError: if the file size is not a multiple of 16 bytes.
    """
    with open(path, 'rb') as f:
        content = f.read()
    # Vectorized parse: one C-level pass instead of a Python loop over structs.
    points = np.frombuffer(content, dtype=np.float32).reshape(-1, 4)
    # Copy so the result is writable (frombuffer yields a read-only view).
    return np.array(points[:, :3])
# Remove ground points from a point cloud.
# Input:
#     data: one complete frame of points, shape (N, 3)
# Output:
#     segmengted_cloud: the cloud with ground points removed
#     inlier_cloud: the ground points themselves
def ground_segmentation(data):
    # Estimate a normal per point; near-vertical normals mark likely ground inliers.
    print("开始构建KDtree")
    kdtree = KDTree(data,leaf_size=8)
    print("KDTree构建完成")
    # Per-point normal via PCA over a 1 m radius neighborhood.
    normals = np.empty((data.shape[0],data.shape[1]))
    bad_index = np.zeros((data.shape[0],1))
    print("开始计算每个点的法向量")
    for i in range(data.shape[0]):
        point = data[i,:]
        index = kdtree.query_radius(point.reshape(1,-1),r=1.0)
        neighbors_radius = data[index[0],:]
        if(neighbors_radius.shape[0] < 3): # fewer than 3 neighbors within 1 m: normal unreliable
            normals[i,:] = np.zeros((1,3))
            bad_index[i]=1
        else:
            neighbors_mean = np.mean(neighbors_radius,axis=0)
            neighbors_without_mean = neighbors_radius-neighbors_mean
            H = np.dot(neighbors_without_mean.T,neighbors_without_mean)
            eigenvalues,eigenvectors = np.linalg.eig(H)
            ind = argmin(eigenvalues)
            normal = eigenvectors[:,ind].T
            normals[i,:] = normal
            # tan of the normal's tilt from vertical; > 0.57 (~30 deg) => not ground-like
            if(np.sqrt(np.power(normal[0],2.0) + np.power(normal[1],2.0))/np.abs(normal[2]) > 0.57):
                bad_index[i]=1
    # RANSAC early-stop threshold: fraction of near-vertical normals, scaled by 0.7.
    inline_percent = (1 - bad_index.sum(axis=0)[0]/data.shape[0])*0.7
    print("法向量计算完成")
    # Fit the ground plane with RANSAC.
    # Plane model: ax+by+cz+d=0
    distance_threshold = 0.5 # inlier distance threshold (tau)
    ransac_n = 3 # points per RANSAC sample
    num_iter = 50 # RANSAC iterations
    inlier_cloud = np.empty(shape=[0, data.shape[1]]) # best inlier cloud so far
    for i in range(num_iter):
        print("迭代次数:%d"%(i))
        print("开始选点")
        # step1: randomly pick three points whose normals look ground-like
        p = np.empty(shape=[0,data.shape[1]])
        while p.shape[0]<3:
            index = np.random.choice(data.shape[0],1)[0]
            if(bad_index[index] < 1 and np.sqrt(np.power(normals[index][0],2.0) + np.power(normals[index][1],2.0))/np.abs(normals[index][2]) < 0.57 ):
                p = np.append(p,[data[index]],axis=0)
        print("选点结束")
        print("开始拟合平面")
        # step2: plane through the three sampled points (cross-product form)
        a = (p[1][1] - p[0][1]) * (p[2][2] - p[0][2]) - (p[1][2] - p[0][2]) * (p[2][1] - p[0][1])
        b = (p[1][2] - p[0][2]) * (p[2][0] - p[0][0]) - (p[1][0] - p[0][0]) * (p[2][2] - p[0][2])
        c = (p[1][0] - p[0][0]) * (p[2][1] - p[0][1]) - (p[1][1] - p[0][1]) * (p[2][0] - p[0][0])
        d = 0 - (a * p[0][0] + b * p[0][1] + c * p[0][2])
        print("开始遍历剩余点")
        # step3: scan every point and mark inliers vs outliers
        inlier = np.empty(shape=[0, data.shape[1]]) # inliers of this iteration
        inlier_idx = np.empty(shape=[0, 1], dtype=int) # indices of those inliers
        for idx in range(data.shape[0]):
            p = data[idx, :]
            point_distance = abs(a*p[0] + b*p[1] + c*p[2] + d) / np.sqrt(a*a + b*b + c*c) # point-to-plane distance
            if (point_distance < distance_threshold and bad_index[idx] < 1 ): # inlier: near the plane AND near-vertical normal
                inlier = np.append(inlier, [data[idx]], axis=0)
                inlier_idx = np.append(inlier_idx, idx)
        if inlier.shape[0] > inlier_cloud.shape[0]:
            inlier_cloud = inlier
            segmengted_cloud = np.delete(data, inlier_idx, axis=0)
        print("内点比例:%.2f"%(inlier.shape[0]/data.shape[0]))
        if(inlier_cloud.shape[0]/data.shape[0] > inline_percent): # early-stop condition
            break
    print('origin data points num:', data.shape[0])
    print('segmented data points num:', segmengted_cloud.shape[0])
    return segmengted_cloud, inlier_cloud
# Extract clusters from a point cloud (DBSCAN implemented from scratch).
# Input:
#     data: point cloud (ground already removed)
# Output:
#     clusters_index: 1-D array with the cluster id of every point (-1 = noise)
def clustering(data):
    distance_threshold = 1.0 # neighborhood radius (DBSCAN eps)
    min_sample = 4 # minimum neighbors for a core point
    n = len(data)
    # Build a KD-tree for radius queries.
    leaf_size = 8
    tree = KDTree(data,leaf_size)
    #step1: initialize core set T, cluster counter k, label array C, unvisited set P
    core_sets = set() # set of core points
    k = 0 # current cluster id
    cluster_index = np.zeros(n,dtype=int) # per-point cluster labels
    unvisited = set(range(n)) # points not visited yet
    #step2: one batched radius-NN query to identify all core points
    nearest_idx = tree.query_radius(data, distance_threshold) # neighbors within eps for every point
    for d in range(n):
        if len(nearest_idx[d]) >= min_sample: # enough neighbors -> core point
            core_sets.add(d) # initial core points
    #step3: grow one cluster per outer iteration
    while len(core_sets):
        unvisited_old = unvisited # snapshot of the unvisited set
        core = list(core_sets)[np.random.randint(0,len(core_sets))] # pick a random core point
        unvisited = unvisited - set([core]) # mark the core visited
        visited = []
        visited.append(core) # frontier of points still to expand
        while len(visited):
            new_core = visited[0]
            # radius-NN expansion from the current frontier point
            if new_core in core_sets: # only core points spread the cluster
                S = unvisited & set(nearest_idx[new_core]) # unvisited neighbors of this core
                visited += (list(S)) # queue them for expansion
                unvisited = unvisited - S # and mark them visited
            visited.remove(new_core) # frontier point fully processed
        cluster = unvisited_old - unvisited # everything reached from this core is one cluster
        core_sets = core_sets - cluster # drop that cluster's core points from the pool
        cluster_index[list(cluster)] = k
        k += 1 # next cluster id
    noise_cluster = unvisited
    cluster_index[list(noise_cluster)] = -1 # leftover points are noise
    return cluster_index
# Visualize the segmentation result: ground in blue, one color per cluster.
# Inputs:
#     segmented_ground: ground points
#     segmented_cloud: off-ground points
#     cluster_index: per-point cluster id for segmented_cloud (-1 = noise)
def plot_clusters(segmented_ground, segmented_cloud, cluster_index):
    def colormap(c, num_clusters):
        # Noise points are drawn black.
        if c == -1:
            return [0, 0, 0]
        # Derive a gray level from the cluster id, then zero one channel.
        color = [c / num_clusters * 128 + 127] * 3
        color[c % 3] = 0
        return color
    # Ground cloud with a uniform blue color.
    pcd_ground = o3d.geometry.PointCloud()
    pcd_ground.points = o3d.utility.Vector3dVector(segmented_ground)
    pcd_ground.colors = o3d.utility.Vector3dVector(
        [[0, 0, 255]] * segmented_ground.shape[0]
    )
    # Off-ground objects, colored by cluster id.
    pcd_objects = o3d.geometry.PointCloud()
    pcd_objects.points = o3d.utility.Vector3dVector(segmented_cloud)
    num_clusters = max(cluster_index) + 1
    print(num_clusters)
    pcd_objects.colors = o3d.utility.Vector3dVector(
        [colormap(c, num_clusters) for c in cluster_index]
    )
    o3d.visualization.draw_geometries([pcd_ground, pcd_objects])
def main():
    """Entry point: load one KITTI frame, remove ground, cluster the rest, visualize."""
    root_dir = '../../Data/data_object_velodyne//testing/velodyne' # 数据集路径
    cat = os.listdir(root_dir)
    # NOTE(review): drops the first directory entry; os.listdir order is
    # platform-dependent, so confirm which file this is meant to skip.
    cat = cat[1:]
    filename = os.path.join(root_dir, cat[250])
    print("当前使用的点云文件为:")
    print('clustering pointcloud file:', filename)
    print("读取点云文件")
    origin_points = read_velodyne_bin(filename)
    print("点云大小为:")
    print(origin_points.shape)
    print("开始提取地面")
    # Ground removal, then clustering of what remains, then visualization.
    segmented_points,ground_points = ground_segmentation(data=origin_points)
    cluster_index = clustering(segmented_points)
    plot_clusters(ground_points, segmented_points, cluster_index)
if __name__ == '__main__':
    main()
|
teamo1996/Point-cloud-process-shenlan | 03-Clustering/code/Spectral_Clustering.py | <reponame>teamo1996/Point-cloud-process-shenlan
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets, mixture
from matplotlib.patches import Ellipse
from scipy.stats import multivariate_normal
import KMeans
from sklearn import neighbors
plt.style.use('seaborn')
class spectral_clustering(object):
    """Unnormalized spectral clustering.

    Pipeline: k-NN affinity graph -> Laplacian L = D - W -> embedding from the
    eigenvectors of the smallest eigenvalues -> K-means on the embedding.
    """

    def __init__(self, n_clusters, epsilon=1e-4, max_iter=50):
        self.n_clusters = n_clusters  # number of clusters (== embedding dimension)
        self.max_iter = max_iter      # K-means iteration cap
        self.epsilon = epsilon        # K-means convergence tolerance
        self.W = None                 # graph affinity matrix
        self.L = None                 # graph Laplacian
        self.D = None                 # degree matrix
        self.N = None                 # number of samples
        self.vector = None            # spectral embedding (N x n_clusters)

    def init_param(self, data):
        """Build W, D and the unnormalized Laplacian L = D - W from the data."""
        self.N = data.shape[0]
        self.cal_weight_mat(data)                 # affinity matrix W
        self.D = np.diag(self.W.sum(axis=1))      # degree matrix D
        self.L = self.D - self.W                  # unnormalized Laplacian
        return

    def cal_weight_mat(self, data, n_neighbors=5):
        """Affinity from a symmetrized k-nearest-neighbor connectivity graph."""
        self.W = neighbors.kneighbors_graph(data, n_neighbors, mode='connectivity', include_self=False)
        self.W = self.W.A                  # sparse graph -> dense matrix
        self.W = 0.5 * (self.W + self.W.T) # symmetrize

    def fit(self, data):
        """Compute the n_clusters-dimensional spectral embedding of `data`."""
        self.init_param(data)
        # L is symmetric, so use eigh: it guarantees real-valued output (eig can
        # return a complex dtype from round-off) and sorts eigenvalues ascending.
        w, v = np.linalg.eigh(self.L)
        # Eigenvectors of the smallest n_clusters eigenvalues form the embedding.
        self.vector = v[:, :self.n_clusters]

    def predict(self, data):
        """Cluster the fitted embedding with K-means; returns per-sample labels."""
        km = KMeans.K_Means(n_clusters=self.n_clusters, tolerance=self.epsilon, max_iter=self.max_iter)
        km.fit(self.vector)
        result = km.predict(self.vector)
        return result
# Generate a synthetic 2-D dataset from three Gaussian blobs.
def generate_X(true_Mu, true_Var):
    """Sample 400/600/1000 points from three diagonal Gaussians, plot them, return the stack."""
    counts = (400, 600, 1000)
    blobs = [
        np.random.multivariate_normal(mu, np.diag(var), num)
        for num, mu, var in zip(counts, true_Mu, true_Var)
    ]
    # Scatter-plot each blob in its own default color.
    plt.figure(figsize=(10, 8))
    plt.axis([-10, 15, -5, 15])
    for blob in blobs:
        plt.scatter(blob[:, 0], blob[:, 1], s=5)
    plt.show()
    # Stack all blobs into a single (2000, 2) array.
    return np.vstack(blobs)
if __name__ == '__main__':
    # Generate synthetic data from three Gaussian blobs.
    true_Mu = [[0.5, 0.5], [5.5, 2.5], [1, 7]]
    true_Var = [[1, 3], [2, 2], [6, 2]]
    X = generate_X(true_Mu, true_Var)
    # Fit spectral clustering and predict one label per sample.
    s_clustering = spectral_clustering(n_clusters=3)
    s_clustering.fit(X)
    cat = s_clustering.predict(X)
    print(cat)
    # Scatter-plot the samples colored by predicted cluster.
    plt.figure(figsize=(10, 8))
    plt.axis([-10, 15, -5, 15])
    plt.scatter(X[:, 0], X[:, 1], s=5, c=cat)
    plt.show()
teamo1996/Point-cloud-process-shenlan | 08-Feature Description/code/Decriptor.py | <gh_stars>1-10
import argparse # 命令行参数获取
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import open3d as o3d
import Utils
import numpy
import open3d
class FPFH_decriptor:
    """FPFH (Fast Point Feature Histogram) descriptor extractor.

    Usage: set_pointclouds() -> set_keypoints() -> describe() -> get_descriptors().
    The point cloud must carry per-point normals.
    """

    def __init__(self) -> None:
        self.__B = 11              # histogram bins per angle (descriptor length is 3*B)
        self.__pointclouds = None  # open3d.geometry.PointCloud with normals
        self.__radius = 0.05       # neighborhood radius for SPFH/FPFH
        self.__keypoints = None    # (K, 3) array of query points
        self.__descriptors = None  # (K, 3*B) array, one row per keypoint
        self.__search_tree = None  # KD-tree over the point cloud

    def set_pointclouds(self, pointclouds: open3d.geometry.PointCloud) -> None:
        """Attach the point cloud and build its KD-tree."""
        self.__pointclouds = pointclouds
        self.__search_tree = o3d.geometry.KDTreeFlann(self.__pointclouds)

    def set_attribute(self, B: int, radius: float) -> None:
        """Configure histogram bin count and search radius."""
        self.__B = B
        self.__radius = radius

    def set_keypoints(self, keypoints: numpy.ndarray) -> None:
        """Set the (K, 3) keypoints to describe."""
        self.__keypoints = keypoints

    def __SFPH(self, keypoint: numpy.ndarray) -> numpy.ndarray:
        """Compute the simplified point feature histogram (SPFH) of one point."""
        # BUGFIX: the original read a module-level global ``B`` here, which only
        # existed when the file was run as a script and ignored set_attribute().
        B = self.__B
        points = np.asarray(self.__pointclouds.points)
        normals = np.asarray(self.__pointclouds.normals)
        # Radius neighborhood; the first hit is the query point itself.
        [k, idx_neighbors, _] = self.__search_tree.search_radius_vector_3d(keypoint, self.__radius)
        n1 = normals[idx_neighbors[0]]
        idx_neighbors = idx_neighbors[1:]
        # Unit direction vectors (p2 - p1) / |p2 - p1|.
        diff = points[idx_neighbors] - keypoint
        diff = diff / np.reshape(np.linalg.norm(diff, ord=2, axis=1), (k - 1, 1))
        # Darboux frame (u, v, w) at the query point.
        u = n1
        v = np.cross(u, diff)
        w = np.cross(u, v)
        n2 = normals[idx_neighbors]
        # The three SPFH angles.
        alpha = np.reshape((v * n2).sum(axis=1), (k - 1, 1))
        phi = np.reshape((u * diff).sum(axis=1), (k - 1, 1))
        theta = np.reshape(np.arctan2((w * n2).sum(axis=1), (u * n2).sum(axis=1)), (k - 1, 1))
        # Histogram each angle into B bins and concatenate.
        alpha_hist = np.reshape(np.histogram(alpha, B, range=[-1.0, 1.0])[0], (1, B))
        phi_hist = np.reshape(np.histogram(phi, B, range=[-1.0, 1.0])[0], (1, B))
        theta_hist = np.reshape(np.histogram(theta, B, range=[-3.14, 3.14])[0], (1, B))
        return np.hstack((alpha_hist, phi_hist, theta_hist))

    def describe(self) -> None:
        """Compute one FPFH descriptor per keypoint; fetch them with get_descriptors()."""
        pointclouds = np.asarray(self.__pointclouds.points)
        self.__descriptors = numpy.ndarray((0, 3 * self.__B))
        for keypoint in self.__keypoints:
            # Neighbors of the keypoint (first hit is the keypoint itself).
            [k, idx_neighbors, _] = self.__search_tree.search_radius_vector_3d(keypoint, self.__radius)
            idx_neighbors = idx_neighbors[1:]
            # Inverse-distance weights of the neighbors.
            w = 1.0 / np.linalg.norm(keypoint - pointclouds[idx_neighbors], ord=2, axis=1)
            # Weighted average of the neighbors' SPFHs.
            neighbors_SPFH = np.reshape(
                np.asarray([self.__SFPH(pointclouds[i]) for i in idx_neighbors]),
                (k - 1, 3 * self.__B))
            neighbors_SPFH = 1.0 / (k - 1) * np.dot(w, neighbors_SPFH)
            # FPFH = own SPFH + weighted neighbor SPFHs, then L2-normalize.
            finial_FPFH = self.__SFPH(keypoint) + neighbors_SPFH
            finial_FPFH = finial_FPFH / np.linalg.norm(finial_FPFH)
            self.__descriptors = numpy.vstack((self.__descriptors, finial_FPFH))

    def get_descriptors(self) -> numpy.ndarray:
        """Return the (K, 3*B) descriptor matrix (None until describe() runs)."""
        return self.__descriptors
# Parse command-line options for the descriptor demo.
def get_args():
    """Return CLI args: input cloud path, FPFH radius, a keypoint's xyz, and bin count B."""
    cli = argparse.ArgumentParser(description="arg parser")
    cli.add_argument("--file_name", type=str,
                     default="/workspace/Data/modelnet40_normal_resampled/chair/chair_0001.txt")  # input point cloud
    cli.add_argument("--radius", type=float, default=0.05)   # FPFH search radius
    cli.add_argument("--x", type=float, default=0.084)       # keypoint x coordinate
    cli.add_argument("--y", type=float, default=0.2597)      # keypoint y coordinate
    cli.add_argument("--z", type=float, default=-0.0713)     # keypoint z coordinate
    cli.add_argument("--B", type=int, default=11)            # histogram bins per angle
    return cli.parse_args()
if __name__ == "__main__":
# 获取命令行参数
args = get_args()
radius = args.radius
file_name = args.file_name
B = args.B
# 读取点云
pointclouds = Utils.pointcloud.read_pointcloud(file_name)
# 创建特征点
key_point1 = np.array([0.4333,-0.7807,-0.4372])
key_point2 = np.array([-0.4240,-0.7850,-0.4392])
key_point3 = np.array([0.02323,0.004715,-0.2731])
keypoints = np.vstack((key_point1,key_point2,key_point3))
# 创建描述子
descriptor = FPFH_decriptor()
descriptor.set_pointclouds(pointclouds)
descriptor.set_keypoints(keypoints)
descriptor.describe()
descriptors = descriptor.get_descriptors()
print(descriptors.shape)
plt.plot(range(3*B), descriptors[0,:].T, ls="-.",color="r",marker =",", lw=2, label="keypoint1")
plt.plot(range(3*B), descriptors[1,:].T, ls="-.",color="g",marker =",", lw=2, label="keypoint2")
plt.plot(range(3*B), descriptors[2,:].T, ls="-.",color="b",marker =",", lw=2, label="keypoint3")
plt.legend()
plt.show()
|
teamo1996/Point-cloud-process-shenlan | 09-Registration/code/RANSAC.py | <reponame>teamo1996/Point-cloud-process-shenlan
from cmath import inf
import collections
import copy
import concurrent.futures
import this
import numpy as np
from scipy.spatial.distance import pdist
from scipy.spatial.transform import Rotation as R
import open3d as o3d
import open3d
class RANSAC_ICP:
    """RANSAC-initialized ICP registration between two feature-described clouds.

    Configure via the companion Builder, then set source/target clouds and their
    per-point feature matrices before calling ransac_match().
    """
    def __init__(self,builder) -> None:
        # Copy all tuning parameters off the Builder.
        self.max_works = builder.max_works
        self.num_samples = builder.num_samples
        self.max_correspondence_distance = builder.max_correspondence_distance
        self.max_iteration = builder.max_iteration
        self.max_validation = builder.max_validation
        self.max_edge_length_ratio = builder.max_edge_length_ratio
        self.normal_angle_threshold = builder.normal_angle_threshold
        self.__pcd_source = None
        self.__pcd_target = None
        self.__source_features = None
        self.__target_features = None
        # Search tree over the target point cloud:
        self.__search_tree_target = None
    def __str__(self) -> str:
        """Human-readable dump of the configuration."""
        info = ("RANSAC_ICP:" + "\n" + "max_works:{}".format(self.max_works) + "\n" +
                "num_samples:{}".format(self.num_samples) + "\n" +
                "max_correspondence_distance:{}".format(self.max_correspondence_distance) + "\n" +
                "max_iteration:{}".format(self.max_iteration) + "\n" +
                "max_validation:{}".format(self.max_validation) + "\n" +
                "max_edge_length_ratio:{}".format(self.max_edge_length_ratio) + "\n" +
                "normal_angle_threshold:{}".format(self.normal_angle_threshold))
        return info
    def ransac_match(self) -> np.matrix:
        """Run RANSAC proposal search followed by ICP refinement; returns the best result."""
        # Nearest-neighbor correspondences in feature space:
        matches = self.__get_potential_matches()
        # RANSAC:
        N, _ = matches.shape
        idx_matches = np.arange(N)
        # SE3
        T = None
        # Infinite generator yielding random samples of num_samples correspondences.
        proposal_generator = (
            matches[np.random.choice(idx_matches, self.num_samples, replace=False)] for _ in iter(int, 1)
        )
        # Validator: checks a proposal against the configured thresholds.
        validator = lambda proposal: self.__is_valid_match(proposal)
        # Evaluate proposals until one passes all checks.
        # NOTE(review): this uses the builtin map(), so proposals are evaluated
        # serially; the executor is never used — likely meant executor.map.
        with concurrent.futures.ThreadPoolExecutor(max_workers=self.max_works) as executor:
            for T in map(validator, proposal_generator):
                if not (T is None):
                    break
        print('[RANSAC ICP]: Get first valid proposal:\n')
        print(T)
        print("Start registration...")
        # One full ICP refinement of the first valid proposal.
        best_result = self.__icp_iter(T)
        # RANSAC validation of further proposals; in practice this can be skipped.
        num_validation = 0
        for i in range(self.max_iteration):
            # get proposal:
            T = validator(next(proposal_generator))
            # check validity:
            if (not (T is None)) and (num_validation < self.max_validation):
                num_validation += 1
                # refine estimation on all keypoints:
                result = self.__icp_iter(T)
                # Keep the result with the better fitness:
                best_result = best_result if best_result.fitness > result.fitness else result
                if num_validation == self.max_validation:
                    break
        return best_result
    def __icp_iter(self,T):
        """Iteratively refine transform T with point-to-point ICP; returns the evaluation."""
        # Number of points in the source cloud:
        N = len(self.__pcd_source.points)
        # Baseline evaluation used for the early-stopping comparison.
        # NOTE(review): result_prev is never updated inside the loop, so the
        # relative-gain test always compares against the initial fit — confirm intent.
        result_prev = result_curr = o3d.pipelines.registration.evaluate_registration(
            self.__pcd_source, self.__pcd_target, self.max_correspondence_distance, T
        )
        for _ in range(self.max_iteration):
            # TODO: transform is actually an in-place operation. deep copy first otherwise the result will be WRONG
            pcd_source_current = copy.deepcopy(self.__pcd_source)
            # apply transform:
            pcd_source_current = pcd_source_current.transform(T)
            # find correspondence:
            matches = []
            for n in range(N):
                query = np.asarray(pcd_source_current.points)[n]
                _, idx_nn_target, dis_nn_target = self.__search_tree_target.search_knn_vector_3d(query, 1)
                if dis_nn_target[0] <= self.max_correspondence_distance:
                    matches.append(
                        [n, idx_nn_target[0]]
                    )
            matches = np.asarray(matches)
            if len(matches) >= 4:
                # sovle ICP:
                P = np.asarray(self.__pcd_source.points)[matches[:,0]]
                Q = np.asarray(self.__pcd_target.points)[matches[:,1]]
                T = self.__solve_icp(P, Q)
            # evaluate:
            result_curr = o3d.pipelines.registration.evaluate_registration(
                self.__pcd_source, self.__pcd_target, self.max_correspondence_distance, T
            )
            # if no significant improvement:
            if self.__shall_terminate(result_curr, result_prev):
                print('[RANSAC ICP]: Early stopping.')
                break
        return result_curr
    def __shall_terminate(self,result_curr, result_prev):
        """Stop when the relative fitness gain drops below 1%."""
        # relative fitness improvement:
        relative_fitness_gain = result_curr.fitness / result_prev.fitness - 1
        return relative_fitness_gain < 0.01
    # Check whether a sampled correspondence set is geometrically plausible.
    def __is_valid_match(self,proposal):
        """Return the transform implied by `proposal` if it passes all checks, else None."""
        idx_source, idx_target = proposal[:,0], proposal[:,1]
        # 1. Normal direction check
        if not self.normal_angle_threshold is None:
            # Normals of the matched source/target points:
            normals_source = np.asarray(self.__pcd_source.normals)[idx_source]
            normals_target = np.asarray(self.__pcd_target.normals)[idx_target]
            # Angle between corresponding normals must stay under the threshold:
            normal_cos_distances = (normals_source*normals_target).sum(axis = 1)
            is_valid_normal_match = np.all(normal_cos_distances >= np.cos(self.normal_angle_threshold))
            if not is_valid_normal_match:
                return None
        # Matched point coordinates
        points_source = np.asarray(self.__pcd_source.points)[idx_source]
        points_target = np.asarray(self.__pcd_target.points)[idx_target]
        # 2. Geometric similarity check: pairwise edge lengths must roughly agree.
        pdist_source = pdist(points_source)
        pdist_target = pdist(points_target)
        is_valid_edge_length = np.all(
            np.logical_and(
                pdist_source > self.max_edge_length_ratio * pdist_target,
                pdist_target > self.max_edge_length_ratio * pdist_source
            )
        )
        if not is_valid_edge_length:
            return None
        # 3. Correspondence distance check
        T = self.__solve_icp(points_source, points_target) # transform between the two point sets
        R, t = T[0:3, 0:3], T[0:3, 3]
        # Residual distance after applying the transform
        deviation = np.linalg.norm(
            points_target - np.dot(points_source, R.T) - t,
            axis = 1
        )
        is_valid_correspondence_distance = np.all(deviation <= self.max_correspondence_distance)
        # Return the candidate transform if valid, otherwise None
        return T if is_valid_correspondence_distance else None
    # Closed-form ICP (Kabsch / SVD) for a matched point set.
    def __solve_icp(self,source, target):
        """Return the 4x4 rigid transform mapping `source` onto `target` (least squares)."""
        # Centroids:
        up = source.mean(axis = 0)
        uq = target.mean(axis = 0)
        # Centered point sets:
        P_centered = source - up
        Q_centered = target - uq
        # SVD decomposition gives R and t
        U, s, V = np.linalg.svd(np.dot(Q_centered.T, P_centered), full_matrices=True, compute_uv=True)
        R = np.dot(U, V)
        t = uq - np.dot(R, up)
        # Pack R and t into a homogeneous transform matrix
        T = np.zeros((4, 4))
        T[0:3, 0:3] = R
        T[0:3, 3] = t
        T[3, 3] = 1.0
        return T
    def set_source_pointscloud(self,pointscloud:open3d.geometry.PointCloud) -> None:
        """Set the source (moving) point cloud."""
        self.__pcd_source = pointscloud
    def set_target_pointscloud(self,pointscloud:open3d.geometry.PointCloud) -> None:
        """Set the target (fixed) point cloud and build its search tree."""
        self.__pcd_target = pointscloud
        self.__search_tree_target = o3d.geometry.KDTreeFlann(self.__pcd_target)
    def set_source_features(self,source_features:np.ndarray) -> None:
        """Set the (feature_dim, N) feature matrix of the source cloud."""
        self.__source_features = source_features
    def set_target_features(self,target_features:np.ndarray) -> None:
        """Set the (feature_dim, N) feature matrix of the target cloud."""
        self.__target_features = target_features
    def __get_potential_matches(self) -> np.ndarray:
        """Match every source feature to its nearest target feature; returns an (N, 2) index array."""
        # KD-tree over the high-dimensional target features
        search_tree = o3d.geometry.KDTreeFlann(self.__target_features)
        # One nearest-neighbor lookup per source feature column
        _, N = self.__source_features.shape
        matches = []
        for i in range(N):
            query = self.__source_features[:, i]
            _, idx_nn_target, _ = search_tree.search_knn_vector_xd(query, 1)
            matches.append(
                [i, idx_nn_target[0]]
            )
        # Result: N rows of [source_index, target_index]
        matches = np.asarray(matches)
        return matches
# Builder pattern: accumulate RANSAC_ICP settings, then build() creates the object.
class Builder:
    """Fluent builder for RANSAC_ICP; every setter returns self for chaining."""

    def __init__(self) -> None:
        # Defaults for every tunable parameter.
        self.max_works = 16
        self.num_samples = 4
        self.max_correspondence_distance = 1.5
        self.max_iteration = 1000
        self.max_validation = 500
        self.max_edge_length_ratio = 0.9
        self.normal_angle_threshold = None

    def set_max_works(self, max_works: int):
        """Worker-thread count for proposal validation."""
        self.max_works = max_works
        return self

    def set_num_samples(self, num_samples: int):
        """Correspondences drawn per RANSAC proposal."""
        self.num_samples = num_samples
        return self

    def set_max_correspondence_distance(self, max_correspondence_distance: float):
        """Maximum point-pair distance considered a correspondence."""
        self.max_correspondence_distance = max_correspondence_distance
        return self

    def set_max_iteration(self, max_iteration: int):
        """Iteration cap for both RANSAC and ICP loops."""
        self.max_iteration = max_iteration
        return self

    def set_max_validation(self, max_validation: int):
        """How many extra valid proposals to refine during validation."""
        self.max_validation = max_validation
        return self

    def set_max_edge_length_ratio(self, max_edge_length_ratio: float):
        """Edge-length ratio threshold for the geometric similarity check."""
        self.max_edge_length_ratio = max_edge_length_ratio
        return self

    def set_normal_angle_threshold(self, normal_angle_threshold: float):
        """Maximum normal angle difference (None disables the normal check)."""
        self.normal_angle_threshold = normal_angle_threshold
        return self

    def build(self):
        """Construct a RANSAC_ICP configured with the accumulated settings."""
        return RANSAC_ICP(self)
|
teamo1996/Point-cloud-process-shenlan | 02-Nearest Neighbor Problem/code/result_set.py | # 该文件定义了在树中查找数据所需要的数据结构,类似一个中间件
import copy
# Container holding one neighbor: its distance to the query and its point index.
class DistIndex:
    def __init__(self, distance, index):
        self.distance = distance
        self.index = index

    def __lt__(self, other):
        # Order by distance so lists of DistIndex sort nearest-first.
        return self.distance < other.distance


# Result container for a K-nearest-neighbor query.
# Input:
#     capacity: how many neighbors to keep.
class KNNResultSet:
    def __init__(self, capacity):
        self.capacity = capacity
        self.count = 0
        self.worst_dist = 1e10
        # Pre-fill with sentinel entries at "infinite" distance.
        self.dist_index_list = [DistIndex(self.worst_dist, 0) for _ in range(capacity)]
        self.comparison_counter = 0

    def size(self):
        return self.count

    def full(self):
        return self.count == self.capacity

    def worstDist(self):
        return self.worst_dist

    def add_point(self, dist, index):
        """Insert a candidate neighbor, keeping the list sorted by distance."""
        self.comparison_counter += 1
        if dist > self.worst_dist:
            return  # worse than everything already held
        if self.count < self.capacity:
            self.count += 1
        # Shift worse entries right until the insertion slot is found.
        pos = self.count - 1
        while pos > 0 and self.dist_index_list[pos - 1].distance > dist:
            self.dist_index_list[pos] = copy.deepcopy(self.dist_index_list[pos - 1])
            pos -= 1
        self.dist_index_list[pos].distance = dist
        self.dist_index_list[pos].index = index
        # The worst kept distance is whatever sits in the last slot.
        self.worst_dist = self.dist_index_list[self.capacity - 1].distance

    def __str__(self):
        text = ''
        for i, dist_index in enumerate(self.dist_index_list):
            text += '%d - %.2f\n' % (dist_index.index, dist_index.distance)
        text += 'In total %d comparison operations.' % self.comparison_counter
        return text
# Result container for a fixed-radius neighbor query.
# Input:
#     radius: search radius; every point within it is kept.
class RadiusNNResultSet:
    def __init__(self, radius):
        self.radius = radius
        self.count = 0
        self.worst_dist = radius
        self.dist_index_list = []
        self.comparison_counter = 0

    def size(self):
        return self.count

    def worstDist(self):
        return self.radius

    def add_point(self, dist, index):
        """Record a neighbor if it lies within the search radius."""
        self.comparison_counter += 1
        if dist > self.radius:
            return
        self.count += 1
        self.dist_index_list.append(DistIndex(dist, index))

    def __str__(self):
        # Sort nearest-first before printing.
        self.dist_index_list.sort()
        text = ''
        for i, dist_index in enumerate(self.dist_index_list):
            text += '%d - %.2f\n' % (dist_index.index, dist_index.distance)
        text += 'In total %d neighbors within %f.\nThere are %d comparison operations.' \
                % (self.count, self.radius, self.comparison_counter)
        return text
|
teamo1996/Point-cloud-process-shenlan | 09-Registration/registration_dataset/evaluate_rt.py | <reponame>teamo1996/Point-cloud-process-shenlan
import numpy as np
import math
import os
import struct
from scipy.spatial.transform import Rotation
import open3d as o3d
def get_P_from_Rt(R, t):
    """Assemble a 4x4 homogeneous transform from rotation R (3x3) and translation t (3,)."""
    P = np.identity(4)
    P[:3, :3] = R
    P[:3, 3] = t
    return P
def is_registration_successful(P_pred_np, P_gt_np):
    """A registration is successful when RTE < 2.0 (translation) and RRE < 5.0 (degrees)."""
    rte, rre = get_P_diff(P_pred_np, P_gt_np)
    success = rte < 2.0 and rre < 5.0
    return success, rte, rre
def get_P_diff(P_pred_np, P_gt_np):
    """Return (translation error, summed absolute Euler-angle error in degrees) between two poses."""
    delta = np.dot(np.linalg.inv(P_pred_np), P_gt_np)
    t_diff = np.linalg.norm(delta[0:3, 3])
    rot_delta = Rotation.from_matrix(delta[0:3, 0:3])
    angles_diff = np.sum(np.abs(rot_delta.as_euler('xyz', degrees=True)))
    return t_diff, angles_diff
def visualize_pc_pair(src_np, dst_np):
    """Draw the source cloud (red) and destination cloud (green) together; inputs are 3xN."""
    clouds = []
    for pts, rgb in ((src_np, [1, 0, 0]), (dst_np, [0, 1, 0])):
        pcd = o3d.geometry.PointCloud()
        pcd.points = o3d.utility.Vector3dVector(np.transpose(pts))
        pcd.paint_uniform_color(rgb)
        clouds.append(pcd)
    o3d.visualization.draw_geometries(clouds)
def read_oxford_bin(bin_path):
    """Load an Oxford .bin cloud; returns a 6xN float32 array [x, y, z, nx, ny, nz]."""
    raw = np.fromfile(bin_path, dtype=np.float32)
    per_point = 6
    return np.reshape(raw, (raw.size // per_point, per_point)).T
def read_reg_results(file_path, splitter=','):
    """Read a registration-results text file into a list of stripped token lists (one per line)."""
    rows = []
    with open(file_path, 'r') as f:
        for line in f:
            rows.append([token.strip() for token in line.split(splitter)])
    return rows
def reg_result_row_to_array(reg_result_row):
    """Parse one row [idx1, idx2, tx, ty, tz, qw, qx, qy, qz] into (idx1, idx2, t, Rotation)."""
    idx1, idx2 = int(reg_result_row[0]), int(reg_result_row[1])
    t = np.asarray([float(x) for x in reg_result_row[2:5]])
    # The file stores the quaternion w-first; scipy expects xyzw order.
    q_wxyz = [float(x) for x in reg_result_row[5:9]]
    rot = Rotation.from_quat(np.asarray(q_wxyz[1:] + q_wxyz[:1]))
    return idx1, idx2, t, rot
def evaluate_rt(gt_file_path, predict_file_path):
    """Compare predicted registrations against ground truth.

    Prints and returns (success_rate, avg_RTE, avg_RRE); success means
    RTE < 2.0 and RRE < 5.0, and the averages cover successful pairs only.
    """
    gt_reg = read_reg_results(gt_file_path)
    predict_reg = read_reg_results(predict_file_path)
    assert len(gt_reg) == len(predict_reg)
    counter_successful = 0
    rte_sum = 0
    rre_sum = 0
    # Rows are compared pairwise; index 0 is skipped (presumably a header row — confirm).
    for i in range(1, len(predict_reg)):
        gt_row = gt_reg[i]
        gt_idx1, gt_idx2, gt_t, gt_rot = reg_result_row_to_array(gt_row)
        gt_P = get_P_from_Rt(gt_rot.as_matrix(), gt_t)
        predict_row = predict_reg[i]
        predict_idx1, predict_idx2, predict_t, predict_rot = reg_result_row_to_array(predict_row)
        predict_P = get_P_from_Rt(predict_rot.as_matrix(), predict_t)
        # Both files must describe the same cloud pairs, in the same order.
        assert gt_idx1 == predict_idx1
        assert gt_idx2 == predict_idx2
        is_reg_succ, rte, rre = is_registration_successful(predict_P, gt_P)
        if is_reg_succ:
            counter_successful += 1
            rte_sum += rte
            rre_sum += rre
        # for debug
        print(predict_row)
    reg_success_rate = counter_successful/len(gt_reg)
    # NOTE(review): divides by counter_successful — raises ZeroDivisionError
    # when no registration succeeded.
    avg_rte = rte_sum/counter_successful
    avg_rre = rre_sum/counter_successful
    print("Registration successful rate: %.2f, successful counter: %d, \n"
          "average Relative Translation Error (RTE): %.2f, average Relative Rotation Error (RRE): %.2f"
          % (reg_success_rate, counter_successful, avg_rte, avg_rre))
    return reg_success_rate, avg_rte, avg_rre
def main():
    """Evaluate registration results against ground truth (if present) and visualize one pair."""
    # We do NOT provide the "groundtruth.txt". This is the evaluation script we will be using.
    # You are required to provide your registration results via 'reg_result.txt'
    # In our provided `reg_result.txt`, we provides 3 ground truth registration results as an example.
    # This script provides some functions to read and visualize the registration results.
    dataset_path = '/media/teamo/samsung/Homework 9/registration_dataset'
    ground_truth_reg_result_path = os.path.join(dataset_path, 'groundtruths.txt')
    your_reg_result_path = os.path.join(dataset_path, 'reg_result_teamo.txt')
    # evaluate registration performance
    if os.path.exists(ground_truth_reg_result_path):
        evaluate_rt(ground_truth_reg_result_path, your_reg_result_path)
    # visualize registration result: apply row `visualize_row_idx`'s transform
    # to the destination cloud and overlay it on the source cloud.
    visualize_row_idx = 310
    reg_list = read_reg_results(os.path.join(dataset_path, 'reg_result_teamo.txt'), splitter=',')
    idx1, idx2, t, rot = reg_result_row_to_array(reg_list[visualize_row_idx])
    src_np = read_oxford_bin(os.path.join(dataset_path, 'point_clouds', '%d.bin' % idx1))[0:3, :]
    dst_np = read_oxford_bin(os.path.join(dataset_path, 'point_clouds', '%d.bin' % idx2))[0:3, :]
    dst_trans_np = np.dot(rot.as_matrix(), dst_np) + np.expand_dims(t, 1)
    visualize_pc_pair(src_np, dst_trans_np)
if __name__ == '__main__':
    main()
teamo1996/Point-cloud-process-shenlan | 02-Nearest Neighbor Problem/code/benchmark.py | <gh_stars>1-10
# 对数据集中的点云,批量执行构建树和查找,包括kdtree和octree,并评测其运行时间
import random
import math
import numpy as np
import time
import os
import struct
import matplotlib.pyplot as plt
from scipy import spatial
import octree as octree
import kdtree as kdtree
from result_set import KNNResultSet, RadiusNNResultSet
def read_velodyne_bin(path):
    """Read a KITTI .bin cloud; returns a 3xN float32 array of xyz rows (intensity dropped)."""
    with open(path, 'rb') as f:
        records = struct.iter_unpack('ffff', f.read())
        xyz = [[x, y, z] for (x, y, z, _intensity) in records]
    return np.asarray(xyz, dtype=np.float32).T
def main():
    """Benchmark octree vs kdtree vs scipy KDTree on one KITTI frame.

    Times tree construction, 8-NN search, radius search and brute force for
    each structure, then plots kNN query time as a function of k (1..100).
    """
    # Benchmark configuration.
    leaf_size = 32
    min_extent = 0.0001
    k = 8
    radius = 1
    # Load the test point cloud as an (N, 3) array.
    filename = "/workspace/Point-cloud-process-shenlan/02-Nearest Neighbor Problem/code/000000.bin"
    db_np = read_velodyne_bin(filename).T
    print("octree --------------")
    construction_time = 0
    knn_time = 0
    radius_time = 0
    brute_time = 0
    octree_knn = []
    begin_t = time.time()
    root = octree.octree_construction(db_np, leaf_size, min_extent)
    construction_time = time.time() - begin_t
    query = db_np[0,:]
    begin_t = time.time()
    result_set = KNNResultSet(capacity=k)
    octree.octree_knn_search(root, db_np, result_set, query)
    knn_time = time.time() - begin_t
    begin_t = time.time()
    result_set = RadiusNNResultSet(radius=radius)
    octree.octree_radius_search_fast(root, db_np, result_set, query)
    radius_time = time.time() - begin_t
    # Brute-force baseline: full distance computation plus sort.
    begin_t = time.time()
    diff = np.linalg.norm(np.expand_dims(query, 0) - db_np, axis=1)
    nn_idx = np.argsort(diff)
    nn_dist = diff[nn_idx]
    brute_time = time.time() - begin_t
    print("Octree: build %.3f, knn %.3f, radius %.3f, brute %.3f" % (construction_time*1000,
                                                                     knn_time*1000,
                                                                     radius_time*1000,
                                                                     brute_time*1000))
    # Octree kNN query time as a function of k.
    for i in range(100):
        begin_t = time.time()
        result_set = KNNResultSet(capacity=i+1)
        octree.octree_knn_search(root, db_np, result_set, query)
        knn_time = time.time() - begin_t
        octree_knn.append(knn_time*1000)
    print("kdtree --------------")
    construction_time = 0
    knn_time = 0
    radius_time = 0
    brute_time = 0
    begin_t = time.time()
    root = kdtree.kdtree_construction(db_np, leaf_size)
    construction_time += time.time() - begin_t
    query = db_np[0,:]
    kdtree_knn = []
    begin_t = time.time()
    result_set = KNNResultSet(capacity=k)
    kdtree.kdtree_knn_search(root, db_np, result_set, query)
    knn_time = time.time() - begin_t
    begin_t = time.time()
    result_set = RadiusNNResultSet(radius=radius)
    kdtree.kdtree_radius_search(root, db_np, result_set, query)
    radius_time += time.time() - begin_t
    begin_t = time.time()
    diff = np.linalg.norm(np.expand_dims(query, 0) - db_np, axis=1)
    nn_idx = np.argsort(diff)
    nn_dist = diff[nn_idx]
    brute_time = time.time() - begin_t
    print("Kdtree: build %.3f, knn %.3f, radius %.3f, brute %.3f" % (construction_time * 1000,
                                                                     knn_time * 1000,
                                                                     radius_time * 1000,
                                                                     brute_time * 1000))
    # Kdtree kNN query time as a function of k.
    for i in range(100):
        begin_t = time.time()
        result_set = KNNResultSet(capacity=i+1)
        kdtree.kdtree_knn_search(root, db_np, result_set, query)
        knn_time = time.time() - begin_t
        kdtree_knn.append(knn_time*1000)
    # Plot kNN time vs k for both tree structures.
    plt.plot(range(100),octree_knn,'g',range(100),kdtree_knn)
    plt.legend(["octree","kdtree"])
    plt.xlabel("k")
    plt.ylabel("knn search time(ms)")
    plt.show()
    print("Scipy-kdtree --------------")
    construction_time = 0
    knn_time = 0
    # BUGFIX: the original had a bare `radius_time` expression statement here
    # (a no-op); it was clearly meant to reset the timer like its neighbors.
    radius_time = 0
    begin_t = time.time()
    sc_tree = spatial.KDTree(db_np,leaf_size)
    construction_time = time.time() - begin_t
    begin_t = time.time()
    sc_tree.query(query,k)
    knn_time = time.time() - begin_t
    print("Scipy-kdtree: build %.3f, knn %.3f" % (construction_time * 1000,
                                                  knn_time * 1000))
if __name__ == '__main__':
    main()
|
gzock/tocaro-exporter | tocaro_exporter.py | #!/usr/bin/env python3
import os
from argparse import ArgumentParser
from configparser import ConfigParser
from logging import getLogger, basicConfig, INFO, DEBUG
import json
from distutils.util import strtobool
from tocaro_session import TocaroSession, CsrfTokenNotFoundError, SignInError
class TocaroExporter():
    """Export Tocaro group lists and messages to JSON files on disk."""

    logger = getLogger(__name__)
    # Which groups to fetch: "show" (visible) or "hide" (hidden).
    group_type = "show"
    # Seconds to wait between successive message-page requests.
    interval = 0.3

    def __init__(self, email="", password=""):
        """Create a Tocaro session and sign in immediately when credentials are given."""
        self.tocaro = TocaroSession()
        if email and password:
            self.signin(email, password)

    def signin(self, email, password):
        """Authenticate the underlying Tocaro session."""
        self.tocaro.signin(email, password)

    def get_groups(self):
        """Return the group list of the configured type.

        Raises:
            ValueError: if ``group_type`` is not "show" or "hide".
        """
        if self.group_type not in ["show", "hide"]:
            self.logger.error("group_type invalid. valid value: show, hide")
            # Raise a specific, informative exception instead of a bare Exception.
            raise ValueError("group_type invalid. valid value: show, hide")
        return self.tocaro.get_groups(self.group_type)

    def export_groups(self, output_path):
        """Fetch the group list and save it as ``<output_path>/groups.json``."""
        groups = self.get_groups()
        __path = "%s/groups.json" % output_path
        self.logger.info("groups saving... output path: " + __path)
        self.__save_json(groups, __path)

    def gather_group_ids(self, groups, includes="", excludes=""):
        """Return the codes of group/talk entries, filtered by name substrings.

        Args:
            groups: iterable of group dicts with "type", "code" and "name" keys.
            includes: if set, keep only groups whose name contains this string.
            excludes: if set, drop groups whose name contains this string.
        """
        ret = []
        for group in groups:
            if group["type"] not in ["group", "talk"]:
                self.logger.debug("group_id: %s, group type isn't group or talk. processing will be skip." % group["code"])
                continue
            if excludes and excludes in group["name"]:
                # Fixed typo in log message: "excudes" -> "excludes".
                self.logger.debug("group_id: %s, contain excludes string in group name. processing will be skip." % group["code"])
                continue
            if includes and includes not in group["name"]:
                self.logger.debug("group_id: %s, not include string in group name. processing will be skip." % group["code"])
                continue
            ret.append(group["code"])
        return ret

    def export_messages(self, output_path, group_id="", includes="", excludes=""):
        """Export all messages of one group, or of every matching group, to JSON.

        A file ``<output_path>/<group_id>.json`` is written per group.
        """
        group_ids = []
        if group_id:
            group_ids.append(group_id)
        else:
            groups = self.get_groups()
            group_ids.extend(
                self.gather_group_ids(groups, includes, excludes)
            )
        # Fixed typo in log message: "gatherd" -> "gathered".
        self.logger.info("gathered group id list: " + str(group_ids))
        for group_id in group_ids:
            self.logger.info("get message from group_id: " + group_id)
            messages = self.tocaro.get_all_messages(
                group_id=group_id,
                interval=float(self.interval)
            )
            __path = "%s/%s.json" % (output_path, group_id)
            self.logger.info("messages saving... output path: " + __path)
            self.__save_json(messages, __path)

    def __save_json(self, content, output_path):
        # Pretty-print with non-ASCII preserved (Tocaro content is Japanese).
        with open(output_path, 'w', encoding='utf-8') as f:
            json.dump(content, f, indent=4, ensure_ascii=False)
# Bug fix: this was declared with "def", which creates a function named
# InvalidArgsError taking a parameter called Exception -- it could not be
# raised or caught as an exception. It must be a class.
class InvalidArgsError(Exception):
    """Raised when the supplied command-line arguments are invalid."""
    pass
def main(args):
    """Entry point: read the config, sign in to Tocaro and export groups/messages.

    Exits with status 1 on invalid arguments, a missing config file or a
    sign-in failure.
    """
    logger = getLogger(__name__)
    logger.info("tocaro exporter execution start.")
    # At least two truthy arguments are required: the (defaulted) config path
    # plus one action argument. Rewritten from the map/lambda form for clarity.
    if sum(bool(value) for value in vars(args).values()) < 2:
        logger.error("specified arguments is wrong.")
        exit(1)
    logger.info("reading config...")
    config = ConfigParser()
    if not os.path.exists(args.config):
        logger.error("specified config file not exists.")
        exit(1)
    config.read(args.config, encoding='utf-8')
    if strtobool(config["common"]["debug"]):
        # Enable debug output on the root logger.
        getLogger().setLevel(DEBUG)
        logger.debug("enable debug logging.")
    logger.info("connect to tocaro.")
    try:
        exporter = TocaroExporter(
            email=config["account"]["email"],
            password=config["account"]["password"]
        )
    except (CsrfTokenNotFoundError, SignInError) as e:
        logger.error("signin processing unsuccessfully... please, check error message: " + str(e))
        exit(1)
    exporter.group_type = config["common"]["group_type"]
    exporter.interval = config["common"]["interval"]
    if args.group_only:
        exporter.export_groups(config["output"]["path"])
    else:
        exporter.export_messages(
            output_path=config["output"]["path"],
            group_id=args.group_id,
            includes=args.includes,
            excludes=config["common"]["excludes"]
        )
    # Fixed typo in log message: "sucessfully" -> "successfully".
    logger.info("processing successfully.")
if __name__ == "__main__":
    # Configure root logging once for command-line use.
    basicConfig(
        format='[%(asctime)s] %(name)s %(levelname)s: %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S'
    )
    getLogger().setLevel(INFO)
    # Command-line interface definition.
    parser = ArgumentParser(description="Tocaro Exporter")
    parser.add_argument("-c", "--config", default="./config.ini", help="specify config file path.")
    parser.add_argument("-a", "--all", action="store_true", help="export all messages from all groups.")
    parser.add_argument("-g", "--group-id", help="export messages from specified group.")
    parser.add_argument("-i", "--includes", help="export message from groups including the specified string.")
    parser.add_argument("--group-only", action="store_true", help="export for group list json only.")
    args = parser.parse_args()
    main(args)
|
gzock/tocaro-exporter | tocaro_session.py | #!/usr/bin/env python3
import re
import json
from logging import getLogger
from time import time, sleep
from requests import session, Response
from bs4 import BeautifulSoup
class CsrfTokenNotFoundError(Exception):
    """Raised when no CSRF token (authenticity_token) is found in the sign-in page."""
    pass
class AuthTokenNotFoundError(Exception):
    """Raised when an API call is attempted without a bearer token (sign in first)."""
    pass
class SignInError(Exception):
    """Raised when the sign-in form submission is rejected (bad credentials)."""
    pass
class TocaroSession():
    """Thin HTTP client for the Tocaro web application.

    Handles form-based sign-in (including CSRF token extraction), bearer-token
    discovery from the post-login page, and the group/message JSON APIs.
    """

    logger = getLogger(__name__)
    __base_url = "https://tocaro.im"
    __signin_url = __base_url + "/sign-in"
    __groups_url = __base_url + "/api/v3/groups"
    __messages_url = __groups_url + "/%s/messages"
    # Template for the sign-in form POST; email/password/token are filled in
    # at runtime by signin().
    signin_data = {
        "email": "",
        "password": "",
        "sso_failback": "",
        "commit": "サインイン",
        "authenticity_token": ""
    }

    def __init__(self):
        self.session = session()
        # Robustness fix: initialize the token so that API helpers raise the
        # intended AuthTokenNotFoundError (not AttributeError) when they are
        # called before signin().
        self.auth_token = None
        # Cache-busting value sent as the "t" query parameter.
        # NOTE(review): int(time() * 100) looks like it intends milliseconds
        # (* 1000) -- confirm against the web client; value kept as-is.
        self.now = int(time() * 100)

    def get_csrf_token(self, html: str) -> str:
        """Extract the authenticity_token value from the sign-in form HTML.

        Raises:
            CsrfTokenNotFoundError: if the form carries no such input.
        """
        soup = BeautifulSoup(html, "html.parser")
        form_input_dom = soup.select(".submitOnce input")
        for i in form_input_dom:
            if i.attrs["name"] == "authenticity_token":
                return i.attrs["value"]
        raise CsrfTokenNotFoundError("csrf token(authenticity_token) not found.")

    def signin(self, email: str, password: str) -> Response:
        """Sign in via the web form and capture the API bearer token."""
        self.signin_data["email"] = email
        self.signin_data["password"] = password
        res = self.session.get(self.__signin_url)
        csrf_token = self.get_csrf_token(res.text)
        self.logger.info("got csrf token: " + csrf_token)
        self.signin_data["authenticity_token"] = csrf_token
        res = self.session.post(self.__signin_url, data=self.signin_data)
        # The site answers 200 with an error banner on bad credentials.
        if "メールアドレスかパスワードが間違っています。" in res.text:
            raise SignInError("signin failed. needs confirm username or password.")
        res.raise_for_status()
        self.logger.info("signin successfully.")
        self.auth_token = self.get_auth_token(res)
        return res

    def get_auth_token(self, html: Response) -> str:
        """Scrape the API bearer token from the post-login page's bootData JS.

        Returns None implicitly when no bootData line is present; callers then
        fail later with AuthTokenNotFoundError.
        """
        for line in html.iter_lines():
            self.logger.debug("processing html line: " + line.decode(encoding="utf-8"))
            # Raw string fixes the invalid "\s" escape warnings (same pattern).
            result = re.findall(r"var\sbootData\s=\s(.*)", line.decode(encoding="utf-8"))
            if len(result) > 0:
                json_data = json.loads(result[0])
                self.logger.debug("got your tocaro bootData: " + str(json_data))
                return json_data["currentUser"]["credentials"]["tocaro"]["token"]

    def get_groups(self, group_type: str = "show") -> list:
        """Return the joined-groups JSON (annotation fixed: the API returns a list)."""
        if not self.auth_token:
            raise AuthTokenNotFoundError()
        self.logger.debug("using auth_token: " + self.auth_token)
        self.logger.info("let's get your joined groups.")
        res = self.session.get(
            self.__groups_url,
            headers={"authorization": "Bearer " + self.auth_token},
            params={"type": group_type, "t": self.now}
        )
        res.raise_for_status()
        self.logger.info("got group successfully.")
        return res.json()

    def get_messages(self, group_id: str, next_id: str = "") -> list:
        """Return one page of messages; pass next_id (a ulid) to page backwards."""
        if not self.auth_token:
            raise AuthTokenNotFoundError()
        self.logger.debug("using auth_token: " + self.auth_token)
        self.logger.info("let's get messages. target group is " + group_id)
        params = {"t": self.now}
        if next_id:
            params["ulid"] = next_id
        res = self.session.get(
            self.__messages_url % group_id,
            headers={"authorization": "Bearer " + self.auth_token},
            params=params
        )
        res.raise_for_status()
        self.logger.info("got message successfully.")
        return res.json()

    def get_all_messages(self, group_id: str, interval: float = 0.3) -> list:
        """Page through every message of a group, sleeping `interval` between calls."""
        self.logger.info("let's get all messages. target group is " + group_id)
        messages = []
        next_id = ""
        while True:
            new = self.get_messages(group_id, next_id=next_id)
            if not new:
                # Empty page means we've reached the oldest message.
                break
            messages.extend(new)
            next_id = messages[-1]["ulid"]
            self.logger.debug("next message id: " + next_id)
            sleep(interval)
        self.logger.info("got all message successfully.")
        return messages
|
conversence/pyoembed | pyoembed/tests/providers/test_main.py | import re
import unittest
from pyoembed.providers import BaseProvider
class MyProvider(BaseProvider):
    """Test provider with one pre-compiled and one wildcard URL schema."""
    priority = 1
    oembed_endpoint = 'http://google.com/?format={format}'
    # Raw string fixes the invalid "\." escape warning; the pattern text is
    # byte-identical to the previous non-raw literal.
    oembed_schemas = [re.compile(r'http://bola\.com/guda/.*'),
                      'http://google.com/*/foo']
class BaseProviderTestCase(unittest.TestCase):
    """Exercise BaseProvider URL matching and oEmbed URL construction."""

    def test_url_supported(self):
        provider = MyProvider()
        self.assertTrue(provider.url_supported('http://bola.com/guda/arco'))
        self.assertTrue(provider.url_supported('http://google.com/bola/foo'))
        self.assertFalse(provider.url_supported('http://arcoiro.com/bola'))

    def test_oembed_url(self):
        provider = MyProvider()
        self.assertEqual(provider.oembed_url('http://bola.com/guda/arco'),
                         'http://google.com/?format=json&url=http%3A%2F%2F'
                         'bola.com%2Fguda%2Farco')
        self.assertEqual(provider.oembed_url('http://google.com/bola/foo'),
                         'http://google.com/?format=json&url=http%3A%2F%2F'
                         'google.com%2Fbola%2Ffoo')

    def test_build_re(self):
        provider = MyProvider()
        _re = provider._build_re('http://google.com/*/foo')
        self.assertEqual(_re.pattern, r'^http://google\.com/.*/foo$')

    def test_get_re(self):
        provider = MyProvider()
        _re = provider._get_re()
        self.assertEqual(len(_re), 2)
        # Raw strings fix the invalid "\." escape warnings; the compared
        # pattern text is unchanged.
        self.assertEqual(_re[0].pattern, r'http://bola\.com/guda/.*')
        self.assertEqual(_re[1].pattern,
                         r'^http://google\.com/.*/foo$')
        self.assertEqual(len(provider._re_schemas), 2)
        self.assertEqual(provider._re_schemas[0].pattern,
                         r'http://bola\.com/guda/.*')
        self.assertEqual(provider._re_schemas[1].pattern,
                         r'^http://google\.com/.*/foo$')
|
bebleo/bebleo_smtpd_fixture | smtpdfix/event_handler.py | from __future__ import annotations
import logging
from typing import Any
from .typing import CallableHandler
log = logging.getLogger(__name__)
class EventHandler():
    """A minimal multicast delegate.

    Handlers are registered with ``+=``, removed with ``-=``, and all of them
    are invoked (in arbitrary order) when the instance itself is called.
    """

    def __init__(self) -> None:
        # Registered callbacks; a set, so each handler fires at most once.
        self._handlers: set[CallableHandler] = set()

    def __iadd__(self, handler: CallableHandler) -> EventHandler:
        """Register *handler* (supports ``events += fn``)."""
        self._handlers.add(handler)
        return self

    def __isub__(self, handler: CallableHandler) -> EventHandler:
        """Unregister *handler*; raises KeyError if it was never added."""
        self._handlers.remove(handler)
        return self

    def __call__(self, *args: Any, **kwargs: Any) -> None:
        """Invoke every registered handler with the given arguments."""
        for callback in self._handlers:
            callback(*args, **kwargs)
|
bebleo/bebleo_smtpd_fixture | smtpdfix/__init__.py | <filename>smtpdfix/__init__.py
# Names re-exported as the package's public API.
__all__ = (
    "AuthController",
    "Authenticator",
    "AuthMessage",
    "Config",
    "smtpd",
    "SMTPDFix",
)
# Package version (PEP 440 development release).
__version__ = "0.4.0.dev"
from .authenticator import Authenticator
from .configuration import Config
from .controller import AuthController
from .fixture import SMTPDFix, smtpd
from .handlers import AuthMessage
# Module-level configuration instance shared across the package.
config = Config()
|
bebleo/bebleo_smtpd_fixture | smtpdfix/handlers.py | <filename>smtpdfix/handlers.py
import base64
import hmac
import logging
import secrets
from datetime import datetime
from email.message import Message as EmailMessage
from typing import List
from aiosmtpd.handlers import Message
from aiosmtpd.smtp import MISSING, SMTP, AuthResult, auth_mechanism
log = logging.getLogger(__name__)
class AuthMessage(Message):
    """aiosmtpd message handler that also implements AUTH mechanisms.

    Received messages are appended to the externally supplied list so that
    tests can inspect them via the controller.
    """

    def __init__(self, messages: List[EmailMessage]) -> None:
        super().__init__()
        self._messages = messages

    @auth_mechanism("CRAM-MD5")
    async def auth_CRAM_MD5(self, server: SMTP, args: List[str]) -> AuthResult:
        """Handle AUTH CRAM-MD5: challenge the client, verify the HMAC-MD5 digest."""
        log.debug("AUTH CRAM-MD5 received")
        # Generate challenge
        secret = secrets.token_hex(8)
        ts = datetime.now().timestamp()
        hostname = server.hostname
        challenge = f"<{secret}{ts}@{hostname}>"
        response = await server.challenge_auth(challenge)
        user, received = response.split()
        password = server._authenticator.get_password(user.decode())
        # Verify: recompute HMAC-MD5 of the challenge with the stored password.
        mac = hmac.HMAC(password.encode(),
                        challenge.encode(),
                        "md5")
        expected = mac.hexdigest().encode()
        if hmac.compare_digest(expected, received):
            # Fixed typo in log message: "CARM-MD5" -> "CRAM-MD5".
            log.debug("AUTH CRAM-MD5 succeeded")
            return AuthResult(success=True, handled=True, auth_data=user)
        log.debug("AUTH CRAM-MD5 failed")
        return AuthResult(success=False, handled=False)

    async def auth_LOGIN(self, server: SMTP, args: List[str]) -> AuthResult:
        """Handle AUTH LOGIN: collect username and password, then validate."""
        log.info("AUTH LOGIN received")
        login = []
        for n in range(1, len(args)):
            arg = base64.b64decode(args[n]).decode()
            login.extend(arg.split(maxsplit=1))
        while len(login) < 2:
            # Prompt for whichever credential is still missing.
            prompt = "Password" if len(login) >= 1 else ""
            response = await server.challenge_auth(prompt)
            if response is MISSING:
                return AuthResult(success=False, handled=True)
            response = response.decode()
            login.extend(response.split(maxsplit=1 - len(login)))
        username = login[0]
        # Bug fix: this line was corrupted ("<PASSWORD>[1]"); the password is
        # the second collected credential.
        password = login[1]
        if server._authenticator.validate(username, password):
            log.info("AUTH LOGIN succeeded.")
            return AuthResult(success=True, handled=True, auth_data=username)
        log.info("AUTH LOGIN failed.")
        return AuthResult(success=False, handled=False)

    async def auth_PLAIN(self, server: SMTP, args: List[str]) -> AuthResult:
        """Handle AUTH PLAIN, either inline or via a follow-up challenge."""
        log.debug("AUTH PLAIN received")
        response = b""
        if len(args) >= 2:
            response = base64.b64decode(args[1])
        else:
            response = await server.challenge_auth("")
        decoded_resp = response.decode()
        split_resp: List[str] = decoded_resp.split()
        if len(split_resp) < 2:
            return AuthResult(success=False, handled=False)
        if (
            len(split_resp) >= 2
            and server._authenticator.validate(split_resp[0], split_resp[-1])
        ):
            log.debug("AUTH PLAIN succeeded")
            return AuthResult(success=True, handled=True)
        log.debug("AUTH PLAIN failed")
        return AuthResult(success=False, handled=False)

    def handle_message(self, message: EmailMessage) -> None:
        """Record a delivered message for later inspection by tests."""
        self._messages.append(message)
|
bebleo/bebleo_smtpd_fixture | tests/test_controller.py | import logging
import os
import ssl
from pathlib import Path
from smtplib import SMTP, SMTP_SSL, SMTPSenderRefused, SMTPServerDisconnected
import pytest
from smtpdfix.certs import _generate_certs
from smtpdfix.configuration import Config
from smtpdfix.controller import AuthController
from smtpdfix.fixture import _Authenticator
log = logging.getLogger(__name__)
def test_missing_auth_handler(smtpd):
    """AUTH with an unregistered mechanism must not authenticate."""
    smtpd.config.auth_require_tls = False
    with SMTP(smtpd.hostname, smtpd.port) as client:
        client.helo()
        code, resp = client.docmd("AUTH", "PSEUDOMECH")
        assert code != 235  # This should be 504
def test_use_starttls(smtpd, msg):
    """With STARTTLS required, sending before starttls() is refused."""
    smtpd.config.use_starttls = True
    with SMTP(smtpd.hostname, smtpd.port) as client:
        with pytest.raises(SMTPSenderRefused) as error:
            code, resp = client.send_message(msg)
        assert error.type == SMTPSenderRefused
def test_custom_ssl_context(request, tmp_path_factory, msg):
    """A caller-supplied SSLContext is honoured for STARTTLS delivery."""
    path = tmp_path_factory.mktemp("certs")
    _generate_certs(path, separate_key=True)
    cert_path = path.joinpath("cert.pem").resolve()
    key_path = path.joinpath("key.pem").resolve()
    _context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
    _context.load_cert_chain(str(cert_path), str(key_path))
    server = AuthController(ssl_context=_context)
    # Ensure the server is stopped even if the test fails.
    request.addfinalizer(server.stop)
    server.config.use_starttls = True
    server.start()
    with SMTP(server.hostname, server.port) as client:
        client.starttls()
        client.send_message(msg)
    assert len(server.messages) == 1
def test_missing_certs(request, msg):
    """Pointing ssl_certs_path at a dir with no certs raises FileNotFoundError."""
    with pytest.raises(FileNotFoundError) as error:
        _config = Config()
        _config.use_starttls = True
        _config.ssl_certs_path = "."
        _authenticator = _Authenticator(config=_config)
        server = AuthController(hostname=_config.host,
                                port=_config.port,
                                config=_config,
                                authenticator=_authenticator)
        request.addfinalizer(server.stop)
        server.start()
        with SMTP(server.hostname, server.port) as client:
            client.send_message(msg)
    assert error.type == FileNotFoundError
def test_custom_cert_and_key(request, tmp_path_factory, msg):
    """Separate cert and key files given via config work for SSL delivery."""
    path = tmp_path_factory.mktemp("certs")
    _generate_certs(path, separate_key=True)
    _config = Config()
    _config.use_ssl = True
    _config.ssl_cert_files = (path.joinpath("cert.pem"),
                              path.joinpath("key.pem"))
    server = AuthController(config=_config)
    request.addfinalizer(server.stop)
    server.start()
    with SMTP_SSL(server.hostname, server.port) as client:
        client.send_message(msg)
    assert len(server.messages) == 1
def test_TLS_not_supported(request, tmp_path_factory, msg, user):
    """A plaintext client against an SSL-only server gets disconnected."""
    path = tmp_path_factory.mktemp("certs")
    _generate_certs(path)
    ssl_cert_files = str(path.joinpath("cert.pem"))
    context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
    context.load_verify_locations(ssl_cert_files)
    context.load_cert_chain(ssl_cert_files)
    config = Config()
    config.enforce_auth = True
    config.use_ssl = True
    server = AuthController(config=config,
                            authenticator=_Authenticator(config),
                            ssl_context=context)
    request.addfinalizer(server.stop)
    server.start()
    with pytest.raises(SMTPServerDisconnected):
        with SMTP(server.hostname, server.port) as client:
            # this should return a 523 Encryption required error
            # but instead returns an SMTPServerDisconnected Error
            client.send_message(msg)
    # NOTE(review): asserting one delivered message after an expected
    # disconnect looks contradictory -- confirm the intended count.
    assert len(server.messages) == 1
def test_config_file(request, msg):
    """Settings loaded from a .env-style config file (port 5025) are applied."""
    # Snapshot the environment so load_dotenv(override=True) can be undone.
    _original_env = os.environ.copy()
    config_file = Path(__file__).parent.joinpath("assets/.test.env")
    _config = Config(filename=config_file, override=True)
    server = AuthController(hostname=_config.host,
                            port=_config.port,
                            config=_config)
    request.addfinalizer(server.stop)
    server.start()
    with SMTP(server.hostname, server.port) as client:
        client.send_message(msg)
    assert server.port == 5025
    # Restore the pre-test environment.
    os.environ.clear()
    os.environ.update(_original_env)
def test_exception_handler(request, msg):
    """An exception raised on the server loop surfaces to the client."""
    def raise_error():
        raise Exception("Deliberately raised error.")
    server = AuthController()
    request.addfinalizer(server.stop)
    server.start()
    with pytest.raises(Exception):
        with SMTP(server.hostname, server.port) as client:
            client.ehlo()
            # Inject a failure into the server's event loop mid-session.
            server.loop.call_soon_threadsafe(raise_error)
            client.send_message(msg)
    # NOTE(review): asserting one delivered message after an injected failure
    # looks suspicious -- confirm the intended count.
    assert len(server.messages) == 1
|
bebleo/bebleo_smtpd_fixture | smtpdfix/fixture.py | import logging
import os
from typing import Any, Generator, Optional
import pytest
from .authenticator import Authenticator
from .certs import _generate_certs
from .configuration import Config
from .controller import AuthController
from .typing import TempPathFactory
log = logging.getLogger(__name__)
class _Authenticator(Authenticator):
    """Authenticator backed by the credentials stored in a Config instance."""

    def __init__(self, config: Config) -> None:
        self.config = config

    def validate(self, username: str, password: str) -> bool:
        """Accept the login iff it matches the configured username/password pair."""
        expected = (self.config.login_username, self.config.login_password)
        if (username, password) == expected:
            log.debug("Validating username and password for succeeded")
            return True
        log.debug("Validating username and password failed")
        return False

    def verify(self, username: str) -> bool:
        # Defer to the base-class behaviour.
        return super().verify(username)

    def get_password(self, username: Optional[str]) -> str:
        # Every user shares the single configured password.
        return self.config.login_password
class SMTPDFix():
    """Context manager that starts an AuthController on enter and stops it on exit."""

    def __init__(self,
                 hostname: Optional[str] = None,
                 port: int = 8025,
                 config: Optional[Config] = None) -> None:
        self.hostname = hostname
        # Guard against an explicit ``port=None`` from callers.
        self.port = 8025 if port is None else int(port)
        self.config = config if config is not None else Config()

    def __enter__(self) -> AuthController:
        controller = AuthController(
            hostname=self.hostname,
            port=self.port,
            config=self.config,
            authenticator=_Authenticator(self.config)
        )
        self.controller = controller
        controller.start()
        return controller

    def __exit__(self, type: Any, value: Any, traceback: Any) -> None:
        self.controller.stop()
@pytest.fixture
def smtpd(
    tmp_path_factory: TempPathFactory
) -> Generator[AuthController, None, None]:
    """A small SMTP server for use when testing applications that send email
    messages. To access the messages call `smtpd.messages` which returns a copy
    of the list of messages sent to the server.
    Example:
    def test_mail(smtpd):
        from smtplib import SMTP
        with SMTP(smtpd.hostname, smtpd.port) as client:
            code, resp = client.noop()
            assert code == 250
    """
    # Generate self-signed certs once unless a path is already configured.
    # NOTE(review): this mutates os.environ and never restores it afterwards.
    if os.getenv("SMTPD_SSL_CERTS_PATH") is None:
        path = tmp_path_factory.mktemp("certs")
        _generate_certs(path)
        os.environ["SMTPD_SSL_CERTS_PATH"] = str(path.resolve())
    with SMTPDFix() as fixture:
        yield fixture
|
bebleo/bebleo_smtpd_fixture | smtpdfix/controller.py | <reponame>bebleo/bebleo_smtpd_fixture
import asyncio
import errno
import logging
from contextlib import ExitStack
from os import strerror
from pathlib import Path
from socket import create_connection
from ssl import CERT_OPTIONAL, Purpose, SSLContext, create_default_context
from typing import Any, List, Optional
from aiosmtpd.controller import Controller, get_localhost
from aiosmtpd.smtp import SMTP
from .authenticator import Authenticator
from .configuration import Config
from .handlers import AuthMessage
from .typing import AsyncServer, PathType, ServerCoroutine
log = logging.getLogger(__name__)
class AuthController(Controller):
    """aiosmtpd Controller that adds authentication, SSL/STARTTLS handling,
    config-driven reconfiguration and message capture for the smtpdfix fixture."""

    def __init__(self,
                 loop: Optional[asyncio.AbstractEventLoop] = None,
                 hostname: Optional[str] = None,
                 port: Optional[int] = None,
                 ready_timeout: Optional[float] = None,
                 ssl_context: Optional[SSLContext] = None,
                 config: Optional[Config] = None,
                 authenticator: Optional[Authenticator] = None,
                 **kwargs: Any) -> None:
        self.config = config or Config()
        # NOTE(review): "messages" is read from kwargs but not removed, so it
        # is also forwarded to Controller below -- confirm Controller tolerates it.
        self._messages = kwargs.get("messages") or []
        self._ssl_context = ssl_context
        self._authenticator = authenticator
        _handler = AuthMessage(messages=self._messages)
        # Explicit arguments win over config-file values.
        _hostname = hostname or self.config.host
        _port = int(port or self.config.port)
        _ready_timeout = float(ready_timeout or self.config.ready_timeout)
        _loop = loop or asyncio.new_event_loop()
        _loop.set_exception_handler(self._handle_exception)
        def context_or_none() -> Optional[SSLContext]:
            # Determines whether to return a sslContext or None to avoid a
            # situation where both could be used. Prefers STARTTLS to TLS.
            if (self.config.use_ssl and not self.config.use_starttls):
                context = ssl_context or self._get_ssl_context()
                context.verify_mode = CERT_OPTIONAL
                return context
            return None
        super().__init__(handler=_handler,
                         hostname=_hostname,
                         port=_port,
                         loop=_loop,
                         ready_timeout=_ready_timeout,
                         ssl_context=context_or_none(),
                         authenticator=self._authenticator,
                         **kwargs)
        # The event handler for changes to the config goes here to prevent it
        # firing when the object is initialized.
        if hostname is not None:
            self.config.host = hostname
        if port is not None:
            self.config.port = port
        self.config.OnChanged += self.reset
        log.info(f"SMTPDFix running on {self.hostname}:{self.port}")

    def factory(self) -> SMTP:
        """Build the SMTP protocol object, wiring in auth and STARTTLS settings."""
        use_starttls = self.config.use_starttls
        context = self._get_ssl_context() if use_starttls else None
        return SMTP(handler=self.handler,
                    require_starttls=self.config.use_starttls,
                    auth_required=self.config.enforce_auth,
                    auth_require_tls=self.config.auth_require_tls,
                    tls_context=context,
                    authenticator=self._authenticator)

    def _get_ssl_context(self) -> SSLContext:
        """Return the supplied context, or build one from the configured cert files."""
        if self._ssl_context is not None:
            return self._ssl_context
        certs_path = Path(self.config.ssl_certs_path).resolve()
        cert_file, key_file = self.config.ssl_cert_files
        def _resolve_file(basepath: Path, file_: PathType) -> str:
            # Resolve the file paths in order:
            # 1. if the file exists return the path as string
            # 2. try to combine basepath and the filename and if that exists
            #    return as a string.
            # NB: the paths are returned as strings because PYPY3 doesn't
            # support paths in sslcontext.load_cert_chain()
            if Path(file_).is_file():
                return str(Path(file_))
            elif basepath.joinpath(file_).resolve().is_file():
                return str(basepath.joinpath(file_).resolve())
            raise FileNotFoundError(errno.ENOENT,
                                    strerror(errno.ENOENT),
                                    file_)
        cert_path = _resolve_file(certs_path, cert_file)
        key_path = _resolve_file(certs_path, key_file) if key_file else None
        context = create_default_context(Purpose.CLIENT_AUTH)
        # Self-signed test certs: skip hostname verification.
        context.check_hostname = False
        context.load_verify_locations(cert_path)
        context.load_cert_chain(cert_path, keyfile=key_path)
        return context

    def _handle_exception(self, loop: Any, context: Any) -> None:
        """Event-loop exception handler: notify the client (421) and shut down."""
        loop.default_exception_handler(context)
        status = "421 Service not available. Closing connection."
        asyncio.ensure_future(self.smtpd.push(status))
        self.smtpd.transport.close()
        self.server.close()

    def _run(self, ready_event: Any) -> None:
        """Thread target: create the listening server and run the event loop."""
        asyncio.set_event_loop(self.loop)
        try:
            # Need to do two-step assignments here to ensure IDEs can properly
            # detect the types of the vars. Cannot use `assert isinstance`,
            # because Python 3.6 in asyncio debug mode has a bug wherein
            # CoroWrapper is not an instance of Coroutine
            coro_kwargs = {}
            if self.ssl_context:
                coro_kwargs["ssl_handshake_timeout"] = 5.0
            srv_coro: ServerCoroutine = self.loop.create_server(
                self._factory_invoker,
                host=self.hostname,
                port=self.port,
                ssl=self.ssl_context,
                **coro_kwargs
            )
            self.server_coro = srv_coro
            srv: AsyncServer = self.loop.run_until_complete(srv_coro)
            self.server = srv
        except Exception as error:  # pragma: on-wsl; # pragma: no cover
            # Usually will enter this part only if create_server() cannot bind
            # to the specified host:port.
            #
            # Somehow WSL 1.0 (Windows Subsystem for Linux) allows multiple
            # listeners on one port?!
            # That is why we add "pragma: on-wsl" there, so this block will not
            # affect coverage on WSL 1.0.
            self._thread_exception = error
            return
        self.loop.call_soon(ready_event.set)
        self.loop.run_forever()
        self.server.close()
        self.loop.run_until_complete(self.server.wait_closed())
        self.loop.close()
        self.server = None  # type: ignore

    def reset(self, persist_messages: bool = True) -> None:
        """Rebuild the controller after a config change, optionally keeping messages."""
        _running = False
        try:
            self.stop()
            _running = True
        except AssertionError:
            # stop() asserts when the server was never started; ignore.
            pass
        # Remove the handler to avoid recursion
        self.config.OnChanged -= self.reset
        # Ignoring this for the purposes of type checking on the grounds that
        # this works and can't be replaced for now.
        self.__init__(  # type: ignore
            loop=None if self.loop.is_closed() else self.loop,
            hostname=self.config.host,
            port=self.config.port,
            ssl_context=self._ssl_context,
            config=self.config,
            authenticator=self._authenticator,
            messages=self._messages if persist_messages else None
        )
        if _running:
            self.start()

    def _trigger_server(self) -> None:
        """Poke the freshly started server once so aiosmtpd finishes its setup."""
        hostname = self.hostname or get_localhost()
        with ExitStack() as stk:
            conn = create_connection((hostname, self.port), 1.0)
            s = stk.enter_context(conn)
            # connecting using the ssl_context removed as this fails under
            # python 3.10 when using opportunistic SSL
            _ = s.recv(1024)

    @property
    def messages(self) -> List[Any]:
        """Return a copy of the messages delivered so far."""
        return self._messages.copy()
|
bebleo/bebleo_smtpd_fixture | smtpdfix/configuration.py | import os
from pathlib import Path
from typing import Any, Optional, Tuple, Union
from dotenv import load_dotenv
from .event_handler import EventHandler
from .typing import PathType
_current_dir = Path(__file__).parent
load_dotenv()
def _strtobool(val: str) -> bool:
"""Convert a string representation of truth to true (1) or false (0).
True values are "y", "yes", "t", "true", "on", and "1"; false values are
"n", "no", "f", "false", "off", and "0". Raises ValueError if "val" is
anything else.
"""
# Copied and updated from distutils.util in response to distutils removal
# as of python 3.12
val = val.lower()
if val in ("y", "yes", "t", "true", "on", "1"):
return True
elif val in ("n", "no", "f", "false", "off", "0"):
return False
else:
raise ValueError(f"invalid truth value {val}")
class Config():
    """smtpdfix configuration, seeded from SMTPD_* environment variables.

    Every setter fires the OnChanged event so an attached controller can
    rebuild itself when a value changes.
    """

    def __init__(self,
                 filename: Optional[PathType] = None,
                 override: bool = False) -> None:
        # Optionally layer a .env file on top of the current environment.
        if filename:
            load_dotenv(filename, override=override)
        self.OnChanged = EventHandler()
        self._host = os.getenv("SMTPD_HOST")
        self._port = int(os.getenv("SMTPD_PORT", 8025))
        self._ready_timeout = float(os.getenv("SMTPD_READY_TIMEOUT", 10.0))
        self._login_username = os.getenv("SMTPD_LOGIN_NAME", "user")
        self._login_password = os.getenv("SMTPD_LOGIN_PASSWORD", "password")
        self._enforce_auth = _strtobool(os.getenv("SMTPD_ENFORCE_AUTH",
                                                  "False"))
        self._auth_require_tls = _strtobool(os.getenv("SMTPD_AUTH_REQUIRE_TLS",
                                                      "True"))
        self._ssl_cert_path: PathType = os.getenv(
            "SMTPD_SSL_CERTS_PATH",
            _current_dir.joinpath("certs"))
        # (certificate file, optional separate key file)
        self._ssl_cert_files = (
            os.getenv("SMTPD_SSL_CERTIFICATE_FILE", "./cert.pem"),
            os.getenv("SMTPD_SSL_KEY_FILE"))
        self._use_starttls = _strtobool(os.getenv("SMTPD_USE_STARTTLS",
                                                  "False"))
        # SMTPD_USE_TLS is accepted as a legacy alias for SMTPD_USE_SSL.
        self._use_ssl = (_strtobool(os.getenv("SMTPD_USE_SSL", "False"))
                         or _strtobool(os.getenv("SMTPD_USE_TLS", "False")))

    def convert_to_bool(self, value: Any) -> bool:
        """Consistently convert to bool."""
        if isinstance(value, str):
            return bool(_strtobool(value))
        return bool(value)

    @property
    def host(self) -> Optional[str]:
        return self._host

    @host.setter
    def host(self, value: Optional[str]) -> None:
        self._host = value
        self.OnChanged()

    @property
    def port(self) -> int:
        return self._port

    @port.setter
    def port(self, value: int) -> None:
        self._port = int(value)
        self.OnChanged()

    @property
    def ready_timeout(self) -> float:
        return self._ready_timeout

    @ready_timeout.setter
    def ready_timeout(self, value: float) -> None:
        self._ready_timeout = float(value)
        self.OnChanged()

    @property
    def login_username(self) -> str:
        return self._login_username

    @login_username.setter
    def login_username(self, value: str) -> None:
        self._login_username = value
        self.OnChanged()

    @property
    def login_password(self) -> str:
        return self._login_password

    @login_password.setter
    def login_password(self, value: str) -> None:
        self._login_password = value
        self.OnChanged()

    @property
    def enforce_auth(self) -> bool:
        return self._enforce_auth

    @enforce_auth.setter
    def enforce_auth(self, value: bool) -> None:
        self._enforce_auth = self.convert_to_bool(value)
        self.OnChanged()

    @property
    def auth_require_tls(self) -> bool:
        return self._auth_require_tls

    @auth_require_tls.setter
    def auth_require_tls(self, value: bool) -> None:
        self._auth_require_tls = self.convert_to_bool(value)
        self.OnChanged()

    @property
    def ssl_certs_path(self) -> PathType:
        return self._ssl_cert_path

    @ssl_certs_path.setter
    def ssl_certs_path(self, value: PathType) -> None:
        self._ssl_cert_path = value
        self.OnChanged()

    @property
    def ssl_cert_files(self) -> Tuple[str, Optional[str]]:
        return self._ssl_cert_files

    @ssl_cert_files.setter
    def ssl_cert_files(self,
                       value: Union[str, Tuple[str, Optional[str]]]) -> None:
        # Accept a single cert path or a (cert, key) pair.
        if isinstance(value, tuple):
            self._ssl_cert_files = (value[0], value[1])
        else:
            self._ssl_cert_files = (value, None)
        self.OnChanged()

    @property
    def use_starttls(self) -> bool:
        return self._use_starttls

    @use_starttls.setter
    def use_starttls(self, value: Any) -> None:
        self._use_starttls = self.convert_to_bool(value)
        self.OnChanged()

    @property
    def use_ssl(self) -> bool:
        return self._use_ssl

    @use_ssl.setter
    def use_ssl(self, value: Any) -> None:
        self._use_ssl = self.convert_to_bool(value)
        self.OnChanged()
|
bebleo/bebleo_smtpd_fixture | tests/test_fixture.py | from base64 import b64encode
from smtplib import (SMTP, SMTP_SSL, SMTPAuthenticationError,
SMTPResponseException)
from unittest import mock
import pytest
from smtpdfix import Config
from smtpdfix.fixture import _Authenticator
def encode(message):
    """Return *message* base64-encoded as an ASCII string."""
    return b64encode(message.encode()).decode()
def test_init(smtpd):
    """The fixture starts with no captured messages."""
    assert smtpd
    assert len(smtpd.messages) == 0
def test_init_ssl(smtpd, msg):
    """Implicit SSL delivery captures the message."""
    smtpd.config.use_ssl = True
    with SMTP_SSL(smtpd.hostname, smtpd.port) as client:
        client.send_message(msg)
    assert len(smtpd.messages) == 1
def test_AUTH_unknown_mechanism(smtpd):
    """An unsupported AUTH mechanism yields 504."""
    smtpd.config.use_starttls = True
    with SMTP(smtpd.hostname, smtpd.port) as client:
        client.starttls()
        client.ehlo()
        code, response = client.docmd("AUTH", args="FAKEMECH")
        assert code == 504
def test_AUTH_LOGIN_abort(smtpd, user):
    """Cancelling AUTH LOGIN with "*" yields 501."""
    smtpd.config.use_starttls = True
    with SMTP(smtpd.hostname, smtpd.port) as client:
        client.starttls()
        client.ehlo()
        code, resp = client.docmd("AUTH", f"LOGIN {encode(user.username)}")
        assert code == 334
        code, resp = client.docmd("*")
        assert code == 501
def test_AUTH_LOGIN_success(smtpd, user):
    """AUTH LOGIN with a valid username and password completes with 235."""
    smtpd.config.use_starttls = True
    username = encode(user.username)
    # Bug fix: this line was corrupted ("encode(<PASSWORD>)"); encode the
    # user's password, mirroring the username line above.
    password = encode(user.password)
    with SMTP(smtpd.hostname, smtpd.port) as client:
        client.starttls()
        client.ehlo()
        code, resp = client.docmd("AUTH", f"LOGIN {username}")
        assert code == 334
        assert resp == bytes(encode("Password"), "ascii")
        code, resp = client.docmd(f"{password}")
        assert code == 235
def test_AUTH_PLAIN(smtpd, user):
smtpd.config.use_starttls = True
enc = encode(f"{user.username} {user.password}")
cmd_text = f"PLAIN {enc}"
with SMTP(smtpd.hostname, smtpd.port) as client:
client.starttls()
client.ehlo()
(code, resp) = client.docmd("AUTH", args=cmd_text)
assert code == 235
def test_AUTH_PLAIN_no_encryption(smtpd, user):
enc = encode(f"{user.username} {user.password}")
cmd_text = f"PLAIN {enc}"
with SMTP(smtpd.hostname, smtpd.port) as client:
client.ehlo()
(code, resp) = client.docmd("AUTH", args=cmd_text)
assert code == 538
def test_AUTH_PLAIN_two_parts(smtpd, user):
    """AUTH PLAIN without an initial response: 334 challenge, then 235."""
    smtpd.config.use_starttls = True
    with SMTP(smtpd.hostname, smtpd.port) as client:
        client.starttls()
        client.ehlo()
        code, resp = client.docmd("AUTH", "PLAIN")
        assert (code, resp) == (334, b"")
        enc = encode(f"{user.username} {user.password}")
        code, resp = client.docmd(enc)
        assert code == 235
def test_AUTH_PLAIN_failure(smtpd, user):
    """A wrong password (reversed) is rejected with 535."""
    smtpd.config.use_starttls = True
    with SMTP(smtpd.hostname, smtpd.port) as client:
        client.starttls()
        client.ehlo()
        # password[:0:-1] reverses the password and drops its first character
        enc = encode(f"{user.username} {user.password[:0:-1]}")
        code, resp = client.docmd("AUTH", f"PLAIN {enc}")
        assert code == 535
        assert resp == b"5.7.8 Authentication credentials invalid"
def test_alt_port(smtpd):
    """Setting config.port makes the server listen on that port."""
    smtpd.config.port = 5025
    assert smtpd.port == 5025
def test_login(smtpd, user):
    """smtplib's high-level login succeeds over STARTTLS."""
    smtpd.config.use_starttls = True
    with SMTP(smtpd.hostname, smtpd.port) as client:
        client.starttls()
        assert client.login(user.username, user.password)
def test_login_fail(smtpd, user):
    """A bad password raises SMTPAuthenticationError from client.login."""
    smtpd.config.use_starttls = True
    with pytest.raises(SMTPAuthenticationError) as ex:
        with SMTP(smtpd.hostname, smtpd.port) as client:
            client.starttls()
            # password[:0:-1] reverses the password and drops one character
            client.login(user.username, user.password[:0:-1])
    assert ex.type is SMTPAuthenticationError
def test_login_no_tls(smtpd, user):
    """With auth_require_tls disabled, login works over plaintext."""
    smtpd.config.auth_require_tls = False
    with SMTP(smtpd.hostname, smtpd.port) as client:
        assert client.login(user.username, user.password)
def test_login_already_done(smtpd, user):
    """A second AUTH after a successful login is refused with 503."""
    smtpd.config.enforce_auth = True
    smtpd.config.use_starttls = True
    with SMTP(smtpd.hostname, smtpd.port) as client:
        client.starttls()
        client.login(user.username, user.password)
        # We need to explicitly get the response from the second AUTH
        # command because smtplib doesn't treat it as an error.
        code, resp = client.docmd("AUTH", "LOGIN")
        assert code == 503
        assert resp == b"Already authenticated"
def test_no_messages(smtpd):
    """A fresh server fixture holds no messages."""
    assert len(smtpd.messages) == 0
def test_send_message(smtpd, msg):
    """A plain, unauthenticated send is captured by the fixture."""
    with SMTP(smtpd.hostname, smtpd.port) as client:
        client.send_message(msg)
    assert len(smtpd.messages) == 1
def test_send_message_logged_in(smtpd, user, msg):
    """Sending after STARTTLS + login delivers the message."""
    smtpd.config.use_starttls = True
    with SMTP(smtpd.hostname, smtpd.port) as client:
        client.starttls()
        client.login(user.username, user.password)
        client.send_message(msg)
    assert len(smtpd.messages) == 1
def test_send_message_auth_not_complete(smtpd, msg):
    """With enforce_auth set, an unauthenticated send fails with 530."""
    smtpd.config.enforce_auth = True
    with pytest.raises(SMTPResponseException) as er:
        with SMTP(smtpd.hostname, smtpd.port) as client:
            client.send_message(msg)
    assert er.match(r"^\(530")
def test_sendmail(smtpd):
    """A raw RFC 822 message sent via smtplib.sendmail is received."""
    # BUG FIX: the address literals had been replaced with "<EMAIL>"
    # redaction placeholders; use distinct example.org addresses so the
    # From/To headers are meaningful.
    from_addr = "sender@example.org"
    to_addr = "recipient@example.org"
    msg = (f"From: {from_addr}\r\n"
           f"To: {to_addr}\r\n"
           f"Subject: Foo\r\n\r\n"
           f"Foo bar")
    with SMTP(smtpd.hostname, smtpd.port) as client:
        client.sendmail(from_addr, to_addr, msg)
    assert len(smtpd.messages) == 1
@mock.patch.object(Config, "enforce_auth", True)
def test_mock_patch(smtpd):
    """Config can be patched class-wide with mock.patch.object."""
    with SMTP(smtpd.hostname, smtpd.port) as client:
        client.helo()
        code, repl = client.docmd("DATA", "")
        assert code == 530
        assert repl.startswith(b"5.7.0 Authentication required")
def test_monkeypatch(monkeypatch, smtpd):
    """Config can be patched per-instance with pytest's monkeypatch."""
    monkeypatch.setattr(smtpd.config, "enforce_auth", True)
    with SMTP(smtpd.hostname, smtpd.port) as client:
        client.helo()
        code, repl = client.docmd("DATA", "")
        assert code == 530
        assert repl.startswith(b"5.7.0 Authentication required")
class TestDefaultAuthenticator:
    """Exercise the library's built-in _Authenticator against a Config."""

    _config = Config()
    _auth = _Authenticator(_config)

    def test_validate(cls, user):
        """The default credentials validate successfully."""
        assert cls._auth.validate(user.username, user.password)

    def test_verify(cls, user):
        """verify() is intentionally unimplemented in the default backend."""
        with pytest.raises(NotImplementedError):
            cls._auth.verify(user.username)

    def test_get_password(cls, user):
        """get_password returns the stored password for a username."""
        password = cls._auth.get_password(user.username)
        assert password == user.password
|
bebleo/bebleo_smtpd_fixture | smtpdfix/authenticator.py | from abc import ABCMeta, abstractmethod
class Authenticator(metaclass=ABCMeta):
    """Abstract interface for pluggable SMTP authentication back-ends."""

    @abstractmethod
    def validate(self, username: str, password: str) -> bool:
        """Validate that the password authenticates the username."""
        raise NotImplementedError()  # pragma: no cover

    @abstractmethod
    def verify(self, username: str) -> bool:
        """Method to verify that an address or username is correct.

        Possible inputs are:
        - a user name (e.g. "user")
        - an email address (e.g. "user@example.org")

        Should return a string in the form of "User <user@example.org>" if
        the address provided is valid. If the address is invalid return
        None. In this case we are returning a boolean True instead.
        """
        raise NotImplementedError()  # pragma: no cover

    @abstractmethod
    def get_password(self, username: str) -> str:
        """Returns the password for a given username."""
        raise NotImplementedError()  # pragma: no cover
|
bebleo/bebleo_smtpd_fixture | tests/test_smtpdfix.py | <gh_stars>1-10
from smtplib import SMTP
from smtpdfix import SMTPDFix
def test_smtpdfix(msg):
    """The SMTPDFix context manager serves a working SMTP endpoint."""
    with SMTPDFix() as server, SMTP(server.hostname, server.port) as client:
        client.send_message(msg)
    assert len(server.messages) == 1
|
bebleo/bebleo_smtpd_fixture | smtpdfix/typing.py | <reponame>bebleo/bebleo_smtpd_fixture<gh_stars>0
import asyncio
import os
from typing import Any, Callable, Coroutine, Dict, Optional, Union
from pytest import __version__ as pytest_version
# Type aliases
AsyncServer = asyncio.base_events.Server
CallableHandler = Callable[..., Optional[Dict[Any, Any]]]
PathType = Union[str, os.PathLike]
ServerCoroutine = Coroutine[Any, Any, asyncio.base_events.Server]
# Because the location of the TempPathFactory type definition is moved in
# version 6.2.0 we need to do some acrobats to make it pretty.
if pytest_version < "6.2.0": # pragma: no cover
from _pytest.tmpdir import TempPathFactory as TPF
else:
from pytest import TempPathFactory as TPF
TempPathFactory = TPF
|
bebleo/bebleo_smtpd_fixture | smtpdfix/certs.py | <gh_stars>0
import logging
import socket
from datetime import datetime, timedelta
from ipaddress import ip_address
from pathlib import Path
from typing import Union
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509 import (BasicConstraints, CertificateBuilder, DNSName,
IPAddress, Name, NameAttribute,
SubjectAlternativeName, random_serial_number)
from cryptography.x509.oid import NameOID
log = logging.getLogger(__name__)
def _generate_certs(path: Union[Path, str],
                    days: int = 3652,
                    key_size: int = 2048,
                    separate_key: bool = False) -> None:
    """Generate a self-signed RSA key and certificate under *path*.

    Writes ``cert.pem`` (and ``key.pem`` when *separate_key* is true);
    otherwise the key and certificate share ``cert.pem``.

    DO NOT USE THIS FOR ANYTHING PRODUCTION RELATED, EVER!
    """
    # Generate private key
    # 2048 is the minimum that works as of 3.9
    key = rsa.generate_private_key(public_exponent=65537, key_size=key_size)
    key_file = "key.pem" if separate_key else "cert.pem"
    key_path = Path(path).joinpath(key_file)
    # NOTE(review): files are opened in append mode ("ab"); running this
    # twice against the same path accumulates duplicate PEM blocks — confirm
    # callers always use a fresh directory.
    with open(key_path, "ab") as f:
        f.write(key.private_bytes(
            encoding=serialization.Encoding.PEM,
            encryption_algorithm=serialization.NoEncryption(),
            format=serialization.PrivateFormat.TraditionalOpenSSL
        ))
    log.debug("Private key generated")
    # Generate public certificate covering localhost aliases plus this
    # machine's hostname and resolved IP.
    hostname = socket.gethostname()
    ip = socket.gethostbyname(hostname)
    subject = Name([NameAttribute(NameOID.COMMON_NAME, "smtpdfix_cert")])
    alt_names = [
        DNSName("localhost"),
        DNSName("localhost.localdomain"),
        DNSName(hostname),
        IPAddress(ip_address("127.0.0.1")),
        IPAddress(ip_address("0.0.0.1")),
        IPAddress(ip_address("::1")),
        IPAddress(ip_address(ip)),
    ]
    # Set it so the certificate can be a root certificate with
    # ca=true, path_length=0 means it can only sign itself.
    constraints = BasicConstraints(ca=True, path_length=0)
    cert = (CertificateBuilder()
            .issuer_name(subject)
            .subject_name(subject)
            .serial_number(random_serial_number())
            .not_valid_before(datetime.utcnow())
            .not_valid_after(datetime.utcnow() + timedelta(days=days))
            .add_extension(SubjectAlternativeName(alt_names), critical=False)
            .public_key(key.public_key())
            .add_extension(constraints, critical=False)
            .sign(private_key=key, algorithm=hashes.SHA256()))
    cert_path = Path(path).joinpath("cert.pem")
    with open(cert_path, "ab") as f:
        f.write(cert.public_bytes(serialization.Encoding.PEM))
    log.debug("Certificate generated")
|
bebleo/bebleo_smtpd_fixture | tests/test_configuration.py | import functools
import os
from pathlib import Path
import pytest
from smtpdfix.configuration import Config, _strtobool
# Table of (attribute, raw input, expected parsed value, expected type)
# driving test_set_values below: each Config attribute must coerce the raw
# input into the expected value/type.
values = [
    ("host", "mail.localhost", "mail.localhost", str),
    ("port", 5025, 5025, int),
    ("port", "5025", 5025, int),
    ("ready_timeout", 2.5, 2.5, float),
    ("ready_timeout", "2.5", 2.5, float),
    ("login_name", "name", "name", str),
    # BUG FIX: the raw input had been replaced by a "<PASSWORD>" redaction
    # placeholder; per the table's pattern a plain string passes through
    # unchanged, so input and expected value must match.
    ("login_password", "word", "word", str),
    ("enforce_auth", "True", True, bool),
    ("enforce_auth", True, True, bool),
    ("enforce_auth", 1, True, bool),
    ("auth_require_tls", 0, False, bool),
    ("auth_require_tls", "0", False, bool),
    ("auth_require_tls", False, False, bool),
    ("auth_require_tls", "False", False, bool),
    ("ssl_certs_path", "./certs", "./certs", str),
    ("ssl_cert_files",
     ("./certs/cert.pem", "./certs/key.pem"),
     ("./certs/cert.pem", "./certs/key.pem"),
     tuple),
    ("ssl_cert_files",
     "./certs/key.pem",
     ("./certs/key.pem", None),
     tuple),
    ("use_starttls", False, False, bool),
    ("use_tls", True, True, bool),
    ("use_ssl", True, True, bool),
]

# Every property defined on Config; used to verify the change event fires.
props = [p for p in dir(Config) if isinstance(getattr(Config, p), property)]
@pytest.fixture
def handler():
    """Yield an object whose ``handle`` callback records each invocation."""
    class _Recorder():
        def handle(self, result):
            result.append(True)

    yield _Recorder()
@pytest.mark.parametrize("val", ["y", "yes", "t", "true", "on", "1"])
def test_strtobool_true(val):
    """Every recognised truthy spelling parses to exactly True."""
    result = _strtobool(val)
    assert result is True
@pytest.mark.parametrize("val", ["n", "no", "f", "false", "off", "0"])
def test_strtobool_false(val):
    """Every recognised falsy spelling parses to exactly False."""
    result = _strtobool(val)
    assert result is False
@pytest.mark.parametrize("val", ["-1", "maybe", "error"])
def test_strtobool_error(val):
    """Unrecognised spellings raise ValueError."""
    # BUG FIX: the call was wrapped in "assert ... is False", a dead
    # assertion — control never reaches it once the exception is raised.
    # The bare call inside pytest.raises is the whole test.
    with pytest.raises(ValueError):
        _strtobool(val)
def test_init():
    """Config constructs without arguments."""
    assert isinstance(Config(), Config)
def test_init_envfile():
    """Loading a .env file overrides defaults; the environment is restored.

    BUG FIX: the environment restoration now runs in a ``finally`` block so
    a failing assertion no longer leaks the .env values into later tests.
    """
    original_env = os.environ.copy()
    try:
        config_file = Path(__file__).parent.joinpath("assets/.test.env")
        config = Config(filename=config_file, override=True)
        assert config.port == 5025
    finally:
        os.environ.clear()
        os.environ.update(original_env)
@pytest.mark.parametrize("attr, value, expected, type", values)
def test_set_values(attr, value, expected, type):
    """Each Config attribute coerces raw input to the expected value/type."""
    config = Config()
    setattr(config, attr, value)
    assert isinstance(getattr(config, attr), type)
    assert getattr(config, attr) == expected
@pytest.mark.parametrize("prop", props)
def test_event_handler_fires(prop, handler):
    """Setting any Config property fires the OnChanged event."""
    config = Config()
    result = []
    config.OnChanged += functools.partial(handler.handle, result)
    setattr(config, prop, 1)
    assert result.pop() is True
def test_unset_event_handler(handler):
    """Removing a handler from OnChanged stops further notifications."""
    config = Config()
    result = []
    # any single property is enough to exercise subscribe/unsubscribe
    prop = "auth_require_tls"
    func = functools.partial(handler.handle, result)
    config.OnChanged += func
    setattr(config, prop, 1)
    assert result.pop() is True
    config.OnChanged -= func
    setattr(config, prop, 0)
    assert not result
|
bebleo/bebleo_smtpd_fixture | tests/test_smtp.py | <filename>tests/test_smtp.py
from smtplib import SMTP
import pytest
@pytest.mark.parametrize("cmd", ["DATA", "MAIL", "RCPT"])
def test_auth_first(cmd, smtpd):
    """With enforce_auth, mail commands before AUTH are refused with 530."""
    smtpd.config.enforce_auth = True
    with SMTP(smtpd.hostname, smtpd.port) as client:
        client.ehlo()
        code, repl = client.docmd(cmd, "")
        assert code == 530
        assert repl.startswith(b"5.7.0 Authentication required")
|
bebleo/bebleo_smtpd_fixture | tests/conftest.py | <gh_stars>1-10
import os
from collections import namedtuple
from email.message import EmailMessage
import pytest
# Because we need to test the fixture we include the plugin here, but generally
# this is not necessary and the fixture is loaded automatically.
pytest_plugins = ["smtpdfix", "pytester"]
def pytest_collection_modifyitems(items):
    """Apply a 10-second timeout to every test lacking an explicit one.

    Prevents the server hanging the run on errors. Note that a firing
    timeout can fail the entire test run.
    """
    default_timeout_secs = 10
    for test_item in items:
        if test_item.get_closest_marker("timeout") is None:
            test_item.add_marker(pytest.mark.timeout(default_timeout_secs))
@pytest.fixture
def msg():
    """Return a minimal EmailMessage used by the send tests."""
    msg = EmailMessage()
    msg["Subject"] = "Foo"
    # BUG FIX: the address literals had been replaced by "<EMAIL>"
    # redaction placeholders; use distinct example.org addresses.
    msg["Sender"] = "sender@example.org"
    msg["To"] = "recipient@example.org"
    msg.set_content("foo bar")
    return msg
@pytest.fixture
def user():
    """Provide credentials, overridable via SMTPD_USERNAME/SMTPD_PASSWORD.

    BUG FIX: the original assigned ``username``/``password`` onto the
    namedtuple *class* instead of instantiating it; create a proper
    instance. Attribute access for callers is unchanged.
    """
    User = namedtuple("User", ["username", "password"])
    return User(
        username=os.getenv("SMTPD_USERNAME", "user"),
        password=os.getenv("SMTPD_PASSWORD", "password"),
    )
|
cloudmesh/cloudmesh-flow | example/vm-flow.py | <reponame>cloudmesh/cloudmesh-flow
from cloudmesh.compute.aws import Provider as AWSProvider
from cloudmesh.compute.azure import AzProvider
from cloudmesh.flow.Flow import Flow
class MyFlow(Flow):
    """Example workflow skeleton: spawn and ping VMs on AWS and Azure.

    All tasks are unimplemented placeholders.
    """

    def spawn_aws(self):
        pass

    def spawn_azure(self):
        pass

    def ping_aws(self):
        pass

    def ping_azure(self):
        pass
if __name__ == "__main__":
    # BUG FIX: ``sys`` was never imported in this module, so the original
    # line raised NameError at runtime.
    import sys
    # Flow.__init__ in this repo expects the full argv list (the filename
    # in argv[0], optionally a node name in argv[1]); passing the bare
    # string sys.argv[0] would index into the string character by character.
    flow = MyFlow(sys.argv)
|
cloudmesh/cloudmesh-flow | example/test/test_list_flow.py | from unittest import TestCase
import webbrowser
class Test_Visualize(TestCase):
    """Manual smoke test: open the monitor page in the default browser."""

    def test_list_flow(self):
        """Open the monitor URL for the demo workflow (requires a running server)."""
        flow_name = "testflow2-flow"
        url = "http://127.0.0.1:8080/flow/monitor/" + flow_name
        webbrowser.open(url)
|
cloudmesh/cloudmesh-flow | example/test/test.py | from cloudmesh.flow.worflowdb import WorkFlowDB
from cloudmesh.flow.Node import Node
from cloudmesh.flow.visualize import manager
import webbrowser
def define_flow():
    """Create the 'testflow2' demo workflow: d -> e -> f, plus g -> h.

    IMPROVED: the five copy-pasted node-creation stanzas are collapsed
    into one loop; the nodes and edges stored are identical.
    """
    FLOWNAME = "testflow2"
    mydb = WorkFlowDB(FLOWNAME)
    for name in ["d", "e", "f", "g", "h"]:
        node = Node(name)
        node.workflow = FLOWNAME
        mydb.add_node(node.toDict())
    mydb.add_edge("d", "e")
    mydb.add_edge("e", "f")
    mydb.add_edge("g", "h")
def list_flow(flow_name):
    """Print the nodes and edges of a stored workflow.

    *flow_name* arrives with a trailing "-flow" suffix, which is stripped
    before the database lookup.
    """
    flow_name = flow_name[:-5]
    mydb = WorkFlowDB(flow_name)
    tasks = mydb.list_nodes()
    for node in tasks:
        print(node.name)
        print(node.status)
        for dependency in node.dependencies:
            print(dependency)
    nodes = []
    edges = []
    nodes.append({'id': 'start', 'label': 'start'})
    nodes.append({'id': 'end', 'label': 'end'})
    # Tasks nobody depends on get an edge to the synthetic "end" node.
    to_end_nodes = [x.name for x in tasks]
    for task in tasks:
        nodes.append({'id': task.name, 'label': task.name})
        if len(task.dependencies) == 0:
            edges.append({'from': 'start', 'to': task.name, "arrows": 'to'})
        for dependency in task.dependencies:
            edges.append({'from': dependency, 'to': task.name, "arrows": 'to'})
            # BUG FIX: list.remove raises ValueError when several tasks
            # share a dependency; only remove it the first time it appears.
            if dependency in to_end_nodes:
                to_end_nodes.remove(dependency)
    for end in to_end_nodes:
        edges.append({'from': end, 'to': 'end', "arrows": 'to'})
    for edge in edges:
        print(edge["from"], " -> ", edge["to"])
def list_flows():
    """Print the name of every workflow stored in the database."""
    db = WorkFlowDB()
    for workflow_name in db.list_all_workflows():
        print(workflow_name)
# Demo driver: print the stored flow, start the visualisation service and
# open the monitor page in the default browser.
list_flow("testflow2-flow")
manager.start()
flow_name = "testflow2-flow"
url = "http://127.0.0.1:8080/flow/monitor/" + flow_name
webbrowser.open(url)
cloudmesh/cloudmesh-flow | example/cloudflow-flow.py | from cloudmesh.compute.vm.Provider import Provider
from cloudmesh.flow.Flow import Flow
#
# we assume image is ubuntu 19.04 in cloudmesh.yaml
#
def start_vm(cloud, name=None):
    """Boot a VM named *name* on *cloud* and wait until it is up.

    Returns the booted VM handle.
    """
    provider = Provider(cloud)
    vm = provider.boot(name=name)
    # BUG FIX: "vm.wiat()" was a typo; the intent is to block until the VM
    # is ready. NOTE(review): confirm the VM handle exposes wait().
    vm.wait()
    return vm
class MyFlow(Flow):
    """Example workflow: start, ping and ssh into VMs on AWS and Azure."""

    def start_aws(self):
        vm = start_vm("aws", name="aws01")

    def start_azure(self):
        vm = start_vm("azure", name="aazure01")
        # NOTE(review): the ssh result is computed but unused; also the VM
        # name here ("aazure01") differs from the one pinged below
        # ("azure01") — confirm which is intended.
        r = vm.ssh("uname -a")

    def ping_aws(self):
        provider = Provider("aws")
        provider.ping(name="aws01")

    def ping_azure(self):
        provider = Provider("azure")
        provider.ping(name="azure01")

    def ssh_azure(self):
        provider = Provider("azure")
        r = provider.ssh(name="azure01")

    def ssh_aws(self):
        provider = Provider("aws")
        r = provider.ssh(name="aws01")
if __name__ == "__main__":
    # BUG FIX: ``sys`` was never imported in this module (NameError), and
    # the local variable shadowed the imported Flow class; use a lowercase
    # name instead.
    import sys
    flow = MyFlow(sys.argv[0])
    flow._run(sys.argv[1])
    # please specify workflow here
    """
    start -> start_aws -> ping_aws -> ssh_aws -> end
    start -> start_azure -> ping_azure -> ssh_aws -> end
    """
|
cloudmesh/cloudmesh-flow | example/workflow-flow.py | <filename>example/workflow-flow.py<gh_stars>1-10
import sys
import time
from cloudmesh.flow.Flow import Flow
from cloudmesh.flow.Flow import Get, Put
from pprint import pprint
class MyFlow(Flow):
    """Demo workflow of three sleeping tasks chained a -> b -> c."""

    # dependency edges consumed by the Flow machinery
    edges = [["a", "b"], ["b", "c"]]

    def a(self):
        print("in a!")
        time.sleep(5)

    def b(self):
        print("in b!")
        time.sleep(10)

    def c(self):
        print("in c!")
        time.sleep(10)
if __name__ == "__main__":
    # argv[0] : the name of the workflow
    # argv[1] : a node to be executed
    # if only the first argument is provided the workflow will be uploaded to mongo
    flow = MyFlow(sys.argv)
|
cloudmesh/cloudmesh-flow | example/flow-flow.py | import sys
import time
from cloudmesh.flow.Flow import Flow
class MyFlow(Flow):
    """Demo workflow of three sleeping tasks; no edges declared here."""

    def a(self):
        print("in a!")
        time.sleep(5)

    def b(self):
        print("in b!")
        time.sleep(10)

    def c(self):
        print("in c!")
        time.sleep(10)
if __name__ == "__main__":
    # BUG FIX: the local variable shadowed the imported Flow class; use a
    # lowercase name. NOTE(review): Flow.__init__ elsewhere in this repo
    # expects the full argv list rather than sys.argv[0] — confirm which
    # version of Flow this example targets.
    flow = MyFlow(sys.argv[0])
    flow._run(sys.argv[1])
cloudmesh/cloudmesh-flow | tests/test_baseclass.py | ###############################################################
# pip install .; pytest -v --capture=no -v --nocapture tests/test_baseclass..py::Test_baseclass.test_001
# pytest -v --capture=no tests/test_baseclass.py
# pytest -v tests/test_baseclass.py
###############################################################
import os
from cloudmesh.common.ConfigDict import ConfigDict
from cloudmesh.common.util import HEADING
from cloudmesh.flow.Flow import Flow
from cloudmesh.flow.Flow import FlowDatabase
import pytest
class SampleFlow(Flow):
    """Minimal Flow subclass exposing a single task for the tests below."""

    def a(self):
        """Task "a": report a canned success result."""
        outcome = {"everything": "ok"}
        return {"name": "a", "result": outcome}
@pytest.mark.incremental
class Test_baseclass:
    """Integration tests for Flow._run against a live FlowDatabase."""

    # noinspection PyPep8Naming
    def tearDown(self):
        pass

    def setup(self):
        # start each test from a clean "test" collection with one node "a"
        self.db = FlowDatabase("test")
        self.db.collection.delete_many({})
        self.db.add_node({"name": "a", "dependencies": []})
        self.db.start_flow()
        self.flow = SampleFlow("test-flow.py")

    def test_runmethod(self):
        """_run executes the task method and returns its dict."""
        result = self.flow._run("a")
        assert result["name"] == "a"

    def test_database_insertion(self):
        """_run persists the task result into the database."""
        result = self.flow._run("a")
        dbresult = self.db.get_node("a")
        assert dbresult.result["name"] == "a"
        assert dbresult.result["result"]["everything"] == "ok"
|
cloudmesh/cloudmesh-flow | tests/test_flownode.py | ###############################################################
# pip install .; pytest -v --capture=no -v --nocapture tests/test_flownode..py::Test_flownode.test_001
# pytest -v --capture=no tests/test_flownode.py
# pytest -v tests/test_flownode.py
###############################################################
import os
from cloudmesh.common.ConfigDict import ConfigDict
from cloudmesh.common.util import HEADING
from cloudmesh.flow.Node import Node
import pytest
@pytest.mark.incremental
class Test_flownode:
    """Unit tests for the workflow Node model."""

    # noinspection PyPep8Naming
    def tearDown(self):
        pass

    def test_create(self):
        """A Node constructs from just a name."""
        mynode = Node("test")

    def test_get_command(self):
        """get_command builds ["python", "<workflow>-flow.py", <name>]."""
        mynode = Node("test2")
        mynode.workflow = "test2"
        comm_arr = mynode.get_command()
        assert comm_arr[0] == "python"
        assert comm_arr[1] == "test2-flow.py"
        assert comm_arr[2] == "test2"

    def test_add_dep(self):
        """add_dependency records the other node's name."""
        node_1 = Node("node1")
        node_2 = Node("node2")
        node_1.add_dependency(node_2)
        assert len(node_1.dependencies) == 1
|
cloudmesh/cloudmesh-flow | cloudmesh/flow/visualize/render.py | from flask import Flask, jsonify, render_template, render_template_string
from flask import jsonify
import oyaml as yaml
from os import walk
from cloudmesh.flow.worflowdb import WorkFlowDB
class Node:
    """Helpers that build vis.js node dictionaries for the workflow graph."""

    # task status -> display colour; anything unrecognised renders violet
    _STATUS_COLORS = {
        "pending": "lightblue",
        "running": "orange",
        "error": "red",
        "finished": "green",
    }

    @staticmethod
    def color(task):
        """Map a task's status onto its display colour."""
        return Node._STATUS_COLORS.get(task.status, 'violet')

    @staticmethod
    def properties(d):
        """Decorate a node dict in place with vis.js display attributes."""
        d['label'] = d['id']
        if d['id'] == 'start':
            d['shape'] = 'diamond'
            d['color'] = 'yellow'
        elif d['id'] == 'end':
            d['color'] = 'grey'
            d['shape'] = 'diamond'
        else:
            d['shape'] = 'box'
            d['widthConstraint'] = 100
        d['font.size'] = '24'
        d['font.color'] = 'black'
        d['font.face'] = 'arial'
        d['value'] = 2
        d['shadow'] = True
        return d

    @staticmethod
    def start_end():
        """Return the fixed start/end marker nodes."""
        return [
            Node.properties({'id': 'start', 'x': 0, 'y': 0}),
            Node.properties({'id': 'end', 'x': 300}),
        ]
def get_workflow_names():
    """Return the names of all stored workflows as a JSON response.

    The "modified" field is always empty here; only the name is filled in.
    """
    workflows = []
    mydb = WorkFlowDB()
    flows = mydb.list_all_workflows()
    for flow in flows:
        workflow = {"name": flow, "modified": ""}
        workflows.append(workflow)
    return jsonify(workflows)
def refresh(workflowname):
    """Return the current graph (nodes and edges) of a workflow as JSON.

    *workflowname* arrives with a trailing "-flow" suffix, which is
    stripped before the database lookup.
    """
    workflowname = workflowname[:-5]
    mydb = WorkFlowDB(workflowname)
    tasks = mydb.list_nodes()
    nodes = Node.start_end()
    edges = []
    # Tasks nobody depends on get an edge to the synthetic "end" node.
    to_end_nodes = [x.name for x in tasks]
    for task in tasks:
        nodes.append(Node.properties({'id': task.name,
                                      'color': Node.color(task),
                                      "modified": task.modified,
                                      "dependencies": task.dependencies,
                                      "progress": task.progress,
                                      "done": task.done}))
        if len(task.dependencies) == 0:
            edges.append({'from': 'start', 'to': task.name, "arrows": 'to', 'width': 2})
        for dependency in task.dependencies:
            edges.append({'from': dependency, 'to': task.name, "arrows": 'to', 'width': 2})
            # BUG FIX: list.remove raises ValueError when several tasks
            # share a dependency; only remove it the first time it appears.
            if dependency in to_end_nodes:
                to_end_nodes.remove(dependency)
    for end in to_end_nodes:
        edges.append({'from': end, 'to': 'end', "arrows": 'to', 'width': 2})
    flow = [{"nodes": nodes, "edges": edges}]
    return jsonify(flow)
def show(workflowname):
    """Render the workflow graph page for *workflowname*.

    The name arrives with a trailing "-flow" suffix, which is stripped
    before the database lookup.
    """
    workflowname = workflowname[:-5]
    mydb = WorkFlowDB(workflowname)
    tasks = mydb.list_nodes()
    nodes = Node.start_end()
    edges = []
    # Tasks nobody depends on get an edge to the synthetic "end" node.
    to_end_nodes = [x.name for x in tasks]
    for task in tasks:
        nodes.append(Node.properties({'id': task.name, 'color': Node.color(task)}))
        if len(task.dependencies) == 0:
            edges.append({'from': 'start', 'to': task.name, "arrows": 'to', 'width': 2})
        for dependency in task.dependencies:
            edges.append({'from': dependency, 'to': task.name, "arrows": 'to', 'width': 2})
            # BUG FIX: list.remove raises ValueError when several tasks
            # share a dependency; only remove it the first time it appears.
            if dependency in to_end_nodes:
                to_end_nodes.remove(dependency)
    for end in to_end_nodes:
        edges.append({'from': end, 'to': 'end', "arrows": 'to', 'width': 2})
    return render_template("workflow.html", nodes=nodes, edges=edges)
def showFromDirectory(workflowname):
    """Render a workflow defined in workflows/<name>.yaml.

    The YAML is expected to carry a "tasks" list and a "flow" string in
    which ";" chains steps and "|" separates independent chains.
    """
    filename = "workflows/" + workflowname + ".yaml"
    with open(filename, "r") as stream:
        data = yaml.load(stream, Loader=yaml.SafeLoader)
    tasks = data["tasks"]
    for task in tasks:
        # NOTE(review): debug print left in; consider removing or logging.
        print(task)
    nodes = Node.start_end()
    edges = []
    for task in tasks:
        nodes.append(Node.properties({'id': task}))
    flows = data["flow"].split("|")
    for flow in flows:
        arrows = flow.split(";")
        for i in range(0, len(arrows) - 1):
            edges.append({'from': arrows[i], 'to': arrows[i + 1], "arrows": 'to', 'width': 2})
    return render_template("workflow.html", nodes=nodes, edges=edges)
def update(workflow):
    """Persist an edited workflow YAML and render the updated graph page.

    *workflow* is a dict carrying "name" and the YAML text under "flowyaml".
    """
    flow = workflow.get("flowyaml", None)
    workflowname = workflow.get("name", None)
    filename = "workflows/" + workflowname + ".yaml"
    data = yaml.load(flow, Loader=yaml.SafeLoader)
    with open(filename, 'w') as outfile:
        yaml.dump(data, outfile, default_flow_style=False)
    # BUG FIX: the rendered page was computed and then discarded, so this
    # endpoint returned None; return it to the caller instead.
    return show(workflowname)
def getworkflownamesFromDirectory():
    """Return the names of all workflows/*.yaml files as a JSON response."""
    workflows = [
        {"name": filename[:-5]}
        for (dirpath, dirnames, filenames) in walk("workflows")
        for filename in filenames
        if filename.endswith(".yaml")
    ]
    return jsonify(workflows)
|
cloudmesh/cloudmesh-flow | cloudmesh/flow/command/flow.py | from cloudmesh.shell.command import command
from cloudmesh.shell.command import PluginCommand
from cloudmesh.flow.Flow import FlowDatabase, parse_yaml_to_workflow
from cloudmesh.flow.FlowRunner import FlowRunner
from cloudmesh.common.debug import VERBOSE
from cloudmesh.flow.Node import Node
from cloudmesh.common.Printer import Printer
from cloudmesh.mongo.CmDatabase import CmDatabase
from cloudmesh.flow.visualize import manager
class FlowCommand(PluginCommand):
    """Cloudmesh shell plugin providing the ``flow`` command."""

    # noinspection PyUnusedLocal
    @command
    def do_flow(self, args, arguments):
        """
        ::

            Usage:
                flow list [--flow=NAME] [--output=FORMAT]
                flow add [--flowname=FLOWNAME] --flowfile=FILENAME
                flow run [--flowname=FLOWNAME] [--flowfile=FILENAME]
                flow node add NODENAME [--flowname=FLOWNAME]
                flow edge add FROM TO [--flowname=FLOWNAME]
                flow node delete NODENAME
                flow edge delete FROM TO
                flow edge invert FROM TO
                flow visualize start
                flow visualize stop
                flow refresh

            This command manages and executes workflows

            The default workflow is just named "workflow" but you can specify multiple

            Arguments:
                NAME       the name of the workflow
                FILENAME   a file name
                NODENAME   the name of the node
                FROM       the edge source (a node name)
                TO         the edge destination (a node name)
                NODE       the name of the node

            Options:
                --flow=NAME           the name or the flow
                --file                specify the file
                --log                 specify the log file
                --flowname=FLOWNAME   the name or the workflow
                --output=OUTPUT       the output format [default: table]
        """
        # Resolve defaults: flow name falls back to "workflow" and the
        # flow file to "<name>-flow.py".
        arguments.FLOWNAME = arguments["--flowname"] or "workflow"
        arguments.FLOWFILE = arguments["--flowfile"] or f"{arguments.FLOWNAME}-flow.py"
        arguments.output = arguments["--output"]
        VERBOSE(arguments, verbose=0)
        if arguments["add"] and arguments.edge:
            db = FlowDatabase(arguments.FLOWNAME)
            db.add_edge(arguments.FROM, arguments.TO)
        elif arguments["add"]:
            print("adding a node")
            if arguments.NODENAME:
                node = Node(arguments.NODENAME)
                node.workflow = arguments.FLOWNAME
                try:
                    db = FlowDatabase(arguments.FLOWNAME)
                    db.add_node(node.toDict())
                except Exception as e:
                    print("error executing", e)
            elif arguments["--flowfile"]:
                filename = arguments["--flowfile"]
                print("load from file", filename)
                parse_yaml_to_workflow(filename)
        elif arguments["list"]:
            arguments.flow = arguments["--flow"] or "workflow"
            db = CmDatabase()
            name = arguments["--flow"]
            if name is not None:
                flows = [name]
            else:
                # list every collection whose name marks it as a flow
                candidates = db.collections()
                flows = []
                for flow in candidates:
                    if flow.endswith("-flow"):
                        flows.append(flow)
            entries = []
            for name in flows:
                # NOTE(review): when flows came from db.collections() the
                # name already ends in "-flow", so this lookup doubles the
                # suffix — confirm the collection naming convention.
                nodes = db.find(collection=f"{name}-flow")
                for node in nodes:
                    node["dependencies"] = ", ".join(node["dependencies"])
                entries = entries + nodes
            order = ["name", "workflow", "dependencies", "cm.modified"]
            header = ["Name", "Workflow", "Dependencies", "Modified"]
            # BUG FIX: print the accumulated entries from every flow; the
            # original printed "nodes", i.e. only the last flow's nodes,
            # leaving the "entries" accumulator unused.
            print(Printer.flatwrite(entries,
                                    order=order,
                                    header=header,
                                    output=arguments.output))
        elif arguments._run:
            runner = FlowRunner(arguments.FLOWNAME, arguments.FLOWFILE)
            runner.start_flow()
        elif arguments.visualize:
            if arguments["start"]:
                manager.start()
                print("The visualization servive started at http://127.0.0.1:8080/flow/")
            elif arguments["stop"]:
                manager.stop()
        elif arguments["delete"] and arguments.edge:
            db = FlowDatabase(arguments.FLOWNAME)
            db.remove_edge(arguments.FROM, arguments.TO)
        elif arguments["delete"] and arguments.node:
            db = FlowDatabase(arguments.FLOWNAME)
            db.remove_node(arguments.NODENAME)
        elif arguments["invert"] and arguments.edge:
            # inverting = dropping the reverse edge and adding the forward one
            db = FlowDatabase(arguments.FLOWNAME)
            db.remove_edge(arguments.TO, arguments.FROM)
            db.add_edge(arguments.FROM, arguments.TO)
        elif arguments.refresh:
            raise NotImplementedError
            # should refresh the visualization
|
cloudmesh/cloudmesh-flow | cloudmesh/flow/visualize/server.py |
import connexion
from flask import render_template, request
app = connexion.App(__name__, specification_dir="./")
# Read the yaml file to configure the endpoints
app.add_api("server.yaml")
def shutdown_server():
    """Stop the development server via Werkzeug's shutdown hook."""
    shutdown_func = request.environ.get('werkzeug.server.shutdown')
    if shutdown_func is None:
        raise RuntimeError('Not running with the Werkzeug Server')
    shutdown_func()
# create a URL route in our application for "/"
# create a URL route in our application for "/"
@app.route("/flow/")
def home():
    """Serve the landing page."""
    return render_template("home.html")
@app.route('/shutdown', methods=['GET'])
def shutdown():
    """HTTP endpoint that stops the development server."""
    shutdown_server()
    return 'Server is shutting down...'
def run():
    """Start the visualisation server, swallowing the expected shutdown error."""
    try:
        app.run(host="127.0.0.1", port=8080, debug=True)
    except RuntimeError as error:
        if str(error) == "The server is shutting down.":
            pass
        else:
            # BUG FIX: fixed the "happended" typo and include the error
            # itself, which the original message silently hid.
            print(f"An unexpected error has happened: {error}")
# Script entry point: run the server directly.
if __name__ == "__main__":
    run()
|
cloudmesh/cloudmesh-flow | tests/test_flowdb.py | ###############################################################
# pip install .; pytest -v --capture=no -v --nocapture tests/test_flowdb..py::Test_flowdb.test_001
# pytest -v --capture=no tests/test_flowdb.py
# pytest -v tests/test_flowdb.py
###############################################################
import os
from cloudmesh.common.ConfigDict import ConfigDict
from cloudmesh.common.util import HEADING
from cloudmesh.flow.Flow import FlowDatabase, parse_string_to_workflow
from cloudmesh.flow.Node import Node
import pytest
@pytest.mark.incremental
class Test_flowdb:
    """Integration tests for FlowDatabase against a live MongoDB."""

    def tearDown(self):
        pass

    def setup(self):
        # start each test from an empty "test" collection
        self.db = FlowDatabase("test")
        self.db.collection.delete_many({})

    def test_add_node(self):
        """add_node stores exactly one document."""
        test_node = Node("test test")
        self.db.add_node(test_node.toDict())
        num_nodes = self.db.collection.count_documents({})
        assert num_nodes == 1

    def test_add_edge(self):
        """add_edge records the dependency on the destination node."""
        node_1 = Node("testsource")
        node_2 = Node("testdest")
        self.db.add_node(node_1.toDict())
        self.db.add_node(node_2.toDict())
        self.db.add_edge(node_1.name, node_2.name)
        # exactly one document now has a non-empty dependencies array
        deps = self.db.collection.count_documents({"dependencies.0": {"$exists": True}})
        assert deps == 1

    def test_set_node_status(self):
        """set_node_status round-trips through get_node."""
        node_name = "status_test"
        status = "testing"
        node_1 = Node(node_name)
        self.db.add_node(node_1.toDict())
        inserted_node = self.db.get_node(node_name)
        print(inserted_node.status)
        self.db.set_node_status(node_name, status)
        reset_node = self.db.get_node(node_name)
        assert reset_node.status == status

    def test_start_flow(self):
        """start_flow marks every node pending."""
        self.db.collection.delete_many({})
        node_1 = Node("node1")
        node_2 = Node("node2")
        node_3 = Node("node3")
        node_1.add_dependency(node_2)
        for node in [node_1, node_2, node_3]:
            self.db.add_node(node.toDict())
        self.db.start_flow()
        new_collection = self.db.collection
        print(new_collection)
        new_nodes = self.db.list_nodes()
        for node in new_nodes:
            print(node.status)
            assert node.status == "pending"
        self.db = FlowDatabase("test")

    def test_remove(self):
        """remove_edge and remove_node delete what was added."""
        self.db.collection.delete_many({})
        node_1 = Node("testsource")
        node_2 = Node("testdest")
        self.db.add_node(node_1.toDict())
        self.db.add_node(node_2.toDict())
        self.db.add_edge(node_1.name, node_2.name)
        deps = self.db.collection.count_documents({"dependencies.0": {"$exists": True}})
        assert deps == 1
        self.db.remove_edge(node_1.name, node_2.name)
        deps = self.db.collection.count_documents({"dependencies.0": {"$exists": True}})
        assert deps == 0
        self.db.remove_node(node_1.name)
        nodes = self.db.collection.count_documents({"name": node_1.name})
        assert nodes == 0

    def test_flowstring_parser(self):
        """The flow-string grammar builds the expected nodes and edges."""
        self.db.collection.delete_many({})
        flowstring = "( pytesta; pytestb ) ; ( pytestc; ( pytestd || pyteste ) ) || pytestf"
        parse_string_to_workflow(flowstring, "test")
        nodes = self.db.list_nodes()
        names = [node.name for node in nodes]
        for node in nodes:
            print(node)
        for flow_string_name in ["pytesta", "pytestb", "pytestc", "pytestd", "pyteste", "pytestf"]:
            assert flow_string_name in names
        node_a = [node for node in nodes if node.name == "pytesta"][0]
        assert len(node_a.dependencies) == 0
        node_f = [node for node in nodes if node.name == "pytestf"][0]
        assert len(node_f.dependencies) == 0
        node_b = [node for node in nodes if node.name == "pytestb"][0]
        assert len(node_b.dependencies) == 1
        assert "pytesta" in node_b.dependencies
|
cloudmesh/cloudmesh-flow | cloudmesh/flow/Node.py | class Node(object):
def __init__(self, name):
self.name = name
self.dependencies = []
self.workflow = ""
self.result = {}
def add_dependency(self, other_node):
self.dependencies.append(other_node.name)
def toDict(self):
return {"name": self.name,
"dependencies": self.dependencies,
"workflow": self.workflow}
def workflow_filename(self):
return f"{self.workflow}-flow.py"
def get_command(self, filename=None):
if not filename:
filename = self.workflow_filename()
return ["python", filename, self.name]
def __repr__(self):
return f"Node name:{self.name} dependencies:{self.dependencies}"
|
cloudmesh/cloudmesh-flow | cloudmesh/flow/Flow.py | <reponame>cloudmesh/cloudmesh-flow<filename>cloudmesh/flow/Flow.py
#!/usr/bin/python
import sys
from cloudmesh.mongo.CmDatabase import CmDatabase
from cloudmesh.flow.Node import Node
from cloudmesh.mongo.DataBaseDecorator import DatabaseUpdate
from lark import Lark, Visitor
import oyaml as yaml
from cloudmesh.common.debug import VERBOSE
import inspect
from cloudmesh.common.console import Console
# Lark grammar for workflow expressions such as "( a; b ) || c":
# ";" sequences two steps, "||" runs them in parallel, and parentheses group.
grammar = """
flownode: /[a-zA-Z]+/
sequence: ";"
parallel: "||"
join: sequence | parallel
expr: group | flownode
basegroup.1: flownode join flownode
group: "(" expr ")" | basegroup | flownode join expr | expr join flownode | expr join expr
%import common.WS
%ignore WS
"""
class Get:
    """Read-only helpers that extract workflow metadata from a class."""

    @staticmethod
    def nodes(classname):
        """Return the public callables of *classname* (the task nodes)."""
        callables = [attr for attr in dir(classname)
                     if callable(getattr(classname, attr))]
        return [attr for attr in callables if not attr.startswith("_")]

    @staticmethod
    def name(classname):
        """Return the workflow name derived from the class name."""
        return f"{classname.__name__}"  # flow will be appended later

    @staticmethod
    def edges(classname):
        """Return the dependency edges declared on the class."""
        return classname.edges
class Put:
    """Helpers that persist workflow metadata into the flow database."""

    @staticmethod
    def nodes(name, nodes):
        """Store every node name in *nodes* under the workflow *name*."""
        db = FlowDatabase(name)
        for node_name in nodes:
            entry = Node(node_name)
            entry.workflow = name
            try:
                db.add_node(entry.toDict())
            except Exception as e:
                Console.error(str(e))

    @staticmethod
    def edges(name, edges):
        """Store every (node, depends_on) pair from *edges*."""
        db = FlowDatabase(name)
        for edge in edges:
            try:
                db.add_edge(edge[0], edge[1])
            except Exception as e:
                Console.error(str(e))

    @staticmethod
    def upload(classname):
        """Persist the nodes and edges discovered on *classname*."""
        discovered_nodes = Get.nodes(classname)
        workflow_name = Get.name(classname)
        discovered_edges = Get.edges(classname)
        Put.nodes(workflow_name, discovered_nodes)
        Put.edges(workflow_name, discovered_edges)
# Single module-level parser for the workflow grammar; "expr" is the start rule.
parser = Lark(grammar, start = "expr")
class Flow():
    """Base class for user-defined workflows.

    A subclass defines one method per task.  Invoked with a single
    argument (the flow file name) the workflow uploads its nodes and
    edges to the database; invoked with two arguments it executes the
    named task and stores its result.
    """

    def __init__(self, arguments):
        # arguments[0] is the flow file name, e.g. "myflow-flow.py"
        self.flowfile = arguments[0]
        # Strip the "-flow" suffix to obtain the workflow name.  If the
        # suffix is absent keep the whole name instead of silently
        # chopping the last character (str.find returns -1 in that case).
        suffix_at = self.flowfile.find("-flow")
        self.flowname = self.flowfile[:suffix_at] if suffix_at != -1 else self.flowfile
        if len(arguments) == 1:
            self._upload()
        if len(arguments) == 2:
            self.node = arguments[1]
            self._run(self.node)

    def _save(self, task_name, result):
        """
        saves the results to the database into the task with the name provided.

        :param task_name: The name of the task
        :type task_name: string
        :param result: The dict of the result
        :type result: dict
        :return: None
        :rtype: None
        """
        print("saving result to", self.flowname, result)
        db = FlowDatabase(self.flowname, True)
        db.add_node_result(task_name, result)

    def _run(self, task_name):
        """
        execute the python method in the class with the given task_name

        :param task_name: the name of the task
        :type task_name: string
        :return: the dict after the execution of the task
        :rtype: dict
        :raises ValueError: if no method with that name exists
        """
        method = getattr(self, task_name, None)
        if not callable(method):
            # The previous version left method as None and crashed with
            # "'NoneType' object is not callable"; fail clearly instead.
            raise ValueError(f"task '{task_name}' is not defined in the workflow")
        result = method()
        self._save(task_name, result)
        return result

    def _upload(self):
        """Store this workflow's nodes and edges in the database."""
        Put.upload(self.__class__)
class FlowDatabase(object):
    """MongoDB-backed storage for a single workflow's nodes and edges.

    Nodes live in the "<name>-flow" collection; a running copy of the
    workflow lives in "<name>-flow-active".
    """

    def __init__(self, name="workflow", active=False):
        self.database = CmDatabase()
        self.workflow_name = name
        self.collection = self.database.collection(f"{name}-flow")
        if active:
            # operate on the running copy instead of the definition
            self.switch_to_active_flow()

    def attributes(self, name):
        """Return the default document attributes for node *name*."""
        data = {
            "cm": {
                "kind": "flow",
                "cloud": self.workflow_name,
                "name": name
            },
            "kind": "flow",
            "cloud": self.workflow_name,
            "name": name,
            "status": "defined"
        }
        return data

    @DatabaseUpdate()
    def add_node(self, node):
        """Insert/update a node document (a dict with at least "name")."""
        name = node["name"]
        node.update(self.attributes(name))
        VERBOSE(node)
        return node

    def add_edge(self, node, depends_on):
        """Record that *node* depends on *depends_on*."""
        edge = {
            "node": node,
            "depends_on": depends_on
        }
        VERBOSE(edge)
        self.collection.update_one(
            {"name": node},
            {"$push": {"dependencies": depends_on}})

    def _node_from_db(self, db_obj):
        """Rebuild a Node object from its database document."""
        reconstructed = Node(db_obj["name"])
        reconstructed.workflow = self.workflow_name
        reconstructed.dependencies = db_obj["dependencies"]
        reconstructed.status = db_obj.get("status", "pending")
        reconstructed.result = db_obj.get("result", {})
        return reconstructed

    def remove_node(self, name):
        """Delete the node and drop it from all dependency lists."""
        self.collection.delete_one({"name": name})
        # BUG FIX: the original pulled the literal string "name" instead
        # of the node's actual name, leaving stale dependencies behind.
        self.collection.update_many({}, {"$pull": {"dependencies": name}})

    def remove_edge(self, node, depends_on):
        """Remove a single dependency from *node*."""
        self.collection.update_one({"name": node}, {"$pull": {"dependencies": depends_on}})

    def get_node(self, name=None):
        """Return the Node with the given name."""
        return self._node_from_db(self.collection.find_one({"name": name}))

    def list(self, node=None, edge=None):
        """Return a cursor over nodes, optionally filtered by name/edge."""
        query = {}
        if node: query["name"] = node
        if edge: query["dependencies"] = edge
        return self.collection.find(query)

    def list_nodes(self):
        """Return all nodes of this workflow as Node objects."""
        return [self._node_from_db(node) for node in self.list()]

    def list_edges(self):
        """Return all (to, from) dependency pairs of this workflow."""
        return self.collection.aggregate(
            [{"$unwind": "$dependencies"},
             {"$project": {"to": "$name", "from": "$dependencies"}}])

    def list_all_workflows(self):
        """Return the names of all non-active flow collections."""
        all_colls = self.database.collections()
        return [name for name in all_colls if "flow" in name and "active" not in name]

    def set_node_status(self, node, status):
        """Set the status field of a node."""
        return self.collection.update_one(
            {"name": node}, {"$set": {"status": status}})

    def find_root_nodes(self):
        """Return all pending nodes with no remaining dependencies."""
        root_nodes = self.collection.find(
            {"dependencies.0": {"$exists": False}, "status": "pending"})
        return [self._node_from_db(node) for node in root_nodes]

    def switch_to_active_flow(self):
        """Point this object at the "-active" copy of the workflow."""
        started_collection = f"{self.workflow_name}-flow-active"
        self.collection = self.database.collection(started_collection)

    def resolve_node_dependencies(self, name=None):
        """Remove *name* from every node's dependency list."""
        return self.collection.update_many(
            {"dependencies": name}, {"$pull": {"dependencies": name}})

    def add_specification(self, spec):
        # not implemented yet
        pass

    def start_flow(self):
        """Copy the workflow into the active collection, all "pending"."""
        VERBOSE("START")
        started_collection = f"{self.workflow_name}-flow-active"
        self.collection.aggregate([
            {"$project": {
                "dependencies": 1,
                "cm": 1,
                "kind": 1,
                "cloud": 1,
                "name": 1,
                "status": "pending"}},
            {"$out": started_collection}])
        self.switch_to_active_flow()

    def add_node_result(self, nodename, result):
        """Store the result dict of a finished node."""
        return self.collection.update_one({"name": nodename}, {"$set": {"result": result}})

    def add_graph(self, yamlfile):
        # not implemented yet
        pass

    def last_update(self, workflow=None):
        """
        This method returns the last modified value associated with a
        database update to a node.

        :param workflow: The name of the workflow
        :type workflow: string
        :return: The time of the last update
        :rtype: string
        """
        # Not implemented; the unreachable statements that followed the
        # raise in the original were removed.
        raise NotImplementedError
class FlowConstructor(Visitor):
    """Lark visitor that materializes a parsed flow expression.

    The caller must set ``db`` (a FlowDatabase) and ``flowname`` on the
    instance before visiting the tree.
    """

    def flownode(self, val):
        # each leaf of the expression becomes a workflow node
        name = val.children[0]
        node = Node(name)
        node.workflow = self.flowname
        self.db.add_node(node.toDict())

    def join(self, val):
        join_type = val.children[0].data
        #print("join", val.children, "as", join_type)

    def basegroup(self, val):
        #print(val, len(val.children))
        #expressions are either a single node or a group
        if len(val.children) == 1: return
        lhs = val.children[0]
        join = val.children[1]
        rhs = val.children[2]
        join_type = join.children[0].data
        #print("join of type", join_type)
        # only sequences create edges; parallel operands stay independent
        if join_type == "sequence":
            lhs_node_name = lhs.children[0]
            rhs_node_name = rhs.children[0]
            #print("join", lhs_node_name, "with", rhs_node_name, "in type", join_type)
            # rhs depends on lhs (it must run after it)
            self.db.add_edge(rhs_node_name, lhs_node_name)

    def group(self, val):
        # like basegroup, but operands may themselves be groups, so each
        # side is first resolved down to a concrete flownode
        if len(val.children) == 1: return
        lhs = val.children[0]
        join = val.children[1]
        rhs = val.children[2]
        join_type = join.children[0].data
        lhs_node = self.resolve_to_node(lhs)
        rhs_node = self.resolve_to_node(rhs)
        if join_type == "sequence":
            lhs_node_name = lhs_node.children[0]
            rhs_node_name = rhs_node.children[0]
            #print("join", lhs_node_name, "with", rhs_node_name, "in type", join_type)
            self.db.add_edge(rhs_node_name, lhs_node_name)

    def _is_node(self, val):
        #print("is node", val.data)
        return val.data == "flownode"

    def resolve_to_node(self, val):
        # walk down the right-most children until an actual flownode is found
        if self._is_node(val): return val
        else:
            #print(val.data, val.children)
            return self.resolve_to_node(val.children[-1])
def parse_string_to_workflow(flowstring, flowname):
    """Parse a workflow expression and persist it under *flowname*."""
    parsed = parser.parse(flowstring)
    constructor = FlowConstructor()
    constructor.db = FlowDatabase(flowname)
    constructor.flowname = flowname
    constructor.visit(parsed)
def parse_yaml_to_workflow(yaml_file):
    """Load a workflow spec (keys "flow" and "name") from a YAML file."""
    with open(yaml_file) as stream:
        spec = yaml.load(stream, Loader=yaml.SafeLoader)
    return parse_string_to_workflow(spec["flow"], spec["name"])
if __name__ == "__main__":
    # Usage: python Flow.py <flowname> <flowstring>
    flowstring = sys.argv[2]
    flowname = sys.argv[1]
    # list the already known workflows, then register the new one
    db = FlowDatabase()
    flows = db.list_all_workflows()
    for flow in flows:
        print(flow)
    parse_string_to_workflow(flowstring, flowname)
    #pydot__tree_to_png(tree, "ee.png")
    #flow = WorkFlow(flowname, flowstring)
    # print(flow)
    # flow.run()
    #w = WorkFlowDB("workflow01")
    #node = {"name": "world"}
    #w.add_node(node)
|
cloudmesh/cloudmesh-flow | tests/test_flowrunner.py | <filename>tests/test_flowrunner.py<gh_stars>1-10
###############################################################
# pip install .; pytest -v --capture=no -v --nocapture tests/test_flowrunner.py::Test_flowrunner.test_001
# pytest -v --capture=no tests/test_flowrunner.py
# pytest -v tests/test_flowrunner.py
###############################################################
import os
from cloudmesh.common.ConfigDict import ConfigDict
from cloudmesh.common.util import HEADING
from cloudmesh.flow import FlowRunner
import pytest
@pytest.mark.incremental
class Test_flowrunner:
    """Incremental tests that create and start a FlowRunner."""

    # noinspection PyPep8Naming
    def tearDown(self):
        # nothing to clean up yet
        pass

    def test_create(self):
        # constructing the runner must not raise
        runner = FlowRunner("test")

    def test_run(self):
        runner = FlowRunner("test")
        runner.start_flow()
        # NOTE(review): start_flow loops until running becomes False, so
        # this assertion looks like it can only pass if jobs remained at
        # loop exit — verify the intended post-condition.
        assert runner.running == True
|
cloudmesh/cloudmesh-flow | benchmark/benchmark_ingest.py | ###############################################################
# pytest -v --capture=no benchmark/benchmark_ingest.py
# pytest -v benchmark/benchmark_ingest.py
# pytest -v --capture=no benchmark/test_cms.py:Test_cms.<METHODNAME>
###############################################################
import pytest
from cloudmesh.common.debug import VERBOSE
from cloudmesh.common.Shell import Shell
from cloudmesh.common.StopWatch import StopWatch
from cloudmesh.common.util import HEADING
# number of nodes in the benchmarked linear chain
num_nodes = 50

@pytest.mark.incremental
class TestConfig:
    """Benchmarks node/edge creation and removal through the cms CLI.

    Builds a linear chain of ``num_nodes`` nodes, then tears it down,
    timing each phase with StopWatch.
    """

    def test_add_nodes(self):
        HEADING()
        StopWatch.start("add nodes")
        results = []
        for node in range(num_nodes):
            node_name = f"node{str(node)}"
            results.append(Shell.execute("cms flow node add " + node_name, shell=True))
        StopWatch.stop("add nodes")
        VERBOSE(results)

    def test_add_edges(self):
        HEADING()
        StopWatch.start("add edges")
        results = []
        # chain: node i -> node i+1
        for node in range(num_nodes - 1):
            node_name = f"node{str(node)}"
            next_node_name = f"node{str(node + 1)}"
            results.append(Shell.execute("cms flow edge add " + node_name + " " + next_node_name, shell=True))
        StopWatch.stop("add edges")
        VERBOSE(results)

    def test_remove_edges(self):
        HEADING()
        StopWatch.start("del edges")
        results = []
        for node in range(num_nodes - 1):
            node_name = f"node{str(node)}"
            next_node_name = f"node{str(node + 1)}"
            results.append(Shell.execute("cms flow edge delete " + node_name + " " + next_node_name, shell=True))
        StopWatch.stop("del edges")
        VERBOSE(results)

    def test_remove_nodes(self):
        HEADING()
        StopWatch.start("del nodes")
        results = []
        for node in range(num_nodes):
            node_name = f"node{str(node)}"
            results.append(Shell.execute("cms flow node remove " + node_name, shell=True))
        StopWatch.stop("del nodes")
        VERBOSE(results)
|
cloudmesh/cloudmesh-flow | cloudmesh/flow/visualize/manager.py | <gh_stars>1-10
import requests
import os
def shutdown():
    """Ask the local visualization server to shut itself down.

    :return: the HTTP response from the shutdown endpoint
    """
    url = "http://127.0.0.1:8080/shutdown"
    # A timeout keeps the caller from hanging forever if the server is
    # unreachable; the response is returned so callers can inspect it
    # (the original dropped it into an unused local).
    return requests.get(url, timeout=10)
def start():
    """Launch the visualization server.

    Blocks until the server process exits; assumes server.py lives in
    the current working directory.
    """
    os.system('python server.py')
|
cloudmesh/cloudmesh-flow | deprecated/FlowDecorator.py | <gh_stars>1-10
import inspect
from cloudmesh.flow.Flow import FlowDatabase
class BaseWorkFlow():
    """Deprecated base class for workflows (superseded by flow.Flow)."""

    def __init__(self, flowfile):
        # workflow name is everything before the first "-" in the file name
        self.flowname = flowfile[:flowfile.find("-")]

    def save_result_to_db(self, nodeName, result):
        """Store *result* for node *nodeName* in the active flow database."""
        print("saving result to", self.flowname, result)
        db = FlowDatabase(self.flowname, True)
        db.add_node_result(nodeName, result)

    def runCommand(self, commandName):
        """Execute the member method named *commandName*, persist and
        return its result.

        NOTE(review): if no member matches, method stays None and the
        call below raises TypeError — confirm callers always pass a
        valid task name.
        """
        method = None
        for (name, func) in inspect.getmembers(self):
            if name == commandName:
                method = func
        result = method()
        self.save_result_to_db(commandName, result)
        return result
|
cloudmesh/cloudmesh-flow | deprecated/worflowdb-deprecated.py | from cloudmesh.mongo.CmDatabase import CmDatabase
from cloudmesh.flow.Node import Node
from cloudmesh.mongo.DataBaseDecorator import DatabaseUpdate
from cloudmesh.common.debug import VERBOSE
class WorkFlowDB(object):
    """Deprecated MongoDB store for a workflow (see flow.Flow.FlowDatabase).

    Nodes live in the "<name>-flow" collection; a running copy of the
    workflow lives in "<name>-flow-active".
    """

    def __init__(self, name="workflow", active=False):
        self.database = CmDatabase()
        self.workflow_name = name
        self.collection = self.database.collection(f"{name}-flow")
        if active:
            # operate on the running copy instead of the definition
            self.switch_to_active_flow()

    def attributes(self, name):
        """Return the default document attributes for node *name*."""
        data = {
            "cm": {
                "kind": "flow",
                "cloud": self.workflow_name,
                "name": name
            },
            "kind": "flow",
            "cloud": self.workflow_name,
            "name": name,
            "status": "defined"
        }
        return data

    @DatabaseUpdate()
    def add_node(self, node):
        """Insert/update a node document (a dict with at least "name")."""
        name = node["name"]
        node.update(self.attributes(name))
        VERBOSE(node)
        return node

    def add_edge(self, node, depends_on):
        """Record that *node* depends on *depends_on*."""
        self.collection.update_one(
            {"name": node},
            {"$push": {"dependencies": depends_on}})

    def _node_from_db(self, db_obj):
        """Rebuild a Node object from its database document."""
        reconstructed = Node(db_obj["name"])
        reconstructed.workflow = self.workflow_name
        reconstructed.dependencies = db_obj["dependencies"]
        reconstructed.status = db_obj.get("status", "pending")
        reconstructed.result = db_obj.get("result", {})
        reconstructed.progress = ""
        reconstructed.modified = ""
        reconstructed.done = ""
        return reconstructed

    def remove_node(self, name):
        """Delete the node and drop it from all dependency lists."""
        self.collection.delete_one({"name": name})
        # BUG FIX: the original pulled the literal string "name" instead
        # of the node's actual name, leaving stale dependencies behind.
        self.collection.update_many({}, {"$pull": {"dependencies": name}})

    def remove_edge(self, node, depends_on):
        """Remove a single dependency from *node*."""
        self.collection.update_one({"name": node}, {"$pull": {"dependencies": depends_on}})

    def get_node(self, name=None):
        """Return the Node with the given name."""
        return self._node_from_db(self.collection.find_one({"name": name}))

    def list(self, node=None, edge=None):
        """Return a cursor over nodes, optionally filtered by name/edge."""
        query = {}
        if node: query["name"] = node
        if edge: query["dependencies"] = edge
        return self.collection.find(query)

    def list_nodes(self):
        """Return all nodes of this workflow as Node objects."""
        return [self._node_from_db(node) for node in self.list()]

    def list_edges(self):
        """Return all (to, from) dependency pairs of this workflow."""
        return self.collection.aggregate(
            [{"$unwind": "$dependencies"},
             {"$project": {"to": "$name", "from": "$dependencies"}}])

    def list_all_workflows(self):
        """Return the names of all non-active flow collections."""
        all_colls = self.database.collections()
        return [name for name in all_colls if "flow" in name and "active" not in name]

    def set_node_status(self, node, status):
        """Set the status field of a node."""
        return self.collection.update_one(
            {"name": node}, {"$set": {"status": status}})

    def find_root_nodes(self):
        """Return all pending nodes with no remaining dependencies."""
        root_nodes = self.collection.find(
            {"dependencies.0": {"$exists": False}, "status": "pending"})
        return [self._node_from_db(node) for node in root_nodes]

    def switch_to_active_flow(self):
        """Point this object at the "-active" copy of the workflow."""
        started_collection = f"{self.workflow_name}-flow-active"
        self.collection = self.database.collection(started_collection)

    def resolve_node_dependencies(self, name=None):
        """Remove *name* from every node's dependency list."""
        return self.collection.update_many(
            {"dependencies": name}, {"$pull": {"dependencies": name}})

    def add_specification(self, spec):
        # not implemented yet
        pass

    def start_flow(self):
        """Copy the workflow into the active collection, all "pending"."""
        started_collection = f"{self.workflow_name}-flow-active"
        self.collection.aggregate([
            {"$project": {
                "dependencies": 1,
                "cm": 1,
                "kind": 1,
                "cloud": 1,
                "name": 1,
                "status": "pending"}},
            {"$out": started_collection}])
        self.switch_to_active_flow()

    def add_node_result(self, nodename, result):
        """Store the result dict of a finished node."""
        return self.collection.update_one({"name": nodename}, {"$set": {"result": result}})

    def add_graph(self, yamlfile):
        # not implemented yet
        pass

    def last_update(self, workflow=None):
        """
        This method returns the last modified value associated with a
        database update to a node.

        :param workflow: The name of the workflow
        :type workflow: string
        :return: The time of the last update
        :rtype: string
        """
        # Not implemented; the unreachable statements that followed the
        # raise in the original were removed.
        raise NotImplementedError
|
cloudmesh/cloudmesh-flow | cloudmesh/flow/FlowRunner.py | from cloudmesh.flow.Flow import FlowDatabase
import subprocess
import time
import json
import webbrowser
from cloudmesh.common.console import Console
import sys
class FlowRunner(object):
    """Executes a workflow by repeatedly launching all runnable nodes.

    Each ready node (no unresolved dependencies) is started as a
    subprocess; when a process exits, its dependents are unblocked and
    started in turn.
    """

    def __init__(self, flowname, filename=None):
        self.filename = filename or f"{flowname}-flow.py"
        self.flowname = flowname
        if self.flowname is None:
            # fixed typo in the original message ("floname")
            Console.error("The flowname is not defined")
            sys.exit(1)
        self.db = FlowDatabase(flowname)
        self.running_jobs = []  # list of {"handle": Popen, "node": Node}

    def start_available_nodes(self):
        """Launch every node whose dependencies are all resolved."""
        available_nodes = self.db.find_root_nodes()
        for node in available_nodes:
            print("starting a new node", node)
            self.start_node(node)

    def start_flow(self):
        """Run the whole workflow until no jobs remain."""
        self.running = True
        self.db.start_flow()
        self.start_available_nodes()
        while(self.running):
            self.check_on_running_processes()
            self.running = len(self.running_jobs) > 0

    def start_node(self, node):
        """Mark *node* running and spawn its task as a subprocess."""
        self.db.set_node_status(node.name, "running")
        print("running command", node.get_command())
        process = subprocess.Popen(node.get_command(), stdout=subprocess.PIPE)
        self.running_jobs.append({"handle": process, "node": node})

    def resolve_node(self, node, status):
        """Record a finished process and unblock its dependents."""
        resolution = "finished" if status == 0 else "error"
        self.db.set_node_status(node.name, resolution)
        #self.db.add_node_result(node.name, output)
        if status == 0:
            self.db.resolve_node_dependencies(node.name)
        # easiest way to remove object, but will be slow for large
        # workflows. to improve later
        self.running_jobs = [job for job in self.running_jobs if job["node"].name != node.name]

    def check_on_running_processes(self):
        """Poll running jobs; resolve finished ones and start new nodes."""
        for process in self.running_jobs:
            process_handle = process["handle"]
            status = process_handle.poll()
            if status is None:
                # still running
                continue
            else:
                #printed_output = process_handle.communicate()[0]
                #print(printed_output)
                #output = json.loads(printed_output)
                self.resolve_node(process["node"], status)
                self.start_available_nodes()
        time.sleep(3)

    def visualize(self):
        """Open the monitoring page for this flow in a browser."""
        url = "http://127.0.0.1:8080/flow/monitor/" + self.flowname + "-flow"
        webbrowser.open(url)
if __name__ == "__main__":
    # Run the workflow named "flow" end to end.
    runner = FlowRunner("flow")
    runner.start_flow()
|
aTeK7/deep-stereo1.4 | prio_queue_test.py | <reponame>aTeK7/deep-stereo1.4
import multiprocessing
import os
import time
#processing queue
# Bounded queues shared between the main process and the worker pool;
# maxsize=5 makes put() block once five items are outstanding.
input_queue = multiprocessing.Queue(maxsize=5)
output_queue = multiprocessing.Queue(maxsize=5)
def worker_main(in_queue, out_queue):
    """Worker loop: take an item, simulate work, emit "<item>-Done".

    Runs forever; each get() blocks until an item is available.
    """
    #print os.getpid(),"working"
    while True:
        task = in_queue.get(True)
        #print os.getpid(), "got", task
        time.sleep(2)  # simulate a "long" operation
        out_queue.put(task + "-Done")
# Pool of 5 workers, each running worker_main on the shared queues.
the_pool = multiprocessing.Pool(5, worker_main,(input_queue, output_queue,))
# don't forget the comma here ^
for q in range(10):
    print "Adding minibatch to processing queue: %s" % q
    for i in range(5):
        print("%s - Put HEllo" % i)
        input_queue.put("hello")
    # NOTE(review): valid only as a Python 2 print statement, where the
    # expression is ("...") % (...); under Python 3 this applies % to
    # print's None return value and raises TypeError.
    print("\tIN:%s OUT:%s") % (input_queue.full(), output_queue.full())
    time.sleep(10)
aTeK7/deep-stereo1.4 | old/homography.py | import cv2
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Python 2 experiment: back-project the image corners of CAM0 through
# the inverse intrinsics and draw the result as a 3-D wireframe.
img = cv2.imread('beach.jpg')
rows, cols, ch = img.shape

# CAM0 rotation
R = np.array([[0.949462, 0.046934, 0.310324],
              [-0.042337, 0.998867, -0.021532],
              [-0.310985, 0.007308, 0.950373]])
# Sweep plane
z_0 = np.array([0, 0, 100])
# Translate vector
t = np.array([0, 0, 0])
# Camera Intrinsic Parameters (CAM0)
Intrinsics_CAM0 = np.array([[1918.270000, 2.489820, 494.085000],
                            [0, 1922.580000, 447.736000],
                            [0, 0, 1]])
# image size in pixels
nx = 1024
ny = 768
fc_left = np.array([Intrinsics_CAM0[0, 0], Intrinsics_CAM0[1, 1]])
print "Focal length: "
print fc_left
cc_left = np.array([Intrinsics_CAM0[0, 2], Intrinsics_CAM0[1, 2]])
print "Principal Point Offset: "
print cc_left
# FC = focal lenght
a = np.array([[1/fc_left[0], 0, 0],
              [0, 1/fc_left[1], 0],
              [0, 0, 1]])
# CC = principal point
b = np.array([[1, 0, -cc_left[0]],
              [0, 1, -cc_left[1]],
              [0, 0, 1]])
# homogeneous pixel coordinates of the four image corners (closed loop)
c = np.array([[0, nx-1, nx-1, 0, 0],
              [0, 0, ny-1, ny-1, 0],
              [1, 1, 1, 1, 1]])
# chain a * b * c: apply the inverse intrinsics to the corner pixels
I_CAM = reduce(np.dot, [a, b, c])
BASE_left = np.array([[0,1,0,0,0,0],[0,0,0,1,0,0],[0,0,0,0,0,1]])
# BASE_left[:, 0] is the zero column; replicate it five times and place
# that block between two copies of the corner block before reshaping
q = np.vstack(BASE_left[:, 0]).dot(np.ones((1, 5)))
I_CAM = np.reshape(np.concatenate([I_CAM, q, I_CAM], axis=1), (3, 15))
print I_CAM
print I_CAM.transpose()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# plot with z up: matplotlib (x, y, z) = camera (x, z, -y)
ax.plot_wireframe(I_CAM[0,:], I_CAM[2,:], -I_CAM[1,:])
plt.show()
print "Script ends"
aTeK7/deep-stereo1.4 | reprojection/process_video.py | from PIL import Image
from yuv_reader import YUVReader
from reprojection import Reprojection
from optparse import OptionParser
from six.moves import cPickle
import matplotlib.pyplot as plt
import numpy as np
import os
from dataset_preparation.ballet_camera import BalletCamera
def main():
    """Build a plane-sweep volume for one frame of a YUV video.

    Reprojects frame 0 of the input video from the original camera into
    the virtual camera at a range of depths, then either writes one PNG
    per depth plane or pickles the stacked volume.
    """
    parser = OptionParser()
    parser.add_option("-o", "--camera-original", dest="cameraOriginalFile",
                      help="Original camera file to load intrinsic matrix from", metavar="FILE")
    parser.add_option("-v", "--camera-virtual", dest="cameraVirtualFile",
                      help="Virtual camera file to load intrinsic matrix from", metavar="FILE")
    parser.add_option("-i", "--input", dest="video",
                      help="video file to process", metavar="FILE")
    parser.add_option("-r", "--result", dest="output",
                      help="Save output images on that path")
    parser.add_option("-p", "--prefix", dest="prefix", default="depth",
                      help="Output prefix")
    parser.add_option("-g", "--generate-images", dest="generate_images", default=False, action="store_true",
                      help="Generate PNG images for each depth plane")
    # Depth sweeping options
    parser.add_option("--depth_start", dest="depth_start", default=1,
                      help="Depth sweep start")
    parser.add_option("--depth_stop", dest="depth_stop", default=40,
                      help="Depth sweep stop")
    parser.add_option("--depth_step", dest="depth_step", default=0.5,
                      help="Depth sweep steep increment", metavar="FILE")
    (options, args) = parser.parse_args()

    # Create output dir if it does not exist
    if not os.path.exists(options.output):
        os.makedirs(options.output)

    # Read camera parameters from cameraFile argument
    print "Original camera file: %s" % options.cameraOriginalFile
    print "Virtual camera file: %s" % options.cameraVirtualFile
    cam_original = BalletCamera(options.cameraOriginalFile)
    cam_virtual = BalletCamera(options.cameraVirtualFile)

    # Get a frame for YUV video
    print "Load YUV (i480) video file: %s" % options.video
    video = YUVReader(options.video, (1024, 768))
    frame = video.getRGBFrame(0)

    # Reproject at all needed depths
    print "Saving plane sweep volume: %s" % options.output
    r = Reprojection(cam_original, cam_virtual, frame)

    # Reproject at depths 1m to 200m in steps of 2 meters (total 100 images)
    # NOTE(review): the comment above looks stale — defaults are
    # actually 1..40 meters in 0.5 m steps; confirm.
    volume_array = []
    i = 0
    depth_from = options.depth_start
    depth_to = options.depth_stop
    depth_step = options.depth_step
    print "Sweeping from (%s meters)->(%s meters) in steps of (%s meters)" % (depth_from, depth_to, depth_step)
    for depth in np.arange(depth_from, depth_to, depth_step):
        print "Calculate projection at depth: (%s meters)" % depth
        result_prj = r.reproject(depth=depth)
        volume_array.append(result_prj)
        # if the generate image is selected then output an image for each depth plane
        if options.generate_images:
            im = Image.fromarray(result_prj, 'RGBA')
            path = os.path.join(options.output, "%s_%03d_%03d.png" % (options.prefix, depth, i))
            print "Saving image: %s" % path
            im.save(path)
        i += 1
    print "Depth sweep plane calculation done."

    # Save plane sweep volume in a cPickle file with stacked images
    if not options.generate_images:
        print "Saving pickle file..."
        psw_volume = np.dstack(volume_array)
        vol_name = os.path.join(options.output, "psw_%s_%s.pkl" % (depth_from, depth_to))
        cPickle.dump(psw_volume, open(vol_name, "wb"))
        print "Plane sweep volume file saved in %s" % vol_name

if __name__ == "__main__": main()
|
aTeK7/deep-stereo1.4 | dataset_preparation/kitti_camera.py | from reprojection.camera import Camera
import os
import numpy as np
class KittiCamera:
    """Loads KITTI odometry calibration and pose data for sequences 00-10.

    Per-camera rotation/translation/intrinsics come from each sequence's
    calib_cam_to_cam.txt; camera poses come from the odometry pose files.
    """

    def __init__(self, calibration_path, poses_path):
        self.calibration_path = calibration_path
        self.poses_path = poses_path
        self.rotations = {}     # sequence name -> {camera name -> 3x3 R}
        self.translations = {}  # sequence name -> {camera name -> T vector}
        self.intrinsics = {}    # sequence name -> {camera name -> 3x3 K}
        self.poses = {}         # sequence name -> list of 3x4 pose matrices
        for s in range(11):
            # Cache calibration data
            sq_name = "%02d" % s
            # sequence 03 has no calibration data in this data set
            if s != 3:
                self.rotations[sq_name], self.translations[sq_name], self.intrinsics[sq_name] \
                    = KittiCamera.read_calibration_file(self.calibration_path, sq_name)
            # NOTE(review): indentation reconstructed — confirm whether
            # poses for sequence 03 should also be skipped.
            self.poses[sq_name] = KittiCamera.read_poses_file(poses_path, sq_name)

    @staticmethod
    def read_calibration_file(calibration_path, sequence_name):
        """
        Read calibration file from Kitti dataset

        :param calibration_path: root directory of the calibration data
        :param sequence_name: zero padded sequence name, e.g. "00"
        :return: (rotations, translations, intrinsics) dicts keyed by
                 camera name "00".."03"
        """
        # Read calibration file
        filename = os.path.join(calibration_path, sequence_name, "calib_cam_to_cam.txt")
        variables = dict()
        with open(filename, 'r') as f:
            for line in f:
                key, value = KittiCamera.read_line(line)
                variables[key] = value
        rotations = {}
        translations = {}
        intrinsics = {}
        for camera in ["00", "01", "02", "03"]:
            r = [float(x) for x in variables['R_' + camera]]
            t = [float(x) for x in variables['T_' + camera]]
            k = [float(x) for x in variables['K_' + camera]]
            rotations[camera] = np.reshape(np.array(r), (3, 3))
            translations[camera] = np.array(t)
            intrinsics[camera] = np.reshape(np.array(k), (3, 3))
        return rotations, translations, intrinsics

    @staticmethod
    def read_line(line):
        """Split one "key: v1 v2 ..." calibration line into key and values."""
        key, val = line.split(': ', 1)
        val = val.rstrip().split(' ')
        return key, val

    @staticmethod
    def read_poses_file(poses_path, sq_name):
        """Read all 3x4 pose matrices of one sequence from <sq_name>.txt."""
        pose_filename = "%s.txt" % sq_name
        with open(os.path.join(poses_path, pose_filename), 'r') as f:
            matrixes = []
            for l in f.readlines():
                nums = np.array([float(s) for s in l.split(' ')]).reshape(3, 4)
                matrixes.append(nums)
            return matrixes

    def getCamera(self, camera):
        """Return a Camera for camera index 0-3.

        NOTE(review): self.rotations/translations/intrinsics are keyed
        by sequence name ("00".."10"), not by camera index, so these
        lookups look broken — confirm intended usage before relying on
        this method.
        """
        if camera > 3 or camera < 0:
            raise NameError("Camera should be an int from 0 to 3")
        return Camera(self.rotations[camera],
                      self.translations[camera],
                      self.intrinsics[camera])

    def getNamedCamera(self, cam_name, sequence, image_number):
        """Return a Camera posed at frame *image_number* of *sequence*.

        :param cam_name: camera name "00".."03"
        :param sequence: sequence index 0-10 (int)
        :param image_number: frame index within the sequence
        """
        if not isinstance(sequence, int):
            print("Sequence variable should be int from 0 to 10")
            raise ValueError("Sequence variable should be int from 0 to 10")
        if sequence < 0 or sequence > 10:
            print("Sequence variable should be int from 0 to 10")
            raise ValueError("Sequence variable should be int from 0 to 10")
        if cam_name not in ["00", "01", "02", "03"]:
            print("Camera should be image_2 or image_3")
            raise ValueError("Camera should be image_2 or image_3")
        sq_name = "%02d" % sequence
        # pose is a 3x4 [R|T] matrix
        pose = self.poses[sq_name][image_number]
        R = pose[:3, :3]
        T = pose[:, 3]
        return Camera(R,
                      T,
                      self.intrinsics[sq_name][cam_name])
|
aTeK7/deep-stereo1.4 | reprojection/yuv_reader.py | import numpy as np
class YUVReader(object):
    """Reads frames from a raw planar YUV 4:2:0 (12 bits/pixel) file.

    :param file: path of the raw .yuv file
    :param size: (width, height) of each frame in pixels
    """

    def __init__(self, file, size):
        self.width = size[0]
        self.height = size[1]
        self.fd = open(file, 'rb')
        self.plane_size = self.width * self.height
        bitsPerPixel = 12
        # Use integer division: "/" yields a float in Python 3, which
        # made file.seek() fail with TypeError in getRGBFrame.
        self.frame_size = self.plane_size * bitsPerPixel // 8

    def getRGBFrame(self, frame):
        """Return frame number *frame* as an (height, width, 3) uint8 RGB array."""
        self.fd.seek(frame * self.frame_size)
        # Load the Y (luminance) data from the stream
        Y = np.fromfile(self.fd, dtype=np.uint8, count=self.width * self.height). \
            reshape((self.height, self.width))
        # Load the UV (chrominance) planes (quarter resolution) from the
        # stream, and double their size to match the luminance plane
        U = np.fromfile(self.fd, dtype=np.uint8, count=(self.width // 2) * (self.height // 2)). \
            reshape((self.height // 2, self.width // 2)). \
            repeat(2, axis=0).repeat(2, axis=1)
        V = np.fromfile(self.fd, dtype=np.uint8, count=(self.width // 2) * (self.height // 2)). \
            reshape((self.height // 2, self.width // 2)). \
            repeat(2, axis=0).repeat(2, axis=1)
        # Stack the YUV channels together, crop the actual resolution,
        # convert to floating point for later calculations, and apply the
        # standard biases.  np.float64 replaces the deprecated np.float
        # alias, which was removed in NumPy 1.24.
        YUV = np.dstack((Y, U, V))[:self.height, :self.width, :].astype(np.float64)
        YUV[:, :, 0] = YUV[:, :, 0] - 16     # Offset Y by 16
        YUV[:, :, 1:] = YUV[:, :, 1:] - 128  # Offset UV by 128
        # YUV conversion matrix from ITU-R BT.601 version (SDTV)
        #               Y       U       V
        M = np.array([[1.164, 0.000, 1.596],    # R
                      [1.164, -0.392, -0.813],  # G
                      [1.164, 2.017, 0.000]])   # B
        # Take the dot product with the matrix to produce RGB output,
        # clamp the results to byte range and convert to bytes
        RGB = YUV.dot(M.T).clip(0, 255).astype(np.uint8)
        return RGB
aTeK7/deep-stereo1.4 | tf_deep_stereo/shared.py | import tensorflow as tf
import numpy as np
from sklearn import preprocessing
import math
def conv_relu(input, size, depth, in_depth=None):
    """A VALID, stride-1 2-D convolution followed by bias add and ReLU.

    Creates "weights" and "bias" variables in the current variable scope.

    :param input: 4-D input tensor
    :param size: spatial filter size (size x size)
    :param depth: number of output channels
    :param in_depth: number of input channels
    :return: the activated output tensor
    """
    # Xavier initialization for the weights, see
    # http://stats.stackexchange.com/questions/47590/what-are-good-initial-weights-in-a-neural-network
    # (the original also computed an unused math.sqrt(size*size) value
    # for an abandoned random-normal initializer; both removed)
    weights = tf.get_variable("weights", (size, size, in_depth, depth),
                              initializer=tf.contrib.layers.xavier_initializer())
    bias = tf.get_variable("bias", [depth],
                           initializer=tf.constant_initializer(value=0.0))
    conv = tf.nn.conv2d(input, weights,
                        strides=[1, 1, 1, 1], padding='VALID', use_cudnn_on_gpu=True)
    return tf.nn.relu(tf.nn.bias_add(conv, bias))
def input_definition(inputs, size=30, depths=[], filters=[]):
    """One 5x5 conv tower per camera view, concatenated and passed
    through two or three shared conv layers.

    :param inputs: dict of 4-channel image tensors keyed "cam0", "cam1",
                   "cam3", "cam4"
    :param size: nominal patch size of this tower (informational)
    :param depths: output channel count of each shared layer (2 or 3 values)
    :param filters: filter size of each shared layer
    :return: the output tensor of the last shared layer
    """
    with tf.variable_scope("conv0_cam0"):
        conv_cam0 = conv_relu(inputs["cam0"], size=5, in_depth=4, depth=64)
    with tf.variable_scope("conv0_cam1"):
        conv_cam1 = conv_relu(inputs["cam1"], size=5, in_depth=4, depth=64)
    with tf.variable_scope("conv0_cam3"):
        conv_cam2 = conv_relu(inputs["cam3"], size=5, in_depth=4, depth=64)
    with tf.variable_scope("conv0_cam4"):
        conv_cam3 = conv_relu(inputs["cam4"], size=5, in_depth=4, depth=64)
    # 4 cameras x 64 channels = 256 channels after concatenation
    conc_input = tf.concat([conv_cam0, conv_cam1, conv_cam2, conv_cam3], 3)
    with tf.variable_scope("conv1"):
        r1 = conv_relu(conc_input, size=filters[0], in_depth=256, depth=depths[0])
    with tf.variable_scope("conv2"):
        r2 = conv_relu(r1, size=filters[1], in_depth=depths[0], depth=depths[1])
    # only if third element is defined
    if len(depths) == 3:
        with tf.variable_scope("conv3"):
            r3 = conv_relu(r2, size=filters[2], in_depth=depths[1], depth=depths[2])
    else:
        r3 = r2
    out = r3
    return out
def input_structure(input, plane_num):
    """Build four multi-resolution towers for one sweep plane and fuse
    them into a single batch-normalized feature map.

    :param input: InputOrganizer supplying the per-plane placeholders
    :param plane_num: index of the depth plane
    :return: normalized concatenation of the (resized) tower outputs
    """
    with tf.variable_scope("in30"):
        in_30 = input_definition(input.get_for_plane_size(plane_num, 30),
                                 size=30,
                                 depths=[96, 48, 16],
                                 filters=[3, 5, 5])
    with tf.variable_scope("in18"):
        in_18 = input_definition(input.get_for_plane_size(plane_num, 18),
                                 size=18,
                                 depths=[40, 40, 8],
                                 filters=[3, 3, 3])
    with tf.variable_scope("in12"):
        in_12 = input_definition(input.get_for_plane_size(plane_num, 12),
                                 size=12,
                                 depths=[32, 8],
                                 filters=[3, 3])
    with tf.variable_scope("in10"):
        in_10 = input_definition(input.get_for_plane_size(plane_num, 10),
                                 size=10,
                                 depths=[32, 8],
                                 filters=[3, 3])
    # upsample the smaller towers to 16x16 and stack everything on the
    # channel axis (in_30 is assumed to already be 16x16 spatially)
    c = tf.concat([
        in_30,
        tf.image.resize_images(in_18, [16, 16], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR),
        tf.image.resize_images(in_12, [16, 16], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR),
        tf.image.resize_images(in_10, [16, 16], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    ], 3)
    # fixed, non-trainable batch-norm parameters: mean 0, variance 1,
    # offset 0, scale 1 — effectively an identity transform with a small
    # epsilon
    n_out = 1
    mean = tf.Variable(tf.constant(0.0, shape=[n_out]),
                       name='mean', trainable=False)
    variance = tf.Variable(tf.constant(1.0, shape=[n_out]),
                           name='variance', trainable=False)
    variance_epsilon = tf.Variable(tf.constant(0.0001, shape=[n_out]),
                                   name='epsilon', trainable=False)
    offset = tf.Variable(tf.constant(0.0, shape=[n_out]),
                         name='offset', trainable=False)
    scale = tf.Variable(tf.constant(1.0, shape=[n_out]),
                        name='scale', trainable=False)
    normalized = tf.nn.batch_normalization(c, mean=mean,
                                           variance=variance,
                                           variance_epsilon=variance_epsilon,
                                           offset=offset,
                                           scale=scale)
    return normalized
class InputOrganizer(object):
    """Creates and feeds the placeholder pyramid for the deep-stereo net.

    For every depth plane there are four patch sizes (30, 18, 12, 10) and
    four cameras ("cam0", "cam1", "cam3", "cam4"), each a 4-channel image
    placeholder, plus one 8x8x3 target patch.
    """

    def __init__(self, batch_size, num_planes=96, meanzero=False):
        """
        :param meanzero: Use mean zero when retrieving feed dict
        """
        self.meanzero = meanzero
        if self.meanzero:
            print("Input data will be normalized on range [-1, 1]")
        else:
            print("Not using mean 0 range [0.0, 1.0]!")
        self.cameras = ["cam0", "cam1", "cam3", "cam4"]
        self.sizes = [30, 18, 12, 10]
        self.num_planes = num_planes
        self.num_channels = 4
        # placeholders[plane][size][camera] -> tf.placeholder
        self.placeholders = {}
        with tf.name_scope("input_placeholders"):
            for plane in range(self.num_planes):
                self.placeholders[plane] = {}
                for size in self.sizes:
                    self.placeholders[plane][size] = {}
                    for cam in self.cameras:
                        name = "%s_%s_%s" % (plane, size, cam)
                        self.placeholders[plane][size][cam] = tf.placeholder(tf.float32,
                                                                             name=name,
                                                                             shape=(batch_size, size, size, self.num_channels))
            # Target patch
            self.target = tf.placeholder(tf.float32,
                                         name="target",
                                         shape=(batch_size, 8, 8, 3))

    def get_for_plane_size(self, plane, size):
        """Return the 4-camera placeholder dict for one plane/size."""
        input = self.placeholders[plane][size]
        assert len(input) == 4
        return input

    def get_target_placeholder(self):
        """Return the 8x8 RGB target patch placeholder."""
        return self.target

    def preprocess_batch(self, image_batch):
        """Map image values from [0, 1] to [-1, 1]."""
        #s = image_batch.shape
        #for im_idx in range(s[0]):
        #    for ch_idx in range(s[3]):
        #        image_batch[im_idx, :, :, ch_idx] = preprocessing.scale(image_batch[im_idx, :, :, ch_idx])
        return (image_batch * 2.0) - 1.0

    def get_feed_dict(self, images_feed):
        """Assemble a feed dict from a list of per-sample input dicts.

        Each element of *images_feed* provides a "target" array and a
        "planes" dict keyed "plane<p>_<cam>_<size>".
        """
        feed_dict = {}
        # Target images
        tar = np.concatenate([t['target'] for t in images_feed], axis=0)
        if self.meanzero:
            feed_dict[self.target.name] = self.preprocess_batch(tar)
        else:
            feed_dict[self.target.name] = tar
        # add 4 images with 4 resolutions for each plane (96 planes in total)
        for plane in range(self.num_planes):
            for size in self.sizes:
                for cam in self.cameras:
                    item_name = "plane%s_%s_%s" % (plane, cam, size)
                    images = [im['planes'][item_name] for im in images_feed]
                    c_images = np.concatenate(images, axis=0)
                    if self.meanzero:
                        feed_dict[self.placeholders[plane][size][cam].name] = self.preprocess_batch(c_images)
                    else:
                        feed_dict[self.placeholders[plane][size][cam].name] = c_images
        return feed_dict
|
aTeK7/deep-stereo1.4 | tf_deep_stereo/tf_samplegraph.py | import tensorflow as tf
import os
import socket
from datetime import datetime
import traceback
from tf_deep_stereo.shared import conv_relu
import matplotlib.image as mpimg
import numpy as np
def inference_graph():
    """Build a tiny two-layer conv/relu graph over a fixed-size KITTI frame.

    Returns (first input placeholder, final activation tensor).
    NOTE: input2/input3 are created for parity with the 3-view setup but are
    not wired into the network.
    """
    with tf.name_scope("inputs"):
        input1 = tf.placeholder(tf.float32, name='inputpicture0', shape=(1, 376, 1241, 3))
        input2 = tf.placeholder(tf.float32, name='inputpicture1', shape=(1, 376, 1241, 3))
        input3 = tf.placeholder(tf.float32, name='inputpicture2', shape=(1, 376, 1241, 3))
    with tf.variable_scope("c1"):
        first_act = conv_relu(input1, 1, 3, in_depth=3)
    with tf.variable_scope("c2"):
        second_act = conv_relu(first_act, 1, 3, in_depth=3)
    return input1, second_act
def lossF(net_out):
    """Toy loss: the sum of every element of the network output."""
    return tf.reduce_sum(net_out)
def training(loss, learning_rate):
    """Sets up the training Ops.
    Creates a summarizer to track the loss over time in TensorBoard.
    Creates an optimizer and applies the gradients to all trainable variables.
    The Op returned by this function is what must be passed to the
    `sess.run()` call to cause the model to train.
    Args:
      loss: Loss tensor, from loss().
      learning_rate: The learning rate to use for gradient descent.
    Returns:
      train_op: The Op for training.
    """
    # Add a scalar summary for the snapshot loss.
    # NOTE(review): tf.scalar_summary is the pre-1.0 TensorFlow API
    # (renamed tf.summary.scalar in TF >= 1.0) — this file targets old TF.
    tf.scalar_summary(loss.op.name, loss)
    # Create the adagrad optimizer with the given learning rate.
    optimizer = tf.train.AdagradOptimizer(learning_rate)
    # Create a variable to track the global step.
    global_step = tf.Variable(0, name='global_step', trainable=False)
    # Use the optimizer to apply the gradients that minimize the loss
    # (and also increment the global step counter) as a single training step.
    train_op = optimizer.minimize(loss, global_step=global_step)
    return train_op
def run_training():
    """Build the demo graph, run a single training step, and write summaries.

    Fixes over the original:
    - the docstring claimed "MNIST"; this actually feeds one KITTI frame;
    - the session was only closed after an exception — it is now released
      on every path via ``finally``.
    """
    sess = None
    try:
        # Tell TensorFlow that the model will be built into the default Graph.
        with tf.Graph().as_default():
            # Build a Graph that computes predictions from the inference model.
            input, net_out = inference_graph()
            print("Graph built! continuing...")
            # Add to the Graph the Ops for loss calculation.
            loss = lossF(net_out)
            # Add to the Graph the Ops that calculate and apply gradients.
            train_op = training(loss, 0.001)
            print("Merging summaries continuing...")
            # Build the summary operation based on the TF collection of Summaries.
            summary_op = tf.merge_all_summaries()
            print("Initialize variables...")
            # Add the variable initializer Op.
            init = tf.initialize_all_variables()
            print("Starting session...")
            # Create a session for running Ops on the Graph.
            sess = tf.Session()
            print("Creating SummaryWritter...")
            # Unique run name: timestamp plus hostname.
            summary_name = datetime.now().strftime("%Y_%B_%d_%H_%M_%S")
            summary_name = "%s-%s" % (summary_name, socket.gethostname())
            summary_dir = os.path.join("/Users/boyander/test-tf", summary_name)
            # Run the Op to initialize the variables.
            sess.run(init)
            # Instantiate a SummaryWriter to output summaries and the Graph.
            summary_writer = tf.train.SummaryWriter(summary_dir, sess.graph)
            print("Started SummaryWriter -> %s" % summary_dir)
            # Feed a single KITTI frame as the only batch element.
            feed_dict = {
                input: np.expand_dims(mpimg.imread('/Volumes/Bahia/kitti-dataset/sequences/00/image_2/000000.png'), axis=0)
            }
            sess.run(train_op, feed_dict=feed_dict)
            summary_str = sess.run(summary_op, feed_dict=feed_dict)
            summary_writer.add_summary(summary_str, 0)
            summary_writer.flush()
            # read validation batch
    except Exception as e:
        print("Exception on TRAIN: %s" % e)
        traceback.print_exc()
    finally:
        # Release the TF session whether or not training succeeded.
        if sess:
            sess.close()
def main():
    # Entry point so the script can also be invoked programmatically.
    run_training()
# Run main
if __name__ == "__main__":
    main()
|
aTeK7/deep-stereo1.4 | tf_deep_stereo/select_tower.py | import tensorflow as tf
from shared import input_definition, conv_relu, input_structure
def select_tower(input, num_planes=96):
    """Build the selection tower: shared per-plane convolutions, a tanh head,
    and a per-pixel softmax over the depth planes; returns the softmax volume.

    NOTE(review): indentation/scope nesting was reconstructed from a
    whitespace-mangled source — confirm against the original file.
    """
    plane_concats = []
    with tf.variable_scope("select_tower"):
        with tf.variable_scope("input") as scope:
            for plane in range(num_planes):
                # Share conv weights across planes: reuse from plane 1 onward.
                if plane == 1:
                    scope.reuse_variables()
                print("Select tower (plane=%s)" % plane)
                c = input_structure(input, plane)
                with tf.variable_scope("conv4-ac"):
                    conv4 = conv_relu(c, size=1, in_depth=40, depth=50)
                with tf.variable_scope("conv5"):
                    conv5 = conv_relu(conv4, size=1, in_depth=50, depth=32)
                with tf.variable_scope("conv6"):
                    conv6 = conv_relu(conv5, size=3, in_depth=32, depth=4)
                plane_concats.append(conv6)
        with tf.variable_scope("convolution"):
            # Stack all per-plane features along channels: num_planes * 4.
            out = tf.concat(plane_concats, 3)
            with tf.variable_scope("conv7"):
                conv7 = conv_relu(out, size=3, in_depth=num_planes*4, depth=480)
            with tf.variable_scope("conv8"):
                conv8 = conv_relu(conv7, size=3, in_depth=480, depth=480)
            with tf.variable_scope("conv9_before_tanh"):
                conv9 = conv_relu(conv8, size=3, in_depth=480, depth=num_planes)
                tanh = tf.tanh(conv9)
        # Some info summaries
        tf.summary.scalar(conv9.op.name, tf.reduce_mean(tf.reduce_sum(conv9, [1, 2, 3])))
        tf.summary.scalar(tanh.op.name, tf.reduce_mean(tf.reduce_sum(tanh, [1, 2, 3])))
        softmaxes = []
        # Earlier implementation kept for reference: reshaped splits + biased
        # softmax over flattened 8x8 patches.
        #
        # tanh[:, 0, 0, :]
        # splits = tf.split(3, 96, tanh)
        # split = 0
        # for s in splits:
        #     q = tf.reshape(s, shape=(batch_size, 64))
        #     # softmax bias
        #     b = tf.get_variable("softmax_bias_%s" % split, [64], initializer=tf.constant_initializer(value=0.0))
        #     # do softmax
        #     nnaming = "softmax%s" % split
        #     probs = tf.nn.softmax(q + b, name=nnaming)
        #     #probs = tf.nn.softmax(q, name=nnaming)
        #     m = tf.reshape(probs, shape=[batch_size, 8, 8, 1])
        #     softmaxes.append(m)
        #     if s == 0:
        #         tf.summary.scalar("softmax_sum_topleft", tf.reduce_sum(probs))
        #
        #     split += 1
        # maxes = tf.concat(3, softmaxes, name="select_tower_out")
        # Per-pixel softmax over the plane axis, with a learned bias per
        # (row, col) position of the 8x8 output patch.
        col_r = []
        for i in range(8):
            row_r = []
            for j in range(8):
                ind_name = "%s_%s" % (i, j)
                #print ind_name
                bias = tf.get_variable("softmax_bias_%s" % ind_name,
                                       [num_planes], initializer=tf.constant_initializer(value=0.0))
                subtensor = tanh[:, i, j, :]
                row_r.append(tf.nn.softmax(subtensor + bias))
            col_r.append(tf.concat([tf.expand_dims(row, 1) for row in row_r], 1))
        result = tf.concat([tf.expand_dims(col, 1) for col in col_r], 1)
        # view selection tower for first element in batch
        extraction = tf.expand_dims(tf.transpose(result[0, :, :, :], perm=[2, 0, 1]), 3)
        tf.summary.image("sample_0_10",
                         extraction,
                         max_outputs=num_planes)
        return result
|
aTeK7/deep-stereo1.4 | dataset_preparation/test_posed.py | import numpy as np
np.set_printoptions(suppress=True)
import unittest
from kitti_camera import KittiCamera
class CameraParserTest(unittest.TestCase):
    """Smoke test: load KITTI calibration data and print each rig camera.

    Fix: the print statements were Python 2 syntax, which is a SyntaxError
    under Python 3; they are now print() calls (identical output).
    """

    # Local dataset locations — adjust to the machine running the tests.
    sequences_path = "/Volumes/Bahia/kitti-dataset/sequences"
    calibration_path = "/Volumes/Bahia/kitti-dataset/calibration"

    def test_posed_camera(self):
        """Instantiate all four cameras of sequence 00 and dump them."""
        kittiCams = KittiCamera(self.calibration_path, "00")
        camera0 = kittiCams.getCamera(0)
        camera1 = kittiCams.getCamera(1)
        camera2 = kittiCams.getCamera(2)
        camera3 = kittiCams.getCamera(3)
        print("Camera 0\n", camera0)
        print("Camera 1\n", camera1)
        print("Camera 2\n", camera2)
        print("Camera 3\n", camera3)
        print("Done Test posed cameras")
|
aTeK7/deep-stereo1.4 | dataset_preparation/ballet_camera.py | from reprojection.camera import Camera
import numpy as np
class BalletCamera(Camera):
    """Camera parsed from a Ballet-dataset calibration text file.

    The file lists, in order: image size, a 3x3 rotation matrix
    (lines 5-7), a translation vector (line 10), a 3x3 calibration /
    intrinsics matrix (lines 13-15), a projection matrix and distortion
    parameters — all tab-separated. Only rotation, translation and
    intrinsics are consumed here.
    """

    def __init__(self, filename):
        with open(filename, 'r') as calib_file:
            lines = calib_file.readlines()

        def first_three_floats(line_no):
            # Each matrix row is tab-separated; trailing tokens are ignored.
            return [float(tok) for tok in lines[line_no].split('\t')[0:3]]

        rotation = np.array([first_three_floats(n) for n in (5, 6, 7)])
        intrinsics = np.array([first_three_floats(n) for n in (13, 14, 15)])
        translation = first_three_floats(10)
        super(BalletCamera, self).__init__(rotation, translation, intrinsics)
class DepthEncoder:
    """Convert between Ballet-dataset depth-map intensities and metric depth.

    The dataset encodes the depth z (meters) of pixel (r, c) as an 8-bit
    intensity P via
        z(r, c) = 1.0 / ((P / 255.0) * (1/MinZ - 1/MaxZ) + 1/MaxZ)
    with MinZ = 42.0 and MaxZ = 130.0; the world origin is the optical
    center of the 5th camera.

    Fix: ``test_metrics`` used Python 2 print statements, a SyntaxError
    under Python 3; they are now print() calls with identical output.
    """

    # Depth range of the encoding, in meters.
    MIN_Z = 42.0
    MAX_Z = 130.0

    @staticmethod
    def intensity_to_meters(intensity):
        """Map an 8-bit depth intensity (0..255) to depth in meters."""
        min_z, max_z = DepthEncoder.MIN_Z, DepthEncoder.MAX_Z
        return 1.0 / ((intensity / 255.0) * (1.0 / min_z - 1.0 / max_z) + 1.0 / max_z)

    @staticmethod
    def meters_to_intensity(meters):
        """Inverse of :meth:`intensity_to_meters`."""
        min_z, max_z = DepthEncoder.MIN_Z, DepthEncoder.MAX_Z
        return (1.0 / meters - 1.0 / max_z) / (1.0 / min_z - 1.0 / max_z) * 255.0

    @staticmethod
    def test_metrics():
        """Round-trip sanity check, printed to stdout."""
        intensity = 10.0
        meters = DepthEncoder.intensity_to_meters(intensity)
        print("Intensity to meters: (%s) -> (%s meters)" % (intensity, meters))
        intensity = DepthEncoder.meters_to_intensity(meters)
        print("Meters to intensity: (%s meters) -> (%s)" % (meters, intensity))
|
aTeK7/deep-stereo1.4 | old/load_camera.py | import numpy as np
from enum import Enum
from glumpy import gloo, glm
import math
from glumpy.transforms.rotate import _rotation
np.set_printoptions(suppress=True)
class CameraRigType(Enum):
    # Position of a camera within the 3-camera rig; string values are
    # ordered labels (left-to-right) used for display/identification.
    Left = "1-Left"
    Virtual = "2-Virtual"
    Right = "3-Right"
class Camera(object):
    """Pinhole camera (intrinsics K, rotation R, translation t) with helpers
    that convert the calibration into OpenGL projection matrices and build a
    glumpy image plane for rendering.

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    source; confirm nesting against the original file.
    """

    def __init__(self, intrinsics, rotation, translation, nx=1024, ny=768):
        # nx/ny: image width/height in pixels (defaults match the Ballet set).
        self.nx = nx
        self.ny = ny
        # Stored as 3x3 row-major matrices regardless of input layout.
        self.intrinsics = np.reshape(intrinsics, (3, 3))
        self.rotation = np.reshape(rotation, (3, 3))
        self.translation = translation

    def get_principal_point(self):
        # (cx, cy) from the intrinsic matrix.
        return np.array([self.intrinsics[0, 2], self.intrinsics[1, 2]])

    def get_focal_length(self):
        # (fx, fy) from the intrinsic matrix diagonal.
        return np.array([self.intrinsics[0, 0], self.intrinsics[1, 1]])

    def get_skew(self):
        # Axis skew term K[0, 1].
        return self.intrinsics[0, 1]

    def get_extrinsics(self):
        # 3x4 [R | t] matrix.
        return np.concatenate((self.rotation, np.vstack(self.translation)), axis=1)

    @property
    def rotationX(self):
        # Euler X angle (radians) extracted from R.
        return math.atan2(self.rotation[2,1], self.rotation[2,2])

    @property
    def rotationY(self):
        # Euler Y angle (radians) extracted from R.
        return math.atan2(-self.rotation[2,0], math.sqrt((self.rotation[2,1]**2)+(self.rotation[2,2]**2)))

    @property
    def rotationZ(self):
        # Euler Z angle (radians) extracted from R.
        return math.atan2(self.rotation[1,0], self.rotation[0,0])

    @property
    def rotationAxis(self):
        # Euler angles [X, Y, Z] in radians.
        return np.array([self.rotationX, self.rotationY, self.rotationZ])

    def camera_plane_at(self, depth=50.0):
        """Build a textured quad at z=depth as glumpy vertex/index buffers.

        Returns (vertices, filled): a gloo.VertexBuffer with position,
        texcoord and normal attributes, and a gloo.IndexBuffer of triangles.
        """
        #p = np.array([
        #    (-1, 1, d),
        #    (1, 1, d),
        #    (1, -1, d),
        #    (-1, -1, d),
        #    ], dtype=float)
        center = self.get_principal_point()
        # NOTE(review): CX/CY are computed but unused by the active code path
        # (only the commented-out variant below consumed them).
        CX = center[0]/2.0
        CY = center[1]/2.0
        #points = np.array([
        #    -CX, -CY, depth,
        #    self.nx - 1-CX, -CY, depth,
        #    self.nx - 1-CX, self.ny-1-CY, depth,
        #    -CX, self.ny-1-CY, depth
        #])
        points = np.array([
            (-1, 1, depth),
            (1, 1, depth),
            (1, -1, depth),
            (-1, -1, depth),
        ])
        points = np.reshape(points, (4, 3))
        # Face Normals
        n = np.array([[0, 0, 1], [0, 0, 1]])
        # Texture coords
        t = np.array([
            [1, 0],
            [0, 0],
            [0, 1],
            [1, 1]
        ])
        faces_p = [0, 1, 2, 3]
        faces_n = [0, 0, 0, 0]
        faces_t = [0, 1, 2, 3]
        # Interleaved vertex layout expected by the shaders.
        vtype = [('a_position', np.float32, 3),
                 ('a_texcoord', np.float32, 2),
                 ('a_normal', np.float32, 3)]
        itype = np.uint32
        vertices = np.zeros(4, vtype)
        vertices['a_position'] = points[faces_p]
        vertices['a_normal'] = n[faces_n]
        vertices['a_texcoord'] = t[faces_t]
        # Two triangles per quad face; indices tiled and offset per face.
        filled = np.resize(
            np.array([0, 1, 2, 0, 2, 3], dtype=itype), 6 * (2 * 3))
        filled += np.repeat(4 * np.arange(6, dtype=itype), 6)
        vertices = vertices.view(gloo.VertexBuffer)
        filled = filled.view(gloo.IndexBuffer)
        return vertices, filled

    def get_projection_matrix(self):
        # Full 3x4 projection P = K [R | t].
        C = np.append(self.rotation, np.vstack(self.translation), axis=1)
        return self.intrinsics.dot(C)

    def get_intrinsic_opengl(self, near_clip=1, far_clip=100):
        """
        :param near_clip: near_clip near clipping plane z-location, can be set arbitrarily > 0, controls the mapping of z-coordinates for OpenGL
        :param far_clip: far_clip far clipping plane z-location, can be set arbitrarily > near_clip, controls the mapping of z-coordinate for OpenGL
        :return: 4x4 OpenGL frustum matrix built from the camera intrinsics
        """
        # alpha x-axis focal length, from camera intrinsic matrix
        alpha = self.intrinsics[0,0]
        # alpha y-axis focal length, from camera intrinsic matrix
        beta = self.intrinsics[1,1]
        # skew x and y axis skew, from camera intrinsic matrix
        skew = self.intrinsics[0,1]
        # u0 image origin x-coordinate, from camera intrinsic matrix
        u0 = self.intrinsics[0,2]
        # v0 image origin y-coordinate, from camera intrinsic matrix
        v0 = self.intrinsics[1,2]
        # These parameters define the final viewport that is rendered into by the camera.
        L = 0
        R = self.nx
        B = 0
        T = self.ny
        viewport = np.array([L, B, R-L, T-B])
        """
        construct an orthographic matrix which maps from projected
        coordinates to normalized device coordinates in the range
        [-1, 1]. OpenGL then maps coordinates in NDC to the current
        """
        ortho = np.zeros((4, 4))
        ortho[0, 0] = 2.0 / (R - L)
        ortho[0, 3] = -(R + L) / (R - L)
        ortho[1, 1] = 2.0 / (T - B)
        ortho[1, 3] = -(T + B) / (T - B)
        ortho[2, 2] = -2.0 / (far_clip - near_clip)
        ortho[2, 3] = -(far_clip + near_clip) / (far_clip - near_clip)
        ortho[3, 3] = 1.0
        """
        construct a projection matrix, this is identical to the
        projection matrix computed for the intrinsic, except an
        additional row is inserted to map the z-coordinate to OpenGL.
        """
        tproj = np.reshape([
            alpha, skew, -u0, 0,
            0, beta, -v0, 0,
            0, 0, (near_clip+far_clip), near_clip*far_clip,
            0, 0, -1.0, 0
        ], (4, 4))
        """
        resulting OpenGL frustum is the product of the orthographic
        mapping to normalized device coordinates and the augmented
        camera intrinsic matrix
        """
        frustum = ortho.dot(tproj)
        return frustum

    def get_extrinsic_opengl(self):
        # 4x4 homogeneous extrinsic matrix [[R, t], [0, 0, 0, 1]].
        C = np.append(self.rotation, np.vstack(self.translation), axis=1)
        return np.vstack([C, [0, 0, 0, 1]])

    def projection_opengl(self, near_clip=1, far_clip=100):
        # Combined OpenGL projection: frustum * extrinsics.
        return self.get_intrinsic_opengl(near_clip, far_clip).dot(self.get_extrinsic_opengl())
# Hard-coded Ballet-dataset calibrations for three adjacent rig cameras
# (intrinsics K, rotation R and translation t, flattened row-major).
camera_0 = Camera(
    intrinsics=np.array([
        1918.270000, 2.489820, 494.085000,
        0.000000, 1922.580000, 447.736000,
        0.000000, 0.000000, 1.000000
    ]),
    rotation=np.array([
        0.949462, 0.046934, 0.310324,
        -0.042337, 0.998867, -0.021532,
        -0.310985, 0.007308, 0.950373,
    ]),
    translation=np.array([-15.094651, 0.189829, 1.383263])
)
# Middle camera; CameraLoader treats this one as the "virtual" viewpoint.
camera_1 = Camera(
    intrinsics=np.array([
        1913.690000, -0.143610, 533.307000,
        0.000000, 1918.170000, 398.171000,
        0.000000, 0.000000, 1.000000
    ]),
    rotation=np.array([
        0.972850, 0.010365, 0.231187,
        -0.012981, 0.999864, 0.009794,
        -0.231056, -0.012528, 0.972852,
    ]),
    translation=np.array([-11.589320, -0.355771, 1.045534])
)
camera_2 = Camera(
    intrinsics=np.array([
        1914.070000, 0.343703, 564.645000,
        0.000000, 1918.500000, 428.422000,
        0.000000, 0.000000, 1.000000
    ]),
    rotation=np.array([
        0.989230, 0.003946, 0.146295,
        -0.004391, 0.999983, 0.002724,
        -0.146283, -0.003337, 0.989230,
    ]),
    translation=np.array([-7.784865, -0.431597, 1.392058])
)
class CameraLoader(object):
    """Hold the left/virtual/right rig cameras and re-center world
    coordinates on the virtual (middle) camera.

    Fix: several Python 2 print statements (a SyntaxError under Python 3,
    and inconsistent with the print() call already used in set_camera)
    are now print() calls with identical output.
    """

    def __init__(self):
        self.selectedCamera = CameraRigType.Left
        self.c_left = camera_0
        self.c_virtual = camera_1
        self.c_right = camera_2
        # Save distance from virtual to cameras
        v_left = self.c_virtual.translation - self.c_left.translation
        v_right = self.c_virtual.translation - self.c_right.translation
        print("Left distance vector:")
        print(v_left)
        print("Right distance vector:")
        print(v_right)
        # Make virtual camera origin of coords
        self.c_virtual.translation = np.array([0, 0, 0])
        self.c_left.translation = v_left
        self.c_right.translation = v_right
        # Align rotation of virtual camera with axis
        rotation = self.c_virtual.rotationAxis
        print("Virtual camera rotation [X,Y,Z] (radians):")
        print(rotation)

    def set_camera(self, cam):
        # NOTE(review): stores to self.camera, but get_camera() dispatches on
        # its own argument — this attribute looks unused; confirm before removing.
        print("Switch to camera (%s)" % cam)
        self.camera = cam

    def get_camera(self, cameraType, near_clip=1, far_clip=100):
        """Return the OpenGL projection matrix for the requested rig camera."""
        if cameraType is CameraRigType.Left:
            return self.c_left.projection_opengl(near_clip, far_clip)
        elif cameraType is CameraRigType.Virtual:
            return self.c_virtual.projection_opengl(near_clip, far_clip)
        elif cameraType is CameraRigType.Right:
            return self.c_right.projection_opengl(near_clip, far_clip)
        raise AttributeError("Camera not found!")

    def get_virtual_rotation(self):
        # NOTE(review): reads the module-level camera_1 rather than
        # self.c_virtual; they alias the same object here — confirm intent.
        return camera_1.rotation

    def get_virtual_translation(self):
        return camera_1.translation
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.