id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
13,600
noxdafox/pebble
pebble/pool/thread.py
worker_thread
def worker_thread(context): """The worker thread routines.""" queue = context.task_queue parameters = context.worker_parameters if parameters.initializer is not None: if not run_initializer(parameters.initializer, parameters.initargs): context.state = ERROR return for task in get_next_task(context, parameters.max_tasks): execute_next_task(task) queue.task_done()
python
def worker_thread(context):
    """Main loop executed by each pool thread.

    Runs the optional initializer first; if it fails, the pool context is
    flagged as ERROR and the thread exits. Otherwise tasks are consumed
    and executed until the task source is exhausted.
    """
    task_queue = context.task_queue
    params = context.worker_parameters

    # Abort the thread (and flag the whole pool) on initializer failure.
    if params.initializer is not None and not run_initializer(
            params.initializer, params.initargs):
        context.state = ERROR
        return

    for task in get_next_task(context, params.max_tasks):
        execute_next_task(task)
        task_queue.task_done()
[ "def", "worker_thread", "(", "context", ")", ":", "queue", "=", "context", ".", "task_queue", "parameters", "=", "context", ".", "worker_parameters", "if", "parameters", ".", "initializer", "is", "not", "None", ":", "if", "not", "run_initializer", "(", "parameters", ".", "initializer", ",", "parameters", ".", "initargs", ")", ":", "context", ".", "state", "=", "ERROR", "return", "for", "task", "in", "get_next_task", "(", "context", ",", "parameters", ".", "max_tasks", ")", ":", "execute_next_task", "(", "task", ")", "queue", ".", "task_done", "(", ")" ]
The worker thread routines.
[ "The", "worker", "thread", "routines", "." ]
d8f3d989655715754f0a65d7419cfa584491f614
https://github.com/noxdafox/pebble/blob/d8f3d989655715754f0a65d7419cfa584491f614/pebble/pool/thread.py#L155-L167
13,601
noxdafox/pebble
pebble/common.py
stop_process
def stop_process(process): """Does its best to stop the process.""" process.terminate() process.join(3) if process.is_alive() and os.name != 'nt': try: os.kill(process.pid, signal.SIGKILL) process.join() except OSError: return if process.is_alive(): raise RuntimeError("Unable to terminate PID %d" % os.getpid())
python
def stop_process(process):
    """Does its best to stop the process.

    Asks politely via terminate() first; if the process is still alive
    after a 3 second grace period, it is killed with SIGKILL (POSIX only).

    Raises:
        RuntimeError: if the process could not be terminated.
    """
    process.terminate()
    process.join(3)  # grace period before escalating

    if process.is_alive() and os.name != 'nt':
        try:
            os.kill(process.pid, signal.SIGKILL)
            process.join()
        except OSError:
            return  # process vanished in the meantime: nothing left to do

    if process.is_alive():
        # BUG FIX: the original reported os.getpid() (this process) instead
        # of the PID of the worker that refused to terminate.
        raise RuntimeError("Unable to terminate PID %d" % process.pid)
[ "def", "stop_process", "(", "process", ")", ":", "process", ".", "terminate", "(", ")", "process", ".", "join", "(", "3", ")", "if", "process", ".", "is_alive", "(", ")", "and", "os", ".", "name", "!=", "'nt'", ":", "try", ":", "os", ".", "kill", "(", "process", ".", "pid", ",", "signal", ".", "SIGKILL", ")", "process", ".", "join", "(", ")", "except", "OSError", ":", "return", "if", "process", ".", "is_alive", "(", ")", ":", "raise", "RuntimeError", "(", "\"Unable to terminate PID %d\"", "%", "os", ".", "getpid", "(", ")", ")" ]
Does its best to stop the process.
[ "Does", "its", "best", "to", "stop", "the", "process", "." ]
d8f3d989655715754f0a65d7419cfa584491f614
https://github.com/noxdafox/pebble/blob/d8f3d989655715754f0a65d7419cfa584491f614/pebble/common.py#L143-L156
13,602
noxdafox/pebble
pebble/common.py
send_result
def send_result(pipe, data): """Send result handling pickling and communication errors.""" try: pipe.send(data) except (pickle.PicklingError, TypeError) as error: error.traceback = format_exc() pipe.send(RemoteException(error, error.traceback))
python
def send_result(pipe, data):
    """Deliver *data* over *pipe*, degrading gracefully on pickling errors.

    When the payload cannot be pickled, a RemoteException wrapping the
    original error (with its formatted traceback attached) is sent instead.
    """
    try:
        pipe.send(data)
    except (pickle.PicklingError, TypeError) as error:
        trace = format_exc()
        error.traceback = trace
        pipe.send(RemoteException(error, trace))
[ "def", "send_result", "(", "pipe", ",", "data", ")", ":", "try", ":", "pipe", ".", "send", "(", "data", ")", "except", "(", "pickle", ".", "PicklingError", ",", "TypeError", ")", "as", "error", ":", "error", ".", "traceback", "=", "format_exc", "(", ")", "pipe", ".", "send", "(", "RemoteException", "(", "error", ",", "error", ".", "traceback", ")", ")" ]
Send result handling pickling and communication errors.
[ "Send", "result", "handling", "pickling", "and", "communication", "errors", "." ]
d8f3d989655715754f0a65d7419cfa584491f614
https://github.com/noxdafox/pebble/blob/d8f3d989655715754f0a65d7419cfa584491f614/pebble/common.py#L177-L183
13,603
noxdafox/pebble
pebble/concurrent/process.py
process
def process(*args, **kwargs): """Runs the decorated function in a concurrent process, taking care of the result and error management. Decorated functions will return a concurrent.futures.Future object once called. The timeout parameter will set a maximum execution time for the decorated function. If the execution exceeds the timeout, the process will be stopped and the Future will raise TimeoutError. """ timeout = kwargs.get('timeout') # decorator without parameters if len(args) == 1 and len(kwargs) == 0 and callable(args[0]): return _process_wrapper(args[0], timeout) else: # decorator with parameters if timeout is not None and not isinstance(timeout, (int, float)): raise TypeError('Timeout expected to be None or integer or float') def decorating_function(function): return _process_wrapper(function, timeout) return decorating_function
python
def process(*args, **kwargs):
    """Runs the decorated function in a concurrent process, taking care
    of the result and error management.

    Decorated functions will return a concurrent.futures.Future object
    once called.

    The timeout parameter will set a maximum execution time for the
    decorated function. If the execution exceeds the timeout, the process
    will be stopped and the Future will raise TimeoutError.
    """
    timeout = kwargs.get('timeout')

    # Bare usage: ``@process`` hands us the target function directly.
    if len(args) == 1 and not kwargs and callable(args[0]):
        return _process_wrapper(args[0], timeout)

    # Parametrized usage: validate the timeout, then build the decorator.
    if timeout is not None and not isinstance(timeout, (int, float)):
        raise TypeError('Timeout expected to be None or integer or float')

    def decorating_function(function):
        return _process_wrapper(function, timeout)

    return decorating_function
[ "def", "process", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "timeout", "=", "kwargs", ".", "get", "(", "'timeout'", ")", "# decorator without parameters", "if", "len", "(", "args", ")", "==", "1", "and", "len", "(", "kwargs", ")", "==", "0", "and", "callable", "(", "args", "[", "0", "]", ")", ":", "return", "_process_wrapper", "(", "args", "[", "0", "]", ",", "timeout", ")", "else", ":", "# decorator with parameters", "if", "timeout", "is", "not", "None", "and", "not", "isinstance", "(", "timeout", ",", "(", "int", ",", "float", ")", ")", ":", "raise", "TypeError", "(", "'Timeout expected to be None or integer or float'", ")", "def", "decorating_function", "(", "function", ")", ":", "return", "_process_wrapper", "(", "function", ",", "timeout", ")", "return", "decorating_function" ]
Runs the decorated function in a concurrent process, taking care of the result and error management. Decorated functions will return a concurrent.futures.Future object once called. The timeout parameter will set a maximum execution time for the decorated function. If the execution exceeds the timeout, the process will be stopped and the Future will raise TimeoutError.
[ "Runs", "the", "decorated", "function", "in", "a", "concurrent", "process", "taking", "care", "of", "the", "result", "and", "error", "management", "." ]
d8f3d989655715754f0a65d7419cfa584491f614
https://github.com/noxdafox/pebble/blob/d8f3d989655715754f0a65d7419cfa584491f614/pebble/concurrent/process.py#L36-L61
13,604
noxdafox/pebble
pebble/concurrent/process.py
_worker_handler
def _worker_handler(future, worker, pipe, timeout): """Worker lifecycle manager. Waits for the worker to be perform its task, collects result, runs the callback and cleans up the process. """ result = _get_result(future, pipe, timeout) if isinstance(result, BaseException): if isinstance(result, ProcessExpired): result.exitcode = worker.exitcode future.set_exception(result) else: future.set_result(result) if worker.is_alive(): stop_process(worker)
python
def _worker_handler(future, worker, pipe, timeout):
    """Worker lifecycle manager.

    Waits for the worker to perform its task, collects the outcome,
    stores it on *future* and cleans up the worker process.
    """
    outcome = _get_result(future, pipe, timeout)

    if isinstance(outcome, BaseException):
        # A ProcessExpired carries the worker's exit code back to the caller.
        if isinstance(outcome, ProcessExpired):
            outcome.exitcode = worker.exitcode
        future.set_exception(outcome)
    else:
        future.set_result(outcome)

    if worker.is_alive():
        stop_process(worker)
[ "def", "_worker_handler", "(", "future", ",", "worker", ",", "pipe", ",", "timeout", ")", ":", "result", "=", "_get_result", "(", "future", ",", "pipe", ",", "timeout", ")", "if", "isinstance", "(", "result", ",", "BaseException", ")", ":", "if", "isinstance", "(", "result", ",", "ProcessExpired", ")", ":", "result", ".", "exitcode", "=", "worker", ".", "exitcode", "future", ".", "set_exception", "(", "result", ")", "else", ":", "future", ".", "set_result", "(", "result", ")", "if", "worker", ".", "is_alive", "(", ")", ":", "stop_process", "(", "worker", ")" ]
Worker lifecycle manager. Waits for the worker to be perform its task, collects result, runs the callback and cleans up the process.
[ "Worker", "lifecycle", "manager", "." ]
d8f3d989655715754f0a65d7419cfa584491f614
https://github.com/noxdafox/pebble/blob/d8f3d989655715754f0a65d7419cfa584491f614/pebble/concurrent/process.py#L92-L110
13,605
noxdafox/pebble
pebble/concurrent/process.py
_function_handler
def _function_handler(function, args, kwargs, pipe): """Runs the actual function in separate process and returns its result.""" signal.signal(signal.SIGINT, signal.SIG_IGN) result = process_execute(function, *args, **kwargs) send_result(pipe, result)
python
def _function_handler(function, args, kwargs, pipe):
    """Runs the actual function in a separate process and ships back its result.

    SIGINT is ignored so the worker is only stopped through the pool's
    own termination machinery.
    """
    signal.signal(signal.SIGINT, signal.SIG_IGN)

    outcome = process_execute(function, *args, **kwargs)
    send_result(pipe, outcome)
[ "def", "_function_handler", "(", "function", ",", "args", ",", "kwargs", ",", "pipe", ")", ":", "signal", ".", "signal", "(", "signal", ".", "SIGINT", ",", "signal", ".", "SIG_IGN", ")", "result", "=", "process_execute", "(", "function", ",", "*", "args", ",", "*", "*", "kwargs", ")", "send_result", "(", "pipe", ",", "result", ")" ]
Runs the actual function in separate process and returns its result.
[ "Runs", "the", "actual", "function", "in", "separate", "process", "and", "returns", "its", "result", "." ]
d8f3d989655715754f0a65d7419cfa584491f614
https://github.com/noxdafox/pebble/blob/d8f3d989655715754f0a65d7419cfa584491f614/pebble/concurrent/process.py#L113-L119
13,606
noxdafox/pebble
pebble/concurrent/process.py
_get_result
def _get_result(future, pipe, timeout): """Waits for result and handles communication errors.""" counter = count(step=SLEEP_UNIT) try: while not pipe.poll(SLEEP_UNIT): if timeout is not None and next(counter) >= timeout: return TimeoutError('Task Timeout', timeout) elif future.cancelled(): return CancelledError() return pipe.recv() except (EOFError, OSError): return ProcessExpired('Abnormal termination') except Exception as error: return error
python
def _get_result(future, pipe, timeout):
    """Poll *pipe* for the worker's result, handling communication errors.

    Returns the received payload, or an exception instance describing a
    timeout, a cancellation, or the worker's abnormal termination.
    """
    elapsed = count(step=SLEEP_UNIT)

    try:
        while True:
            if pipe.poll(SLEEP_UNIT):
                return pipe.recv()
            # The counter only advances when a timeout is actually set.
            if timeout is not None and next(elapsed) >= timeout:
                return TimeoutError('Task Timeout', timeout)
            if future.cancelled():
                return CancelledError()
    except (EOFError, OSError):
        return ProcessExpired('Abnormal termination')
    except Exception as error:
        return error
[ "def", "_get_result", "(", "future", ",", "pipe", ",", "timeout", ")", ":", "counter", "=", "count", "(", "step", "=", "SLEEP_UNIT", ")", "try", ":", "while", "not", "pipe", ".", "poll", "(", "SLEEP_UNIT", ")", ":", "if", "timeout", "is", "not", "None", "and", "next", "(", "counter", ")", ">=", "timeout", ":", "return", "TimeoutError", "(", "'Task Timeout'", ",", "timeout", ")", "elif", "future", ".", "cancelled", "(", ")", ":", "return", "CancelledError", "(", ")", "return", "pipe", ".", "recv", "(", ")", "except", "(", "EOFError", ",", "OSError", ")", ":", "return", "ProcessExpired", "(", "'Abnormal termination'", ")", "except", "Exception", "as", "error", ":", "return", "error" ]
Waits for result and handles communication errors.
[ "Waits", "for", "result", "and", "handles", "communication", "errors", "." ]
d8f3d989655715754f0a65d7419cfa584491f614
https://github.com/noxdafox/pebble/blob/d8f3d989655715754f0a65d7419cfa584491f614/pebble/concurrent/process.py#L122-L137
13,607
noxdafox/pebble
pebble/concurrent/process.py
_trampoline
def _trampoline(name, module, *args, **kwargs): """Trampoline function for decorators. Lookups the function between the registered ones; if not found, forces its registering and then executes it. """ function = _function_lookup(name, module) return function(*args, **kwargs)
python
def _trampoline(name, module, *args, **kwargs):
    """Trampoline function for decorators.

    Looks the function up among the registered ones; if not found,
    forces its registration and then executes it.
    """
    target = _function_lookup(name, module)

    return target(*args, **kwargs)
[ "def", "_trampoline", "(", "name", ",", "module", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "function", "=", "_function_lookup", "(", "name", ",", "module", ")", "return", "function", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Trampoline function for decorators. Lookups the function between the registered ones; if not found, forces its registering and then executes it.
[ "Trampoline", "function", "for", "decorators", "." ]
d8f3d989655715754f0a65d7419cfa584491f614
https://github.com/noxdafox/pebble/blob/d8f3d989655715754f0a65d7419cfa584491f614/pebble/concurrent/process.py#L152-L161
13,608
noxdafox/pebble
pebble/concurrent/process.py
_function_lookup
def _function_lookup(name, module): """Searches the function between the registered ones. If not found, it imports the module forcing its registration. """ try: return _registered_functions[name] except KeyError: # force function registering __import__(module) mod = sys.modules[module] getattr(mod, name) return _registered_functions[name]
python
def _function_lookup(name, module):
    """Searches the function between the registered ones.

    If not found, it imports the module forcing its registration.
    """
    if name not in _registered_functions:
        # Importing the module runs its decorators, which registers
        # the function as a side effect.
        __import__(module)
        getattr(sys.modules[module], name)

    return _registered_functions[name]
[ "def", "_function_lookup", "(", "name", ",", "module", ")", ":", "try", ":", "return", "_registered_functions", "[", "name", "]", "except", "KeyError", ":", "# force function registering", "__import__", "(", "module", ")", "mod", "=", "sys", ".", "modules", "[", "module", "]", "getattr", "(", "mod", ",", "name", ")", "return", "_registered_functions", "[", "name", "]" ]
Searches the function between the registered ones. If not found, it imports the module forcing its registration.
[ "Searches", "the", "function", "between", "the", "registered", "ones", ".", "If", "not", "found", "it", "imports", "the", "module", "forcing", "its", "registration", "." ]
d8f3d989655715754f0a65d7419cfa584491f614
https://github.com/noxdafox/pebble/blob/d8f3d989655715754f0a65d7419cfa584491f614/pebble/concurrent/process.py#L164-L176
13,609
noxdafox/pebble
pebble/pool/process.py
worker_process
def worker_process(params, channel): """The worker process routines.""" signal(SIGINT, SIG_IGN) if params.initializer is not None: if not run_initializer(params.initializer, params.initargs): os._exit(1) try: for task in worker_get_next_task(channel, params.max_tasks): payload = task.payload result = process_execute( payload.function, *payload.args, **payload.kwargs) send_result(channel, Result(task.id, result)) except (EnvironmentError, OSError, RuntimeError) as error: os._exit(error.errno if error.errno else 1) except EOFError: os._exit(0)
python
def worker_process(params, channel):
    """The worker process routines.

    Runs the optional initializer, then executes tasks pulled from the
    channel until exhaustion. The child always leaves through os._exit
    so no pool machinery runs in the worker after a failure.
    """
    signal(SIGINT, SIG_IGN)

    if params.initializer is not None:
        if not run_initializer(params.initializer, params.initargs):
            os._exit(1)

    try:
        for task in worker_get_next_task(channel, params.max_tasks):
            payload = task.payload
            result = process_execute(
                payload.function, *payload.args, **payload.kwargs)
            send_result(channel, Result(task.id, result))
    except (OSError, RuntimeError) as error:
        # BUG FIX: RuntimeError has no ``errno`` attribute, so the original
        # ``error.errno`` raised AttributeError inside this handler.
        # ``EnvironmentError`` was also dropped: it is an alias of OSError
        # in Python 3, making the original except clause redundant.
        os._exit(getattr(error, 'errno', None) or 1)
    except EOFError:
        os._exit(0)
[ "def", "worker_process", "(", "params", ",", "channel", ")", ":", "signal", "(", "SIGINT", ",", "SIG_IGN", ")", "if", "params", ".", "initializer", "is", "not", "None", ":", "if", "not", "run_initializer", "(", "params", ".", "initializer", ",", "params", ".", "initargs", ")", ":", "os", ".", "_exit", "(", "1", ")", "try", ":", "for", "task", "in", "worker_get_next_task", "(", "channel", ",", "params", ".", "max_tasks", ")", ":", "payload", "=", "task", ".", "payload", "result", "=", "process_execute", "(", "payload", ".", "function", ",", "*", "payload", ".", "args", ",", "*", "*", "payload", ".", "kwargs", ")", "send_result", "(", "channel", ",", "Result", "(", "task", ".", "id", ",", "result", ")", ")", "except", "(", "EnvironmentError", ",", "OSError", ",", "RuntimeError", ")", "as", "error", ":", "os", ".", "_exit", "(", "error", ".", "errno", "if", "error", ".", "errno", "else", "1", ")", "except", "EOFError", ":", "os", ".", "_exit", "(", "0", ")" ]
The worker process routines.
[ "The", "worker", "process", "routines", "." ]
d8f3d989655715754f0a65d7419cfa584491f614
https://github.com/noxdafox/pebble/blob/d8f3d989655715754f0a65d7419cfa584491f614/pebble/pool/process.py#L375-L392
13,610
noxdafox/pebble
pebble/pool/process.py
task_transaction
def task_transaction(channel): """Ensures a task is fetched and acknowledged atomically.""" with channel.lock: if channel.poll(0): task = channel.recv() channel.send(Acknowledgement(os.getpid(), task.id)) else: raise RuntimeError("Race condition between workers") return task
python
def task_transaction(channel):
    """Ensures a task is fetched and acknowledged atomically."""
    with channel.lock:
        if not channel.poll(0):
            # Another worker consumed the task we were signalled about.
            raise RuntimeError("Race condition between workers")

        task = channel.recv()
        channel.send(Acknowledgement(os.getpid(), task.id))

    return task
[ "def", "task_transaction", "(", "channel", ")", ":", "with", "channel", ".", "lock", ":", "if", "channel", ".", "poll", "(", "0", ")", ":", "task", "=", "channel", ".", "recv", "(", ")", "channel", ".", "send", "(", "Acknowledgement", "(", "os", ".", "getpid", "(", ")", ",", "task", ".", "id", ")", ")", "else", ":", "raise", "RuntimeError", "(", "\"Race condition between workers\"", ")", "return", "task" ]
Ensures a task is fetched and acknowledged atomically.
[ "Ensures", "a", "task", "is", "fetched", "and", "acknowledged", "atomically", "." ]
d8f3d989655715754f0a65d7419cfa584491f614
https://github.com/noxdafox/pebble/blob/d8f3d989655715754f0a65d7419cfa584491f614/pebble/pool/process.py#L410-L419
13,611
noxdafox/pebble
pebble/pool/process.py
PoolManager.schedule
def schedule(self, task): """Schedules a new Task in the PoolManager.""" self.task_manager.register(task) self.worker_manager.dispatch(task)
python
def schedule(self, task):
    """Register *task* with the pool and hand it to a worker."""
    task_manager = self.task_manager
    worker_manager = self.worker_manager

    task_manager.register(task)
    worker_manager.dispatch(task)
[ "def", "schedule", "(", "self", ",", "task", ")", ":", "self", ".", "task_manager", ".", "register", "(", "task", ")", "self", ".", "worker_manager", ".", "dispatch", "(", "task", ")" ]
Schedules a new Task in the PoolManager.
[ "Schedules", "a", "new", "Task", "in", "the", "PoolManager", "." ]
d8f3d989655715754f0a65d7419cfa584491f614
https://github.com/noxdafox/pebble/blob/d8f3d989655715754f0a65d7419cfa584491f614/pebble/pool/process.py#L194-L197
13,612
noxdafox/pebble
pebble/pool/process.py
PoolManager.process_next_message
def process_next_message(self, timeout): """Processes the next message coming from the workers.""" message = self.worker_manager.receive(timeout) if isinstance(message, Acknowledgement): self.task_manager.task_start(message.task, message.worker) elif isinstance(message, Result): self.task_manager.task_done(message.task, message.result)
python
def process_next_message(self, timeout):
    """Processes the next message coming from the workers."""
    msg = self.worker_manager.receive(timeout)

    if isinstance(msg, Acknowledgement):
        # The worker picked the task up: record who is running it.
        self.task_manager.task_start(msg.task, msg.worker)
    elif isinstance(msg, Result):
        # The worker finished: deliver the result.
        self.task_manager.task_done(msg.task, msg.result)
[ "def", "process_next_message", "(", "self", ",", "timeout", ")", ":", "message", "=", "self", ".", "worker_manager", ".", "receive", "(", "timeout", ")", "if", "isinstance", "(", "message", ",", "Acknowledgement", ")", ":", "self", ".", "task_manager", ".", "task_start", "(", "message", ".", "task", ",", "message", ".", "worker", ")", "elif", "isinstance", "(", "message", ",", "Result", ")", ":", "self", ".", "task_manager", ".", "task_done", "(", "message", ".", "task", ",", "message", ".", "result", ")" ]
Processes the next message coming from the workers.
[ "Processes", "the", "next", "message", "coming", "from", "the", "workers", "." ]
d8f3d989655715754f0a65d7419cfa584491f614
https://github.com/noxdafox/pebble/blob/d8f3d989655715754f0a65d7419cfa584491f614/pebble/pool/process.py#L199-L206
13,613
noxdafox/pebble
pebble/pool/process.py
PoolManager.update_tasks
def update_tasks(self): """Handles timing out Tasks.""" for task in self.task_manager.timeout_tasks(): self.task_manager.task_done( task.id, TimeoutError("Task timeout", task.timeout)) self.worker_manager.stop_worker(task.worker_id) for task in self.task_manager.cancelled_tasks(): self.task_manager.task_done( task.id, CancelledError()) self.worker_manager.stop_worker(task.worker_id)
python
def update_tasks(self):
    """Handles timing out and cancelled Tasks."""
    tasks = self.task_manager
    workers = self.worker_manager

    for task in tasks.timeout_tasks():
        tasks.task_done(task.id, TimeoutError("Task timeout", task.timeout))
        workers.stop_worker(task.worker_id)

    for task in tasks.cancelled_tasks():
        tasks.task_done(task.id, CancelledError())
        workers.stop_worker(task.worker_id)
[ "def", "update_tasks", "(", "self", ")", ":", "for", "task", "in", "self", ".", "task_manager", ".", "timeout_tasks", "(", ")", ":", "self", ".", "task_manager", ".", "task_done", "(", "task", ".", "id", ",", "TimeoutError", "(", "\"Task timeout\"", ",", "task", ".", "timeout", ")", ")", "self", ".", "worker_manager", ".", "stop_worker", "(", "task", ".", "worker_id", ")", "for", "task", "in", "self", ".", "task_manager", ".", "cancelled_tasks", "(", ")", ":", "self", ".", "task_manager", ".", "task_done", "(", "task", ".", "id", ",", "CancelledError", "(", ")", ")", "self", ".", "worker_manager", ".", "stop_worker", "(", "task", ".", "worker_id", ")" ]
Handles timing out Tasks.
[ "Handles", "timing", "out", "Tasks", "." ]
d8f3d989655715754f0a65d7419cfa584491f614
https://github.com/noxdafox/pebble/blob/d8f3d989655715754f0a65d7419cfa584491f614/pebble/pool/process.py#L212-L222
13,614
noxdafox/pebble
pebble/pool/process.py
PoolManager.update_workers
def update_workers(self): """Handles unexpected processes termination.""" for expiration in self.worker_manager.inspect_workers(): self.handle_worker_expiration(expiration) self.worker_manager.create_workers()
python
def update_workers(self):
    """Handles unexpected processes termination."""
    # Reap workers which died unexpectedly...
    for expiration in self.worker_manager.inspect_workers():
        self.handle_worker_expiration(expiration)

    # ...then replenish the pool back to its configured size.
    self.worker_manager.create_workers()
[ "def", "update_workers", "(", "self", ")", ":", "for", "expiration", "in", "self", ".", "worker_manager", ".", "inspect_workers", "(", ")", ":", "self", ".", "handle_worker_expiration", "(", "expiration", ")", "self", ".", "worker_manager", ".", "create_workers", "(", ")" ]
Handles unexpected processes termination.
[ "Handles", "unexpected", "processes", "termination", "." ]
d8f3d989655715754f0a65d7419cfa584491f614
https://github.com/noxdafox/pebble/blob/d8f3d989655715754f0a65d7419cfa584491f614/pebble/pool/process.py#L224-L229
13,615
noxdafox/pebble
pebble/pool/process.py
TaskManager.task_done
def task_done(self, task_id, result): """Set the tasks result and run the callback.""" try: task = self.tasks.pop(task_id) except KeyError: return # result of previously timeout Task else: if task.future.cancelled(): task.set_running_or_notify_cancel() elif isinstance(result, BaseException): task.future.set_exception(result) else: task.future.set_result(result) self.task_done_callback()
python
def task_done(self, task_id, result):
    """Set the task's result and run the completion callback.

    Results arriving for unknown IDs (tasks which previously timed out)
    are silently ignored.
    """
    try:
        task = self.tasks.pop(task_id)
    except KeyError:
        return  # result of a previously timed-out Task
    else:
        if task.future.cancelled():
            # BUG FIX: the original called task.set_running_or_notify_cancel(),
            # but that method lives on the future, not on the Task object.
            task.future.set_running_or_notify_cancel()
        elif isinstance(result, BaseException):
            task.future.set_exception(result)
        else:
            task.future.set_result(result)

    self.task_done_callback()
[ "def", "task_done", "(", "self", ",", "task_id", ",", "result", ")", ":", "try", ":", "task", "=", "self", ".", "tasks", ".", "pop", "(", "task_id", ")", "except", "KeyError", ":", "return", "# result of previously timeout Task", "else", ":", "if", "task", ".", "future", ".", "cancelled", "(", ")", ":", "task", ".", "set_running_or_notify_cancel", "(", ")", "elif", "isinstance", "(", "result", ",", "BaseException", ")", ":", "task", ".", "future", ".", "set_exception", "(", "result", ")", "else", ":", "task", ".", "future", ".", "set_result", "(", "result", ")", "self", ".", "task_done_callback", "(", ")" ]
Set the tasks result and run the callback.
[ "Set", "the", "tasks", "result", "and", "run", "the", "callback", "." ]
d8f3d989655715754f0a65d7419cfa584491f614
https://github.com/noxdafox/pebble/blob/d8f3d989655715754f0a65d7419cfa584491f614/pebble/pool/process.py#L271-L285
13,616
noxdafox/pebble
pebble/pool/process.py
WorkerManager.inspect_workers
def inspect_workers(self): """Updates the workers status. Returns the workers which have unexpectedly ended. """ workers = tuple(self.workers.values()) expired = tuple(w for w in workers if not w.is_alive()) for worker in expired: self.workers.pop(worker.pid) return ((w.pid, w.exitcode) for w in expired if w.exitcode != 0)
python
def inspect_workers(self):
    """Updates the workers status.

    Returns an iterator of (pid, exitcode) pairs for the workers which
    ended unexpectedly (non-zero exit code).
    """
    dead = [w for w in tuple(self.workers.values()) if not w.is_alive()]

    for worker in dead:
        del self.workers[worker.pid]

    return ((w.pid, w.exitcode) for w in dead if w.exitcode != 0)
[ "def", "inspect_workers", "(", "self", ")", ":", "workers", "=", "tuple", "(", "self", ".", "workers", ".", "values", "(", ")", ")", "expired", "=", "tuple", "(", "w", "for", "w", "in", "workers", "if", "not", "w", ".", "is_alive", "(", ")", ")", "for", "worker", "in", "expired", ":", "self", ".", "workers", ".", "pop", "(", "worker", ".", "pid", ")", "return", "(", "(", "w", ".", "pid", ",", "w", ".", "exitcode", ")", "for", "w", "in", "expired", "if", "w", ".", "exitcode", "!=", "0", ")" ]
Updates the workers status. Returns the workers which have unexpectedly ended.
[ "Updates", "the", "workers", "status", "." ]
d8f3d989655715754f0a65d7419cfa584491f614
https://github.com/noxdafox/pebble/blob/d8f3d989655715754f0a65d7419cfa584491f614/pebble/pool/process.py#L328-L340
13,617
noxdafox/pebble
pebble/pool/base_pool.py
iter_chunks
def iter_chunks(chunksize, *iterables): """Iterates over zipped iterables in chunks.""" iterables = iter(zip(*iterables)) while 1: chunk = tuple(islice(iterables, chunksize)) if not chunk: return yield chunk
python
def iter_chunks(chunksize, *iterables):
    """Iterates over zipped iterables in chunks."""
    zipped = iter(zip(*iterables))

    while True:
        chunk = tuple(islice(zipped, chunksize))
        if not chunk:
            return
        yield chunk
[ "def", "iter_chunks", "(", "chunksize", ",", "*", "iterables", ")", ":", "iterables", "=", "iter", "(", "zip", "(", "*", "iterables", ")", ")", "while", "1", ":", "chunk", "=", "tuple", "(", "islice", "(", "iterables", ",", "chunksize", ")", ")", "if", "not", "chunk", ":", "return", "yield", "chunk" ]
Iterates over zipped iterables in chunks.
[ "Iterates", "over", "zipped", "iterables", "in", "chunks", "." ]
d8f3d989655715754f0a65d7419cfa584491f614
https://github.com/noxdafox/pebble/blob/d8f3d989655715754f0a65d7419cfa584491f614/pebble/pool/base_pool.py#L218-L228
13,618
noxdafox/pebble
pebble/pool/base_pool.py
run_initializer
def run_initializer(initializer, initargs): """Runs the Pool initializer dealing with errors.""" try: initializer(*initargs) return True except Exception as error: logging.exception(error) return False
python
def run_initializer(initializer, initargs):
    """Runs the Pool initializer dealing with errors.

    Returns True on success, False (after logging the exception)
    on any failure.
    """
    try:
        initializer(*initargs)
    except Exception as error:
        logging.exception(error)
        return False
    else:
        return True
[ "def", "run_initializer", "(", "initializer", ",", "initargs", ")", ":", "try", ":", "initializer", "(", "*", "initargs", ")", "return", "True", "except", "Exception", "as", "error", ":", "logging", ".", "exception", "(", "error", ")", "return", "False" ]
Runs the Pool initializer dealing with errors.
[ "Runs", "the", "Pool", "initializer", "dealing", "with", "errors", "." ]
d8f3d989655715754f0a65d7419cfa584491f614
https://github.com/noxdafox/pebble/blob/d8f3d989655715754f0a65d7419cfa584491f614/pebble/pool/base_pool.py#L239-L246
13,619
noxdafox/pebble
pebble/pool/base_pool.py
BasePool.join
def join(self, timeout=None): """Joins the pool waiting until all workers exited. If *timeout* is set, it block until all workers are done or raises TimeoutError. """ if self._context.state == RUNNING: raise RuntimeError('The Pool is still running') if self._context.state == CLOSED: self._wait_queue_depletion(timeout) self.stop() self.join() else: self._context.task_queue.put(None) self._stop_pool()
python
def join(self, timeout=None):
    """Joins the pool waiting until all workers exited.

    If *timeout* is set, it blocks until all workers are done
    or raises TimeoutError.
    """
    if self._context.state == RUNNING:
        raise RuntimeError('The Pool is still running')

    if self._context.state != CLOSED:
        self._context.task_queue.put(None)
        self._stop_pool()
    else:
        # Closed pool: let queued tasks drain, then stop and re-join.
        self._wait_queue_depletion(timeout)
        self.stop()
        self.join()
[ "def", "join", "(", "self", ",", "timeout", "=", "None", ")", ":", "if", "self", ".", "_context", ".", "state", "==", "RUNNING", ":", "raise", "RuntimeError", "(", "'The Pool is still running'", ")", "if", "self", ".", "_context", ".", "state", "==", "CLOSED", ":", "self", ".", "_wait_queue_depletion", "(", "timeout", ")", "self", ".", "stop", "(", ")", "self", ".", "join", "(", ")", "else", ":", "self", ".", "_context", ".", "task_queue", ".", "put", "(", "None", ")", "self", ".", "_stop_pool", "(", ")" ]
Joins the pool waiting until all workers exited. If *timeout* is set, it block until all workers are done or raises TimeoutError.
[ "Joins", "the", "pool", "waiting", "until", "all", "workers", "exited", "." ]
d8f3d989655715754f0a65d7419cfa584491f614
https://github.com/noxdafox/pebble/blob/d8f3d989655715754f0a65d7419cfa584491f614/pebble/pool/base_pool.py#L63-L77
13,620
noxdafox/pebble
pebble/concurrent/thread.py
thread
def thread(function): """Runs the decorated function within a concurrent thread, taking care of the result and error management. Decorated functions will return a concurrent.futures.Future object once called. """ @wraps(function) def wrapper(*args, **kwargs): future = Future() launch_thread(_function_handler, function, args, kwargs, future) return future return wrapper
python
def thread(function): """Runs the decorated function within a concurrent thread, taking care of the result and error management. Decorated functions will return a concurrent.futures.Future object once called. """ @wraps(function) def wrapper(*args, **kwargs): future = Future() launch_thread(_function_handler, function, args, kwargs, future) return future return wrapper
[ "def", "thread", "(", "function", ")", ":", "@", "wraps", "(", "function", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "future", "=", "Future", "(", ")", "launch_thread", "(", "_function_handler", ",", "function", ",", "args", ",", "kwargs", ",", "future", ")", "return", "future", "return", "wrapper" ]
Runs the decorated function within a concurrent thread, taking care of the result and error management. Decorated functions will return a concurrent.futures.Future object once called.
[ "Runs", "the", "decorated", "function", "within", "a", "concurrent", "thread", "taking", "care", "of", "the", "result", "and", "error", "management", "." ]
d8f3d989655715754f0a65d7419cfa584491f614
https://github.com/noxdafox/pebble/blob/d8f3d989655715754f0a65d7419cfa584491f614/pebble/concurrent/thread.py#L24-L40
13,621
noxdafox/pebble
pebble/concurrent/thread.py
_function_handler
def _function_handler(function, args, kwargs, future): """Runs the actual function in separate thread and returns its result.""" future.set_running_or_notify_cancel() try: result = function(*args, **kwargs) except BaseException as error: error.traceback = format_exc() future.set_exception(error) else: future.set_result(result)
python
def _function_handler(function, args, kwargs, future): """Runs the actual function in separate thread and returns its result.""" future.set_running_or_notify_cancel() try: result = function(*args, **kwargs) except BaseException as error: error.traceback = format_exc() future.set_exception(error) else: future.set_result(result)
[ "def", "_function_handler", "(", "function", ",", "args", ",", "kwargs", ",", "future", ")", ":", "future", ".", "set_running_or_notify_cancel", "(", ")", "try", ":", "result", "=", "function", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "BaseException", "as", "error", ":", "error", ".", "traceback", "=", "format_exc", "(", ")", "future", ".", "set_exception", "(", "error", ")", "else", ":", "future", ".", "set_result", "(", "result", ")" ]
Runs the actual function in separate thread and returns its result.
[ "Runs", "the", "actual", "function", "in", "separate", "thread", "and", "returns", "its", "result", "." ]
d8f3d989655715754f0a65d7419cfa584491f614
https://github.com/noxdafox/pebble/blob/d8f3d989655715754f0a65d7419cfa584491f614/pebble/concurrent/thread.py#L43-L53
13,622
SwoopSearch/pyaddress
address/address.py
create_cities_csv
def create_cities_csv(filename="places2k.txt", output="cities.csv"): """ Takes the places2k.txt from USPS and creates a simple file of all cities. """ with open(filename, 'r') as city_file: with open(output, 'w') as out: for line in city_file: # Drop Puerto Rico (just looking for the 50 states) if line[0:2] == "PR": continue # Per census.gov, characters 9-72 are the name of the city or place. Cut ,off the last part, which is city, town, etc. # print " ".join(line[9:72].split()[:-1]) out.write(" ".join(line[9:72].split()[:-1]) + '\n')
python
def create_cities_csv(filename="places2k.txt", output="cities.csv"): """ Takes the places2k.txt from USPS and creates a simple file of all cities. """ with open(filename, 'r') as city_file: with open(output, 'w') as out: for line in city_file: # Drop Puerto Rico (just looking for the 50 states) if line[0:2] == "PR": continue # Per census.gov, characters 9-72 are the name of the city or place. Cut ,off the last part, which is city, town, etc. # print " ".join(line[9:72].split()[:-1]) out.write(" ".join(line[9:72].split()[:-1]) + '\n')
[ "def", "create_cities_csv", "(", "filename", "=", "\"places2k.txt\"", ",", "output", "=", "\"cities.csv\"", ")", ":", "with", "open", "(", "filename", ",", "'r'", ")", "as", "city_file", ":", "with", "open", "(", "output", ",", "'w'", ")", "as", "out", ":", "for", "line", "in", "city_file", ":", "# Drop Puerto Rico (just looking for the 50 states)", "if", "line", "[", "0", ":", "2", "]", "==", "\"PR\"", ":", "continue", "# Per census.gov, characters 9-72 are the name of the city or place. Cut ,off the last part, which is city, town, etc.", "# print \" \".join(line[9:72].split()[:-1])", "out", ".", "write", "(", "\" \"", ".", "join", "(", "line", "[", "9", ":", "72", "]", ".", "split", "(", ")", "[", ":", "-", "1", "]", ")", "+", "'\\n'", ")" ]
Takes the places2k.txt from USPS and creates a simple file of all cities.
[ "Takes", "the", "places2k", ".", "txt", "from", "USPS", "and", "creates", "a", "simple", "file", "of", "all", "cities", "." ]
62ebb07a6840e710d256406a8ec1d06abec0e1c4
https://github.com/SwoopSearch/pyaddress/blob/62ebb07a6840e710d256406a8ec1d06abec0e1c4/address/address.py#L674-L686
13,623
SwoopSearch/pyaddress
address/address.py
AddressParser.parse_address
def parse_address(self, address, line_number=-1): """ Return an Address object from the given address. Passes itself to the Address constructor to use all the custom loaded suffixes, cities, etc. """ return Address(address, self, line_number, self.logger)
python
def parse_address(self, address, line_number=-1): """ Return an Address object from the given address. Passes itself to the Address constructor to use all the custom loaded suffixes, cities, etc. """ return Address(address, self, line_number, self.logger)
[ "def", "parse_address", "(", "self", ",", "address", ",", "line_number", "=", "-", "1", ")", ":", "return", "Address", "(", "address", ",", "self", ",", "line_number", ",", "self", ".", "logger", ")" ]
Return an Address object from the given address. Passes itself to the Address constructor to use all the custom loaded suffixes, cities, etc.
[ "Return", "an", "Address", "object", "from", "the", "given", "address", ".", "Passes", "itself", "to", "the", "Address", "constructor", "to", "use", "all", "the", "custom", "loaded", "suffixes", "cities", "etc", "." ]
62ebb07a6840e710d256406a8ec1d06abec0e1c4
https://github.com/SwoopSearch/pyaddress/blob/62ebb07a6840e710d256406a8ec1d06abec0e1c4/address/address.py#L83-L88
13,624
SwoopSearch/pyaddress
address/address.py
AddressParser.load_cities
def load_cities(self, filename): """ Load up all cities in lowercase for easier matching. The file should have one city per line, with no extra characters. This isn't strictly required, but will vastly increase the accuracy. """ with open(filename, 'r') as f: for line in f: self.cities.append(line.strip().lower())
python
def load_cities(self, filename): """ Load up all cities in lowercase for easier matching. The file should have one city per line, with no extra characters. This isn't strictly required, but will vastly increase the accuracy. """ with open(filename, 'r') as f: for line in f: self.cities.append(line.strip().lower())
[ "def", "load_cities", "(", "self", ",", "filename", ")", ":", "with", "open", "(", "filename", ",", "'r'", ")", "as", "f", ":", "for", "line", "in", "f", ":", "self", ".", "cities", ".", "append", "(", "line", ".", "strip", "(", ")", ".", "lower", "(", ")", ")" ]
Load up all cities in lowercase for easier matching. The file should have one city per line, with no extra characters. This isn't strictly required, but will vastly increase the accuracy.
[ "Load", "up", "all", "cities", "in", "lowercase", "for", "easier", "matching", ".", "The", "file", "should", "have", "one", "city", "per", "line", "with", "no", "extra", "characters", ".", "This", "isn", "t", "strictly", "required", "but", "will", "vastly", "increase", "the", "accuracy", "." ]
62ebb07a6840e710d256406a8ec1d06abec0e1c4
https://github.com/SwoopSearch/pyaddress/blob/62ebb07a6840e710d256406a8ec1d06abec0e1c4/address/address.py#L128-L135
13,625
SwoopSearch/pyaddress
address/address.py
AddressParser.load_streets
def load_streets(self, filename): """ Load up all streets in lowercase for easier matching. The file should have one street per line, with no extra characters. This isn't strictly required, but will vastly increase the accuracy. """ with open(filename, 'r') as f: for line in f: self.streets.append(line.strip().lower())
python
def load_streets(self, filename): """ Load up all streets in lowercase for easier matching. The file should have one street per line, with no extra characters. This isn't strictly required, but will vastly increase the accuracy. """ with open(filename, 'r') as f: for line in f: self.streets.append(line.strip().lower())
[ "def", "load_streets", "(", "self", ",", "filename", ")", ":", "with", "open", "(", "filename", ",", "'r'", ")", "as", "f", ":", "for", "line", "in", "f", ":", "self", ".", "streets", ".", "append", "(", "line", ".", "strip", "(", ")", ".", "lower", "(", ")", ")" ]
Load up all streets in lowercase for easier matching. The file should have one street per line, with no extra characters. This isn't strictly required, but will vastly increase the accuracy.
[ "Load", "up", "all", "streets", "in", "lowercase", "for", "easier", "matching", ".", "The", "file", "should", "have", "one", "street", "per", "line", "with", "no", "extra", "characters", ".", "This", "isn", "t", "strictly", "required", "but", "will", "vastly", "increase", "the", "accuracy", "." ]
62ebb07a6840e710d256406a8ec1d06abec0e1c4
https://github.com/SwoopSearch/pyaddress/blob/62ebb07a6840e710d256406a8ec1d06abec0e1c4/address/address.py#L137-L144
13,626
SwoopSearch/pyaddress
address/address.py
Address.preprocess_address
def preprocess_address(self, address): """ Takes a basic address and attempts to clean it up, extract reasonably assured bits that may throw off the rest of the parsing, and return the cleaned address. """ # Run some basic cleaning address = address.replace("# ", "#") address = address.replace(" & ", "&") # Clear the address of things like 'X units', which shouldn't be in an address anyway. We won't save this for now. if re.search(r"-?-?\w+ units", address, re.IGNORECASE): address = re.sub(r"-?-?\w+ units", "", address, flags=re.IGNORECASE) # Sometimes buildings are put in parantheses. # building_match = re.search(r"\(.*\)", address, re.IGNORECASE) # if building_match: # self.building = self._clean(building_match.group().replace('(', '').replace(')', '')) # address = re.sub(r"\(.*\)", "", address, flags=re.IGNORECASE) # Now let's get the apartment stuff out of the way. Using only sure match regexes, delete apartment parts from # the address. This prevents things like "Unit" being the street name. apartment_regexes = [r'#\w+ & \w+', '#\w+ rm \w+', "#\w+-\w", r'apt #{0,1}\w+', r'apartment #{0,1}\w+', r'#\w+', r'# \w+', r'rm \w+', r'unit #?\w+', r'units #?\w+', r'- #{0,1}\w+', r'no\s?\d+\w*', r'style\s\w{1,2}', r'townhouse style\s\w{1,2}'] for regex in apartment_regexes: apartment_match = re.search(regex, address, re.IGNORECASE) if apartment_match: # print "Matched regex: ", regex, apartment_match.group() self.apartment = self._clean(apartment_match.group()) address = re.sub(regex, "", address, flags=re.IGNORECASE) # Now check for things like ", ," which throw off dstk address = re.sub(r"\,\s*\,", ",", address) return address
python
def preprocess_address(self, address): """ Takes a basic address and attempts to clean it up, extract reasonably assured bits that may throw off the rest of the parsing, and return the cleaned address. """ # Run some basic cleaning address = address.replace("# ", "#") address = address.replace(" & ", "&") # Clear the address of things like 'X units', which shouldn't be in an address anyway. We won't save this for now. if re.search(r"-?-?\w+ units", address, re.IGNORECASE): address = re.sub(r"-?-?\w+ units", "", address, flags=re.IGNORECASE) # Sometimes buildings are put in parantheses. # building_match = re.search(r"\(.*\)", address, re.IGNORECASE) # if building_match: # self.building = self._clean(building_match.group().replace('(', '').replace(')', '')) # address = re.sub(r"\(.*\)", "", address, flags=re.IGNORECASE) # Now let's get the apartment stuff out of the way. Using only sure match regexes, delete apartment parts from # the address. This prevents things like "Unit" being the street name. apartment_regexes = [r'#\w+ & \w+', '#\w+ rm \w+', "#\w+-\w", r'apt #{0,1}\w+', r'apartment #{0,1}\w+', r'#\w+', r'# \w+', r'rm \w+', r'unit #?\w+', r'units #?\w+', r'- #{0,1}\w+', r'no\s?\d+\w*', r'style\s\w{1,2}', r'townhouse style\s\w{1,2}'] for regex in apartment_regexes: apartment_match = re.search(regex, address, re.IGNORECASE) if apartment_match: # print "Matched regex: ", regex, apartment_match.group() self.apartment = self._clean(apartment_match.group()) address = re.sub(regex, "", address, flags=re.IGNORECASE) # Now check for things like ", ," which throw off dstk address = re.sub(r"\,\s*\,", ",", address) return address
[ "def", "preprocess_address", "(", "self", ",", "address", ")", ":", "# Run some basic cleaning", "address", "=", "address", ".", "replace", "(", "\"# \"", ",", "\"#\"", ")", "address", "=", "address", ".", "replace", "(", "\" & \"", ",", "\"&\"", ")", "# Clear the address of things like 'X units', which shouldn't be in an address anyway. We won't save this for now.", "if", "re", ".", "search", "(", "r\"-?-?\\w+ units\"", ",", "address", ",", "re", ".", "IGNORECASE", ")", ":", "address", "=", "re", ".", "sub", "(", "r\"-?-?\\w+ units\"", ",", "\"\"", ",", "address", ",", "flags", "=", "re", ".", "IGNORECASE", ")", "# Sometimes buildings are put in parantheses.", "# building_match = re.search(r\"\\(.*\\)\", address, re.IGNORECASE)", "# if building_match:", "# self.building = self._clean(building_match.group().replace('(', '').replace(')', ''))", "# address = re.sub(r\"\\(.*\\)\", \"\", address, flags=re.IGNORECASE)", "# Now let's get the apartment stuff out of the way. Using only sure match regexes, delete apartment parts from", "# the address. 
This prevents things like \"Unit\" being the street name.", "apartment_regexes", "=", "[", "r'#\\w+ & \\w+'", ",", "'#\\w+ rm \\w+'", ",", "\"#\\w+-\\w\"", ",", "r'apt #{0,1}\\w+'", ",", "r'apartment #{0,1}\\w+'", ",", "r'#\\w+'", ",", "r'# \\w+'", ",", "r'rm \\w+'", ",", "r'unit #?\\w+'", ",", "r'units #?\\w+'", ",", "r'- #{0,1}\\w+'", ",", "r'no\\s?\\d+\\w*'", ",", "r'style\\s\\w{1,2}'", ",", "r'townhouse style\\s\\w{1,2}'", "]", "for", "regex", "in", "apartment_regexes", ":", "apartment_match", "=", "re", ".", "search", "(", "regex", ",", "address", ",", "re", ".", "IGNORECASE", ")", "if", "apartment_match", ":", "# print \"Matched regex: \", regex, apartment_match.group()", "self", ".", "apartment", "=", "self", ".", "_clean", "(", "apartment_match", ".", "group", "(", ")", ")", "address", "=", "re", ".", "sub", "(", "regex", ",", "\"\"", ",", "address", ",", "flags", "=", "re", ".", "IGNORECASE", ")", "# Now check for things like \", ,\" which throw off dstk", "address", "=", "re", ".", "sub", "(", "r\"\\,\\s*\\,\"", ",", "\",\"", ",", "address", ")", "return", "address" ]
Takes a basic address and attempts to clean it up, extract reasonably assured bits that may throw off the rest of the parsing, and return the cleaned address.
[ "Takes", "a", "basic", "address", "and", "attempts", "to", "clean", "it", "up", "extract", "reasonably", "assured", "bits", "that", "may", "throw", "off", "the", "rest", "of", "the", "parsing", "and", "return", "the", "cleaned", "address", "." ]
62ebb07a6840e710d256406a8ec1d06abec0e1c4
https://github.com/SwoopSearch/pyaddress/blob/62ebb07a6840e710d256406a8ec1d06abec0e1c4/address/address.py#L250-L279
13,627
SwoopSearch/pyaddress
address/address.py
Address.check_state
def check_state(self, token): """ Check if state is in either the keys or values of our states list. Must come before the suffix. """ # print "zip", self.zip if len(token) == 2 and self.state is None: if token.capitalize() in self.parser.states.keys(): self.state = self._clean(self.parser.states[token.capitalize()]) return True elif token.upper() in self.parser.states.values(): self.state = self._clean(token.upper()) return True if self.state is None and self.street_suffix is None and len(self.comma_separated_address) > 1: if token.capitalize() in self.parser.states.keys(): self.state = self._clean(self.parser.states[token.capitalize()]) return True elif token.upper() in self.parser.states.values(): self.state = self._clean(token.upper()) return True return False
python
def check_state(self, token): """ Check if state is in either the keys or values of our states list. Must come before the suffix. """ # print "zip", self.zip if len(token) == 2 and self.state is None: if token.capitalize() in self.parser.states.keys(): self.state = self._clean(self.parser.states[token.capitalize()]) return True elif token.upper() in self.parser.states.values(): self.state = self._clean(token.upper()) return True if self.state is None and self.street_suffix is None and len(self.comma_separated_address) > 1: if token.capitalize() in self.parser.states.keys(): self.state = self._clean(self.parser.states[token.capitalize()]) return True elif token.upper() in self.parser.states.values(): self.state = self._clean(token.upper()) return True return False
[ "def", "check_state", "(", "self", ",", "token", ")", ":", "# print \"zip\", self.zip", "if", "len", "(", "token", ")", "==", "2", "and", "self", ".", "state", "is", "None", ":", "if", "token", ".", "capitalize", "(", ")", "in", "self", ".", "parser", ".", "states", ".", "keys", "(", ")", ":", "self", ".", "state", "=", "self", ".", "_clean", "(", "self", ".", "parser", ".", "states", "[", "token", ".", "capitalize", "(", ")", "]", ")", "return", "True", "elif", "token", ".", "upper", "(", ")", "in", "self", ".", "parser", ".", "states", ".", "values", "(", ")", ":", "self", ".", "state", "=", "self", ".", "_clean", "(", "token", ".", "upper", "(", ")", ")", "return", "True", "if", "self", ".", "state", "is", "None", "and", "self", ".", "street_suffix", "is", "None", "and", "len", "(", "self", ".", "comma_separated_address", ")", ">", "1", ":", "if", "token", ".", "capitalize", "(", ")", "in", "self", ".", "parser", ".", "states", ".", "keys", "(", ")", ":", "self", ".", "state", "=", "self", ".", "_clean", "(", "self", ".", "parser", ".", "states", "[", "token", ".", "capitalize", "(", ")", "]", ")", "return", "True", "elif", "token", ".", "upper", "(", ")", "in", "self", ".", "parser", ".", "states", ".", "values", "(", ")", ":", "self", ".", "state", "=", "self", ".", "_clean", "(", "token", ".", "upper", "(", ")", ")", "return", "True", "return", "False" ]
Check if state is in either the keys or values of our states list. Must come before the suffix.
[ "Check", "if", "state", "is", "in", "either", "the", "keys", "or", "values", "of", "our", "states", "list", ".", "Must", "come", "before", "the", "suffix", "." ]
62ebb07a6840e710d256406a8ec1d06abec0e1c4
https://github.com/SwoopSearch/pyaddress/blob/62ebb07a6840e710d256406a8ec1d06abec0e1c4/address/address.py#L297-L316
13,628
SwoopSearch/pyaddress
address/address.py
Address.check_city
def check_city(self, token): """ Check if there is a known city from our city list. Must come before the suffix. """ shortened_cities = {'saint': 'st.'} if self.city is None and self.state is not None and self.street_suffix is None: if token.lower() in self.parser.cities: self.city = self._clean(token.capitalize()) return True return False # Check that we're in the correct location, and that we have at least one comma in the address if self.city is None and self.apartment is None and self.street_suffix is None and len( self.comma_separated_address) > 1: if token.lower() in self.parser.cities: self.city = self._clean(token.capitalize()) return True return False # Multi word cities if self.city is not None and self.street_suffix is None and self.street is None: print "Checking for multi part city", token.lower(), token.lower() in shortened_cities.keys() if token.lower() + ' ' + self.city in self.parser.cities: self.city = self._clean((token.lower() + ' ' + self.city).capitalize()) return True if token.lower() in shortened_cities.keys(): token = shortened_cities[token.lower()] print "Checking for shorted multi part city", token.lower() + ' ' + self.city if token.lower() + ' ' + self.city.lower() in self.parser.cities: self.city = self._clean(token.capitalize() + ' ' + self.city.capitalize()) return True
python
def check_city(self, token): """ Check if there is a known city from our city list. Must come before the suffix. """ shortened_cities = {'saint': 'st.'} if self.city is None and self.state is not None and self.street_suffix is None: if token.lower() in self.parser.cities: self.city = self._clean(token.capitalize()) return True return False # Check that we're in the correct location, and that we have at least one comma in the address if self.city is None and self.apartment is None and self.street_suffix is None and len( self.comma_separated_address) > 1: if token.lower() in self.parser.cities: self.city = self._clean(token.capitalize()) return True return False # Multi word cities if self.city is not None and self.street_suffix is None and self.street is None: print "Checking for multi part city", token.lower(), token.lower() in shortened_cities.keys() if token.lower() + ' ' + self.city in self.parser.cities: self.city = self._clean((token.lower() + ' ' + self.city).capitalize()) return True if token.lower() in shortened_cities.keys(): token = shortened_cities[token.lower()] print "Checking for shorted multi part city", token.lower() + ' ' + self.city if token.lower() + ' ' + self.city.lower() in self.parser.cities: self.city = self._clean(token.capitalize() + ' ' + self.city.capitalize()) return True
[ "def", "check_city", "(", "self", ",", "token", ")", ":", "shortened_cities", "=", "{", "'saint'", ":", "'st.'", "}", "if", "self", ".", "city", "is", "None", "and", "self", ".", "state", "is", "not", "None", "and", "self", ".", "street_suffix", "is", "None", ":", "if", "token", ".", "lower", "(", ")", "in", "self", ".", "parser", ".", "cities", ":", "self", ".", "city", "=", "self", ".", "_clean", "(", "token", ".", "capitalize", "(", ")", ")", "return", "True", "return", "False", "# Check that we're in the correct location, and that we have at least one comma in the address", "if", "self", ".", "city", "is", "None", "and", "self", ".", "apartment", "is", "None", "and", "self", ".", "street_suffix", "is", "None", "and", "len", "(", "self", ".", "comma_separated_address", ")", ">", "1", ":", "if", "token", ".", "lower", "(", ")", "in", "self", ".", "parser", ".", "cities", ":", "self", ".", "city", "=", "self", ".", "_clean", "(", "token", ".", "capitalize", "(", ")", ")", "return", "True", "return", "False", "# Multi word cities", "if", "self", ".", "city", "is", "not", "None", "and", "self", ".", "street_suffix", "is", "None", "and", "self", ".", "street", "is", "None", ":", "print", "\"Checking for multi part city\"", ",", "token", ".", "lower", "(", ")", ",", "token", ".", "lower", "(", ")", "in", "shortened_cities", ".", "keys", "(", ")", "if", "token", ".", "lower", "(", ")", "+", "' '", "+", "self", ".", "city", "in", "self", ".", "parser", ".", "cities", ":", "self", ".", "city", "=", "self", ".", "_clean", "(", "(", "token", ".", "lower", "(", ")", "+", "' '", "+", "self", ".", "city", ")", ".", "capitalize", "(", ")", ")", "return", "True", "if", "token", ".", "lower", "(", ")", "in", "shortened_cities", ".", "keys", "(", ")", ":", "token", "=", "shortened_cities", "[", "token", ".", "lower", "(", ")", "]", "print", "\"Checking for shorted multi part city\"", ",", "token", ".", "lower", "(", ")", "+", "' '", "+", "self", ".", "city", "if", "token", ".", 
"lower", "(", ")", "+", "' '", "+", "self", ".", "city", ".", "lower", "(", ")", "in", "self", ".", "parser", ".", "cities", ":", "self", ".", "city", "=", "self", ".", "_clean", "(", "token", ".", "capitalize", "(", ")", "+", "' '", "+", "self", ".", "city", ".", "capitalize", "(", ")", ")", "return", "True" ]
Check if there is a known city from our city list. Must come before the suffix.
[ "Check", "if", "there", "is", "a", "known", "city", "from", "our", "city", "list", ".", "Must", "come", "before", "the", "suffix", "." ]
62ebb07a6840e710d256406a8ec1d06abec0e1c4
https://github.com/SwoopSearch/pyaddress/blob/62ebb07a6840e710d256406a8ec1d06abec0e1c4/address/address.py#L318-L346
13,629
SwoopSearch/pyaddress
address/address.py
Address.check_street_suffix
def check_street_suffix(self, token): """ Attempts to match a street suffix. If found, it will return the abbreviation, with the first letter capitalized and a period after it. E.g. "St." or "Ave." """ # Suffix must come before street # print "Suffix check", token, "suffix", self.street_suffix, "street", self.street if self.street_suffix is None and self.street is None: # print "upper", token.upper() if token.upper() in self.parser.suffixes.keys(): suffix = self.parser.suffixes[token.upper()] self.street_suffix = self._clean(suffix.capitalize() + '.') return True elif token.upper() in self.parser.suffixes.values(): self.street_suffix = self._clean(token.capitalize() + '.') return True return False
python
def check_street_suffix(self, token): """ Attempts to match a street suffix. If found, it will return the abbreviation, with the first letter capitalized and a period after it. E.g. "St." or "Ave." """ # Suffix must come before street # print "Suffix check", token, "suffix", self.street_suffix, "street", self.street if self.street_suffix is None and self.street is None: # print "upper", token.upper() if token.upper() in self.parser.suffixes.keys(): suffix = self.parser.suffixes[token.upper()] self.street_suffix = self._clean(suffix.capitalize() + '.') return True elif token.upper() in self.parser.suffixes.values(): self.street_suffix = self._clean(token.capitalize() + '.') return True return False
[ "def", "check_street_suffix", "(", "self", ",", "token", ")", ":", "# Suffix must come before street", "# print \"Suffix check\", token, \"suffix\", self.street_suffix, \"street\", self.street", "if", "self", ".", "street_suffix", "is", "None", "and", "self", ".", "street", "is", "None", ":", "# print \"upper\", token.upper()", "if", "token", ".", "upper", "(", ")", "in", "self", ".", "parser", ".", "suffixes", ".", "keys", "(", ")", ":", "suffix", "=", "self", ".", "parser", ".", "suffixes", "[", "token", ".", "upper", "(", ")", "]", "self", ".", "street_suffix", "=", "self", ".", "_clean", "(", "suffix", ".", "capitalize", "(", ")", "+", "'.'", ")", "return", "True", "elif", "token", ".", "upper", "(", ")", "in", "self", ".", "parser", ".", "suffixes", ".", "values", "(", ")", ":", "self", ".", "street_suffix", "=", "self", ".", "_clean", "(", "token", ".", "capitalize", "(", ")", "+", "'.'", ")", "return", "True", "return", "False" ]
Attempts to match a street suffix. If found, it will return the abbreviation, with the first letter capitalized and a period after it. E.g. "St." or "Ave."
[ "Attempts", "to", "match", "a", "street", "suffix", ".", "If", "found", "it", "will", "return", "the", "abbreviation", "with", "the", "first", "letter", "capitalized", "and", "a", "period", "after", "it", ".", "E", ".", "g", ".", "St", ".", "or", "Ave", "." ]
62ebb07a6840e710d256406a8ec1d06abec0e1c4
https://github.com/SwoopSearch/pyaddress/blob/62ebb07a6840e710d256406a8ec1d06abec0e1c4/address/address.py#L377-L393
13,630
SwoopSearch/pyaddress
address/address.py
Address.check_street
def check_street(self, token): """ Let's assume a street comes before a prefix and after a suffix. This isn't always the case, but we'll deal with that in our guessing game. Also, two word street names...well... This check must come after the checks for house_number and street_prefix to help us deal with multi word streets. """ # First check for single word streets between a prefix and a suffix if self.street is None and self.street_suffix is not None and self.street_prefix is None and self.house_number is None: self.street = self._clean(token.capitalize()) return True # Now check for multiple word streets. This check must come after the check for street_prefix and house_number for this reason. elif self.street is not None and self.street_suffix is not None and self.street_prefix is None and self.house_number is None: self.street = self._clean(token.capitalize() + ' ' + self.street) return True if not self.street_suffix and not self.street and token.lower() in self.parser.streets: self.street = self._clean(token) return True return False
python
def check_street(self, token): """ Let's assume a street comes before a prefix and after a suffix. This isn't always the case, but we'll deal with that in our guessing game. Also, two word street names...well... This check must come after the checks for house_number and street_prefix to help us deal with multi word streets. """ # First check for single word streets between a prefix and a suffix if self.street is None and self.street_suffix is not None and self.street_prefix is None and self.house_number is None: self.street = self._clean(token.capitalize()) return True # Now check for multiple word streets. This check must come after the check for street_prefix and house_number for this reason. elif self.street is not None and self.street_suffix is not None and self.street_prefix is None and self.house_number is None: self.street = self._clean(token.capitalize() + ' ' + self.street) return True if not self.street_suffix and not self.street and token.lower() in self.parser.streets: self.street = self._clean(token) return True return False
[ "def", "check_street", "(", "self", ",", "token", ")", ":", "# First check for single word streets between a prefix and a suffix", "if", "self", ".", "street", "is", "None", "and", "self", ".", "street_suffix", "is", "not", "None", "and", "self", ".", "street_prefix", "is", "None", "and", "self", ".", "house_number", "is", "None", ":", "self", ".", "street", "=", "self", ".", "_clean", "(", "token", ".", "capitalize", "(", ")", ")", "return", "True", "# Now check for multiple word streets. This check must come after the check for street_prefix and house_number for this reason.", "elif", "self", ".", "street", "is", "not", "None", "and", "self", ".", "street_suffix", "is", "not", "None", "and", "self", ".", "street_prefix", "is", "None", "and", "self", ".", "house_number", "is", "None", ":", "self", ".", "street", "=", "self", ".", "_clean", "(", "token", ".", "capitalize", "(", ")", "+", "' '", "+", "self", ".", "street", ")", "return", "True", "if", "not", "self", ".", "street_suffix", "and", "not", "self", ".", "street", "and", "token", ".", "lower", "(", ")", "in", "self", ".", "parser", ".", "streets", ":", "self", ".", "street", "=", "self", ".", "_clean", "(", "token", ")", "return", "True", "return", "False" ]
Let's assume a street comes before a prefix and after a suffix. This isn't always the case, but we'll deal with that in our guessing game. Also, two word street names...well... This check must come after the checks for house_number and street_prefix to help us deal with multi word streets.
[ "Let", "s", "assume", "a", "street", "comes", "before", "a", "prefix", "and", "after", "a", "suffix", ".", "This", "isn", "t", "always", "the", "case", "but", "we", "ll", "deal", "with", "that", "in", "our", "guessing", "game", ".", "Also", "two", "word", "street", "names", "...", "well", "..." ]
62ebb07a6840e710d256406a8ec1d06abec0e1c4
https://github.com/SwoopSearch/pyaddress/blob/62ebb07a6840e710d256406a8ec1d06abec0e1c4/address/address.py#L395-L413
13,631
SwoopSearch/pyaddress
address/address.py
Address.check_street_prefix
def check_street_prefix(self, token): """ Finds street prefixes, such as N. or Northwest, before a street name. Standardizes to 1 or two letters, followed by a period. """ if self.street and not self.street_prefix and token.lower().replace('.', '') in self.parser.prefixes.keys(): self.street_prefix = self._clean(self.parser.prefixes[token.lower().replace('.', '')]) return True return False
python
def check_street_prefix(self, token): """ Finds street prefixes, such as N. or Northwest, before a street name. Standardizes to 1 or two letters, followed by a period. """ if self.street and not self.street_prefix and token.lower().replace('.', '') in self.parser.prefixes.keys(): self.street_prefix = self._clean(self.parser.prefixes[token.lower().replace('.', '')]) return True return False
[ "def", "check_street_prefix", "(", "self", ",", "token", ")", ":", "if", "self", ".", "street", "and", "not", "self", ".", "street_prefix", "and", "token", ".", "lower", "(", ")", ".", "replace", "(", "'.'", ",", "''", ")", "in", "self", ".", "parser", ".", "prefixes", ".", "keys", "(", ")", ":", "self", ".", "street_prefix", "=", "self", ".", "_clean", "(", "self", ".", "parser", ".", "prefixes", "[", "token", ".", "lower", "(", ")", ".", "replace", "(", "'.'", ",", "''", ")", "]", ")", "return", "True", "return", "False" ]
Finds street prefixes, such as N. or Northwest, before a street name. Standardizes to 1 or two letters, followed by a period.
[ "Finds", "street", "prefixes", "such", "as", "N", ".", "or", "Northwest", "before", "a", "street", "name", ".", "Standardizes", "to", "1", "or", "two", "letters", "followed", "by", "a", "period", "." ]
62ebb07a6840e710d256406a8ec1d06abec0e1c4
https://github.com/SwoopSearch/pyaddress/blob/62ebb07a6840e710d256406a8ec1d06abec0e1c4/address/address.py#L415-L423
13,632
SwoopSearch/pyaddress
address/address.py
Address.check_house_number
def check_house_number(self, token): """ Attempts to find a house number, generally the first thing in an address. If anything is in front of it, we assume it is a building name. """ if self.street and self.house_number is None and re.match(street_num_regex, token.lower()): if '/' in token: token = token.split('/')[0] if '-' in token: token = token.split('-')[0] self.house_number = self._clean(str(token)) return True return False
python
def check_house_number(self, token): """ Attempts to find a house number, generally the first thing in an address. If anything is in front of it, we assume it is a building name. """ if self.street and self.house_number is None and re.match(street_num_regex, token.lower()): if '/' in token: token = token.split('/')[0] if '-' in token: token = token.split('-')[0] self.house_number = self._clean(str(token)) return True return False
[ "def", "check_house_number", "(", "self", ",", "token", ")", ":", "if", "self", ".", "street", "and", "self", ".", "house_number", "is", "None", "and", "re", ".", "match", "(", "street_num_regex", ",", "token", ".", "lower", "(", ")", ")", ":", "if", "'/'", "in", "token", ":", "token", "=", "token", ".", "split", "(", "'/'", ")", "[", "0", "]", "if", "'-'", "in", "token", ":", "token", "=", "token", ".", "split", "(", "'-'", ")", "[", "0", "]", "self", ".", "house_number", "=", "self", ".", "_clean", "(", "str", "(", "token", ")", ")", "return", "True", "return", "False" ]
Attempts to find a house number, generally the first thing in an address. If anything is in front of it, we assume it is a building name.
[ "Attempts", "to", "find", "a", "house", "number", "generally", "the", "first", "thing", "in", "an", "address", ".", "If", "anything", "is", "in", "front", "of", "it", "we", "assume", "it", "is", "a", "building", "name", "." ]
62ebb07a6840e710d256406a8ec1d06abec0e1c4
https://github.com/SwoopSearch/pyaddress/blob/62ebb07a6840e710d256406a8ec1d06abec0e1c4/address/address.py#L425-L437
13,633
SwoopSearch/pyaddress
address/address.py
Address.check_building
def check_building(self, token): """ Building name check. If we have leftover and everything else is set, probably building names. Allows for multi word building names. """ if self.street and self.house_number: if not self.building: self.building = self._clean(token) else: self.building = self._clean(token + ' ' + self.building) return True return False
python
def check_building(self, token): """ Building name check. If we have leftover and everything else is set, probably building names. Allows for multi word building names. """ if self.street and self.house_number: if not self.building: self.building = self._clean(token) else: self.building = self._clean(token + ' ' + self.building) return True return False
[ "def", "check_building", "(", "self", ",", "token", ")", ":", "if", "self", ".", "street", "and", "self", ".", "house_number", ":", "if", "not", "self", ".", "building", ":", "self", ".", "building", "=", "self", ".", "_clean", "(", "token", ")", "else", ":", "self", ".", "building", "=", "self", ".", "_clean", "(", "token", "+", "' '", "+", "self", ".", "building", ")", "return", "True", "return", "False" ]
Building name check. If we have leftover and everything else is set, probably building names. Allows for multi word building names.
[ "Building", "name", "check", ".", "If", "we", "have", "leftover", "and", "everything", "else", "is", "set", "probably", "building", "names", ".", "Allows", "for", "multi", "word", "building", "names", "." ]
62ebb07a6840e710d256406a8ec1d06abec0e1c4
https://github.com/SwoopSearch/pyaddress/blob/62ebb07a6840e710d256406a8ec1d06abec0e1c4/address/address.py#L439-L450
13,634
SwoopSearch/pyaddress
address/address.py
Address.guess_unmatched
def guess_unmatched(self, token): """ When we find something that doesn't match, we can make an educated guess and log it as such. """ # Check if this is probably an apartment: if token.lower() in ['apt', 'apartment']: return False # Stray dashes are likely useless if token.strip() == '-': return True # Almost definitely not a street if it is one or two characters long. if len(token) <= 2: return False # Let's check for a suffix-less street. if self.street_suffix is None and self.street is None and self.street_prefix is None and self.house_number is None: # Streets will just be letters if re.match(r"[A-Za-z]", token): if self.line_number >= 0: pass # print "{0}: Guessing suffix-less street: ".format(self.line_number), token else: # print "Guessing suffix-less street: ", token pass self.street = self._clean(token.capitalize()) return True return False
python
def guess_unmatched(self, token): """ When we find something that doesn't match, we can make an educated guess and log it as such. """ # Check if this is probably an apartment: if token.lower() in ['apt', 'apartment']: return False # Stray dashes are likely useless if token.strip() == '-': return True # Almost definitely not a street if it is one or two characters long. if len(token) <= 2: return False # Let's check for a suffix-less street. if self.street_suffix is None and self.street is None and self.street_prefix is None and self.house_number is None: # Streets will just be letters if re.match(r"[A-Za-z]", token): if self.line_number >= 0: pass # print "{0}: Guessing suffix-less street: ".format(self.line_number), token else: # print "Guessing suffix-less street: ", token pass self.street = self._clean(token.capitalize()) return True return False
[ "def", "guess_unmatched", "(", "self", ",", "token", ")", ":", "# Check if this is probably an apartment:", "if", "token", ".", "lower", "(", ")", "in", "[", "'apt'", ",", "'apartment'", "]", ":", "return", "False", "# Stray dashes are likely useless", "if", "token", ".", "strip", "(", ")", "==", "'-'", ":", "return", "True", "# Almost definitely not a street if it is one or two characters long.", "if", "len", "(", "token", ")", "<=", "2", ":", "return", "False", "# Let's check for a suffix-less street.", "if", "self", ".", "street_suffix", "is", "None", "and", "self", ".", "street", "is", "None", "and", "self", ".", "street_prefix", "is", "None", "and", "self", ".", "house_number", "is", "None", ":", "# Streets will just be letters", "if", "re", ".", "match", "(", "r\"[A-Za-z]\"", ",", "token", ")", ":", "if", "self", ".", "line_number", ">=", "0", ":", "pass", "# print \"{0}: Guessing suffix-less street: \".format(self.line_number), token", "else", ":", "# print \"Guessing suffix-less street: \", token", "pass", "self", ".", "street", "=", "self", ".", "_clean", "(", "token", ".", "capitalize", "(", ")", ")", "return", "True", "return", "False" ]
When we find something that doesn't match, we can make an educated guess and log it as such.
[ "When", "we", "find", "something", "that", "doesn", "t", "match", "we", "can", "make", "an", "educated", "guess", "and", "log", "it", "as", "such", "." ]
62ebb07a6840e710d256406a8ec1d06abec0e1c4
https://github.com/SwoopSearch/pyaddress/blob/62ebb07a6840e710d256406a8ec1d06abec0e1c4/address/address.py#L452-L477
13,635
SwoopSearch/pyaddress
address/address.py
Address.full_address
def full_address(self): """ Print the address in a human readable format """ addr = "" # if self.building: # addr = addr + "(" + self.building + ") " if self.house_number: addr = addr + self.house_number if self.street_prefix: addr = addr + " " + self.street_prefix if self.street: addr = addr + " " + self.street if self.street_suffix: addr = addr + " " + self.street_suffix if self.apartment: addr = addr + " " + self.apartment if self.city: addr = addr + ", " + self.city if self.state: addr = addr + ", " + self.state if self.zip: addr = addr + " " + self.zip return addr
python
def full_address(self): """ Print the address in a human readable format """ addr = "" # if self.building: # addr = addr + "(" + self.building + ") " if self.house_number: addr = addr + self.house_number if self.street_prefix: addr = addr + " " + self.street_prefix if self.street: addr = addr + " " + self.street if self.street_suffix: addr = addr + " " + self.street_suffix if self.apartment: addr = addr + " " + self.apartment if self.city: addr = addr + ", " + self.city if self.state: addr = addr + ", " + self.state if self.zip: addr = addr + " " + self.zip return addr
[ "def", "full_address", "(", "self", ")", ":", "addr", "=", "\"\"", "# if self.building:", "# addr = addr + \"(\" + self.building + \") \"", "if", "self", ".", "house_number", ":", "addr", "=", "addr", "+", "self", ".", "house_number", "if", "self", ".", "street_prefix", ":", "addr", "=", "addr", "+", "\" \"", "+", "self", ".", "street_prefix", "if", "self", ".", "street", ":", "addr", "=", "addr", "+", "\" \"", "+", "self", ".", "street", "if", "self", ".", "street_suffix", ":", "addr", "=", "addr", "+", "\" \"", "+", "self", ".", "street_suffix", "if", "self", ".", "apartment", ":", "addr", "=", "addr", "+", "\" \"", "+", "self", ".", "apartment", "if", "self", ".", "city", ":", "addr", "=", "addr", "+", "\", \"", "+", "self", ".", "city", "if", "self", ".", "state", ":", "addr", "=", "addr", "+", "\", \"", "+", "self", ".", "state", "if", "self", ".", "zip", ":", "addr", "=", "addr", "+", "\" \"", "+", "self", ".", "zip", "return", "addr" ]
Print the address in a human readable format
[ "Print", "the", "address", "in", "a", "human", "readable", "format" ]
62ebb07a6840e710d256406a8ec1d06abec0e1c4
https://github.com/SwoopSearch/pyaddress/blob/62ebb07a6840e710d256406a8ec1d06abec0e1c4/address/address.py#L479-L502
13,636
SwoopSearch/pyaddress
address/address.py
Address._get_dstk_intersections
def _get_dstk_intersections(self, address, dstk_address): """ Find the unique tokens in the original address and the returned address. """ # Normalize both addresses normalized_address = self._normalize(address) normalized_dstk_address = self._normalize(dstk_address) address_uniques = set(normalized_address) - set(normalized_dstk_address) dstk_address_uniques = set(normalized_dstk_address) - set(normalized_address) if self.logger: self.logger.debug("Address Uniques {0}".format(address_uniques)) if self.logger: self.logger.debug("DSTK Address Uniques {0}".format(dstk_address_uniques)) return (len(address_uniques), len(dstk_address_uniques))
python
def _get_dstk_intersections(self, address, dstk_address): """ Find the unique tokens in the original address and the returned address. """ # Normalize both addresses normalized_address = self._normalize(address) normalized_dstk_address = self._normalize(dstk_address) address_uniques = set(normalized_address) - set(normalized_dstk_address) dstk_address_uniques = set(normalized_dstk_address) - set(normalized_address) if self.logger: self.logger.debug("Address Uniques {0}".format(address_uniques)) if self.logger: self.logger.debug("DSTK Address Uniques {0}".format(dstk_address_uniques)) return (len(address_uniques), len(dstk_address_uniques))
[ "def", "_get_dstk_intersections", "(", "self", ",", "address", ",", "dstk_address", ")", ":", "# Normalize both addresses", "normalized_address", "=", "self", ".", "_normalize", "(", "address", ")", "normalized_dstk_address", "=", "self", ".", "_normalize", "(", "dstk_address", ")", "address_uniques", "=", "set", "(", "normalized_address", ")", "-", "set", "(", "normalized_dstk_address", ")", "dstk_address_uniques", "=", "set", "(", "normalized_dstk_address", ")", "-", "set", "(", "normalized_address", ")", "if", "self", ".", "logger", ":", "self", ".", "logger", ".", "debug", "(", "\"Address Uniques {0}\"", ".", "format", "(", "address_uniques", ")", ")", "if", "self", ".", "logger", ":", "self", ".", "logger", ".", "debug", "(", "\"DSTK Address Uniques {0}\"", ".", "format", "(", "dstk_address_uniques", ")", ")", "return", "(", "len", "(", "address_uniques", ")", ",", "len", "(", "dstk_address_uniques", ")", ")" ]
Find the unique tokens in the original address and the returned address.
[ "Find", "the", "unique", "tokens", "in", "the", "original", "address", "and", "the", "returned", "address", "." ]
62ebb07a6840e710d256406a8ec1d06abec0e1c4
https://github.com/SwoopSearch/pyaddress/blob/62ebb07a6840e710d256406a8ec1d06abec0e1c4/address/address.py#L637-L648
13,637
SwoopSearch/pyaddress
address/address.py
Address._normalize
def _normalize(self, address): """ Normalize prefixes, suffixes and other to make matching original to returned easier. """ normalized_address = [] if self.logger: self.logger.debug("Normalizing Address: {0}".format(address)) for token in address.split(): if token.upper() in self.parser.suffixes.keys(): normalized_address.append(self.parser.suffixes[token.upper()].lower()) elif token.upper() in self.parser.suffixes.values(): normalized_address.append(token.lower()) elif token.upper().replace('.', '') in self.parser.suffixes.values(): normalized_address.append(token.lower().replace('.', '')) elif token.lower() in self.parser.prefixes.keys(): normalized_address.append(self.parser.prefixes[token.lower()].lower()) elif token.upper() in self.parser.prefixes.values(): normalized_address.append(token.lower()[:-1]) elif token.upper() + '.' in self.parser.prefixes.values(): normalized_address.append(token.lower()) else: normalized_address.append(token.lower()) return normalized_address
python
def _normalize(self, address): """ Normalize prefixes, suffixes and other to make matching original to returned easier. """ normalized_address = [] if self.logger: self.logger.debug("Normalizing Address: {0}".format(address)) for token in address.split(): if token.upper() in self.parser.suffixes.keys(): normalized_address.append(self.parser.suffixes[token.upper()].lower()) elif token.upper() in self.parser.suffixes.values(): normalized_address.append(token.lower()) elif token.upper().replace('.', '') in self.parser.suffixes.values(): normalized_address.append(token.lower().replace('.', '')) elif token.lower() in self.parser.prefixes.keys(): normalized_address.append(self.parser.prefixes[token.lower()].lower()) elif token.upper() in self.parser.prefixes.values(): normalized_address.append(token.lower()[:-1]) elif token.upper() + '.' in self.parser.prefixes.values(): normalized_address.append(token.lower()) else: normalized_address.append(token.lower()) return normalized_address
[ "def", "_normalize", "(", "self", ",", "address", ")", ":", "normalized_address", "=", "[", "]", "if", "self", ".", "logger", ":", "self", ".", "logger", ".", "debug", "(", "\"Normalizing Address: {0}\"", ".", "format", "(", "address", ")", ")", "for", "token", "in", "address", ".", "split", "(", ")", ":", "if", "token", ".", "upper", "(", ")", "in", "self", ".", "parser", ".", "suffixes", ".", "keys", "(", ")", ":", "normalized_address", ".", "append", "(", "self", ".", "parser", ".", "suffixes", "[", "token", ".", "upper", "(", ")", "]", ".", "lower", "(", ")", ")", "elif", "token", ".", "upper", "(", ")", "in", "self", ".", "parser", ".", "suffixes", ".", "values", "(", ")", ":", "normalized_address", ".", "append", "(", "token", ".", "lower", "(", ")", ")", "elif", "token", ".", "upper", "(", ")", ".", "replace", "(", "'.'", ",", "''", ")", "in", "self", ".", "parser", ".", "suffixes", ".", "values", "(", ")", ":", "normalized_address", ".", "append", "(", "token", ".", "lower", "(", ")", ".", "replace", "(", "'.'", ",", "''", ")", ")", "elif", "token", ".", "lower", "(", ")", "in", "self", ".", "parser", ".", "prefixes", ".", "keys", "(", ")", ":", "normalized_address", ".", "append", "(", "self", ".", "parser", ".", "prefixes", "[", "token", ".", "lower", "(", ")", "]", ".", "lower", "(", ")", ")", "elif", "token", ".", "upper", "(", ")", "in", "self", ".", "parser", ".", "prefixes", ".", "values", "(", ")", ":", "normalized_address", ".", "append", "(", "token", ".", "lower", "(", ")", "[", ":", "-", "1", "]", ")", "elif", "token", ".", "upper", "(", ")", "+", "'.'", "in", "self", ".", "parser", ".", "prefixes", ".", "values", "(", ")", ":", "normalized_address", ".", "append", "(", "token", ".", "lower", "(", ")", ")", "else", ":", "normalized_address", ".", "append", "(", "token", ".", "lower", "(", ")", ")", "return", "normalized_address" ]
Normalize prefixes, suffixes and other to make matching original to returned easier.
[ "Normalize", "prefixes", "suffixes", "and", "other", "to", "make", "matching", "original", "to", "returned", "easier", "." ]
62ebb07a6840e710d256406a8ec1d06abec0e1c4
https://github.com/SwoopSearch/pyaddress/blob/62ebb07a6840e710d256406a8ec1d06abec0e1c4/address/address.py#L650-L671
13,638
AlexandreDecan/python-intervals
intervals.py
empty
def empty(): """ Create an empty set. """ if not hasattr(empty, '_instance'): empty._instance = Interval(AtomicInterval(OPEN, inf, -inf, OPEN)) return empty._instance
python
def empty(): """ Create an empty set. """ if not hasattr(empty, '_instance'): empty._instance = Interval(AtomicInterval(OPEN, inf, -inf, OPEN)) return empty._instance
[ "def", "empty", "(", ")", ":", "if", "not", "hasattr", "(", "empty", ",", "'_instance'", ")", ":", "empty", ".", "_instance", "=", "Interval", "(", "AtomicInterval", "(", "OPEN", ",", "inf", ",", "-", "inf", ",", "OPEN", ")", ")", "return", "empty", ".", "_instance" ]
Create an empty set.
[ "Create", "an", "empty", "set", "." ]
eda4da7dd39afabab2c1689e0b5158abae08c831
https://github.com/AlexandreDecan/python-intervals/blob/eda4da7dd39afabab2c1689e0b5158abae08c831/intervals.py#L115-L121
13,639
AlexandreDecan/python-intervals
intervals.py
from_data
def from_data(data, conv=None, pinf=float('inf'), ninf=float('-inf')): """ Import an interval from a piece of data. :param data: a list of 4-uples (left, lower, upper, right). :param conv: function that is used to convert "lower" and "upper" to bounds, default to identity. :param pinf: value used to represent positive infinity. :param ninf: value used to represent negative infinity. :return: an Interval instance. """ intervals = [] conv = (lambda v: v) if conv is None else conv def _convert(bound): if bound == pinf: return inf elif bound == ninf: return -inf else: return conv(bound) for item in data: left, lower, upper, right = item intervals.append(AtomicInterval( left, _convert(lower), _convert(upper), right )) return Interval(*intervals)
python
def from_data(data, conv=None, pinf=float('inf'), ninf=float('-inf')): """ Import an interval from a piece of data. :param data: a list of 4-uples (left, lower, upper, right). :param conv: function that is used to convert "lower" and "upper" to bounds, default to identity. :param pinf: value used to represent positive infinity. :param ninf: value used to represent negative infinity. :return: an Interval instance. """ intervals = [] conv = (lambda v: v) if conv is None else conv def _convert(bound): if bound == pinf: return inf elif bound == ninf: return -inf else: return conv(bound) for item in data: left, lower, upper, right = item intervals.append(AtomicInterval( left, _convert(lower), _convert(upper), right )) return Interval(*intervals)
[ "def", "from_data", "(", "data", ",", "conv", "=", "None", ",", "pinf", "=", "float", "(", "'inf'", ")", ",", "ninf", "=", "float", "(", "'-inf'", ")", ")", ":", "intervals", "=", "[", "]", "conv", "=", "(", "lambda", "v", ":", "v", ")", "if", "conv", "is", "None", "else", "conv", "def", "_convert", "(", "bound", ")", ":", "if", "bound", "==", "pinf", ":", "return", "inf", "elif", "bound", "==", "ninf", ":", "return", "-", "inf", "else", ":", "return", "conv", "(", "bound", ")", "for", "item", "in", "data", ":", "left", ",", "lower", ",", "upper", ",", "right", "=", "item", "intervals", ".", "append", "(", "AtomicInterval", "(", "left", ",", "_convert", "(", "lower", ")", ",", "_convert", "(", "upper", ")", ",", "right", ")", ")", "return", "Interval", "(", "*", "intervals", ")" ]
Import an interval from a piece of data. :param data: a list of 4-uples (left, lower, upper, right). :param conv: function that is used to convert "lower" and "upper" to bounds, default to identity. :param pinf: value used to represent positive infinity. :param ninf: value used to represent negative infinity. :return: an Interval instance.
[ "Import", "an", "interval", "from", "a", "piece", "of", "data", "." ]
eda4da7dd39afabab2c1689e0b5158abae08c831
https://github.com/AlexandreDecan/python-intervals/blob/eda4da7dd39afabab2c1689e0b5158abae08c831/intervals.py#L228-L257
13,640
AlexandreDecan/python-intervals
intervals.py
AtomicInterval.is_empty
def is_empty(self): """ Test interval emptiness. :return: True if interval is empty, False otherwise. """ return ( self._lower > self._upper or (self._lower == self._upper and (self._left == OPEN or self._right == OPEN)) )
python
def is_empty(self): """ Test interval emptiness. :return: True if interval is empty, False otherwise. """ return ( self._lower > self._upper or (self._lower == self._upper and (self._left == OPEN or self._right == OPEN)) )
[ "def", "is_empty", "(", "self", ")", ":", "return", "(", "self", ".", "_lower", ">", "self", ".", "_upper", "or", "(", "self", ".", "_lower", "==", "self", ".", "_upper", "and", "(", "self", ".", "_left", "==", "OPEN", "or", "self", ".", "_right", "==", "OPEN", ")", ")", ")" ]
Test interval emptiness. :return: True if interval is empty, False otherwise.
[ "Test", "interval", "emptiness", "." ]
eda4da7dd39afabab2c1689e0b5158abae08c831
https://github.com/AlexandreDecan/python-intervals/blob/eda4da7dd39afabab2c1689e0b5158abae08c831/intervals.py#L355-L364
13,641
AlexandreDecan/python-intervals
intervals.py
Interval.to_atomic
def to_atomic(self): """ Return the smallest atomic interval containing this interval. :return: an AtomicInterval instance. """ lower = self._intervals[0].lower left = self._intervals[0].left upper = self._intervals[-1].upper right = self._intervals[-1].right return AtomicInterval(left, lower, upper, right)
python
def to_atomic(self): """ Return the smallest atomic interval containing this interval. :return: an AtomicInterval instance. """ lower = self._intervals[0].lower left = self._intervals[0].left upper = self._intervals[-1].upper right = self._intervals[-1].right return AtomicInterval(left, lower, upper, right)
[ "def", "to_atomic", "(", "self", ")", ":", "lower", "=", "self", ".", "_intervals", "[", "0", "]", ".", "lower", "left", "=", "self", ".", "_intervals", "[", "0", "]", ".", "left", "upper", "=", "self", ".", "_intervals", "[", "-", "1", "]", ".", "upper", "right", "=", "self", ".", "_intervals", "[", "-", "1", "]", ".", "right", "return", "AtomicInterval", "(", "left", ",", "lower", ",", "upper", ",", "right", ")" ]
Return the smallest atomic interval containing this interval. :return: an AtomicInterval instance.
[ "Return", "the", "smallest", "atomic", "interval", "containing", "this", "interval", "." ]
eda4da7dd39afabab2c1689e0b5158abae08c831
https://github.com/AlexandreDecan/python-intervals/blob/eda4da7dd39afabab2c1689e0b5158abae08c831/intervals.py#L730-L741
13,642
dailymotion/tartiflette-aiohttp
tartiflette_aiohttp/__init__.py
register_graphql_handlers
def register_graphql_handlers( app: "Application", engine_sdl: str = None, engine_schema_name: str = "default", executor_context: dict = None, executor_http_endpoint: str = "/graphql", executor_http_methods: List[str] = None, engine: Engine = None, subscription_ws_endpoint: Optional[str] = None, graphiql_enabled: bool = False, graphiql_options: Optional[Dict[str, Any]] = None, ) -> "Application": """Register a Tartiflette Engine to an app Pass a SDL or an already initialized Engine, not both, not neither. Keyword Arguments: app {aiohttp.web.Application} -- The application to register to. engine_sdl {str} -- The SDL defining your API (default: {None}) engine_schema_name {str} -- The name of your sdl (default: {"default"}) executor_context {dict} -- Context dict that will be passed to the resolvers (default: {None}) executor_http_endpoint {str} -- Path part of the URL the graphql endpoint will listen on (default: {"/graphql"}) executor_http_methods {list[str]} -- List of HTTP methods allowed on the endpoint (only GET and POST are supported) (default: {None}) engine {Engine} -- An already initialized Engine (default: {None}) subscription_ws_endpoint {Optional[str]} -- Path part of the URL the WebSocket GraphQL subscription endpoint will listen on (default: {None}) graphiql_enabled {bool} -- Determines whether or not we should handle a GraphiQL endpoint (default: {False}) graphiql_options {dict} -- Customization options for the GraphiQL instance (default: {None}) Raises: Exception -- On bad sdl/engine parameter combinaison. Exception -- On unsupported HTTP Method. Return: The app object. 
""" # pylint: disable=too-many-arguments,too-many-locals if (not engine_sdl and not engine) or (engine and engine_sdl): raise Exception( "an engine OR an engine_sdl should be passed here, not both, not none" ) if not executor_context: executor_context = {} executor_context["app"] = app if not executor_http_methods: executor_http_methods = ["GET", "POST"] if not engine: engine = Engine(engine_sdl, engine_schema_name) app["ttftt_engine"] = engine for method in executor_http_methods: try: app.router.add_route( method, executor_http_endpoint, partial( getattr(Handlers, "handle_%s" % method.lower()), executor_context, ), ) except AttributeError: raise Exception("Unsupported < %s > http method" % method) _set_subscription_ws_handler(app, subscription_ws_endpoint, engine) _set_graphiql_handler( app, graphiql_enabled, graphiql_options, executor_http_endpoint, executor_http_methods, subscription_ws_endpoint, ) return app
python
def register_graphql_handlers( app: "Application", engine_sdl: str = None, engine_schema_name: str = "default", executor_context: dict = None, executor_http_endpoint: str = "/graphql", executor_http_methods: List[str] = None, engine: Engine = None, subscription_ws_endpoint: Optional[str] = None, graphiql_enabled: bool = False, graphiql_options: Optional[Dict[str, Any]] = None, ) -> "Application": """Register a Tartiflette Engine to an app Pass a SDL or an already initialized Engine, not both, not neither. Keyword Arguments: app {aiohttp.web.Application} -- The application to register to. engine_sdl {str} -- The SDL defining your API (default: {None}) engine_schema_name {str} -- The name of your sdl (default: {"default"}) executor_context {dict} -- Context dict that will be passed to the resolvers (default: {None}) executor_http_endpoint {str} -- Path part of the URL the graphql endpoint will listen on (default: {"/graphql"}) executor_http_methods {list[str]} -- List of HTTP methods allowed on the endpoint (only GET and POST are supported) (default: {None}) engine {Engine} -- An already initialized Engine (default: {None}) subscription_ws_endpoint {Optional[str]} -- Path part of the URL the WebSocket GraphQL subscription endpoint will listen on (default: {None}) graphiql_enabled {bool} -- Determines whether or not we should handle a GraphiQL endpoint (default: {False}) graphiql_options {dict} -- Customization options for the GraphiQL instance (default: {None}) Raises: Exception -- On bad sdl/engine parameter combinaison. Exception -- On unsupported HTTP Method. Return: The app object. 
""" # pylint: disable=too-many-arguments,too-many-locals if (not engine_sdl and not engine) or (engine and engine_sdl): raise Exception( "an engine OR an engine_sdl should be passed here, not both, not none" ) if not executor_context: executor_context = {} executor_context["app"] = app if not executor_http_methods: executor_http_methods = ["GET", "POST"] if not engine: engine = Engine(engine_sdl, engine_schema_name) app["ttftt_engine"] = engine for method in executor_http_methods: try: app.router.add_route( method, executor_http_endpoint, partial( getattr(Handlers, "handle_%s" % method.lower()), executor_context, ), ) except AttributeError: raise Exception("Unsupported < %s > http method" % method) _set_subscription_ws_handler(app, subscription_ws_endpoint, engine) _set_graphiql_handler( app, graphiql_enabled, graphiql_options, executor_http_endpoint, executor_http_methods, subscription_ws_endpoint, ) return app
[ "def", "register_graphql_handlers", "(", "app", ":", "\"Application\"", ",", "engine_sdl", ":", "str", "=", "None", ",", "engine_schema_name", ":", "str", "=", "\"default\"", ",", "executor_context", ":", "dict", "=", "None", ",", "executor_http_endpoint", ":", "str", "=", "\"/graphql\"", ",", "executor_http_methods", ":", "List", "[", "str", "]", "=", "None", ",", "engine", ":", "Engine", "=", "None", ",", "subscription_ws_endpoint", ":", "Optional", "[", "str", "]", "=", "None", ",", "graphiql_enabled", ":", "bool", "=", "False", ",", "graphiql_options", ":", "Optional", "[", "Dict", "[", "str", ",", "Any", "]", "]", "=", "None", ",", ")", "->", "\"Application\"", ":", "# pylint: disable=too-many-arguments,too-many-locals", "if", "(", "not", "engine_sdl", "and", "not", "engine", ")", "or", "(", "engine", "and", "engine_sdl", ")", ":", "raise", "Exception", "(", "\"an engine OR an engine_sdl should be passed here, not both, not none\"", ")", "if", "not", "executor_context", ":", "executor_context", "=", "{", "}", "executor_context", "[", "\"app\"", "]", "=", "app", "if", "not", "executor_http_methods", ":", "executor_http_methods", "=", "[", "\"GET\"", ",", "\"POST\"", "]", "if", "not", "engine", ":", "engine", "=", "Engine", "(", "engine_sdl", ",", "engine_schema_name", ")", "app", "[", "\"ttftt_engine\"", "]", "=", "engine", "for", "method", "in", "executor_http_methods", ":", "try", ":", "app", ".", "router", ".", "add_route", "(", "method", ",", "executor_http_endpoint", ",", "partial", "(", "getattr", "(", "Handlers", ",", "\"handle_%s\"", "%", "method", ".", "lower", "(", ")", ")", ",", "executor_context", ",", ")", ",", ")", "except", "AttributeError", ":", "raise", "Exception", "(", "\"Unsupported < %s > http method\"", "%", "method", ")", "_set_subscription_ws_handler", "(", "app", ",", "subscription_ws_endpoint", ",", "engine", ")", "_set_graphiql_handler", "(", "app", ",", "graphiql_enabled", ",", "graphiql_options", ",", "executor_http_endpoint", 
",", "executor_http_methods", ",", "subscription_ws_endpoint", ",", ")", "return", "app" ]
Register a Tartiflette Engine to an app Pass a SDL or an already initialized Engine, not both, not neither. Keyword Arguments: app {aiohttp.web.Application} -- The application to register to. engine_sdl {str} -- The SDL defining your API (default: {None}) engine_schema_name {str} -- The name of your sdl (default: {"default"}) executor_context {dict} -- Context dict that will be passed to the resolvers (default: {None}) executor_http_endpoint {str} -- Path part of the URL the graphql endpoint will listen on (default: {"/graphql"}) executor_http_methods {list[str]} -- List of HTTP methods allowed on the endpoint (only GET and POST are supported) (default: {None}) engine {Engine} -- An already initialized Engine (default: {None}) subscription_ws_endpoint {Optional[str]} -- Path part of the URL the WebSocket GraphQL subscription endpoint will listen on (default: {None}) graphiql_enabled {bool} -- Determines whether or not we should handle a GraphiQL endpoint (default: {False}) graphiql_options {dict} -- Customization options for the GraphiQL instance (default: {None}) Raises: Exception -- On bad sdl/engine parameter combinaison. Exception -- On unsupported HTTP Method. Return: The app object.
[ "Register", "a", "Tartiflette", "Engine", "to", "an", "app" ]
a87e6aa7d1f18b2d700eeb799228745848c88088
https://github.com/dailymotion/tartiflette-aiohttp/blob/a87e6aa7d1f18b2d700eeb799228745848c88088/tartiflette_aiohttp/__init__.py#L92-L170
13,643
dailymotion/tartiflette-aiohttp
examples/aiohttp/starwars/app.py
on_shutdown
async def on_shutdown(app): """app SHUTDOWN event handler """ for method in app.get("close_methods", []): logger.debug("Calling < %s >", method) if asyncio.iscoroutinefunction(method): await method() else: method()
python
async def on_shutdown(app): """app SHUTDOWN event handler """ for method in app.get("close_methods", []): logger.debug("Calling < %s >", method) if asyncio.iscoroutinefunction(method): await method() else: method()
[ "async", "def", "on_shutdown", "(", "app", ")", ":", "for", "method", "in", "app", ".", "get", "(", "\"close_methods\"", ",", "[", "]", ")", ":", "logger", ".", "debug", "(", "\"Calling < %s >\"", ",", "method", ")", "if", "asyncio", ".", "iscoroutinefunction", "(", "method", ")", ":", "await", "method", "(", ")", "else", ":", "method", "(", ")" ]
app SHUTDOWN event handler
[ "app", "SHUTDOWN", "event", "handler" ]
a87e6aa7d1f18b2d700eeb799228745848c88088
https://github.com/dailymotion/tartiflette-aiohttp/blob/a87e6aa7d1f18b2d700eeb799228745848c88088/examples/aiohttp/starwars/app.py#L20-L28
13,644
davidmogar/cucco
cucco/config.py
Config._load_from_file
def _load_from_file(path): """Load a config file from the given path. Load all normalizations from the config file received as argument. It expects to find a YAML file with a list of normalizations and arguments under the key 'normalizations'. Args: path: Path to YAML file. """ config = [] try: with open(path, 'r') as config_file: config = yaml.load(config_file)['normalizations'] except EnvironmentError as e: raise ConfigError('Problem while loading file: %s' % e.args[1] if len(e.args) > 1 else e) except (TypeError, KeyError) as e: raise ConfigError('Config file has an unexpected structure: %s' % e) except yaml.YAMLError: raise ConfigError('Invalid YAML file syntax') return config
python
def _load_from_file(path): """Load a config file from the given path. Load all normalizations from the config file received as argument. It expects to find a YAML file with a list of normalizations and arguments under the key 'normalizations'. Args: path: Path to YAML file. """ config = [] try: with open(path, 'r') as config_file: config = yaml.load(config_file)['normalizations'] except EnvironmentError as e: raise ConfigError('Problem while loading file: %s' % e.args[1] if len(e.args) > 1 else e) except (TypeError, KeyError) as e: raise ConfigError('Config file has an unexpected structure: %s' % e) except yaml.YAMLError: raise ConfigError('Invalid YAML file syntax') return config
[ "def", "_load_from_file", "(", "path", ")", ":", "config", "=", "[", "]", "try", ":", "with", "open", "(", "path", ",", "'r'", ")", "as", "config_file", ":", "config", "=", "yaml", ".", "load", "(", "config_file", ")", "[", "'normalizations'", "]", "except", "EnvironmentError", "as", "e", ":", "raise", "ConfigError", "(", "'Problem while loading file: %s'", "%", "e", ".", "args", "[", "1", "]", "if", "len", "(", "e", ".", "args", ")", ">", "1", "else", "e", ")", "except", "(", "TypeError", ",", "KeyError", ")", "as", "e", ":", "raise", "ConfigError", "(", "'Config file has an unexpected structure: %s'", "%", "e", ")", "except", "yaml", ".", "YAMLError", ":", "raise", "ConfigError", "(", "'Invalid YAML file syntax'", ")", "return", "config" ]
Load a config file from the given path. Load all normalizations from the config file received as argument. It expects to find a YAML file with a list of normalizations and arguments under the key 'normalizations'. Args: path: Path to YAML file.
[ "Load", "a", "config", "file", "from", "the", "given", "path", "." ]
e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4
https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/config.py#L53-L76
13,645
davidmogar/cucco
cucco/config.py
Config._parse_normalization
def _parse_normalization(normalization): """Parse a normalization item. Transform dicts into a tuple containing the normalization options. If a string is found, the actual value is used. Args: normalization: Normalization to parse. Returns: Tuple or string containing the parsed normalization. """ parsed_normalization = None if isinstance(normalization, dict): if len(normalization.keys()) == 1: items = list(normalization.items())[0] if len(items) == 2: # Two elements tuple # Convert to string if no normalization options if items[1] and isinstance(items[1], dict): parsed_normalization = items else: parsed_normalization = items[0] elif isinstance(normalization, STR_TYPE): parsed_normalization = normalization return parsed_normalization
python
def _parse_normalization(normalization): """Parse a normalization item. Transform dicts into a tuple containing the normalization options. If a string is found, the actual value is used. Args: normalization: Normalization to parse. Returns: Tuple or string containing the parsed normalization. """ parsed_normalization = None if isinstance(normalization, dict): if len(normalization.keys()) == 1: items = list(normalization.items())[0] if len(items) == 2: # Two elements tuple # Convert to string if no normalization options if items[1] and isinstance(items[1], dict): parsed_normalization = items else: parsed_normalization = items[0] elif isinstance(normalization, STR_TYPE): parsed_normalization = normalization return parsed_normalization
[ "def", "_parse_normalization", "(", "normalization", ")", ":", "parsed_normalization", "=", "None", "if", "isinstance", "(", "normalization", ",", "dict", ")", ":", "if", "len", "(", "normalization", ".", "keys", "(", ")", ")", "==", "1", ":", "items", "=", "list", "(", "normalization", ".", "items", "(", ")", ")", "[", "0", "]", "if", "len", "(", "items", ")", "==", "2", ":", "# Two elements tuple", "# Convert to string if no normalization options", "if", "items", "[", "1", "]", "and", "isinstance", "(", "items", "[", "1", "]", ",", "dict", ")", ":", "parsed_normalization", "=", "items", "else", ":", "parsed_normalization", "=", "items", "[", "0", "]", "elif", "isinstance", "(", "normalization", ",", "STR_TYPE", ")", ":", "parsed_normalization", "=", "normalization", "return", "parsed_normalization" ]
Parse a normalization item. Transform dicts into a tuple containing the normalization options. If a string is found, the actual value is used. Args: normalization: Normalization to parse. Returns: Tuple or string containing the parsed normalization.
[ "Parse", "a", "normalization", "item", "." ]
e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4
https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/config.py#L79-L105
13,646
davidmogar/cucco
cucco/config.py
Config._parse_normalizations
def _parse_normalizations(self, normalizations): """Returns a list of parsed normalizations. Iterates over a list of normalizations, removing those not correctly defined. It also transform complex items to have a common format (list of tuples and strings). Args: normalizations: List of normalizations to parse. Returns: A list of normalizations after being parsed and curated. """ parsed_normalizations = [] if isinstance(normalizations, list): for item in normalizations: normalization = self._parse_normalization(item) if normalization: parsed_normalizations.append(normalization) else: raise ConfigError('List expected. Found %s' % type(normalizations)) return parsed_normalizations
python
def _parse_normalizations(self, normalizations): """Returns a list of parsed normalizations. Iterates over a list of normalizations, removing those not correctly defined. It also transform complex items to have a common format (list of tuples and strings). Args: normalizations: List of normalizations to parse. Returns: A list of normalizations after being parsed and curated. """ parsed_normalizations = [] if isinstance(normalizations, list): for item in normalizations: normalization = self._parse_normalization(item) if normalization: parsed_normalizations.append(normalization) else: raise ConfigError('List expected. Found %s' % type(normalizations)) return parsed_normalizations
[ "def", "_parse_normalizations", "(", "self", ",", "normalizations", ")", ":", "parsed_normalizations", "=", "[", "]", "if", "isinstance", "(", "normalizations", ",", "list", ")", ":", "for", "item", "in", "normalizations", ":", "normalization", "=", "self", ".", "_parse_normalization", "(", "item", ")", "if", "normalization", ":", "parsed_normalizations", ".", "append", "(", "normalization", ")", "else", ":", "raise", "ConfigError", "(", "'List expected. Found %s'", "%", "type", "(", "normalizations", ")", ")", "return", "parsed_normalizations" ]
Returns a list of parsed normalizations. Iterates over a list of normalizations, removing those not correctly defined. It also transform complex items to have a common format (list of tuples and strings). Args: normalizations: List of normalizations to parse. Returns: A list of normalizations after being parsed and curated.
[ "Returns", "a", "list", "of", "parsed", "normalizations", "." ]
e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4
https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/config.py#L107-L130
13,647
davidmogar/cucco
cucco/logging.py
initialize_logger
def initialize_logger(debug): """Set up logger to be used by the library. Args: debug: Wheter to use debug level or not. Returns: A logger ready to be used. """ level = logging.DEBUG if debug else logging.INFO logger = logging.getLogger('cucco') logger.setLevel(level) formatter = logging.Formatter('%(asctime)s %(levelname).1s %(message)s') console_handler = logging.StreamHandler() console_handler.setLevel(level) console_handler.setFormatter(formatter) logger.addHandler(console_handler) return logger
python
def initialize_logger(debug): """Set up logger to be used by the library. Args: debug: Wheter to use debug level or not. Returns: A logger ready to be used. """ level = logging.DEBUG if debug else logging.INFO logger = logging.getLogger('cucco') logger.setLevel(level) formatter = logging.Formatter('%(asctime)s %(levelname).1s %(message)s') console_handler = logging.StreamHandler() console_handler.setLevel(level) console_handler.setFormatter(formatter) logger.addHandler(console_handler) return logger
[ "def", "initialize_logger", "(", "debug", ")", ":", "level", "=", "logging", ".", "DEBUG", "if", "debug", "else", "logging", ".", "INFO", "logger", "=", "logging", ".", "getLogger", "(", "'cucco'", ")", "logger", ".", "setLevel", "(", "level", ")", "formatter", "=", "logging", ".", "Formatter", "(", "'%(asctime)s %(levelname).1s %(message)s'", ")", "console_handler", "=", "logging", ".", "StreamHandler", "(", ")", "console_handler", ".", "setLevel", "(", "level", ")", "console_handler", ".", "setFormatter", "(", "formatter", ")", "logger", ".", "addHandler", "(", "console_handler", ")", "return", "logger" ]
Set up logger to be used by the library. Args: debug: Wheter to use debug level or not. Returns: A logger ready to be used.
[ "Set", "up", "logger", "to", "be", "used", "by", "the", "library", "." ]
e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4
https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/logging.py#L5-L23
13,648
davidmogar/cucco
cucco/cli.py
batch
def batch(ctx, path, recursive, watch): """ Normalize files in a path. Apply normalizations over all files found in a given path. The normalizations applied will be those defined in the config file. If no config is specified, the default normalizations will be used. """ batch = Batch(ctx.obj['config'], ctx.obj['cucco']) if os.path.exists(path): if watch: batch.watch(path, recursive) elif os.path.isfile(path): batch.process_file(path) else: batch.process_files(path, recursive) else: click.echo('Error: Specified path doesn\'t exists', err=True) sys.exit(-1)
python
def batch(ctx, path, recursive, watch): """ Normalize files in a path. Apply normalizations over all files found in a given path. The normalizations applied will be those defined in the config file. If no config is specified, the default normalizations will be used. """ batch = Batch(ctx.obj['config'], ctx.obj['cucco']) if os.path.exists(path): if watch: batch.watch(path, recursive) elif os.path.isfile(path): batch.process_file(path) else: batch.process_files(path, recursive) else: click.echo('Error: Specified path doesn\'t exists', err=True) sys.exit(-1)
[ "def", "batch", "(", "ctx", ",", "path", ",", "recursive", ",", "watch", ")", ":", "batch", "=", "Batch", "(", "ctx", ".", "obj", "[", "'config'", "]", ",", "ctx", ".", "obj", "[", "'cucco'", "]", ")", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "if", "watch", ":", "batch", ".", "watch", "(", "path", ",", "recursive", ")", "elif", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "batch", ".", "process_file", "(", "path", ")", "else", ":", "batch", ".", "process_files", "(", "path", ",", "recursive", ")", "else", ":", "click", ".", "echo", "(", "'Error: Specified path doesn\\'t exists'", ",", "err", "=", "True", ")", "sys", ".", "exit", "(", "-", "1", ")" ]
Normalize files in a path. Apply normalizations over all files found in a given path. The normalizations applied will be those defined in the config file. If no config is specified, the default normalizations will be used.
[ "Normalize", "files", "in", "a", "path", "." ]
e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4
https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/cli.py#L23-L43
13,649
davidmogar/cucco
cucco/cli.py
normalize
def normalize(ctx, text): """ Normalize text or piped input. Normalize text passed as an argument to this command using the specified config (default values if --config option is not used). Pipes can be used along this command to process the output of another cli. This is the default behaviour when no text is defined. """ if text: click.echo(ctx.obj['cucco'].normalize(text)) else: for line in sys.stdin: click.echo(ctx.obj['cucco'].normalize(line))
python
def normalize(ctx, text): """ Normalize text or piped input. Normalize text passed as an argument to this command using the specified config (default values if --config option is not used). Pipes can be used along this command to process the output of another cli. This is the default behaviour when no text is defined. """ if text: click.echo(ctx.obj['cucco'].normalize(text)) else: for line in sys.stdin: click.echo(ctx.obj['cucco'].normalize(line))
[ "def", "normalize", "(", "ctx", ",", "text", ")", ":", "if", "text", ":", "click", ".", "echo", "(", "ctx", ".", "obj", "[", "'cucco'", "]", ".", "normalize", "(", "text", ")", ")", "else", ":", "for", "line", "in", "sys", ".", "stdin", ":", "click", ".", "echo", "(", "ctx", ".", "obj", "[", "'cucco'", "]", ".", "normalize", "(", "line", ")", ")" ]
Normalize text or piped input. Normalize text passed as an argument to this command using the specified config (default values if --config option is not used). Pipes can be used along this command to process the output of another cli. This is the default behaviour when no text is defined.
[ "Normalize", "text", "or", "piped", "input", "." ]
e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4
https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/cli.py#L48-L64
13,650
davidmogar/cucco
cucco/cli.py
cli
def cli(ctx, config, debug, language, verbose): """ Cucco allows to apply normalizations to a given text or file. This normalizations include, among others, removal of accent marks, stop words an extra white spaces, replacement of punctuation symbols, emails, emojis, etc. For more info on how to use and configure Cucco, check the project website at https://cucco.io. """ ctx.obj = {} try: ctx.obj['config'] = Config(normalizations=config, language=language, debug=debug, verbose=verbose) except ConfigError as e: click.echo(e.message) sys.exit(-1) ctx.obj['cucco'] = Cucco(ctx.obj['config'])
python
def cli(ctx, config, debug, language, verbose): """ Cucco allows to apply normalizations to a given text or file. This normalizations include, among others, removal of accent marks, stop words an extra white spaces, replacement of punctuation symbols, emails, emojis, etc. For more info on how to use and configure Cucco, check the project website at https://cucco.io. """ ctx.obj = {} try: ctx.obj['config'] = Config(normalizations=config, language=language, debug=debug, verbose=verbose) except ConfigError as e: click.echo(e.message) sys.exit(-1) ctx.obj['cucco'] = Cucco(ctx.obj['config'])
[ "def", "cli", "(", "ctx", ",", "config", ",", "debug", ",", "language", ",", "verbose", ")", ":", "ctx", ".", "obj", "=", "{", "}", "try", ":", "ctx", ".", "obj", "[", "'config'", "]", "=", "Config", "(", "normalizations", "=", "config", ",", "language", "=", "language", ",", "debug", "=", "debug", ",", "verbose", "=", "verbose", ")", "except", "ConfigError", "as", "e", ":", "click", ".", "echo", "(", "e", ".", "message", ")", "sys", ".", "exit", "(", "-", "1", ")", "ctx", ".", "obj", "[", "'cucco'", "]", "=", "Cucco", "(", "ctx", ".", "obj", "[", "'config'", "]", ")" ]
Cucco allows to apply normalizations to a given text or file. This normalizations include, among others, removal of accent marks, stop words an extra white spaces, replacement of punctuation symbols, emails, emojis, etc. For more info on how to use and configure Cucco, check the project website at https://cucco.io.
[ "Cucco", "allows", "to", "apply", "normalizations", "to", "a", "given", "text", "or", "file", ".", "This", "normalizations", "include", "among", "others", "removal", "of", "accent", "marks", "stop", "words", "an", "extra", "white", "spaces", "replacement", "of", "punctuation", "symbols", "emails", "emojis", "etc", "." ]
e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4
https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/cli.py#L77-L98
13,651
davidmogar/cucco
cucco/batch.py
files_generator
def files_generator(path, recursive): """Yield files found in a given path. Walk over a given path finding and yielding all files found on it. This can be done only on the root directory or recursively. Args: path: Path to the directory. recursive: Whether to find files recursively or not. Yields: A tuple for each file in the given path containing the path and the name of the file. """ if recursive: for (path, _, files) in os.walk(path): for file in files: if not file.endswith(BATCH_EXTENSION): yield (path, file) else: for file in os.listdir(path): if (os.path.isfile(os.path.join(path, file)) and not file.endswith(BATCH_EXTENSION)): yield (path, file)
python
def files_generator(path, recursive): """Yield files found in a given path. Walk over a given path finding and yielding all files found on it. This can be done only on the root directory or recursively. Args: path: Path to the directory. recursive: Whether to find files recursively or not. Yields: A tuple for each file in the given path containing the path and the name of the file. """ if recursive: for (path, _, files) in os.walk(path): for file in files: if not file.endswith(BATCH_EXTENSION): yield (path, file) else: for file in os.listdir(path): if (os.path.isfile(os.path.join(path, file)) and not file.endswith(BATCH_EXTENSION)): yield (path, file)
[ "def", "files_generator", "(", "path", ",", "recursive", ")", ":", "if", "recursive", ":", "for", "(", "path", ",", "_", ",", "files", ")", "in", "os", ".", "walk", "(", "path", ")", ":", "for", "file", "in", "files", ":", "if", "not", "file", ".", "endswith", "(", "BATCH_EXTENSION", ")", ":", "yield", "(", "path", ",", "file", ")", "else", ":", "for", "file", "in", "os", ".", "listdir", "(", "path", ")", ":", "if", "(", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "path", ",", "file", ")", ")", "and", "not", "file", ".", "endswith", "(", "BATCH_EXTENSION", ")", ")", ":", "yield", "(", "path", ",", "file", ")" ]
Yield files found in a given path. Walk over a given path finding and yielding all files found on it. This can be done only on the root directory or recursively. Args: path: Path to the directory. recursive: Whether to find files recursively or not. Yields: A tuple for each file in the given path containing the path and the name of the file.
[ "Yield", "files", "found", "in", "a", "given", "path", "." ]
e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4
https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/batch.py#L11-L35
13,652
davidmogar/cucco
cucco/batch.py
Batch.process_file
def process_file(self, path): """Process a file applying normalizations. Get a file as input and generate a new file with the result of applying normalizations to every single line in the original file. The extension for the new file will be the one defined in BATCH_EXTENSION. Args: path: Path to the file. """ if self._config.verbose: self._logger.info('Processing file "%s"', path) output_path = '%s%s' % (path, BATCH_EXTENSION) with open(output_path, 'w') as file: for line in lines_generator(path): file.write('%s\n' % self._cucco.normalize( line.encode().decode('utf-8'))) self._logger.debug('Created file "%s"', output_path)
python
def process_file(self, path): """Process a file applying normalizations. Get a file as input and generate a new file with the result of applying normalizations to every single line in the original file. The extension for the new file will be the one defined in BATCH_EXTENSION. Args: path: Path to the file. """ if self._config.verbose: self._logger.info('Processing file "%s"', path) output_path = '%s%s' % (path, BATCH_EXTENSION) with open(output_path, 'w') as file: for line in lines_generator(path): file.write('%s\n' % self._cucco.normalize( line.encode().decode('utf-8'))) self._logger.debug('Created file "%s"', output_path)
[ "def", "process_file", "(", "self", ",", "path", ")", ":", "if", "self", ".", "_config", ".", "verbose", ":", "self", ".", "_logger", ".", "info", "(", "'Processing file \"%s\"'", ",", "path", ")", "output_path", "=", "'%s%s'", "%", "(", "path", ",", "BATCH_EXTENSION", ")", "with", "open", "(", "output_path", ",", "'w'", ")", "as", "file", ":", "for", "line", "in", "lines_generator", "(", "path", ")", ":", "file", ".", "write", "(", "'%s\\n'", "%", "self", ".", "_cucco", ".", "normalize", "(", "line", ".", "encode", "(", ")", ".", "decode", "(", "'utf-8'", ")", ")", ")", "self", ".", "_logger", ".", "debug", "(", "'Created file \"%s\"'", ",", "output_path", ")" ]
Process a file applying normalizations. Get a file as input and generate a new file with the result of applying normalizations to every single line in the original file. The extension for the new file will be the one defined in BATCH_EXTENSION. Args: path: Path to the file.
[ "Process", "a", "file", "applying", "normalizations", "." ]
e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4
https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/batch.py#L76-L97
13,653
davidmogar/cucco
cucco/batch.py
Batch.process_files
def process_files(self, path, recursive=False): """Apply normalizations over all files in the given directory. Iterate over all files in a given directory. Normalizations will be applied to each file, storing the result in a new file. The extension for the new file will be the one defined in BATCH_EXTENSION. Args: path: Path to the directory. recursive: Whether to find files recursively or not. """ self._logger.info('Processing files in "%s"', path) for (path, file) in files_generator(path, recursive): if not file.endswith(BATCH_EXTENSION): self.process_file(os.path.join(path, file))
python
def process_files(self, path, recursive=False): """Apply normalizations over all files in the given directory. Iterate over all files in a given directory. Normalizations will be applied to each file, storing the result in a new file. The extension for the new file will be the one defined in BATCH_EXTENSION. Args: path: Path to the directory. recursive: Whether to find files recursively or not. """ self._logger.info('Processing files in "%s"', path) for (path, file) in files_generator(path, recursive): if not file.endswith(BATCH_EXTENSION): self.process_file(os.path.join(path, file))
[ "def", "process_files", "(", "self", ",", "path", ",", "recursive", "=", "False", ")", ":", "self", ".", "_logger", ".", "info", "(", "'Processing files in \"%s\"'", ",", "path", ")", "for", "(", "path", ",", "file", ")", "in", "files_generator", "(", "path", ",", "recursive", ")", ":", "if", "not", "file", ".", "endswith", "(", "BATCH_EXTENSION", ")", ":", "self", ".", "process_file", "(", "os", ".", "path", ".", "join", "(", "path", ",", "file", ")", ")" ]
Apply normalizations over all files in the given directory. Iterate over all files in a given directory. Normalizations will be applied to each file, storing the result in a new file. The extension for the new file will be the one defined in BATCH_EXTENSION. Args: path: Path to the directory. recursive: Whether to find files recursively or not.
[ "Apply", "normalizations", "over", "all", "files", "in", "the", "given", "directory", "." ]
e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4
https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/batch.py#L99-L115
13,654
davidmogar/cucco
cucco/batch.py
Batch.stop_watching
def stop_watching(self): """Stop watching for files. Stop the observer started by watch function and finish thread life. """ self._watch = False if self._observer: self._logger.info('Stopping watcher') self._observer.stop() self._logger.info('Watcher stopped')
python
def stop_watching(self): """Stop watching for files. Stop the observer started by watch function and finish thread life. """ self._watch = False if self._observer: self._logger.info('Stopping watcher') self._observer.stop() self._logger.info('Watcher stopped')
[ "def", "stop_watching", "(", "self", ")", ":", "self", ".", "_watch", "=", "False", "if", "self", ".", "_observer", ":", "self", ".", "_logger", ".", "info", "(", "'Stopping watcher'", ")", "self", ".", "_observer", ".", "stop", "(", ")", "self", ".", "_logger", ".", "info", "(", "'Watcher stopped'", ")" ]
Stop watching for files. Stop the observer started by watch function and finish thread life.
[ "Stop", "watching", "for", "files", "." ]
e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4
https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/batch.py#L117-L128
13,655
davidmogar/cucco
cucco/batch.py
Batch.watch
def watch(self, path, recursive=False): """Watch for files in a directory and apply normalizations. Watch for new or changed files in a directory and apply normalizations over them. Args: path: Path to the directory. recursive: Whether to find files recursively or not. """ self._logger.info('Initializing watcher for path "%s"', path) handler = FileHandler(self) self._observer = Observer() self._observer.schedule(handler, path, recursive) self._logger.info('Starting watcher') self._observer.start() self._watch = True try: self._logger.info('Waiting for file events') while self._watch: time.sleep(1) except KeyboardInterrupt: # pragma: no cover self.stop_watching() self._observer.join()
python
def watch(self, path, recursive=False): """Watch for files in a directory and apply normalizations. Watch for new or changed files in a directory and apply normalizations over them. Args: path: Path to the directory. recursive: Whether to find files recursively or not. """ self._logger.info('Initializing watcher for path "%s"', path) handler = FileHandler(self) self._observer = Observer() self._observer.schedule(handler, path, recursive) self._logger.info('Starting watcher') self._observer.start() self._watch = True try: self._logger.info('Waiting for file events') while self._watch: time.sleep(1) except KeyboardInterrupt: # pragma: no cover self.stop_watching() self._observer.join()
[ "def", "watch", "(", "self", ",", "path", ",", "recursive", "=", "False", ")", ":", "self", ".", "_logger", ".", "info", "(", "'Initializing watcher for path \"%s\"'", ",", "path", ")", "handler", "=", "FileHandler", "(", "self", ")", "self", ".", "_observer", "=", "Observer", "(", ")", "self", ".", "_observer", ".", "schedule", "(", "handler", ",", "path", ",", "recursive", ")", "self", ".", "_logger", ".", "info", "(", "'Starting watcher'", ")", "self", ".", "_observer", ".", "start", "(", ")", "self", ".", "_watch", "=", "True", "try", ":", "self", ".", "_logger", ".", "info", "(", "'Waiting for file events'", ")", "while", "self", ".", "_watch", ":", "time", ".", "sleep", "(", "1", ")", "except", "KeyboardInterrupt", ":", "# pragma: no cover", "self", ".", "stop_watching", "(", ")", "self", ".", "_observer", ".", "join", "(", ")" ]
Watch for files in a directory and apply normalizations. Watch for new or changed files in a directory and apply normalizations over them. Args: path: Path to the directory. recursive: Whether to find files recursively or not.
[ "Watch", "for", "files", "in", "a", "directory", "and", "apply", "normalizations", "." ]
e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4
https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/batch.py#L130-L157
13,656
davidmogar/cucco
cucco/batch.py
FileHandler._process_event
def _process_event(self, event): """Process received events. Process events received, applying normalization for those events referencing a new or changed file and only if it's not the result of a previous normalization. Args: event: Event to process. """ if (not event.is_directory and not event.src_path.endswith(BATCH_EXTENSION)): self._logger.info('Detected file change: %s', event.src_path) self._batch.process_file(event.src_path)
python
def _process_event(self, event): """Process received events. Process events received, applying normalization for those events referencing a new or changed file and only if it's not the result of a previous normalization. Args: event: Event to process. """ if (not event.is_directory and not event.src_path.endswith(BATCH_EXTENSION)): self._logger.info('Detected file change: %s', event.src_path) self._batch.process_file(event.src_path)
[ "def", "_process_event", "(", "self", ",", "event", ")", ":", "if", "(", "not", "event", ".", "is_directory", "and", "not", "event", ".", "src_path", ".", "endswith", "(", "BATCH_EXTENSION", ")", ")", ":", "self", ".", "_logger", ".", "info", "(", "'Detected file change: %s'", ",", "event", ".", "src_path", ")", "self", ".", "_batch", ".", "process_file", "(", "event", ".", "src_path", ")" ]
Process received events. Process events received, applying normalization for those events referencing a new or changed file and only if it's not the result of a previous normalization. Args: event: Event to process.
[ "Process", "received", "events", "." ]
e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4
https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/batch.py#L175-L188
13,657
davidmogar/cucco
cucco/batch.py
FileHandler.on_created
def on_created(self, event): """Function called everytime a new file is created. Args: event: Event to process. """ self._logger.debug('Detected create event on watched path: %s', event.src_path) self._process_event(event)
python
def on_created(self, event): """Function called everytime a new file is created. Args: event: Event to process. """ self._logger.debug('Detected create event on watched path: %s', event.src_path) self._process_event(event)
[ "def", "on_created", "(", "self", ",", "event", ")", ":", "self", ".", "_logger", ".", "debug", "(", "'Detected create event on watched path: %s'", ",", "event", ".", "src_path", ")", "self", ".", "_process_event", "(", "event", ")" ]
Function called everytime a new file is created. Args: event: Event to process.
[ "Function", "called", "everytime", "a", "new", "file", "is", "created", "." ]
e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4
https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/batch.py#L190-L198
13,658
davidmogar/cucco
cucco/batch.py
FileHandler.on_modified
def on_modified(self, event): """Function called everytime a new file is modified. Args: event: Event to process. """ self._logger.debug('Detected modify event on watched path: %s', event.src_path) self._process_event(event)
python
def on_modified(self, event): """Function called everytime a new file is modified. Args: event: Event to process. """ self._logger.debug('Detected modify event on watched path: %s', event.src_path) self._process_event(event)
[ "def", "on_modified", "(", "self", ",", "event", ")", ":", "self", ".", "_logger", ".", "debug", "(", "'Detected modify event on watched path: %s'", ",", "event", ".", "src_path", ")", "self", ".", "_process_event", "(", "event", ")" ]
Function called everytime a new file is modified. Args: event: Event to process.
[ "Function", "called", "everytime", "a", "new", "file", "is", "modified", "." ]
e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4
https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/batch.py#L200-L208
13,659
davidmogar/cucco
cucco/cucco.py
Cucco._parse_normalizations
def _parse_normalizations(normalizations): """Parse and yield normalizations. Parse normalizations parameter that yield all normalizations and arguments found on it. Args: normalizations: List of normalizations. Yields: A tuple with a parsed normalization. The first item will contain the normalization name and the second will be a dict with the arguments to be used for the normalization. """ str_type = str if sys.version_info[0] > 2 else (str, unicode) for normalization in normalizations: yield (normalization, {}) if isinstance(normalization, str_type) else normalization
python
def _parse_normalizations(normalizations): """Parse and yield normalizations. Parse normalizations parameter that yield all normalizations and arguments found on it. Args: normalizations: List of normalizations. Yields: A tuple with a parsed normalization. The first item will contain the normalization name and the second will be a dict with the arguments to be used for the normalization. """ str_type = str if sys.version_info[0] > 2 else (str, unicode) for normalization in normalizations: yield (normalization, {}) if isinstance(normalization, str_type) else normalization
[ "def", "_parse_normalizations", "(", "normalizations", ")", ":", "str_type", "=", "str", "if", "sys", ".", "version_info", "[", "0", "]", ">", "2", "else", "(", "str", ",", "unicode", ")", "for", "normalization", "in", "normalizations", ":", "yield", "(", "normalization", ",", "{", "}", ")", "if", "isinstance", "(", "normalization", ",", "str_type", ")", "else", "normalization" ]
Parse and yield normalizations. Parse normalizations parameter that yield all normalizations and arguments found on it. Args: normalizations: List of normalizations. Yields: A tuple with a parsed normalization. The first item will contain the normalization name and the second will be a dict with the arguments to be used for the normalization.
[ "Parse", "and", "yield", "normalizations", "." ]
e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4
https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/cucco.py#L65-L82
13,660
davidmogar/cucco
cucco/cucco.py
Cucco._parse_stop_words_file
def _parse_stop_words_file(self, path): """Load stop words from the given path. Parse the stop words file, saving each word found in it in a set for the language of the file. This language is obtained from the file name. If the file doesn't exist, the method will have no effect. Args: path: Path to the stop words file. Returns: A boolean indicating whether the file was loaded. """ language = None loaded = False if os.path.isfile(path): self._logger.debug('Loading stop words in %s', path) language = path.split('-')[-1] if not language in self.__stop_words: self.__stop_words[language] = set() with codecs.open(path, 'r', 'UTF-8') as file: loaded = True for word in file: self.__stop_words[language].add(word.strip()) return loaded
python
def _parse_stop_words_file(self, path): """Load stop words from the given path. Parse the stop words file, saving each word found in it in a set for the language of the file. This language is obtained from the file name. If the file doesn't exist, the method will have no effect. Args: path: Path to the stop words file. Returns: A boolean indicating whether the file was loaded. """ language = None loaded = False if os.path.isfile(path): self._logger.debug('Loading stop words in %s', path) language = path.split('-')[-1] if not language in self.__stop_words: self.__stop_words[language] = set() with codecs.open(path, 'r', 'UTF-8') as file: loaded = True for word in file: self.__stop_words[language].add(word.strip()) return loaded
[ "def", "_parse_stop_words_file", "(", "self", ",", "path", ")", ":", "language", "=", "None", "loaded", "=", "False", "if", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "self", ".", "_logger", ".", "debug", "(", "'Loading stop words in %s'", ",", "path", ")", "language", "=", "path", ".", "split", "(", "'-'", ")", "[", "-", "1", "]", "if", "not", "language", "in", "self", ".", "__stop_words", ":", "self", ".", "__stop_words", "[", "language", "]", "=", "set", "(", ")", "with", "codecs", ".", "open", "(", "path", ",", "'r'", ",", "'UTF-8'", ")", "as", "file", ":", "loaded", "=", "True", "for", "word", "in", "file", ":", "self", ".", "__stop_words", "[", "language", "]", ".", "add", "(", "word", ".", "strip", "(", ")", ")", "return", "loaded" ]
Load stop words from the given path. Parse the stop words file, saving each word found in it in a set for the language of the file. This language is obtained from the file name. If the file doesn't exist, the method will have no effect. Args: path: Path to the stop words file. Returns: A boolean indicating whether the file was loaded.
[ "Load", "stop", "words", "from", "the", "given", "path", "." ]
e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4
https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/cucco.py#L84-L114
13,661
davidmogar/cucco
cucco/cucco.py
Cucco.normalize
def normalize(self, text, normalizations=None): """Normalize a given text applying all normalizations. Normalizations to apply can be specified through a list of parameters and will be executed in that order. Args: text: The text to be processed. normalizations: List of normalizations to apply. Returns: The text normalized. """ for normalization, kwargs in self._parse_normalizations( normalizations or self._config.normalizations): try: text = getattr(self, normalization)(text, **kwargs) except AttributeError as e: self._logger.debug('Invalid normalization: %s', e) return text
python
def normalize(self, text, normalizations=None): """Normalize a given text applying all normalizations. Normalizations to apply can be specified through a list of parameters and will be executed in that order. Args: text: The text to be processed. normalizations: List of normalizations to apply. Returns: The text normalized. """ for normalization, kwargs in self._parse_normalizations( normalizations or self._config.normalizations): try: text = getattr(self, normalization)(text, **kwargs) except AttributeError as e: self._logger.debug('Invalid normalization: %s', e) return text
[ "def", "normalize", "(", "self", ",", "text", ",", "normalizations", "=", "None", ")", ":", "for", "normalization", ",", "kwargs", "in", "self", ".", "_parse_normalizations", "(", "normalizations", "or", "self", ".", "_config", ".", "normalizations", ")", ":", "try", ":", "text", "=", "getattr", "(", "self", ",", "normalization", ")", "(", "text", ",", "*", "*", "kwargs", ")", "except", "AttributeError", "as", "e", ":", "self", ".", "_logger", ".", "debug", "(", "'Invalid normalization: %s'", ",", "e", ")", "return", "text" ]
Normalize a given text applying all normalizations. Normalizations to apply can be specified through a list of parameters and will be executed in that order. Args: text: The text to be processed. normalizations: List of normalizations to apply. Returns: The text normalized.
[ "Normalize", "a", "given", "text", "applying", "all", "normalizations", "." ]
e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4
https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/cucco.py#L116-L137
13,662
davidmogar/cucco
cucco/cucco.py
Cucco.remove_accent_marks
def remove_accent_marks(text, excluded=None): """Remove accent marks from input text. This function removes accent marks in the text, but leaves unicode characters defined in the 'excluded' parameter. Args: text: The text to be processed. excluded: Set of unicode characters to exclude. Returns: The text without accent marks. """ if excluded is None: excluded = set() return unicodedata.normalize( 'NFKC', ''.join( c for c in unicodedata.normalize( 'NFKD', text) if unicodedata.category(c) != 'Mn' or c in excluded))
python
def remove_accent_marks(text, excluded=None): """Remove accent marks from input text. This function removes accent marks in the text, but leaves unicode characters defined in the 'excluded' parameter. Args: text: The text to be processed. excluded: Set of unicode characters to exclude. Returns: The text without accent marks. """ if excluded is None: excluded = set() return unicodedata.normalize( 'NFKC', ''.join( c for c in unicodedata.normalize( 'NFKD', text) if unicodedata.category(c) != 'Mn' or c in excluded))
[ "def", "remove_accent_marks", "(", "text", ",", "excluded", "=", "None", ")", ":", "if", "excluded", "is", "None", ":", "excluded", "=", "set", "(", ")", "return", "unicodedata", ".", "normalize", "(", "'NFKC'", ",", "''", ".", "join", "(", "c", "for", "c", "in", "unicodedata", ".", "normalize", "(", "'NFKD'", ",", "text", ")", "if", "unicodedata", ".", "category", "(", "c", ")", "!=", "'Mn'", "or", "c", "in", "excluded", ")", ")" ]
Remove accent marks from input text. This function removes accent marks in the text, but leaves unicode characters defined in the 'excluded' parameter. Args: text: The text to be processed. excluded: Set of unicode characters to exclude. Returns: The text without accent marks.
[ "Remove", "accent", "marks", "from", "input", "text", "." ]
e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4
https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/cucco.py#L140-L159
13,663
davidmogar/cucco
cucco/cucco.py
Cucco.replace_characters
def replace_characters(self, text, characters, replacement=''): """Remove characters from text. Removes custom characters from input text or replaces them with a string if specified. Args: text: The text to be processed. characters: Characters that will be replaced. replacement: New text that will replace the custom characters. Returns: The text without the given characters. """ if not characters: return text characters = ''.join(sorted(characters)) if characters in self._characters_regexes: characters_regex = self._characters_regexes[characters] else: characters_regex = re.compile("[%s]" % re.escape(characters)) self._characters_regexes[characters] = characters_regex return characters_regex.sub(replacement, text)
python
def replace_characters(self, text, characters, replacement=''): """Remove characters from text. Removes custom characters from input text or replaces them with a string if specified. Args: text: The text to be processed. characters: Characters that will be replaced. replacement: New text that will replace the custom characters. Returns: The text without the given characters. """ if not characters: return text characters = ''.join(sorted(characters)) if characters in self._characters_regexes: characters_regex = self._characters_regexes[characters] else: characters_regex = re.compile("[%s]" % re.escape(characters)) self._characters_regexes[characters] = characters_regex return characters_regex.sub(replacement, text)
[ "def", "replace_characters", "(", "self", ",", "text", ",", "characters", ",", "replacement", "=", "''", ")", ":", "if", "not", "characters", ":", "return", "text", "characters", "=", "''", ".", "join", "(", "sorted", "(", "characters", ")", ")", "if", "characters", "in", "self", ".", "_characters_regexes", ":", "characters_regex", "=", "self", ".", "_characters_regexes", "[", "characters", "]", "else", ":", "characters_regex", "=", "re", ".", "compile", "(", "\"[%s]\"", "%", "re", ".", "escape", "(", "characters", ")", ")", "self", ".", "_characters_regexes", "[", "characters", "]", "=", "characters_regex", "return", "characters_regex", ".", "sub", "(", "replacement", ",", "text", ")" ]
Remove characters from text. Removes custom characters from input text or replaces them with a string if specified. Args: text: The text to be processed. characters: Characters that will be replaced. replacement: New text that will replace the custom characters. Returns: The text without the given characters.
[ "Remove", "characters", "from", "text", "." ]
e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4
https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/cucco.py#L202-L226
13,664
davidmogar/cucco
cucco/cucco.py
Cucco.replace_punctuation
def replace_punctuation(self, text, excluded=None, replacement=''): """Replace punctuation symbols in text. Removes punctuation from input text or replaces them with a string if specified. Characters replaced will be those in string.punctuation. Args: text: The text to be processed. excluded: Set of characters to exclude. replacement: New text that will replace punctuation. Returns: The text without punctuation. """ if excluded is None: excluded = set() elif not isinstance(excluded, set): excluded = set(excluded) punct = ''.join(self.__punctuation.difference(excluded)) return self.replace_characters( text, characters=punct, replacement=replacement)
python
def replace_punctuation(self, text, excluded=None, replacement=''): """Replace punctuation symbols in text. Removes punctuation from input text or replaces them with a string if specified. Characters replaced will be those in string.punctuation. Args: text: The text to be processed. excluded: Set of characters to exclude. replacement: New text that will replace punctuation. Returns: The text without punctuation. """ if excluded is None: excluded = set() elif not isinstance(excluded, set): excluded = set(excluded) punct = ''.join(self.__punctuation.difference(excluded)) return self.replace_characters( text, characters=punct, replacement=replacement)
[ "def", "replace_punctuation", "(", "self", ",", "text", ",", "excluded", "=", "None", ",", "replacement", "=", "''", ")", ":", "if", "excluded", "is", "None", ":", "excluded", "=", "set", "(", ")", "elif", "not", "isinstance", "(", "excluded", ",", "set", ")", ":", "excluded", "=", "set", "(", "excluded", ")", "punct", "=", "''", ".", "join", "(", "self", ".", "__punctuation", ".", "difference", "(", "excluded", ")", ")", "return", "self", ".", "replace_characters", "(", "text", ",", "characters", "=", "punct", ",", "replacement", "=", "replacement", ")" ]
Replace punctuation symbols in text. Removes punctuation from input text or replaces them with a string if specified. Characters replaced will be those in string.punctuation. Args: text: The text to be processed. excluded: Set of characters to exclude. replacement: New text that will replace punctuation. Returns: The text without punctuation.
[ "Replace", "punctuation", "symbols", "in", "text", "." ]
e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4
https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/cucco.py#L276-L298
13,665
davidmogar/cucco
cucco/cucco.py
Cucco.replace_symbols
def replace_symbols( text, form='NFKD', excluded=None, replacement=''): """Replace symbols in text. Removes symbols from input text or replaces them with a string if specified. Args: text: The text to be processed. form: Unicode form. excluded: Set of unicode characters to exclude. replacement: New text that will replace symbols. Returns: The text without symbols. """ if excluded is None: excluded = set() categories = set(['Mn', 'Sc', 'Sk', 'Sm', 'So']) return ''.join(c if unicodedata.category(c) not in categories or c in excluded else replacement for c in unicodedata.normalize(form, text))
python
def replace_symbols( text, form='NFKD', excluded=None, replacement=''): """Replace symbols in text. Removes symbols from input text or replaces them with a string if specified. Args: text: The text to be processed. form: Unicode form. excluded: Set of unicode characters to exclude. replacement: New text that will replace symbols. Returns: The text without symbols. """ if excluded is None: excluded = set() categories = set(['Mn', 'Sc', 'Sk', 'Sm', 'So']) return ''.join(c if unicodedata.category(c) not in categories or c in excluded else replacement for c in unicodedata.normalize(form, text))
[ "def", "replace_symbols", "(", "text", ",", "form", "=", "'NFKD'", ",", "excluded", "=", "None", ",", "replacement", "=", "''", ")", ":", "if", "excluded", "is", "None", ":", "excluded", "=", "set", "(", ")", "categories", "=", "set", "(", "[", "'Mn'", ",", "'Sc'", ",", "'Sk'", ",", "'Sm'", ",", "'So'", "]", ")", "return", "''", ".", "join", "(", "c", "if", "unicodedata", ".", "category", "(", "c", ")", "not", "in", "categories", "or", "c", "in", "excluded", "else", "replacement", "for", "c", "in", "unicodedata", ".", "normalize", "(", "form", ",", "text", ")", ")" ]
Replace symbols in text. Removes symbols from input text or replaces them with a string if specified. Args: text: The text to be processed. form: Unicode form. excluded: Set of unicode characters to exclude. replacement: New text that will replace symbols. Returns: The text without symbols.
[ "Replace", "symbols", "in", "text", "." ]
e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4
https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/cucco.py#L301-L326
13,666
tmr232/Sark
sark/graph.py
get_idb_graph
def get_idb_graph(): """Export IDB to a NetworkX graph. Use xrefs to and from functions to build a DiGraph containing all the functions in the IDB and all the links between them. The graph can later be used to perform analysis on the IDB. :return: nx.DiGraph() """ digraph = nx.DiGraph() for function in functions(): for xref in itertools.chain(function.xrefs_from, function.xrefs_to): frm = _try_get_function_start(xref.frm) to = _try_get_function_start(xref.to) digraph.add_edge(frm, to) return digraph
python
def get_idb_graph(): """Export IDB to a NetworkX graph. Use xrefs to and from functions to build a DiGraph containing all the functions in the IDB and all the links between them. The graph can later be used to perform analysis on the IDB. :return: nx.DiGraph() """ digraph = nx.DiGraph() for function in functions(): for xref in itertools.chain(function.xrefs_from, function.xrefs_to): frm = _try_get_function_start(xref.frm) to = _try_get_function_start(xref.to) digraph.add_edge(frm, to) return digraph
[ "def", "get_idb_graph", "(", ")", ":", "digraph", "=", "nx", ".", "DiGraph", "(", ")", "for", "function", "in", "functions", "(", ")", ":", "for", "xref", "in", "itertools", ".", "chain", "(", "function", ".", "xrefs_from", ",", "function", ".", "xrefs_to", ")", ":", "frm", "=", "_try_get_function_start", "(", "xref", ".", "frm", ")", "to", "=", "_try_get_function_start", "(", "xref", ".", "to", ")", "digraph", ".", "add_edge", "(", "frm", ",", "to", ")", "return", "digraph" ]
Export IDB to a NetworkX graph. Use xrefs to and from functions to build a DiGraph containing all the functions in the IDB and all the links between them. The graph can later be used to perform analysis on the IDB. :return: nx.DiGraph()
[ "Export", "IDB", "to", "a", "NetworkX", "graph", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/graph.py#L48-L66
13,667
tmr232/Sark
sark/code/instruction.py
OperandType.name
def name(self): """Name of the xref type.""" return self.TYPES.get(self._type, self.TYPES[idaapi.o_idpspec0])
python
def name(self): """Name of the xref type.""" return self.TYPES.get(self._type, self.TYPES[idaapi.o_idpspec0])
[ "def", "name", "(", "self", ")", ":", "return", "self", ".", "TYPES", ".", "get", "(", "self", ".", "_type", ",", "self", ".", "TYPES", "[", "idaapi", ".", "o_idpspec0", "]", ")" ]
Name of the xref type.
[ "Name", "of", "the", "xref", "type", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/code/instruction.py#L138-L140
13,668
tmr232/Sark
sark/code/instruction.py
Operand.reg
def reg(self): """Name of the register used in the operand.""" if self.type.is_displ or self.type.is_phrase: size = core.get_native_size() return base.get_register_name(self.reg_id, size) if self.type.is_reg: return base.get_register_name(self.reg_id, self.size) else: raise exceptions.SarkOperandWithoutReg("Operand does not have a register.")
python
def reg(self): """Name of the register used in the operand.""" if self.type.is_displ or self.type.is_phrase: size = core.get_native_size() return base.get_register_name(self.reg_id, size) if self.type.is_reg: return base.get_register_name(self.reg_id, self.size) else: raise exceptions.SarkOperandWithoutReg("Operand does not have a register.")
[ "def", "reg", "(", "self", ")", ":", "if", "self", ".", "type", ".", "is_displ", "or", "self", ".", "type", ".", "is_phrase", ":", "size", "=", "core", ".", "get_native_size", "(", ")", "return", "base", ".", "get_register_name", "(", "self", ".", "reg_id", ",", "size", ")", "if", "self", ".", "type", ".", "is_reg", ":", "return", "base", ".", "get_register_name", "(", "self", ".", "reg_id", ",", "self", ".", "size", ")", "else", ":", "raise", "exceptions", ".", "SarkOperandWithoutReg", "(", "\"Operand does not have a register.\"", ")" ]
Name of the register used in the operand.
[ "Name", "of", "the", "register", "used", "in", "the", "operand", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/code/instruction.py#L270-L280
13,669
tmr232/Sark
sark/code/instruction.py
Instruction.has_reg
def has_reg(self, reg_name): """Check if a register is used in the instruction.""" return any(operand.has_reg(reg_name) for operand in self.operands)
python
def has_reg(self, reg_name): """Check if a register is used in the instruction.""" return any(operand.has_reg(reg_name) for operand in self.operands)
[ "def", "has_reg", "(", "self", ",", "reg_name", ")", ":", "return", "any", "(", "operand", ".", "has_reg", "(", "reg_name", ")", "for", "operand", "in", "self", ".", "operands", ")" ]
Check if a register is used in the instruction.
[ "Check", "if", "a", "register", "is", "used", "in", "the", "instruction", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/code/instruction.py#L384-L386
13,670
tmr232/Sark
sark/code/instruction.py
Instruction.regs
def regs(self): """Names of all registers used by the instruction.""" regs = set() for operand in self.operands: if not operand.type.has_reg: continue regs.update(operand.regs) return regs
python
def regs(self): """Names of all registers used by the instruction.""" regs = set() for operand in self.operands: if not operand.type.has_reg: continue regs.update(operand.regs) return regs
[ "def", "regs", "(", "self", ")", ":", "regs", "=", "set", "(", ")", "for", "operand", "in", "self", ".", "operands", ":", "if", "not", "operand", ".", "type", ".", "has_reg", ":", "continue", "regs", ".", "update", "(", "operand", ".", "regs", ")", "return", "regs" ]
Names of all registers used by the instruction.
[ "Names", "of", "all", "registers", "used", "by", "the", "instruction", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/code/instruction.py#L397-L404
13,671
tmr232/Sark
sark/ui.py
NXGraph._pad
def _pad(self, text): """Pad the text.""" top_bottom = ("\n" * self._padding) + " " right_left = " " * self._padding * self.PAD_WIDTH return top_bottom + right_left + text + right_left + top_bottom
python
def _pad(self, text): """Pad the text.""" top_bottom = ("\n" * self._padding) + " " right_left = " " * self._padding * self.PAD_WIDTH return top_bottom + right_left + text + right_left + top_bottom
[ "def", "_pad", "(", "self", ",", "text", ")", ":", "top_bottom", "=", "(", "\"\\n\"", "*", "self", ".", "_padding", ")", "+", "\" \"", "right_left", "=", "\" \"", "*", "self", ".", "_padding", "*", "self", ".", "PAD_WIDTH", "return", "top_bottom", "+", "right_left", "+", "text", "+", "right_left", "+", "top_bottom" ]
Pad the text.
[ "Pad", "the", "text", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/ui.py#L234-L238
13,672
tmr232/Sark
sark/ui.py
NXGraph._make_unique_title
def _make_unique_title(self, title): """Make the title unique. Adds a counter to the title to prevent duplicates. Prior to IDA 6.8, two graphs with the same title could crash IDA. This has been fixed (https://www.hex-rays.com/products/ida/6.8/index.shtml). The code will not change for support of older versions and as it is more usable this way. """ unique_title = title for counter in itertools.count(): unique_title = "{}-{}".format(title, counter) if not idaapi.find_tform(unique_title): break return unique_title
python
def _make_unique_title(self, title): """Make the title unique. Adds a counter to the title to prevent duplicates. Prior to IDA 6.8, two graphs with the same title could crash IDA. This has been fixed (https://www.hex-rays.com/products/ida/6.8/index.shtml). The code will not change for support of older versions and as it is more usable this way. """ unique_title = title for counter in itertools.count(): unique_title = "{}-{}".format(title, counter) if not idaapi.find_tform(unique_title): break return unique_title
[ "def", "_make_unique_title", "(", "self", ",", "title", ")", ":", "unique_title", "=", "title", "for", "counter", "in", "itertools", ".", "count", "(", ")", ":", "unique_title", "=", "\"{}-{}\"", ".", "format", "(", "title", ",", "counter", ")", "if", "not", "idaapi", ".", "find_tform", "(", "unique_title", ")", ":", "break", "return", "unique_title" ]
Make the title unique. Adds a counter to the title to prevent duplicates. Prior to IDA 6.8, two graphs with the same title could crash IDA. This has been fixed (https://www.hex-rays.com/products/ida/6.8/index.shtml). The code will not change for support of older versions and as it is more usable this way.
[ "Make", "the", "title", "unique", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/ui.py#L240-L257
13,673
tmr232/Sark
sark/ui.py
NXGraph._get_handler
def _get_handler(self, node_id): """Get the handler of a given node.""" handler = self._get_attrs(node_id).get(self.HANDLER, self._default_handler) # Here we make sure the handler is an instance of `BasicNodeHandler` or inherited # types. While generally being bad Python practice, we still need it here as an # invalid handler can cause IDA to crash. if not isinstance(handler, BasicNodeHandler): idaapi.msg(("Invalid handler for node {}: {}. All handlers must inherit from" "`BasicNodeHandler`.").format(node_id, handler)) handler = self._default_handler return handler
python
def _get_handler(self, node_id): """Get the handler of a given node.""" handler = self._get_attrs(node_id).get(self.HANDLER, self._default_handler) # Here we make sure the handler is an instance of `BasicNodeHandler` or inherited # types. While generally being bad Python practice, we still need it here as an # invalid handler can cause IDA to crash. if not isinstance(handler, BasicNodeHandler): idaapi.msg(("Invalid handler for node {}: {}. All handlers must inherit from" "`BasicNodeHandler`.").format(node_id, handler)) handler = self._default_handler return handler
[ "def", "_get_handler", "(", "self", ",", "node_id", ")", ":", "handler", "=", "self", ".", "_get_attrs", "(", "node_id", ")", ".", "get", "(", "self", ".", "HANDLER", ",", "self", ".", "_default_handler", ")", "# Here we make sure the handler is an instance of `BasicNodeHandler` or inherited", "# types. While generally being bad Python practice, we still need it here as an", "# invalid handler can cause IDA to crash.", "if", "not", "isinstance", "(", "handler", ",", "BasicNodeHandler", ")", ":", "idaapi", ".", "msg", "(", "(", "\"Invalid handler for node {}: {}. All handlers must inherit from\"", "\"`BasicNodeHandler`.\"", ")", ".", "format", "(", "node_id", ",", "handler", ")", ")", "handler", "=", "self", ".", "_default_handler", "return", "handler" ]
Get the handler of a given node.
[ "Get", "the", "handler", "of", "a", "given", "node", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/ui.py#L259-L270
13,674
tmr232/Sark
sark/ui.py
NXGraph._OnNodeInfo
def _OnNodeInfo(self, node_id): """Sets the node info based on its attributes.""" handler, value, attrs = self._get_handling_triplet(node_id) frame_color = handler.on_frame_color(value, attrs) node_info = idaapi.node_info_t() if frame_color is not None: node_info.frame_color = frame_color flags = node_info.get_flags_for_valid() self.SetNodeInfo(node_id, node_info, flags)
python
def _OnNodeInfo(self, node_id): """Sets the node info based on its attributes.""" handler, value, attrs = self._get_handling_triplet(node_id) frame_color = handler.on_frame_color(value, attrs) node_info = idaapi.node_info_t() if frame_color is not None: node_info.frame_color = frame_color flags = node_info.get_flags_for_valid() self.SetNodeInfo(node_id, node_info, flags)
[ "def", "_OnNodeInfo", "(", "self", ",", "node_id", ")", ":", "handler", ",", "value", ",", "attrs", "=", "self", ".", "_get_handling_triplet", "(", "node_id", ")", "frame_color", "=", "handler", ".", "on_frame_color", "(", "value", ",", "attrs", ")", "node_info", "=", "idaapi", ".", "node_info_t", "(", ")", "if", "frame_color", "is", "not", "None", ":", "node_info", ".", "frame_color", "=", "frame_color", "flags", "=", "node_info", ".", "get_flags_for_valid", "(", ")", "self", ".", "SetNodeInfo", "(", "node_id", ",", "node_info", ",", "flags", ")" ]
Sets the node info based on its attributes.
[ "Sets", "the", "node", "info", "based", "on", "its", "attributes", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/ui.py#L284-L296
13,675
tmr232/Sark
sark/data.py
get_string
def get_string(ea): """Read the string at the given ea. This function uses IDA's string APIs and does not implement any special logic. """ # We get the item-head because the `GetStringType` function only works on the head of an item. string_type = idc.GetStringType(idaapi.get_item_head(ea)) if string_type is None: raise exceptions.SarkNoString("No string at 0x{:08X}".format(ea)) string = idc.GetString(ea, strtype=string_type) if not string: raise exceptions.SarkNoString("No string at 0x{:08X}".format(ea)) return string
python
def get_string(ea): """Read the string at the given ea. This function uses IDA's string APIs and does not implement any special logic. """ # We get the item-head because the `GetStringType` function only works on the head of an item. string_type = idc.GetStringType(idaapi.get_item_head(ea)) if string_type is None: raise exceptions.SarkNoString("No string at 0x{:08X}".format(ea)) string = idc.GetString(ea, strtype=string_type) if not string: raise exceptions.SarkNoString("No string at 0x{:08X}".format(ea)) return string
[ "def", "get_string", "(", "ea", ")", ":", "# We get the item-head because the `GetStringType` function only works on the head of an item.", "string_type", "=", "idc", ".", "GetStringType", "(", "idaapi", ".", "get_item_head", "(", "ea", ")", ")", "if", "string_type", "is", "None", ":", "raise", "exceptions", ".", "SarkNoString", "(", "\"No string at 0x{:08X}\"", ".", "format", "(", "ea", ")", ")", "string", "=", "idc", ".", "GetString", "(", "ea", ",", "strtype", "=", "string_type", ")", "if", "not", "string", ":", "raise", "exceptions", ".", "SarkNoString", "(", "\"No string at 0x{:08X}\"", ".", "format", "(", "ea", ")", ")", "return", "string" ]
Read the string at the given ea. This function uses IDA's string APIs and does not implement any special logic.
[ "Read", "the", "string", "at", "the", "given", "ea", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/data.py#L147-L163
13,676
tmr232/Sark
plugins/quick_copy.py
copy_current_file_offset
def copy_current_file_offset(): """Get the file-offset mapped to the current address.""" start, end = sark.get_selection() try: file_offset = sark.core.get_fileregion_offset(start) clipboard.copy("0x{:08X}".format(file_offset)) except sark.exceptions.NoFileOffset: message("The current address cannot be mapped to a valid offset of the input file.")
python
def copy_current_file_offset(): """Get the file-offset mapped to the current address.""" start, end = sark.get_selection() try: file_offset = sark.core.get_fileregion_offset(start) clipboard.copy("0x{:08X}".format(file_offset)) except sark.exceptions.NoFileOffset: message("The current address cannot be mapped to a valid offset of the input file.")
[ "def", "copy_current_file_offset", "(", ")", ":", "start", ",", "end", "=", "sark", ".", "get_selection", "(", ")", "try", ":", "file_offset", "=", "sark", ".", "core", ".", "get_fileregion_offset", "(", "start", ")", "clipboard", ".", "copy", "(", "\"0x{:08X}\"", ".", "format", "(", "file_offset", ")", ")", "except", "sark", ".", "exceptions", ".", "NoFileOffset", ":", "message", "(", "\"The current address cannot be mapped to a valid offset of the input file.\"", ")" ]
Get the file-offset mapped to the current address.
[ "Get", "the", "file", "-", "offset", "mapped", "to", "the", "current", "address", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/plugins/quick_copy.py#L17-L26
13,677
tmr232/Sark
sark/core.py
fix_addresses
def fix_addresses(start=None, end=None): """Set missing addresses to start and end of IDB. Take a start and end addresses. If an address is None or `BADADDR`, return start or end addresses of the IDB instead. Args start: Start EA. Use `None` to get IDB start. end: End EA. Use `None` to get IDB end. Returns: (start, end) """ if start in (None, idaapi.BADADDR): start = idaapi.cvar.inf.minEA if end in (None, idaapi.BADADDR): end = idaapi.cvar.inf.maxEA return start, end
python
def fix_addresses(start=None, end=None): """Set missing addresses to start and end of IDB. Take a start and end addresses. If an address is None or `BADADDR`, return start or end addresses of the IDB instead. Args start: Start EA. Use `None` to get IDB start. end: End EA. Use `None` to get IDB end. Returns: (start, end) """ if start in (None, idaapi.BADADDR): start = idaapi.cvar.inf.minEA if end in (None, idaapi.BADADDR): end = idaapi.cvar.inf.maxEA return start, end
[ "def", "fix_addresses", "(", "start", "=", "None", ",", "end", "=", "None", ")", ":", "if", "start", "in", "(", "None", ",", "idaapi", ".", "BADADDR", ")", ":", "start", "=", "idaapi", ".", "cvar", ".", "inf", ".", "minEA", "if", "end", "in", "(", "None", ",", "idaapi", ".", "BADADDR", ")", ":", "end", "=", "idaapi", ".", "cvar", ".", "inf", ".", "maxEA", "return", "start", ",", "end" ]
Set missing addresses to start and end of IDB. Take a start and end addresses. If an address is None or `BADADDR`, return start or end addresses of the IDB instead. Args start: Start EA. Use `None` to get IDB start. end: End EA. Use `None` to get IDB end. Returns: (start, end)
[ "Set", "missing", "addresses", "to", "start", "and", "end", "of", "IDB", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/core.py#L81-L100
13,678
tmr232/Sark
sark/core.py
set_name
def set_name(address, name, anyway=False): """Set the name of an address. Sets the name of an address in IDA. If the name already exists, check the `anyway` parameter: True - Add `_COUNTER` to the name (default IDA behaviour) False - Raise an `exceptions.SarkErrorNameAlreadyExists` exception. Args address: The address to rename. name: The desired name. anyway: Set anyway or not. Defualt ``False``. """ success = idaapi.set_name(address, name, idaapi.SN_NOWARN | idaapi.SN_NOCHECK) if success: return if anyway: success = idaapi.do_name_anyway(address, name) if success: return raise exceptions.SarkSetNameFailed("Failed renaming 0x{:08X} to {!r}.".format(address, name)) raise exceptions.SarkErrorNameAlreadyExists( "Can't rename 0x{:08X}. Name {!r} already exists.".format(address, name))
python
def set_name(address, name, anyway=False): """Set the name of an address. Sets the name of an address in IDA. If the name already exists, check the `anyway` parameter: True - Add `_COUNTER` to the name (default IDA behaviour) False - Raise an `exceptions.SarkErrorNameAlreadyExists` exception. Args address: The address to rename. name: The desired name. anyway: Set anyway or not. Defualt ``False``. """ success = idaapi.set_name(address, name, idaapi.SN_NOWARN | idaapi.SN_NOCHECK) if success: return if anyway: success = idaapi.do_name_anyway(address, name) if success: return raise exceptions.SarkSetNameFailed("Failed renaming 0x{:08X} to {!r}.".format(address, name)) raise exceptions.SarkErrorNameAlreadyExists( "Can't rename 0x{:08X}. Name {!r} already exists.".format(address, name))
[ "def", "set_name", "(", "address", ",", "name", ",", "anyway", "=", "False", ")", ":", "success", "=", "idaapi", ".", "set_name", "(", "address", ",", "name", ",", "idaapi", ".", "SN_NOWARN", "|", "idaapi", ".", "SN_NOCHECK", ")", "if", "success", ":", "return", "if", "anyway", ":", "success", "=", "idaapi", ".", "do_name_anyway", "(", "address", ",", "name", ")", "if", "success", ":", "return", "raise", "exceptions", ".", "SarkSetNameFailed", "(", "\"Failed renaming 0x{:08X} to {!r}.\"", ".", "format", "(", "address", ",", "name", ")", ")", "raise", "exceptions", ".", "SarkErrorNameAlreadyExists", "(", "\"Can't rename 0x{:08X}. Name {!r} already exists.\"", ".", "format", "(", "address", ",", "name", ")", ")" ]
Set the name of an address. Sets the name of an address in IDA. If the name already exists, check the `anyway` parameter: True - Add `_COUNTER` to the name (default IDA behaviour) False - Raise an `exceptions.SarkErrorNameAlreadyExists` exception. Args address: The address to rename. name: The desired name. anyway: Set anyway or not. Defualt ``False``.
[ "Set", "the", "name", "of", "an", "address", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/core.py#L103-L130
13,679
tmr232/Sark
sark/core.py
is_same_function
def is_same_function(ea1, ea2): """Are both addresses in the same function?""" func1 = idaapi.get_func(ea1) func2 = idaapi.get_func(ea2) # This is bloated code. `None in (func1, func2)` will not work because of a # bug in IDAPython in the way functions are compared. if any(func is None for func in (func1, func2)): return False return func1.startEA == func2.startEA
python
def is_same_function(ea1, ea2): """Are both addresses in the same function?""" func1 = idaapi.get_func(ea1) func2 = idaapi.get_func(ea2) # This is bloated code. `None in (func1, func2)` will not work because of a # bug in IDAPython in the way functions are compared. if any(func is None for func in (func1, func2)): return False return func1.startEA == func2.startEA
[ "def", "is_same_function", "(", "ea1", ",", "ea2", ")", ":", "func1", "=", "idaapi", ".", "get_func", "(", "ea1", ")", "func2", "=", "idaapi", ".", "get_func", "(", "ea2", ")", "# This is bloated code. `None in (func1, func2)` will not work because of a", "# bug in IDAPython in the way functions are compared.", "if", "any", "(", "func", "is", "None", "for", "func", "in", "(", "func1", ",", "func2", ")", ")", ":", "return", "False", "return", "func1", ".", "startEA", "==", "func2", ".", "startEA" ]
Are both addresses in the same function?
[ "Are", "both", "addresses", "in", "the", "same", "function?" ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/core.py#L133-L142
13,680
tmr232/Sark
sark/codeblock.py
get_nx_graph
def get_nx_graph(ea): """Convert an IDA flowchart to a NetworkX graph.""" nx_graph = networkx.DiGraph() func = idaapi.get_func(ea) flowchart = FlowChart(func) for block in flowchart: # Make sure all nodes are added (including edge-less nodes) nx_graph.add_node(block.startEA) for pred in block.preds(): nx_graph.add_edge(pred.startEA, block.startEA) for succ in block.succs(): nx_graph.add_edge(block.startEA, succ.startEA) return nx_graph
python
def get_nx_graph(ea): """Convert an IDA flowchart to a NetworkX graph.""" nx_graph = networkx.DiGraph() func = idaapi.get_func(ea) flowchart = FlowChart(func) for block in flowchart: # Make sure all nodes are added (including edge-less nodes) nx_graph.add_node(block.startEA) for pred in block.preds(): nx_graph.add_edge(pred.startEA, block.startEA) for succ in block.succs(): nx_graph.add_edge(block.startEA, succ.startEA) return nx_graph
[ "def", "get_nx_graph", "(", "ea", ")", ":", "nx_graph", "=", "networkx", ".", "DiGraph", "(", ")", "func", "=", "idaapi", ".", "get_func", "(", "ea", ")", "flowchart", "=", "FlowChart", "(", "func", ")", "for", "block", "in", "flowchart", ":", "# Make sure all nodes are added (including edge-less nodes)", "nx_graph", ".", "add_node", "(", "block", ".", "startEA", ")", "for", "pred", "in", "block", ".", "preds", "(", ")", ":", "nx_graph", ".", "add_edge", "(", "pred", ".", "startEA", ",", "block", ".", "startEA", ")", "for", "succ", "in", "block", ".", "succs", "(", ")", ":", "nx_graph", ".", "add_edge", "(", "block", ".", "startEA", ",", "succ", ".", "startEA", ")", "return", "nx_graph" ]
Convert an IDA flowchart to a NetworkX graph.
[ "Convert", "an", "IDA", "flowchart", "to", "a", "NetworkX", "graph", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/codeblock.py#L100-L114
13,681
tmr232/Sark
sark/codeblock.py
codeblocks
def codeblocks(start=None, end=None, full=True): """Get all `CodeBlock`s in a given range. Args: start - start address of the range. If `None` uses IDB start. end - end address of the range. If `None` uses IDB end. full - `True` is required to change node info (e.g. color). `False` causes faster iteration. """ if full: for function in functions(start, end): fc = FlowChart(f=function.func_t) for block in fc: yield block else: start, end = fix_addresses(start, end) for code_block in FlowChart(bounds=(start, end)): yield code_block
python
def codeblocks(start=None, end=None, full=True): """Get all `CodeBlock`s in a given range. Args: start - start address of the range. If `None` uses IDB start. end - end address of the range. If `None` uses IDB end. full - `True` is required to change node info (e.g. color). `False` causes faster iteration. """ if full: for function in functions(start, end): fc = FlowChart(f=function.func_t) for block in fc: yield block else: start, end = fix_addresses(start, end) for code_block in FlowChart(bounds=(start, end)): yield code_block
[ "def", "codeblocks", "(", "start", "=", "None", ",", "end", "=", "None", ",", "full", "=", "True", ")", ":", "if", "full", ":", "for", "function", "in", "functions", "(", "start", ",", "end", ")", ":", "fc", "=", "FlowChart", "(", "f", "=", "function", ".", "func_t", ")", "for", "block", "in", "fc", ":", "yield", "block", "else", ":", "start", ",", "end", "=", "fix_addresses", "(", "start", ",", "end", ")", "for", "code_block", "in", "FlowChart", "(", "bounds", "=", "(", "start", ",", "end", ")", ")", ":", "yield", "code_block" ]
Get all `CodeBlock`s in a given range. Args: start - start address of the range. If `None` uses IDB start. end - end address of the range. If `None` uses IDB end. full - `True` is required to change node info (e.g. color). `False` causes faster iteration.
[ "Get", "all", "CodeBlock", "s", "in", "a", "given", "range", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/codeblock.py#L117-L135
13,682
tmr232/Sark
sark/structure.py
struct_member_error
def struct_member_error(err, sid, name, offset, size): """Create and format a struct member exception. Args: err: The error value returned from struct member creation sid: The struct id name: The member name offset: Memeber offset size: Member size Returns: A ``SarkErrorAddStructMemeberFailed`` derivative exception, with an informative message. """ exception, msg = STRUCT_ERROR_MAP[err] struct_name = idc.GetStrucName(sid) return exception(('AddStructMember(struct="{}", member="{}", offset={}, size={}) ' 'failed: {}').format( struct_name, name, offset, size, msg ))
python
def struct_member_error(err, sid, name, offset, size): """Create and format a struct member exception. Args: err: The error value returned from struct member creation sid: The struct id name: The member name offset: Memeber offset size: Member size Returns: A ``SarkErrorAddStructMemeberFailed`` derivative exception, with an informative message. """ exception, msg = STRUCT_ERROR_MAP[err] struct_name = idc.GetStrucName(sid) return exception(('AddStructMember(struct="{}", member="{}", offset={}, size={}) ' 'failed: {}').format( struct_name, name, offset, size, msg ))
[ "def", "struct_member_error", "(", "err", ",", "sid", ",", "name", ",", "offset", ",", "size", ")", ":", "exception", ",", "msg", "=", "STRUCT_ERROR_MAP", "[", "err", "]", "struct_name", "=", "idc", ".", "GetStrucName", "(", "sid", ")", "return", "exception", "(", "(", "'AddStructMember(struct=\"{}\", member=\"{}\", offset={}, size={}) '", "'failed: {}'", ")", ".", "format", "(", "struct_name", ",", "name", ",", "offset", ",", "size", ",", "msg", ")", ")" ]
Create and format a struct member exception. Args: err: The error value returned from struct member creation sid: The struct id name: The member name offset: Memeber offset size: Member size Returns: A ``SarkErrorAddStructMemeberFailed`` derivative exception, with an informative message.
[ "Create", "and", "format", "a", "struct", "member", "exception", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/structure.py#L33-L56
13,683
tmr232/Sark
sark/structure.py
create_struct
def create_struct(name): """Create a structure. Args: name: The structure's name Returns: The sturct ID Raises: exceptions.SarkStructAlreadyExists: A struct with the same name already exists exceptions.SarkCreationFailed: Struct creation failed """ sid = idc.GetStrucIdByName(name) if sid != idaapi.BADADDR: # The struct already exists. raise exceptions.SarkStructAlreadyExists("A struct names {!r} already exists.".format(name)) sid = idc.AddStrucEx(-1, name, 0) if sid == idaapi.BADADDR: raise exceptions.SarkStructCreationFailed("Struct creation failed.") return sid
python
def create_struct(name): """Create a structure. Args: name: The structure's name Returns: The sturct ID Raises: exceptions.SarkStructAlreadyExists: A struct with the same name already exists exceptions.SarkCreationFailed: Struct creation failed """ sid = idc.GetStrucIdByName(name) if sid != idaapi.BADADDR: # The struct already exists. raise exceptions.SarkStructAlreadyExists("A struct names {!r} already exists.".format(name)) sid = idc.AddStrucEx(-1, name, 0) if sid == idaapi.BADADDR: raise exceptions.SarkStructCreationFailed("Struct creation failed.") return sid
[ "def", "create_struct", "(", "name", ")", ":", "sid", "=", "idc", ".", "GetStrucIdByName", "(", "name", ")", "if", "sid", "!=", "idaapi", ".", "BADADDR", ":", "# The struct already exists.", "raise", "exceptions", ".", "SarkStructAlreadyExists", "(", "\"A struct names {!r} already exists.\"", ".", "format", "(", "name", ")", ")", "sid", "=", "idc", ".", "AddStrucEx", "(", "-", "1", ",", "name", ",", "0", ")", "if", "sid", "==", "idaapi", ".", "BADADDR", ":", "raise", "exceptions", ".", "SarkStructCreationFailed", "(", "\"Struct creation failed.\"", ")", "return", "sid" ]
Create a structure. Args: name: The structure's name Returns: The sturct ID Raises: exceptions.SarkStructAlreadyExists: A struct with the same name already exists exceptions.SarkCreationFailed: Struct creation failed
[ "Create", "a", "structure", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/structure.py#L59-L81
13,684
tmr232/Sark
sark/structure.py
get_struct
def get_struct(name): """Get a struct by it's name. Args: name: The name of the struct Returns: The struct's id Raises: exceptions.SarkStructNotFound: is the struct does not exist. """ sid = idc.GetStrucIdByName(name) if sid == idaapi.BADADDR: raise exceptions.SarkStructNotFound() return sid
python
def get_struct(name): """Get a struct by it's name. Args: name: The name of the struct Returns: The struct's id Raises: exceptions.SarkStructNotFound: is the struct does not exist. """ sid = idc.GetStrucIdByName(name) if sid == idaapi.BADADDR: raise exceptions.SarkStructNotFound() return sid
[ "def", "get_struct", "(", "name", ")", ":", "sid", "=", "idc", ".", "GetStrucIdByName", "(", "name", ")", "if", "sid", "==", "idaapi", ".", "BADADDR", ":", "raise", "exceptions", ".", "SarkStructNotFound", "(", ")", "return", "sid" ]
Get a struct by it's name. Args: name: The name of the struct Returns: The struct's id Raises: exceptions.SarkStructNotFound: is the struct does not exist.
[ "Get", "a", "struct", "by", "it", "s", "name", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/structure.py#L84-L100
13,685
tmr232/Sark
sark/structure.py
get_common_register
def get_common_register(start, end): """Get the register most commonly used in accessing structs. Access to is considered for every opcode that accesses memory in an offset from a register:: mov eax, [ebx + 5] For every access, the struct-referencing registers, in this case `ebx`, are counted. The most used one is returned. Args: start: The adderss to start at end: The address to finish at """ registers = defaultdict(int) for line in lines(start, end): insn = line.insn for operand in insn.operands: if not operand.type.has_phrase: continue if not operand.base: continue register_name = operand.base registers[register_name] += 1 return max(registers.iteritems(), key=operator.itemgetter(1))[0]
python
def get_common_register(start, end): """Get the register most commonly used in accessing structs. Access to is considered for every opcode that accesses memory in an offset from a register:: mov eax, [ebx + 5] For every access, the struct-referencing registers, in this case `ebx`, are counted. The most used one is returned. Args: start: The adderss to start at end: The address to finish at """ registers = defaultdict(int) for line in lines(start, end): insn = line.insn for operand in insn.operands: if not operand.type.has_phrase: continue if not operand.base: continue register_name = operand.base registers[register_name] += 1 return max(registers.iteritems(), key=operator.itemgetter(1))[0]
[ "def", "get_common_register", "(", "start", ",", "end", ")", ":", "registers", "=", "defaultdict", "(", "int", ")", "for", "line", "in", "lines", "(", "start", ",", "end", ")", ":", "insn", "=", "line", ".", "insn", "for", "operand", "in", "insn", ".", "operands", ":", "if", "not", "operand", ".", "type", ".", "has_phrase", ":", "continue", "if", "not", "operand", ".", "base", ":", "continue", "register_name", "=", "operand", ".", "base", "registers", "[", "register_name", "]", "+=", "1", "return", "max", "(", "registers", ".", "iteritems", "(", ")", ",", "key", "=", "operator", ".", "itemgetter", "(", "1", ")", ")", "[", "0", "]" ]
Get the register most commonly used in accessing structs. Access to is considered for every opcode that accesses memory in an offset from a register:: mov eax, [ebx + 5] For every access, the struct-referencing registers, in this case `ebx`, are counted. The most used one is returned. Args: start: The adderss to start at end: The address to finish at
[ "Get", "the", "register", "most", "commonly", "used", "in", "accessing", "structs", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/structure.py#L143-L173
13,686
tmr232/Sark
sark/enum.py
_enum_member_error
def _enum_member_error(err, eid, name, value, bitmask): """Format enum member error.""" exception, msg = ENUM_ERROR_MAP[err] enum_name = idaapi.get_enum_name(eid) return exception(('add_enum_member(enum="{}", member="{}", value={}, bitmask=0x{:08X}) ' 'failed: {}').format( enum_name, name, value, bitmask, msg ))
python
def _enum_member_error(err, eid, name, value, bitmask): """Format enum member error.""" exception, msg = ENUM_ERROR_MAP[err] enum_name = idaapi.get_enum_name(eid) return exception(('add_enum_member(enum="{}", member="{}", value={}, bitmask=0x{:08X}) ' 'failed: {}').format( enum_name, name, value, bitmask, msg ))
[ "def", "_enum_member_error", "(", "err", ",", "eid", ",", "name", ",", "value", ",", "bitmask", ")", ":", "exception", ",", "msg", "=", "ENUM_ERROR_MAP", "[", "err", "]", "enum_name", "=", "idaapi", ".", "get_enum_name", "(", "eid", ")", "return", "exception", "(", "(", "'add_enum_member(enum=\"{}\", member=\"{}\", value={}, bitmask=0x{:08X}) '", "'failed: {}'", ")", ".", "format", "(", "enum_name", ",", "name", ",", "value", ",", "bitmask", ",", "msg", ")", ")" ]
Format enum member error.
[ "Format", "enum", "member", "error", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/enum.py#L21-L32
13,687
tmr232/Sark
sark/enum.py
_get_enum
def _get_enum(name): """Get an existing enum ID""" eid = idaapi.get_enum(name) if eid == idaapi.BADADDR: raise exceptions.EnumNotFound('Enum "{}" does not exist.'.format(name)) return eid
python
def _get_enum(name): """Get an existing enum ID""" eid = idaapi.get_enum(name) if eid == idaapi.BADADDR: raise exceptions.EnumNotFound('Enum "{}" does not exist.'.format(name)) return eid
[ "def", "_get_enum", "(", "name", ")", ":", "eid", "=", "idaapi", ".", "get_enum", "(", "name", ")", "if", "eid", "==", "idaapi", ".", "BADADDR", ":", "raise", "exceptions", ".", "EnumNotFound", "(", "'Enum \"{}\" does not exist.'", ".", "format", "(", "name", ")", ")", "return", "eid" ]
Get an existing enum ID
[ "Get", "an", "existing", "enum", "ID" ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/enum.py#L35-L40
13,688
tmr232/Sark
sark/enum.py
add_enum
def add_enum(name=None, index=None, flags=idaapi.hexflag(), bitfield=False): """Create a new enum. Args: name: Name of the enum to create. index: The index of the enum. Leave at default to append the enum as the last enum. flags: Enum type flags. bitfield: Is the enum a bitfield. Returns: An `Enum` object. """ if name is not None: with ignored(exceptions.EnumNotFound): _get_enum(name) raise exceptions.EnumAlreadyExists() if index is None or index < 0: index = idaapi.get_enum_qty() eid = idaapi.add_enum(index, name, flags) if eid == idaapi.BADADDR: raise exceptions.EnumCreationFailed('Failed creating enum "{}"'.format(name)) if bitfield: idaapi.set_enum_bf(eid, bitfield) return Enum(eid=eid)
python
def add_enum(name=None, index=None, flags=idaapi.hexflag(), bitfield=False): """Create a new enum. Args: name: Name of the enum to create. index: The index of the enum. Leave at default to append the enum as the last enum. flags: Enum type flags. bitfield: Is the enum a bitfield. Returns: An `Enum` object. """ if name is not None: with ignored(exceptions.EnumNotFound): _get_enum(name) raise exceptions.EnumAlreadyExists() if index is None or index < 0: index = idaapi.get_enum_qty() eid = idaapi.add_enum(index, name, flags) if eid == idaapi.BADADDR: raise exceptions.EnumCreationFailed('Failed creating enum "{}"'.format(name)) if bitfield: idaapi.set_enum_bf(eid, bitfield) return Enum(eid=eid)
[ "def", "add_enum", "(", "name", "=", "None", ",", "index", "=", "None", ",", "flags", "=", "idaapi", ".", "hexflag", "(", ")", ",", "bitfield", "=", "False", ")", ":", "if", "name", "is", "not", "None", ":", "with", "ignored", "(", "exceptions", ".", "EnumNotFound", ")", ":", "_get_enum", "(", "name", ")", "raise", "exceptions", ".", "EnumAlreadyExists", "(", ")", "if", "index", "is", "None", "or", "index", "<", "0", ":", "index", "=", "idaapi", ".", "get_enum_qty", "(", ")", "eid", "=", "idaapi", ".", "add_enum", "(", "index", ",", "name", ",", "flags", ")", "if", "eid", "==", "idaapi", ".", "BADADDR", ":", "raise", "exceptions", ".", "EnumCreationFailed", "(", "'Failed creating enum \"{}\"'", ".", "format", "(", "name", ")", ")", "if", "bitfield", ":", "idaapi", ".", "set_enum_bf", "(", "eid", ",", "bitfield", ")", "return", "Enum", "(", "eid", "=", "eid", ")" ]
Create a new enum. Args: name: Name of the enum to create. index: The index of the enum. Leave at default to append the enum as the last enum. flags: Enum type flags. bitfield: Is the enum a bitfield. Returns: An `Enum` object.
[ "Create", "a", "new", "enum", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/enum.py#L43-L71
13,689
tmr232/Sark
sark/enum.py
_add_enum_member
def _add_enum_member(enum, name, value, bitmask=DEFMASK): """Add an enum member.""" error = idaapi.add_enum_member(enum, name, value, bitmask) if error: raise _enum_member_error(error, enum, name, value, bitmask)
python
def _add_enum_member(enum, name, value, bitmask=DEFMASK): """Add an enum member.""" error = idaapi.add_enum_member(enum, name, value, bitmask) if error: raise _enum_member_error(error, enum, name, value, bitmask)
[ "def", "_add_enum_member", "(", "enum", ",", "name", ",", "value", ",", "bitmask", "=", "DEFMASK", ")", ":", "error", "=", "idaapi", ".", "add_enum_member", "(", "enum", ",", "name", ",", "value", ",", "bitmask", ")", "if", "error", ":", "raise", "_enum_member_error", "(", "error", ",", "enum", ",", "name", ",", "value", ",", "bitmask", ")" ]
Add an enum member.
[ "Add", "an", "enum", "member", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/enum.py#L79-L84
13,690
tmr232/Sark
sark/enum.py
_iter_bitmasks
def _iter_bitmasks(eid): """Iterate all bitmasks in a given enum. Note that while `DEFMASK` indicates no-more-bitmasks, it is also a valid bitmask value. The only way to tell if it exists is when iterating the serials. """ bitmask = idaapi.get_first_bmask(eid) yield bitmask while bitmask != DEFMASK: bitmask = idaapi.get_next_bmask(eid, bitmask) yield bitmask
python
def _iter_bitmasks(eid): """Iterate all bitmasks in a given enum. Note that while `DEFMASK` indicates no-more-bitmasks, it is also a valid bitmask value. The only way to tell if it exists is when iterating the serials. """ bitmask = idaapi.get_first_bmask(eid) yield bitmask while bitmask != DEFMASK: bitmask = idaapi.get_next_bmask(eid, bitmask) yield bitmask
[ "def", "_iter_bitmasks", "(", "eid", ")", ":", "bitmask", "=", "idaapi", ".", "get_first_bmask", "(", "eid", ")", "yield", "bitmask", "while", "bitmask", "!=", "DEFMASK", ":", "bitmask", "=", "idaapi", ".", "get_next_bmask", "(", "eid", ",", "bitmask", ")", "yield", "bitmask" ]
Iterate all bitmasks in a given enum. Note that while `DEFMASK` indicates no-more-bitmasks, it is also a valid bitmask value. The only way to tell if it exists is when iterating the serials.
[ "Iterate", "all", "bitmasks", "in", "a", "given", "enum", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/enum.py#L352-L365
13,691
tmr232/Sark
sark/enum.py
_iter_enum_member_values
def _iter_enum_member_values(eid, bitmask): """Iterate member values with given bitmask inside the enum Note that `DEFMASK` can either indicate end-of-values or a valid value. Iterate serials to tell apart. """ value = idaapi.get_first_enum_member(eid, bitmask) yield value while value != DEFMASK: value = idaapi.get_next_enum_member(eid, value, bitmask) yield value
python
def _iter_enum_member_values(eid, bitmask): """Iterate member values with given bitmask inside the enum Note that `DEFMASK` can either indicate end-of-values or a valid value. Iterate serials to tell apart. """ value = idaapi.get_first_enum_member(eid, bitmask) yield value while value != DEFMASK: value = idaapi.get_next_enum_member(eid, value, bitmask) yield value
[ "def", "_iter_enum_member_values", "(", "eid", ",", "bitmask", ")", ":", "value", "=", "idaapi", ".", "get_first_enum_member", "(", "eid", ",", "bitmask", ")", "yield", "value", "while", "value", "!=", "DEFMASK", ":", "value", "=", "idaapi", ".", "get_next_enum_member", "(", "eid", ",", "value", ",", "bitmask", ")", "yield", "value" ]
Iterate member values with given bitmask inside the enum Note that `DEFMASK` can either indicate end-of-values or a valid value. Iterate serials to tell apart.
[ "Iterate", "member", "values", "with", "given", "bitmask", "inside", "the", "enum" ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/enum.py#L368-L379
13,692
tmr232/Sark
sark/enum.py
_iter_serial_enum_member
def _iter_serial_enum_member(eid, value, bitmask): """Iterate serial and CID of enum members with given value and bitmask. Here only valid values are returned, as `idaapi.BADNODE` always indicates an invalid member. """ cid, serial = idaapi.get_first_serial_enum_member(eid, value, bitmask) while cid != idaapi.BADNODE: yield cid, serial cid, serial = idaapi.get_next_serial_enum_member(cid, serial)
python
def _iter_serial_enum_member(eid, value, bitmask): """Iterate serial and CID of enum members with given value and bitmask. Here only valid values are returned, as `idaapi.BADNODE` always indicates an invalid member. """ cid, serial = idaapi.get_first_serial_enum_member(eid, value, bitmask) while cid != idaapi.BADNODE: yield cid, serial cid, serial = idaapi.get_next_serial_enum_member(cid, serial)
[ "def", "_iter_serial_enum_member", "(", "eid", ",", "value", ",", "bitmask", ")", ":", "cid", ",", "serial", "=", "idaapi", ".", "get_first_serial_enum_member", "(", "eid", ",", "value", ",", "bitmask", ")", "while", "cid", "!=", "idaapi", ".", "BADNODE", ":", "yield", "cid", ",", "serial", "cid", ",", "serial", "=", "idaapi", ".", "get_next_serial_enum_member", "(", "cid", ",", "serial", ")" ]
Iterate serial and CID of enum members with given value and bitmask. Here only valid values are returned, as `idaapi.BADNODE` always indicates an invalid member.
[ "Iterate", "serial", "and", "CID", "of", "enum", "members", "with", "given", "value", "and", "bitmask", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/enum.py#L382-L391
13,693
tmr232/Sark
sark/enum.py
_iter_enum_constant_ids
def _iter_enum_constant_ids(eid): """Iterate the constant IDs of all members in the given enum""" for bitmask in _iter_bitmasks(eid): for value in _iter_enum_member_values(eid, bitmask): for cid, serial in _iter_serial_enum_member(eid, value, bitmask): yield cid
python
def _iter_enum_constant_ids(eid): """Iterate the constant IDs of all members in the given enum""" for bitmask in _iter_bitmasks(eid): for value in _iter_enum_member_values(eid, bitmask): for cid, serial in _iter_serial_enum_member(eid, value, bitmask): yield cid
[ "def", "_iter_enum_constant_ids", "(", "eid", ")", ":", "for", "bitmask", "in", "_iter_bitmasks", "(", "eid", ")", ":", "for", "value", "in", "_iter_enum_member_values", "(", "eid", ",", "bitmask", ")", ":", "for", "cid", ",", "serial", "in", "_iter_serial_enum_member", "(", "eid", ",", "value", ",", "bitmask", ")", ":", "yield", "cid" ]
Iterate the constant IDs of all members in the given enum
[ "Iterate", "the", "constant", "IDs", "of", "all", "members", "in", "the", "given", "enum" ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/enum.py#L394-L399
13,694
tmr232/Sark
sark/enum.py
EnumMembers.add
def add(self, name, value, bitmask=DEFMASK): """Add an enum member Args: name: Name of the member value: value of the member bitmask: bitmask. Only use if enum is a bitfield. """ _add_enum_member(self._eid, name, value, bitmask)
python
def add(self, name, value, bitmask=DEFMASK): """Add an enum member Args: name: Name of the member value: value of the member bitmask: bitmask. Only use if enum is a bitfield. """ _add_enum_member(self._eid, name, value, bitmask)
[ "def", "add", "(", "self", ",", "name", ",", "value", ",", "bitmask", "=", "DEFMASK", ")", ":", "_add_enum_member", "(", "self", ".", "_eid", ",", "name", ",", "value", ",", "bitmask", ")" ]
Add an enum member Args: name: Name of the member value: value of the member bitmask: bitmask. Only use if enum is a bitfield.
[ "Add", "an", "enum", "member" ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/enum.py#L141-L149
13,695
tmr232/Sark
sark/enum.py
EnumMembers.remove
def remove(self, name): """Remove an enum member by name""" member = self[name] serial = member.serial value = member.value bmask = member.bmask success = idaapi.del_enum_member(self._eid, value, serial, bmask) if not success: raise exceptions.CantDeleteEnumMember("Can't delete enum member {!r}.".format(name))
python
def remove(self, name): """Remove an enum member by name""" member = self[name] serial = member.serial value = member.value bmask = member.bmask success = idaapi.del_enum_member(self._eid, value, serial, bmask) if not success: raise exceptions.CantDeleteEnumMember("Can't delete enum member {!r}.".format(name))
[ "def", "remove", "(", "self", ",", "name", ")", ":", "member", "=", "self", "[", "name", "]", "serial", "=", "member", ".", "serial", "value", "=", "member", ".", "value", "bmask", "=", "member", ".", "bmask", "success", "=", "idaapi", ".", "del_enum_member", "(", "self", ".", "_eid", ",", "value", ",", "serial", ",", "bmask", ")", "if", "not", "success", ":", "raise", "exceptions", ".", "CantDeleteEnumMember", "(", "\"Can't delete enum member {!r}.\"", ".", "format", "(", "name", ")", ")" ]
Remove an enum member by name
[ "Remove", "an", "enum", "member", "by", "name" ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/enum.py#L158-L167
13,696
tmr232/Sark
sark/enum.py
Enum.name
def name(self, name): """Set the enum name.""" success = idaapi.set_enum_name(self.eid, name) if not success: raise exceptions.CantRenameEnum("Cant rename enum {!r} to {!r}.".format(self.name, name))
python
def name(self, name): """Set the enum name.""" success = idaapi.set_enum_name(self.eid, name) if not success: raise exceptions.CantRenameEnum("Cant rename enum {!r} to {!r}.".format(self.name, name))
[ "def", "name", "(", "self", ",", "name", ")", ":", "success", "=", "idaapi", ".", "set_enum_name", "(", "self", ".", "eid", ",", "name", ")", "if", "not", "success", ":", "raise", "exceptions", ".", "CantRenameEnum", "(", "\"Cant rename enum {!r} to {!r}.\"", ".", "format", "(", "self", ".", "name", ",", "name", ")", ")" ]
Set the enum name.
[ "Set", "the", "enum", "name", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/enum.py#L201-L205
13,697
tmr232/Sark
sark/enum.py
EnumMember.name
def name(self, name): """Set the member name. Note that a member name cannot appear in other enums, or generally anywhere else in the IDB. """ success = idaapi.set_enum_member_name(self.cid, name) if not success: raise exceptions.CantRenameEnumMember( "Failed renaming {!r} to {!r}. Does the name exist somewhere else?".format(self.name, name))
python
def name(self, name): """Set the member name. Note that a member name cannot appear in other enums, or generally anywhere else in the IDB. """ success = idaapi.set_enum_member_name(self.cid, name) if not success: raise exceptions.CantRenameEnumMember( "Failed renaming {!r} to {!r}. Does the name exist somewhere else?".format(self.name, name))
[ "def", "name", "(", "self", ",", "name", ")", ":", "success", "=", "idaapi", ".", "set_enum_member_name", "(", "self", ".", "cid", ",", "name", ")", "if", "not", "success", ":", "raise", "exceptions", ".", "CantRenameEnumMember", "(", "\"Failed renaming {!r} to {!r}. Does the name exist somewhere else?\"", ".", "format", "(", "self", ".", "name", ",", "name", ")", ")" ]
Set the member name. Note that a member name cannot appear in other enums, or generally anywhere else in the IDB.
[ "Set", "the", "member", "name", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/enum.py#L310-L319
13,698
tmr232/Sark
sark/code/function.py
functions
def functions(start=None, end=None): """Get all functions in range. Args: start: Start address of the range. Defaults to IDB start. end: End address of the range. Defaults to IDB end. Returns: This is a generator that iterates over all the functions in the IDB. """ start, end = fix_addresses(start, end) for func_t in idautils.Functions(start, end): yield Function(func_t)
python
def functions(start=None, end=None): """Get all functions in range. Args: start: Start address of the range. Defaults to IDB start. end: End address of the range. Defaults to IDB end. Returns: This is a generator that iterates over all the functions in the IDB. """ start, end = fix_addresses(start, end) for func_t in idautils.Functions(start, end): yield Function(func_t)
[ "def", "functions", "(", "start", "=", "None", ",", "end", "=", "None", ")", ":", "start", ",", "end", "=", "fix_addresses", "(", "start", ",", "end", ")", "for", "func_t", "in", "idautils", ".", "Functions", "(", "start", ",", "end", ")", ":", "yield", "Function", "(", "func_t", ")" ]
Get all functions in range. Args: start: Start address of the range. Defaults to IDB start. end: End address of the range. Defaults to IDB end. Returns: This is a generator that iterates over all the functions in the IDB.
[ "Get", "all", "functions", "in", "range", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/code/function.py#L400-L413
13,699
tmr232/Sark
sark/code/function.py
Function.xrefs_from
def xrefs_from(self): """Xrefs from the function. This includes the xrefs from every line in the function, as `Xref` objects. Xrefs are filtered to exclude code references that are internal to the function. This means that every xrefs to the function's code will NOT be returned (yet, references to the function's data will be returnd). To get those extra xrefs, you need to iterate the function's lines yourself. """ for line in self.lines: for xref in line.xrefs_from: if xref.type.is_flow: continue if xref.to in self and xref.iscode: continue yield xref
python
def xrefs_from(self): """Xrefs from the function. This includes the xrefs from every line in the function, as `Xref` objects. Xrefs are filtered to exclude code references that are internal to the function. This means that every xrefs to the function's code will NOT be returned (yet, references to the function's data will be returnd). To get those extra xrefs, you need to iterate the function's lines yourself. """ for line in self.lines: for xref in line.xrefs_from: if xref.type.is_flow: continue if xref.to in self and xref.iscode: continue yield xref
[ "def", "xrefs_from", "(", "self", ")", ":", "for", "line", "in", "self", ".", "lines", ":", "for", "xref", "in", "line", ".", "xrefs_from", ":", "if", "xref", ".", "type", ".", "is_flow", ":", "continue", "if", "xref", ".", "to", "in", "self", "and", "xref", ".", "iscode", ":", "continue", "yield", "xref" ]
Xrefs from the function. This includes the xrefs from every line in the function, as `Xref` objects. Xrefs are filtered to exclude code references that are internal to the function. This means that every xrefs to the function's code will NOT be returned (yet, references to the function's data will be returnd). To get those extra xrefs, you need to iterate the function's lines yourself.
[ "Xrefs", "from", "the", "function", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/code/function.py#L234-L251