Dataset columns and value ranges:

| Column | Type | Range |
|---|---|---|
| repository_name | string | 7–55 chars |
| func_path_in_repository | string | 4–223 chars |
| func_name | string | 1–134 chars |
| whole_func_string | string | 75–104k chars |
| language | string | 1 class |
| func_code_string | string | 75–104k chars |
| func_code_tokens | list | 19–28.4k items |
| func_documentation_string | string | 1–46.9k chars |
| func_documentation_tokens | list | 1–1.97k items |
| split_name | string | 1 class |
| func_code_url | string | 87–315 chars |
cdgriffith/Reusables
|
reusables/log.py
|
get_file_handler
|
def get_file_handler(file_path="out.log", level=logging.INFO,
log_format=log_formats.easy_read,
handler=logging.FileHandler,
**handler_kwargs):
"""
Set up a file handler to add to a logger.
:param file_path: file to write the log to, defaults to out.log
:param level: logging level to set handler at
:param log_format: formatter to use
:param handler: logging handler to use, defaults to FileHandler
:param handler_kwargs: options to pass to the handler
:return: handler
"""
fh = handler(file_path, **handler_kwargs)
fh.setLevel(level)
fh.setFormatter(logging.Formatter(log_format))
return fh
|
python
|
def get_file_handler(file_path="out.log", level=logging.INFO,
log_format=log_formats.easy_read,
handler=logging.FileHandler,
**handler_kwargs):
"""
Set up a file handler to add to a logger.
:param file_path: file to write the log to, defaults to out.log
:param level: logging level to set handler at
:param log_format: formatter to use
:param handler: logging handler to use, defaults to FileHandler
:param handler_kwargs: options to pass to the handler
:return: handler
"""
fh = handler(file_path, **handler_kwargs)
fh.setLevel(level)
fh.setFormatter(logging.Formatter(log_format))
return fh
|
[
"def",
"get_file_handler",
"(",
"file_path",
"=",
"\"out.log\"",
",",
"level",
"=",
"logging",
".",
"INFO",
",",
"log_format",
"=",
"log_formats",
".",
"easy_read",
",",
"handler",
"=",
"logging",
".",
"FileHandler",
",",
"*",
"*",
"handler_kwargs",
")",
":",
"fh",
"=",
"handler",
"(",
"file_path",
",",
"*",
"*",
"handler_kwargs",
")",
"fh",
".",
"setLevel",
"(",
"level",
")",
"fh",
".",
"setFormatter",
"(",
"logging",
".",
"Formatter",
"(",
"log_format",
")",
")",
"return",
"fh"
] |
Set up a file handler to add to a logger.
:param file_path: file to write the log to, defaults to out.log
:param level: logging level to set handler at
:param log_format: formatter to use
:param handler: logging handler to use, defaults to FileHandler
:param handler_kwargs: options to pass to the handler
:return: handler
|
[
"Set",
"up",
"a",
"file",
"handler",
"to",
"add",
"to",
"a",
"logger",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/log.py#L62-L79
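A minimal usage sketch for the row above, assuming `get_file_handler` is exported at package level (docstrings elsewhere in this dataset call the helpers as `reusables.<name>`); the `"my_app"` names are illustrative:

```python
import logging
import reusables

log = logging.getLogger("my_app")
log.setLevel(logging.DEBUG)
# The helper returns a ready-made FileHandler with level and formatter set;
# attaching it to a logger is left to the caller.
log.addHandler(reusables.get_file_handler("my_app.log", level=logging.DEBUG))
log.debug("written to my_app.log")
```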
|
cdgriffith/Reusables
|
reusables/log.py
|
setup_logger
|
def setup_logger(module_name=None, level=logging.INFO, stream=sys.stderr,
file_path=None, log_format=log_formats.easy_read,
suppress_warning=True):
"""
Grabs the specified logger and adds wanted handlers to it. Will
default to adding a stream handler.
:param module_name: logger name to use
:param level: logging level to set logger at
:param stream: stream to log to, or None
:param file_path: file path to log to, or None
:param log_format: format to set the handlers to use
:param suppress_warning: add a NullHandler if no other handler is specified
:return: configured logger
"""
new_logger = logging.getLogger(module_name)
if stream:
new_logger.addHandler(get_stream_handler(stream, level, log_format))
elif not file_path and suppress_warning and not new_logger.handlers:
new_logger.addHandler(logging.NullHandler())
if file_path:
new_logger.addHandler(get_file_handler(file_path, level, log_format))
if level > 0:
new_logger.setLevel(level)
return new_logger
|
python
|
def setup_logger(module_name=None, level=logging.INFO, stream=sys.stderr,
file_path=None, log_format=log_formats.easy_read,
suppress_warning=True):
"""
Grabs the specified logger and adds wanted handlers to it. Will
default to adding a stream handler.
:param module_name: logger name to use
:param level: logging level to set logger at
:param stream: stream to log to, or None
:param file_path: file path to log to, or None
:param log_format: format to set the handlers to use
:param suppress_warning: add a NullHandler if no other handler is specified
:return: configured logger
"""
new_logger = logging.getLogger(module_name)
if stream:
new_logger.addHandler(get_stream_handler(stream, level, log_format))
elif not file_path and suppress_warning and not new_logger.handlers:
new_logger.addHandler(logging.NullHandler())
if file_path:
new_logger.addHandler(get_file_handler(file_path, level, log_format))
if level > 0:
new_logger.setLevel(level)
return new_logger
|
[
"def",
"setup_logger",
"(",
"module_name",
"=",
"None",
",",
"level",
"=",
"logging",
".",
"INFO",
",",
"stream",
"=",
"sys",
".",
"stderr",
",",
"file_path",
"=",
"None",
",",
"log_format",
"=",
"log_formats",
".",
"easy_read",
",",
"suppress_warning",
"=",
"True",
")",
":",
"new_logger",
"=",
"logging",
".",
"getLogger",
"(",
"module_name",
")",
"if",
"stream",
":",
"new_logger",
".",
"addHandler",
"(",
"get_stream_handler",
"(",
"stream",
",",
"level",
",",
"log_format",
")",
")",
"elif",
"not",
"file_path",
"and",
"suppress_warning",
"and",
"not",
"new_logger",
".",
"handlers",
":",
"new_logger",
".",
"addHandler",
"(",
"logging",
".",
"NullHandler",
"(",
")",
")",
"if",
"file_path",
":",
"new_logger",
".",
"addHandler",
"(",
"get_file_handler",
"(",
"file_path",
",",
"level",
",",
"log_format",
")",
")",
"if",
"level",
">",
"0",
":",
"new_logger",
".",
"setLevel",
"(",
"level",
")",
"return",
"new_logger"
] |
Grabs the specified logger and adds wanted handlers to it. Will
default to adding a stream handler.
:param module_name: logger name to use
:param level: logging level to set logger at
:param stream: stream to log to, or None
:param file_path: file path to log to, or None
:param log_format: format to set the handlers to use
:param suppress_warning: add a NullHandler if no other handler is specified
:return: configured logger
|
[
"Grabs",
"the",
"specified",
"logger",
"and",
"adds",
"wanted",
"handlers",
"to",
"it",
".",
"Will",
"default",
"to",
"adding",
"a",
"stream",
"handler",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/log.py#L82-L108
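A sketch of the one-call setup path, under the same package-level-export assumption; names and paths are illustrative:

```python
import logging
import sys
import reusables

# Adds a stream handler (stdout here) and, because file_path is given,
# a file handler too; the logger level is set since level > 0.
log = reusables.setup_logger("my_app", level=logging.INFO,
                             stream=sys.stdout, file_path="my_app.log")
log.info("goes to stdout and my_app.log")
```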
|
cdgriffith/Reusables
|
reusables/log.py
|
add_stream_handler
|
def add_stream_handler(logger=None, stream=sys.stderr, level=logging.INFO,
log_format=log_formats.easy_read):
"""
    Adds a newly created stream handler to the specified logger
:param logger: logging name or object to modify, defaults to root logger
:param stream: which stream to use, defaults to sys.stderr
:param level: logging level to set handler at
:param log_format: formatter to use
"""
if not isinstance(logger, logging.Logger):
logger = logging.getLogger(logger)
logger.addHandler(get_stream_handler(stream, level, log_format))
|
python
|
def add_stream_handler(logger=None, stream=sys.stderr, level=logging.INFO,
log_format=log_formats.easy_read):
"""
    Adds a newly created stream handler to the specified logger
:param logger: logging name or object to modify, defaults to root logger
:param stream: which stream to use, defaults to sys.stderr
:param level: logging level to set handler at
:param log_format: formatter to use
"""
if not isinstance(logger, logging.Logger):
logger = logging.getLogger(logger)
logger.addHandler(get_stream_handler(stream, level, log_format))
|
[
"def",
"add_stream_handler",
"(",
"logger",
"=",
"None",
",",
"stream",
"=",
"sys",
".",
"stderr",
",",
"level",
"=",
"logging",
".",
"INFO",
",",
"log_format",
"=",
"log_formats",
".",
"easy_read",
")",
":",
"if",
"not",
"isinstance",
"(",
"logger",
",",
"logging",
".",
"Logger",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"logger",
")",
"logger",
".",
"addHandler",
"(",
"get_stream_handler",
"(",
"stream",
",",
"level",
",",
"log_format",
")",
")"
] |
Adds a newly created stream handler to the specified logger
:param logger: logging name or object to modify, defaults to root logger
:param stream: which stream to use, defaults to sys.stderr
:param level: logging level to set handler at
:param log_format: formatter to use
|
[
"Addes",
"a",
"newly",
"created",
"stream",
"handler",
"to",
"the",
"specified",
"logger"
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/log.py#L118-L131
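A short sketch: `add_stream_handler` accepts a logger name, a Logger object, or None for the root logger (package-level export assumed as above):

```python
import logging
import reusables

reusables.add_stream_handler("my_app")  # stderr handler at INFO by default
logging.getLogger("my_app").warning("printed to sys.stderr")
```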
|
cdgriffith/Reusables
|
reusables/log.py
|
add_file_handler
|
def add_file_handler(logger=None, file_path="out.log", level=logging.INFO,
log_format=log_formats.easy_read):
"""
    Adds a newly created file handler to the specified logger
:param logger: logging name or object to modify, defaults to root logger
:param file_path: path to file to log to
:param level: logging level to set handler at
:param log_format: formatter to use
"""
if not isinstance(logger, logging.Logger):
logger = logging.getLogger(logger)
logger.addHandler(get_file_handler(file_path, level, log_format))
|
python
|
def add_file_handler(logger=None, file_path="out.log", level=logging.INFO,
log_format=log_formats.easy_read):
"""
    Adds a newly created file handler to the specified logger
:param logger: logging name or object to modify, defaults to root logger
:param file_path: path to file to log to
:param level: logging level to set handler at
:param log_format: formatter to use
"""
if not isinstance(logger, logging.Logger):
logger = logging.getLogger(logger)
logger.addHandler(get_file_handler(file_path, level, log_format))
|
[
"def",
"add_file_handler",
"(",
"logger",
"=",
"None",
",",
"file_path",
"=",
"\"out.log\"",
",",
"level",
"=",
"logging",
".",
"INFO",
",",
"log_format",
"=",
"log_formats",
".",
"easy_read",
")",
":",
"if",
"not",
"isinstance",
"(",
"logger",
",",
"logging",
".",
"Logger",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"logger",
")",
"logger",
".",
"addHandler",
"(",
"get_file_handler",
"(",
"file_path",
",",
"level",
",",
"log_format",
")",
")"
] |
Adds a newly created file handler to the specified logger
:param logger: logging name or object to modify, defaults to root logger
:param file_path: path to file to log to
:param level: logging level to set handler at
:param log_format: formatter to use
|
[
"Addes",
"a",
"newly",
"created",
"file",
"handler",
"to",
"the",
"specified",
"logger"
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/log.py#L134-L147
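A sketch for the file variant. Note the helper sets the handler's level, not the logger's, so the logger level is raised explicitly here:

```python
import logging
import reusables

log = logging.getLogger("my_app")
log.setLevel(logging.INFO)  # handler level alone will not let INFO through
reusables.add_file_handler(log, file_path="my_app.log")
log.info("appended to my_app.log")
```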
|
cdgriffith/Reusables
|
reusables/log.py
|
add_rotating_file_handler
|
def add_rotating_file_handler(logger=None, file_path="out.log",
level=logging.INFO,
log_format=log_formats.easy_read,
max_bytes=10*sizes.mb, backup_count=5,
**handler_kwargs):
""" Adds a rotating file handler to the specified logger.
:param logger: logging name or object to modify, defaults to root logger
:param file_path: path to file to log to
:param level: logging level to set handler at
:param log_format: log formatter
:param max_bytes: Max file size in bytes before rotating
:param backup_count: Number of backup files
:param handler_kwargs: options to pass to the handler
"""
if not isinstance(logger, logging.Logger):
logger = logging.getLogger(logger)
logger.addHandler(get_file_handler(file_path, level, log_format,
handler=RotatingFileHandler,
maxBytes=max_bytes,
backupCount=backup_count,
**handler_kwargs))
|
python
|
def add_rotating_file_handler(logger=None, file_path="out.log",
level=logging.INFO,
log_format=log_formats.easy_read,
max_bytes=10*sizes.mb, backup_count=5,
**handler_kwargs):
""" Adds a rotating file handler to the specified logger.
:param logger: logging name or object to modify, defaults to root logger
:param file_path: path to file to log to
:param level: logging level to set handler at
:param log_format: log formatter
:param max_bytes: Max file size in bytes before rotating
:param backup_count: Number of backup files
:param handler_kwargs: options to pass to the handler
"""
if not isinstance(logger, logging.Logger):
logger = logging.getLogger(logger)
logger.addHandler(get_file_handler(file_path, level, log_format,
handler=RotatingFileHandler,
maxBytes=max_bytes,
backupCount=backup_count,
**handler_kwargs))
|
[
"def",
"add_rotating_file_handler",
"(",
"logger",
"=",
"None",
",",
"file_path",
"=",
"\"out.log\"",
",",
"level",
"=",
"logging",
".",
"INFO",
",",
"log_format",
"=",
"log_formats",
".",
"easy_read",
",",
"max_bytes",
"=",
"10",
"*",
"sizes",
".",
"mb",
",",
"backup_count",
"=",
"5",
",",
"*",
"*",
"handler_kwargs",
")",
":",
"if",
"not",
"isinstance",
"(",
"logger",
",",
"logging",
".",
"Logger",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"logger",
")",
"logger",
".",
"addHandler",
"(",
"get_file_handler",
"(",
"file_path",
",",
"level",
",",
"log_format",
",",
"handler",
"=",
"RotatingFileHandler",
",",
"maxBytes",
"=",
"max_bytes",
",",
"backupCount",
"=",
"backup_count",
",",
"*",
"*",
"handler_kwargs",
")",
")"
] |
Adds a rotating file handler to the specified logger.
:param logger: logging name or object to modify, defaults to root logger
:param file_path: path to file to log to
:param level: logging level to set handler at
:param log_format: log formatter
:param max_bytes: Max file size in bytes before rotating
:param backup_count: Number of backup files
:param handler_kwargs: options to pass to the handler
|
[
"Adds",
"a",
"rotating",
"file",
"handler",
"to",
"the",
"specified",
"logger",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/log.py#L150-L172
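A hypothetical call, sidestepping the package's `sizes.mb` constant by passing a byte count directly:

```python
import reusables

# Roll the file at ~1 MB, keeping app.log.1 .. app.log.5 as backups.
reusables.add_rotating_file_handler("my_app", file_path="app.log",
                                    max_bytes=1024 * 1024, backup_count=5)
```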
|
cdgriffith/Reusables
|
reusables/log.py
|
add_timed_rotating_file_handler
|
def add_timed_rotating_file_handler(logger=None, file_path="out.log",
level=logging.INFO,
log_format=log_formats.easy_read,
when='w0', interval=1, backup_count=5,
**handler_kwargs):
""" Adds a timed rotating file handler to the specified logger.
Defaults to weekly rotation, with 5 backups.
:param logger: logging name or object to modify, defaults to root logger
:param file_path: path to file to log to
:param level: logging level to set handler at
:param log_format: log formatter
    :param when: type of rotation interval: 's', 'm', 'h', 'd', 'w0'-'w6' or 'midnight'
    :param interval: how many `when` units between rotations
:param backup_count: Number of backup files
:param handler_kwargs: options to pass to the handler
"""
if not isinstance(logger, logging.Logger):
logger = logging.getLogger(logger)
logger.addHandler(get_file_handler(file_path, level, log_format,
handler=TimedRotatingFileHandler,
when=when,
interval=interval,
backupCount=backup_count,
**handler_kwargs))
|
python
|
def add_timed_rotating_file_handler(logger=None, file_path="out.log",
level=logging.INFO,
log_format=log_formats.easy_read,
when='w0', interval=1, backup_count=5,
**handler_kwargs):
""" Adds a timed rotating file handler to the specified logger.
Defaults to weekly rotation, with 5 backups.
:param logger: logging name or object to modify, defaults to root logger
:param file_path: path to file to log to
:param level: logging level to set handler at
:param log_format: log formatter
    :param when: type of rotation interval: 's', 'm', 'h', 'd', 'w0'-'w6' or 'midnight'
    :param interval: how many `when` units between rotations
:param backup_count: Number of backup files
:param handler_kwargs: options to pass to the handler
"""
if not isinstance(logger, logging.Logger):
logger = logging.getLogger(logger)
logger.addHandler(get_file_handler(file_path, level, log_format,
handler=TimedRotatingFileHandler,
when=when,
interval=interval,
backupCount=backup_count,
**handler_kwargs))
|
[
"def",
"add_timed_rotating_file_handler",
"(",
"logger",
"=",
"None",
",",
"file_path",
"=",
"\"out.log\"",
",",
"level",
"=",
"logging",
".",
"INFO",
",",
"log_format",
"=",
"log_formats",
".",
"easy_read",
",",
"when",
"=",
"'w0'",
",",
"interval",
"=",
"1",
",",
"backup_count",
"=",
"5",
",",
"*",
"*",
"handler_kwargs",
")",
":",
"if",
"not",
"isinstance",
"(",
"logger",
",",
"logging",
".",
"Logger",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"logger",
")",
"logger",
".",
"addHandler",
"(",
"get_file_handler",
"(",
"file_path",
",",
"level",
",",
"log_format",
",",
"handler",
"=",
"TimedRotatingFileHandler",
",",
"when",
"=",
"when",
",",
"interval",
"=",
"interval",
",",
"backupCount",
"=",
"backup_count",
",",
"*",
"*",
"handler_kwargs",
")",
")"
] |
Adds a timed rotating file handler to the specified logger.
Defaults to weekly rotation, with 5 backups.
:param logger: logging name or object to modify, defaults to root logger
:param file_path: path to file to log to
:param level: logging level to set handler at
:param log_format: log formatter
:param when: type of rotation interval: 's', 'm', 'h', 'd', 'w0'-'w6' or 'midnight'
:param interval: how many `when` units between rotations
:param backup_count: Number of backup files
:param handler_kwargs: options to pass to the handler
|
[
"Adds",
"a",
"timed",
"rotating",
"file",
"handler",
"to",
"the",
"specified",
"logger",
".",
"Defaults",
"to",
"weekly",
"rotation",
"with",
"5",
"backups",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/log.py#L175-L200
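A sketch that overrides the weekly default ('w0' is Monday in the stdlib's TimedRotatingFileHandler) with daily rotation at midnight:

```python
import reusables

reusables.add_timed_rotating_file_handler("my_app", file_path="app.log",
                                          when="midnight", interval=1)
```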
|
cdgriffith/Reusables
|
reusables/log.py
|
remove_stream_handlers
|
def remove_stream_handlers(logger=None):
"""
Remove only stream handlers from the specified logger
:param logger: logging name or object to modify, defaults to root logger
"""
if not isinstance(logger, logging.Logger):
logger = logging.getLogger(logger)
new_handlers = []
for handler in logger.handlers:
# FileHandler is a subclass of StreamHandler so
# 'if not a StreamHandler' does not work
if (isinstance(handler, logging.FileHandler) or
isinstance(handler, logging.NullHandler) or
(isinstance(handler, logging.Handler) and not
isinstance(handler, logging.StreamHandler))):
new_handlers.append(handler)
logger.handlers = new_handlers
|
python
|
def remove_stream_handlers(logger=None):
"""
Remove only stream handlers from the specified logger
:param logger: logging name or object to modify, defaults to root logger
"""
if not isinstance(logger, logging.Logger):
logger = logging.getLogger(logger)
new_handlers = []
for handler in logger.handlers:
# FileHandler is a subclass of StreamHandler so
# 'if not a StreamHandler' does not work
if (isinstance(handler, logging.FileHandler) or
isinstance(handler, logging.NullHandler) or
(isinstance(handler, logging.Handler) and not
isinstance(handler, logging.StreamHandler))):
new_handlers.append(handler)
logger.handlers = new_handlers
|
[
"def",
"remove_stream_handlers",
"(",
"logger",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"logger",
",",
"logging",
".",
"Logger",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"logger",
")",
"new_handlers",
"=",
"[",
"]",
"for",
"handler",
"in",
"logger",
".",
"handlers",
":",
"# FileHandler is a subclass of StreamHandler so",
"# 'if not a StreamHandler' does not work",
"if",
"(",
"isinstance",
"(",
"handler",
",",
"logging",
".",
"FileHandler",
")",
"or",
"isinstance",
"(",
"handler",
",",
"logging",
".",
"NullHandler",
")",
"or",
"(",
"isinstance",
"(",
"handler",
",",
"logging",
".",
"Handler",
")",
"and",
"not",
"isinstance",
"(",
"handler",
",",
"logging",
".",
"StreamHandler",
")",
")",
")",
":",
"new_handlers",
".",
"append",
"(",
"handler",
")",
"logger",
".",
"handlers",
"=",
"new_handlers"
] |
Remove only stream handlers from the specified logger
:param logger: logging name or object to modify, defaults to root logger
|
[
"Remove",
"only",
"stream",
"handlers",
"from",
"the",
"specified",
"logger"
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/log.py#L203-L221
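A sketch showing why the subclass check in the row above matters: FileHandler derives from StreamHandler, so it has to be kept explicitly (package-level exports assumed):

```python
import logging
import reusables

log = logging.getLogger("stream_demo")
reusables.add_stream_handler(log)
reusables.add_file_handler(log, file_path="app.log")
reusables.remove_stream_handlers(log)
# Only the file handler survives the filtering.
print([type(h).__name__ for h in log.handlers])  # ['FileHandler']
```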
|
cdgriffith/Reusables
|
reusables/log.py
|
remove_file_handlers
|
def remove_file_handlers(logger=None):
"""
Remove only file handlers from the specified logger. Will go through
and close each handler for safety.
:param logger: logging name or object to modify, defaults to root logger
"""
if not isinstance(logger, logging.Logger):
logger = logging.getLogger(logger)
new_handlers = []
for handler in logger.handlers:
if isinstance(handler, logging.FileHandler):
handler.close()
else:
new_handlers.append(handler)
logger.handlers = new_handlers
|
python
|
def remove_file_handlers(logger=None):
"""
Remove only file handlers from the specified logger. Will go through
and close each handler for safety.
:param logger: logging name or object to modify, defaults to root logger
"""
if not isinstance(logger, logging.Logger):
logger = logging.getLogger(logger)
new_handlers = []
for handler in logger.handlers:
if isinstance(handler, logging.FileHandler):
handler.close()
else:
new_handlers.append(handler)
logger.handlers = new_handlers
|
[
"def",
"remove_file_handlers",
"(",
"logger",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"logger",
",",
"logging",
".",
"Logger",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"logger",
")",
"new_handlers",
"=",
"[",
"]",
"for",
"handler",
"in",
"logger",
".",
"handlers",
":",
"if",
"isinstance",
"(",
"handler",
",",
"logging",
".",
"FileHandler",
")",
":",
"handler",
".",
"close",
"(",
")",
"else",
":",
"new_handlers",
".",
"append",
"(",
"handler",
")",
"logger",
".",
"handlers",
"=",
"new_handlers"
] |
Remove only file handlers from the specified logger. Will go through
and close each handler for safety.
:param logger: logging name or object to modify, defaults to root logger
|
[
"Remove",
"only",
"file",
"handlers",
"from",
"the",
"specified",
"logger",
".",
"Will",
"go",
"through",
"and",
"close",
"each",
"handler",
"for",
"safety",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/log.py#L224-L240
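A sketch of the file-handler removal path; each FileHandler is closed before it is dropped:

```python
import logging
import reusables

log = logging.getLogger("file_demo")
reusables.add_file_handler(log, file_path="app.log")
reusables.remove_file_handlers(log)  # closes the handler, then drops it
print(log.handlers)  # []
```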
|
cdgriffith/Reusables
|
reusables/log.py
|
remove_all_handlers
|
def remove_all_handlers(logger=None):
"""
Safely remove all handlers from the logger
:param logger: logging name or object to modify, defaults to root logger
"""
if not isinstance(logger, logging.Logger):
logger = logging.getLogger(logger)
remove_file_handlers(logger)
logger.handlers = []
|
python
|
def remove_all_handlers(logger=None):
"""
Safely remove all handlers from the logger
:param logger: logging name or object to modify, defaults to root logger
"""
if not isinstance(logger, logging.Logger):
logger = logging.getLogger(logger)
remove_file_handlers(logger)
logger.handlers = []
|
[
"def",
"remove_all_handlers",
"(",
"logger",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"logger",
",",
"logging",
".",
"Logger",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"logger",
")",
"remove_file_handlers",
"(",
"logger",
")",
"logger",
".",
"handlers",
"=",
"[",
"]"
] |
Safely remove all handlers from the logger
:param logger: logging name or object to modify, defaults to root logger
|
[
"Safely",
"remove",
"all",
"handlers",
"from",
"the",
"logger"
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/log.py#L243-L253
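A sketch of the full teardown; file handlers are closed first via remove_file_handlers, then the handler list is cleared:

```python
import logging
import reusables

log = logging.getLogger("teardown_demo")
reusables.add_stream_handler(log)
reusables.add_file_handler(log, file_path="app.log")
reusables.remove_all_handlers(log)
print(log.handlers)  # []
```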
|
cdgriffith/Reusables
|
reusables/log.py
|
change_logger_levels
|
def change_logger_levels(logger=None, level=logging.DEBUG):
"""
Go through the logger and handlers and update their levels to the
one specified.
:param logger: logging name or object to modify, defaults to root logger
:param level: logging level to set at (10=Debug, 20=Info, 30=Warn, 40=Error)
"""
if not isinstance(logger, logging.Logger):
logger = logging.getLogger(logger)
logger.setLevel(level)
for handler in logger.handlers:
handler.level = level
|
python
|
def change_logger_levels(logger=None, level=logging.DEBUG):
"""
Go through the logger and handlers and update their levels to the
one specified.
:param logger: logging name or object to modify, defaults to root logger
:param level: logging level to set at (10=Debug, 20=Info, 30=Warn, 40=Error)
"""
if not isinstance(logger, logging.Logger):
logger = logging.getLogger(logger)
logger.setLevel(level)
for handler in logger.handlers:
handler.level = level
|
[
"def",
"change_logger_levels",
"(",
"logger",
"=",
"None",
",",
"level",
"=",
"logging",
".",
"DEBUG",
")",
":",
"if",
"not",
"isinstance",
"(",
"logger",
",",
"logging",
".",
"Logger",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"logger",
")",
"logger",
".",
"setLevel",
"(",
"level",
")",
"for",
"handler",
"in",
"logger",
".",
"handlers",
":",
"handler",
".",
"level",
"=",
"level"
] |
Go through the logger and handlers and update their levels to the
one specified.
:param logger: logging name or object to modify, defaults to root logger
:param level: logging level to set at (10=Debug, 20=Info, 30=Warn, 40=Error)
|
[
"Go",
"through",
"the",
"logger",
"and",
"handlers",
"and",
"update",
"their",
"levels",
"to",
"the",
"one",
"specified",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/log.py#L256-L269
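A sketch: one call moves the logger and every attached handler to a new level:

```python
import logging
import reusables

log = reusables.setup_logger("level_demo", level=logging.INFO)
reusables.change_logger_levels("level_demo", logging.DEBUG)
log.debug("visible now: logger and handlers are both at DEBUG (10)")
```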
|
cdgriffith/Reusables
|
reusables/log.py
|
get_registered_loggers
|
def get_registered_loggers(hide_children=False, hide_reusables=False):
"""
Find the names of all loggers currently registered
:param hide_children: only return top level logger names
:param hide_reusables: hide the reusables loggers
:return: list of logger names
"""
return [logger for logger in logging.Logger.manager.loggerDict.keys()
if not (hide_reusables and "reusables" in logger)
and not (hide_children and "." in logger)]
|
python
|
def get_registered_loggers(hide_children=False, hide_reusables=False):
"""
Find the names of all loggers currently registered
:param hide_children: only return top level logger names
:param hide_reusables: hide the reusables loggers
:return: list of logger names
"""
return [logger for logger in logging.Logger.manager.loggerDict.keys()
if not (hide_reusables and "reusables" in logger)
and not (hide_children and "." in logger)]
|
[
"def",
"get_registered_loggers",
"(",
"hide_children",
"=",
"False",
",",
"hide_reusables",
"=",
"False",
")",
":",
"return",
"[",
"logger",
"for",
"logger",
"in",
"logging",
".",
"Logger",
".",
"manager",
".",
"loggerDict",
".",
"keys",
"(",
")",
"if",
"not",
"(",
"hide_reusables",
"and",
"\"reusables\"",
"in",
"logger",
")",
"and",
"not",
"(",
"hide_children",
"and",
"\".\"",
"in",
"logger",
")",
"]"
] |
Find the names of all loggers currently registered
:param hide_children: only return top level logger names
:param hide_reusables: hide the reusables loggers
:return: list of logger names
|
[
"Find",
"the",
"names",
"of",
"all",
"loggers",
"currently",
"registered"
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/log.py#L272-L283
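A sketch of filtering the registry; dotted child names are hidden when hide_children is set (other already-registered loggers may also appear in the output):

```python
import logging
import reusables

logging.getLogger("app")
logging.getLogger("app.db")  # child logger, filtered out below
print(reusables.get_registered_loggers(hide_children=True))
```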
|
cdgriffith/Reusables
|
reusables/wrappers.py
|
unique
|
def unique(max_retries=10, wait=0, alt_return="-no_alt_return-",
exception=Exception, error_text=None):
"""
Wrapper. Makes sure the function's return value has not been returned before
    or else it is run with the same inputs again.
.. code: python
import reusables
import random
@reusables.unique(max_retries=100)
def poor_uuid():
return random.randint(0, 10)
print([poor_uuid() for _ in range(10)])
# [8, 9, 6, 3, 0, 7, 2, 5, 4, 10]
print([poor_uuid() for _ in range(100)])
# Exception: No result was unique
Message format options: {func} {args} {kwargs}
:param max_retries: int of number of retries to attempt before failing
:param wait: float of seconds to wait between each try, defaults to 0
    :param exception: Exception type to raise
:param error_text: text of the exception
:param alt_return: if specified, an exception is not raised on failure,
     instead the provided value of any type will be returned
"""
def func_wrap(func):
@wraps(func)
def wrapper(*args, **kwargs):
msg = (error_text if error_text else
"No result was unique for function '{func}'")
if not error_text:
msg = _add_args(msg, *args, **kwargs)
for i in range(max_retries):
value = func(*args, **kwargs)
if value not in unique_cache[func.__name__]:
unique_cache[func.__name__].append(value)
return value
if wait:
time.sleep(wait)
else:
if alt_return != "-no_alt_return-":
return alt_return
raise exception(msg.format(func=func.__name__,
args=args, kwargs=kwargs))
return wrapper
return func_wrap
|
python
|
def unique(max_retries=10, wait=0, alt_return="-no_alt_return-",
exception=Exception, error_text=None):
"""
Wrapper. Makes sure the function's return value has not been returned before
    or else it is run with the same inputs again.
.. code: python
import reusables
import random
@reusables.unique(max_retries=100)
def poor_uuid():
return random.randint(0, 10)
print([poor_uuid() for _ in range(10)])
# [8, 9, 6, 3, 0, 7, 2, 5, 4, 10]
print([poor_uuid() for _ in range(100)])
# Exception: No result was unique
Message format options: {func} {args} {kwargs}
:param max_retries: int of number of retries to attempt before failing
:param wait: float of seconds to wait between each try, defaults to 0
    :param exception: Exception type to raise
:param error_text: text of the exception
:param alt_return: if specified, an exception is not raised on failure,
     instead the provided value of any type will be returned
"""
def func_wrap(func):
@wraps(func)
def wrapper(*args, **kwargs):
msg = (error_text if error_text else
"No result was unique for function '{func}'")
if not error_text:
msg = _add_args(msg, *args, **kwargs)
for i in range(max_retries):
value = func(*args, **kwargs)
if value not in unique_cache[func.__name__]:
unique_cache[func.__name__].append(value)
return value
if wait:
time.sleep(wait)
else:
if alt_return != "-no_alt_return-":
return alt_return
raise exception(msg.format(func=func.__name__,
args=args, kwargs=kwargs))
return wrapper
return func_wrap
|
[
"def",
"unique",
"(",
"max_retries",
"=",
"10",
",",
"wait",
"=",
"0",
",",
"alt_return",
"=",
"\"-no_alt_return-\"",
",",
"exception",
"=",
"Exception",
",",
"error_text",
"=",
"None",
")",
":",
"def",
"func_wrap",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"msg",
"=",
"(",
"error_text",
"if",
"error_text",
"else",
"\"No result was unique for function '{func}'\"",
")",
"if",
"not",
"error_text",
":",
"msg",
"=",
"_add_args",
"(",
"msg",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"for",
"i",
"in",
"range",
"(",
"max_retries",
")",
":",
"value",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"value",
"not",
"in",
"unique_cache",
"[",
"func",
".",
"__name__",
"]",
":",
"unique_cache",
"[",
"func",
".",
"__name__",
"]",
".",
"append",
"(",
"value",
")",
"return",
"value",
"if",
"wait",
":",
"time",
".",
"sleep",
"(",
"wait",
")",
"else",
":",
"if",
"alt_return",
"!=",
"\"-no_alt_return-\"",
":",
"return",
"alt_return",
"raise",
"exception",
"(",
"msg",
".",
"format",
"(",
"func",
"=",
"func",
".",
"__name__",
",",
"args",
"=",
"args",
",",
"kwargs",
"=",
"kwargs",
")",
")",
"return",
"wrapper",
"return",
"func_wrap"
] |
Wrapper. Makes sure the function's return value has not been returned before
or else it is run with the same inputs again.
.. code: python
import reusables
import random
@reusables.unique(max_retries=100)
def poor_uuid():
return random.randint(0, 10)
print([poor_uuid() for _ in range(10)])
# [8, 9, 6, 3, 0, 7, 2, 5, 4, 10]
print([poor_uuid() for _ in range(100)])
# Exception: No result was unique
Message format options: {func} {args} {kwargs}
:param max_retries: int of number of retries to attempt before failing
:param wait: float of seconds to wait between each try, defaults to 0
:param exception: Exception type to raise
:param error_text: text of the exception
:param alt_return: if specified, an exception is not raised on failure,
instead the provided value of any type will be returned
|
[
"Wrapper",
".",
"Makes",
"sure",
"the",
"function",
"s",
"return",
"value",
"has",
"not",
"been",
"returned",
"before",
"or",
"else",
"it",
"run",
"with",
"the",
"same",
"inputs",
"again",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/wrappers.py#L37-L87
|
cdgriffith/Reusables
|
reusables/wrappers.py
|
lock_it
|
def lock_it(lock=g_lock):
"""
Wrapper. Simple wrapper to make sure a function is only run once at a time.
.. code: python
import reusables
import time
def func_one(_):
time.sleep(5)
@reusables.lock_it()
def func_two(_):
time.sleep(5)
@reusables.time_it(message="test_1 took {0:.2f} seconds")
def test_1():
reusables.run_in_pool(func_one, (1, 2, 3), threaded=True)
@reusables.time_it(message="test_2 took {0:.2f} seconds")
def test_2():
reusables.run_in_pool(func_two, (1, 2, 3), threaded=True)
test_1()
test_2()
# test_1 took 5.04 seconds
# test_2 took 15.07 seconds
:param lock: Which lock to use, uses unique default
"""
def func_wrapper(func):
@wraps(func)
def wrapper(*args, **kwargs):
with lock:
return func(*args, **kwargs)
return wrapper
return func_wrapper
|
python
|
def lock_it(lock=g_lock):
"""
Wrapper. Simple wrapper to make sure a function is only run once at a time.
.. code: python
import reusables
import time
def func_one(_):
time.sleep(5)
@reusables.lock_it()
def func_two(_):
time.sleep(5)
@reusables.time_it(message="test_1 took {0:.2f} seconds")
def test_1():
reusables.run_in_pool(func_one, (1, 2, 3), threaded=True)
@reusables.time_it(message="test_2 took {0:.2f} seconds")
def test_2():
reusables.run_in_pool(func_two, (1, 2, 3), threaded=True)
test_1()
test_2()
# test_1 took 5.04 seconds
# test_2 took 15.07 seconds
:param lock: Which lock to use, uses unique default
"""
def func_wrapper(func):
@wraps(func)
def wrapper(*args, **kwargs):
with lock:
return func(*args, **kwargs)
return wrapper
return func_wrapper
|
[
"def",
"lock_it",
"(",
"lock",
"=",
"g_lock",
")",
":",
"def",
"func_wrapper",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"lock",
":",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapper",
"return",
"func_wrapper"
] |
Wrapper. Simple wrapper to make sure a function is only run once at a time.
.. code: python
import reusables
import time
def func_one(_):
time.sleep(5)
@reusables.lock_it()
def func_two(_):
time.sleep(5)
@reusables.time_it(message="test_1 took {0:.2f} seconds")
def test_1():
reusables.run_in_pool(func_one, (1, 2, 3), threaded=True)
@reusables.time_it(message="test_2 took {0:.2f} seconds")
def test_2():
reusables.run_in_pool(func_two, (1, 2, 3), threaded=True)
test_1()
test_2()
# test_1 took 5.04 seconds
# test_2 took 15.07 seconds
:param lock: Which lock to use, uses unique default
|
[
"Wrapper",
".",
"Simple",
"wrapper",
"to",
"make",
"sure",
"a",
"function",
"is",
"only",
"run",
"once",
"at",
"a",
"time",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/wrappers.py#L90-L129
|
cdgriffith/Reusables
|
reusables/wrappers.py
|
time_it
|
def time_it(log=None, message=None, append=None):
"""
    Wrapper. Time how long the function takes to execute and print it.
If log is true, make sure to set the logging level of 'reusables' to INFO
level or lower.
.. code:: python
import time
import reusables
reusables.add_stream_handler('reusables')
@reusables.time_it(log=True, message="{seconds:.2f} seconds")
def test_time(length):
time.sleep(length)
return "slept {0}".format(length)
result = test_time(5)
# 2016-11-09 16:59:39,935 - reusables.wrappers INFO 5.01 seconds
print(result)
# slept 5
Message format options: {func} {seconds} {args} {kwargs}
:param log: log as INFO level instead of printing
:param message: string to format with total time as the only input
    :param append: list to append the total time to
"""
def func_wrapper(func):
@wraps(func)
def wrapper(*args, **kwargs):
# Can't use nonlocal in 2.x
msg = (message if message else
"Function '{func}' took a total of {seconds} seconds")
if not message:
msg = _add_args(msg, *args, **kwargs)
time_func = (time.perf_counter if python_version >= (3, 3)
else time.time)
start_time = time_func()
try:
return func(*args, **kwargs)
finally:
total_time = time_func() - start_time
time_string = msg.format(func=func.__name__,
seconds=total_time,
args=args, kwargs=kwargs)
if log:
my_logger = logging.getLogger(log) if isinstance(log, str)\
else logger
my_logger.info(time_string)
else:
print(time_string)
if isinstance(append, list):
append.append(total_time)
return wrapper
return func_wrapper
|
python
|
def time_it(log=None, message=None, append=None):
"""
    Wrapper. Time how long the function takes to execute and print it.
If log is true, make sure to set the logging level of 'reusables' to INFO
level or lower.
.. code:: python
import time
import reusables
reusables.add_stream_handler('reusables')
@reusables.time_it(log=True, message="{seconds:.2f} seconds")
def test_time(length):
time.sleep(length)
return "slept {0}".format(length)
result = test_time(5)
# 2016-11-09 16:59:39,935 - reusables.wrappers INFO 5.01 seconds
print(result)
# slept 5
Message format options: {func} {seconds} {args} {kwargs}
:param log: log as INFO level instead of printing
:param message: string to format with total time as the only input
    :param append: list to append the total time to
"""
def func_wrapper(func):
@wraps(func)
def wrapper(*args, **kwargs):
# Can't use nonlocal in 2.x
msg = (message if message else
"Function '{func}' took a total of {seconds} seconds")
if not message:
msg = _add_args(msg, *args, **kwargs)
time_func = (time.perf_counter if python_version >= (3, 3)
else time.time)
start_time = time_func()
try:
return func(*args, **kwargs)
finally:
total_time = time_func() - start_time
time_string = msg.format(func=func.__name__,
seconds=total_time,
args=args, kwargs=kwargs)
if log:
my_logger = logging.getLogger(log) if isinstance(log, str)\
else logger
my_logger.info(time_string)
else:
print(time_string)
if isinstance(append, list):
append.append(total_time)
return wrapper
return func_wrapper
|
[
"def",
"time_it",
"(",
"log",
"=",
"None",
",",
"message",
"=",
"None",
",",
"append",
"=",
"None",
")",
":",
"def",
"func_wrapper",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Can't use nonlocal in 2.x",
"msg",
"=",
"(",
"message",
"if",
"message",
"else",
"\"Function '{func}' took a total of {seconds} seconds\"",
")",
"if",
"not",
"message",
":",
"msg",
"=",
"_add_args",
"(",
"msg",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"time_func",
"=",
"(",
"time",
".",
"perf_counter",
"if",
"python_version",
">=",
"(",
"3",
",",
"3",
")",
"else",
"time",
".",
"time",
")",
"start_time",
"=",
"time_func",
"(",
")",
"try",
":",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"finally",
":",
"total_time",
"=",
"time_func",
"(",
")",
"-",
"start_time",
"time_string",
"=",
"msg",
".",
"format",
"(",
"func",
"=",
"func",
".",
"__name__",
",",
"seconds",
"=",
"total_time",
",",
"args",
"=",
"args",
",",
"kwargs",
"=",
"kwargs",
")",
"if",
"log",
":",
"my_logger",
"=",
"logging",
".",
"getLogger",
"(",
"log",
")",
"if",
"isinstance",
"(",
"log",
",",
"str",
")",
"else",
"logger",
"my_logger",
".",
"info",
"(",
"time_string",
")",
"else",
":",
"print",
"(",
"time_string",
")",
"if",
"isinstance",
"(",
"append",
",",
"list",
")",
":",
"append",
".",
"append",
"(",
"total_time",
")",
"return",
"wrapper",
"return",
"func_wrapper"
] |
Wrapper. Time how long the function takes to execute and print it.
If log is true, make sure to set the logging level of 'reusables' to INFO
level or lower.
.. code:: python
import time
import reusables
reusables.add_stream_handler('reusables')
@reusables.time_it(log=True, message="{seconds:.2f} seconds")
def test_time(length):
time.sleep(length)
return "slept {0}".format(length)
result = test_time(5)
# 2016-11-09 16:59:39,935 - reusables.wrappers INFO 5.01 seconds
print(result)
# slept 5
Message format options: {func} {seconds} {args} {kwargs}
:param log: log as INFO level instead of printing
:param message: string to format with total time as the only input
:param append: list to append the total time to
|
[
"Wrapper",
".",
"Time",
"the",
"amount",
"of",
"time",
"it",
"takes",
"the",
"execution",
"of",
"the",
"function",
"and",
"print",
"it",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/wrappers.py#L132-L193
|
cdgriffith/Reusables
|
reusables/wrappers.py
|
queue_it
|
def queue_it(queue=g_queue, **put_args):
"""
Wrapper. Instead of returning the result of the function, add it to a queue.
.. code: python
import reusables
import queue
my_queue = queue.Queue()
@reusables.queue_it(my_queue)
def func(a):
return a
func(10)
print(my_queue.get())
# 10
:param queue: Queue to add result into
"""
def func_wrapper(func):
@wraps(func)
def wrapper(*args, **kwargs):
queue.put(func(*args, **kwargs), **put_args)
return wrapper
return func_wrapper
|
python
|
def queue_it(queue=g_queue, **put_args):
"""
Wrapper. Instead of returning the result of the function, add it to a queue.
.. code: python
import reusables
import queue
my_queue = queue.Queue()
@reusables.queue_it(my_queue)
def func(a):
return a
func(10)
print(my_queue.get())
# 10
:param queue: Queue to add result into
"""
def func_wrapper(func):
@wraps(func)
def wrapper(*args, **kwargs):
queue.put(func(*args, **kwargs), **put_args)
return wrapper
return func_wrapper
|
[
"def",
"queue_it",
"(",
"queue",
"=",
"g_queue",
",",
"*",
"*",
"put_args",
")",
":",
"def",
"func_wrapper",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"queue",
".",
"put",
"(",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
",",
"*",
"*",
"put_args",
")",
"return",
"wrapper",
"return",
"func_wrapper"
] |
Wrapper. Instead of returning the result of the function, add it to a queue.
.. code: python
import reusables
import queue
my_queue = queue.Queue()
@reusables.queue_it(my_queue)
def func(a):
return a
func(10)
print(my_queue.get())
# 10
:param queue: Queue to add result into
|
[
"Wrapper",
".",
"Instead",
"of",
"returning",
"the",
"result",
"of",
"the",
"function",
"add",
"it",
"to",
"a",
"queue",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/wrappers.py#L196-L224
|
cdgriffith/Reusables
|
reusables/wrappers.py
|
log_exception
|
def log_exception(log="reusables", message=None, exceptions=(Exception, ),
level=logging.ERROR, show_traceback=True):
"""
    Wrapper. Log the traceback for any exceptions raised. Possible to raise
custom exception.
.. code :: python
@reusables.log_exception()
def test():
raise Exception("Bad")
# 2016-12-26 12:38:01,381 - reusables ERROR Exception in test - Bad
# Traceback (most recent call last):
# File "<input>", line 1, in <module>
# File "reusables\wrappers.py", line 200, in wrapper
# raise err
# Exception: Bad
Message format options: {func} {err} {args} {kwargs}
:param exceptions: types of exceptions to catch
:param log: log name to use
:param message: message to use in log
:param level: logging level
:param show_traceback: include full traceback or just error message
"""
def func_wrapper(func):
@wraps(func)
def wrapper(*args, **kwargs):
msg = message if message else "Exception in '{func}': {err}"
if not message:
msg = _add_args(msg, *args, **kwargs)
try:
return func(*args, **kwargs)
except exceptions as err:
my_logger = (logging.getLogger(log) if isinstance(log, str)
else log)
my_logger.log(level, msg.format(func=func.__name__,
err=str(err),
args=args, kwargs=kwargs),
exc_info=show_traceback)
raise err
return wrapper
return func_wrapper
|
python
|
def log_exception(log="reusables", message=None, exceptions=(Exception, ),
level=logging.ERROR, show_traceback=True):
"""
    Wrapper. Log the traceback for any exceptions raised. Possible to raise
custom exception.
.. code :: python
@reusables.log_exception()
def test():
raise Exception("Bad")
# 2016-12-26 12:38:01,381 - reusables ERROR Exception in test - Bad
# Traceback (most recent call last):
# File "<input>", line 1, in <module>
# File "reusables\wrappers.py", line 200, in wrapper
# raise err
# Exception: Bad
Message format options: {func} {err} {args} {kwargs}
:param exceptions: types of exceptions to catch
:param log: log name to use
:param message: message to use in log
:param level: logging level
:param show_traceback: include full traceback or just error message
"""
def func_wrapper(func):
@wraps(func)
def wrapper(*args, **kwargs):
msg = message if message else "Exception in '{func}': {err}"
if not message:
msg = _add_args(msg, *args, **kwargs)
try:
return func(*args, **kwargs)
except exceptions as err:
my_logger = (logging.getLogger(log) if isinstance(log, str)
else log)
my_logger.log(level, msg.format(func=func.__name__,
err=str(err),
args=args, kwargs=kwargs),
exc_info=show_traceback)
raise err
return wrapper
return func_wrapper
|
[
"def",
"log_exception",
"(",
"log",
"=",
"\"reusables\"",
",",
"message",
"=",
"None",
",",
"exceptions",
"=",
"(",
"Exception",
",",
")",
",",
"level",
"=",
"logging",
".",
"ERROR",
",",
"show_traceback",
"=",
"True",
")",
":",
"def",
"func_wrapper",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"msg",
"=",
"message",
"if",
"message",
"else",
"\"Exception in '{func}': {err}\"",
"if",
"not",
"message",
":",
"msg",
"=",
"_add_args",
"(",
"msg",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"try",
":",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"exceptions",
"as",
"err",
":",
"my_logger",
"=",
"(",
"logging",
".",
"getLogger",
"(",
"log",
")",
"if",
"isinstance",
"(",
"log",
",",
"str",
")",
"else",
"log",
")",
"my_logger",
".",
"log",
"(",
"level",
",",
"msg",
".",
"format",
"(",
"func",
"=",
"func",
".",
"__name__",
",",
"err",
"=",
"str",
"(",
"err",
")",
",",
"args",
"=",
"args",
",",
"kwargs",
"=",
"kwargs",
")",
",",
"exc_info",
"=",
"show_traceback",
")",
"raise",
"err",
"return",
"wrapper",
"return",
"func_wrapper"
] |
Wrapper. Log the traceback for any exceptions raised. Possible to raise
custom exception.
.. code :: python
@reusables.log_exception()
def test():
raise Exception("Bad")
# 2016-12-26 12:38:01,381 - reusables ERROR Exception in test - Bad
# Traceback (most recent call last):
# File "<input>", line 1, in <module>
# File "reusables\wrappers.py", line 200, in wrapper
# raise err
# Exception: Bad
Message format options: {func} {err} {args} {kwargs}
:param exceptions: types of exceptions to catch
:param log: log name to use
:param message: message to use in log
:param level: logging level
:param show_traceback: include full traceback or just error message
|
[
"Wrapper",
".",
"Log",
"the",
"traceback",
"to",
"any",
"exceptions",
"raised",
".",
"Possible",
"to",
"raise",
"custom",
"exception",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/wrappers.py#L227-L272
|
cdgriffith/Reusables
|
reusables/wrappers.py
|
catch_it
|
def catch_it(exceptions=(Exception, ), default=None, handler=None):
"""
If the function encounters an exception, catch it, and
    return the specified default or send it to a handler function instead.
.. code :: python
def handle_error(exception, func, *args, **kwargs):
print(f"{func.__name__} raised {exception} when called with {args}")
    @reusables.catch_it(handler=handle_error)
    def will_raise(message="Hello"):
raise Exception(message)
:param exceptions: tuple of exceptions to catch
:param default: what to return if the exception is caught
    :param handler: function that receives the exception, func, *args and **kwargs
"""
def func_wrapper(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except exceptions as err:
if handler:
return handler(err, func, *args, **kwargs)
return default
return wrapper
return func_wrapper
|
python
|
def catch_it(exceptions=(Exception, ), default=None, handler=None):
"""
If the function encounters an exception, catch it, and
    return the specified default or send it to a handler function instead.
.. code :: python
def handle_error(exception, func, *args, **kwargs):
print(f"{func.__name__} raised {exception} when called with {args}")
    @reusables.catch_it(handler=handle_error)
    def will_raise(message="Hello"):
raise Exception(message)
:param exceptions: tuple of exceptions to catch
:param default: what to return if the exception is caught
    :param handler: function that receives the exception, func, *args and **kwargs
"""
def func_wrapper(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except exceptions as err:
if handler:
return handler(err, func, *args, **kwargs)
return default
return wrapper
return func_wrapper
|
[
"def",
"catch_it",
"(",
"exceptions",
"=",
"(",
"Exception",
",",
")",
",",
"default",
"=",
"None",
",",
"handler",
"=",
"None",
")",
":",
"def",
"func_wrapper",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"exceptions",
"as",
"err",
":",
"if",
"handler",
":",
"return",
"handler",
"(",
"err",
",",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"default",
"return",
"wrapper",
"return",
"func_wrapper"
] |
If the function encounters an exception, catch it, and
return the specified default or send it to a handler function instead.
.. code :: python
def handle_error(exception, func, *args, **kwargs):
print(f"{func.__name__} raised {exception} when called with {args}")
@reusables.catch_it(handler=handle_error)
def will_raise(message="Hello"):
raise Exception(message)
:param exceptions: tuple of exceptions to catch
:param default: what to return if the exception is caught
:param handler: function that receives the exception, func, *args and **kwargs
|
[
"If",
"the",
"function",
"encounters",
"an",
"exception",
"catch",
"it",
"and",
"return",
"the",
"specified",
"default",
"or",
"sent",
"to",
"a",
"handler",
"function",
"instead",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/wrappers.py#L275-L304
|
cdgriffith/Reusables
|
reusables/wrappers.py
|
retry_it
|
def retry_it(exceptions=(Exception, ), tries=10, wait=0, handler=None,
raised_exception=ReusablesError, raised_message=None):
"""
    Retry a function if an exception is raised, or if the handler check returns
False.
Message format options: {func} {args} {kwargs}
:param exceptions: tuple of exceptions to catch
:param tries: number of tries to retry the function
:param wait: time to wait between executions in seconds
:param handler: function to check if output is valid, must return bool
:param raised_exception: default is ReusablesError
:param raised_message: message to pass to raised exception
"""
def func_wrapper(func):
@wraps(func)
def wrapper(*args, **kwargs):
msg = (raised_message if raised_message
else "Max retries exceeded for function '{func}'")
if not raised_message:
msg = _add_args(msg, *args, **kwargs)
try:
result = func(*args, **kwargs)
except exceptions:
if tries:
if wait:
time.sleep(wait)
return retry_it(exceptions=exceptions, tries=tries-1,
handler=handler,
wait=wait)(func)(*args, **kwargs)
if raised_exception:
exc = raised_exception(msg.format(func=func.__name__,
args=args, kwargs=kwargs))
exc.__cause__ = None
raise exc
else:
if handler:
if not handler(result):
return retry_it(exceptions=exceptions, tries=tries - 1,
handler=handler,
wait=wait)(func)(*args, **kwargs)
return result
return wrapper
return func_wrapper
|
python
|
def retry_it(exceptions=(Exception, ), tries=10, wait=0, handler=None,
raised_exception=ReusablesError, raised_message=None):
"""
    Retry a function if an exception is raised, or if the handler check returns
False.
Message format options: {func} {args} {kwargs}
:param exceptions: tuple of exceptions to catch
:param tries: number of tries to retry the function
:param wait: time to wait between executions in seconds
:param handler: function to check if output is valid, must return bool
:param raised_exception: default is ReusablesError
:param raised_message: message to pass to raised exception
"""
def func_wrapper(func):
@wraps(func)
def wrapper(*args, **kwargs):
msg = (raised_message if raised_message
else "Max retries exceeded for function '{func}'")
if not raised_message:
msg = _add_args(msg, *args, **kwargs)
try:
result = func(*args, **kwargs)
except exceptions:
if tries:
if wait:
time.sleep(wait)
return retry_it(exceptions=exceptions, tries=tries-1,
handler=handler,
wait=wait)(func)(*args, **kwargs)
if raised_exception:
exc = raised_exception(msg.format(func=func.__name__,
args=args, kwargs=kwargs))
exc.__cause__ = None
raise exc
else:
if handler:
if not handler(result):
return retry_it(exceptions=exceptions, tries=tries - 1,
handler=handler,
wait=wait)(func)(*args, **kwargs)
return result
return wrapper
return func_wrapper
|
[
"def",
"retry_it",
"(",
"exceptions",
"=",
"(",
"Exception",
",",
")",
",",
"tries",
"=",
"10",
",",
"wait",
"=",
"0",
",",
"handler",
"=",
"None",
",",
"raised_exception",
"=",
"ReusablesError",
",",
"raised_message",
"=",
"None",
")",
":",
"def",
"func_wrapper",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"msg",
"=",
"(",
"raised_message",
"if",
"raised_message",
"else",
"\"Max retries exceeded for function '{func}'\"",
")",
"if",
"not",
"raised_message",
":",
"msg",
"=",
"_add_args",
"(",
"msg",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"try",
":",
"result",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"exceptions",
":",
"if",
"tries",
":",
"if",
"wait",
":",
"time",
".",
"sleep",
"(",
"wait",
")",
"return",
"retry_it",
"(",
"exceptions",
"=",
"exceptions",
",",
"tries",
"=",
"tries",
"-",
"1",
",",
"handler",
"=",
"handler",
",",
"wait",
"=",
"wait",
")",
"(",
"func",
")",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"raised_exception",
":",
"exc",
"=",
"raised_exception",
"(",
"msg",
".",
"format",
"(",
"func",
"=",
"func",
".",
"__name__",
",",
"args",
"=",
"args",
",",
"kwargs",
"=",
"kwargs",
")",
")",
"exc",
".",
"__cause__",
"=",
"None",
"raise",
"exc",
"else",
":",
"if",
"handler",
":",
"if",
"not",
"handler",
"(",
"result",
")",
":",
"return",
"retry_it",
"(",
"exceptions",
"=",
"exceptions",
",",
"tries",
"=",
"tries",
"-",
"1",
",",
"handler",
"=",
"handler",
",",
"wait",
"=",
"wait",
")",
"(",
"func",
")",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"result",
"return",
"wrapper",
"return",
"func_wrapper"
] |
Retry a function if an exception is raised, or if the handler check returns
False.
Message format options: {func} {args} {kwargs}
:param exceptions: tuple of exceptions to catch
:param tries: number of tries to retry the function
:param wait: time to wait between executions in seconds
:param handler: function to check if output is valid, must return bool
:param raised_exception: default is ReusablesError
:param raised_message: message to pass to raised exception
|
[
"Retry",
"a",
"function",
"if",
"an",
"exception",
"is",
"raised",
"or",
"if",
"output_check",
"returns",
"False",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/wrappers.py#L307-L351
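Unlike the other wrappers in this file, retry_it's docstring carries no usage example; a hypothetical sketch, assuming the same package-level export:

```python
import reusables

attempts = []

@reusables.retry_it(exceptions=(ValueError,), tries=3, wait=0.1)
def flaky():
    attempts.append(1)
    if len(attempts) < 3:
        raise ValueError("not yet")
    return "ok"

# The first two calls raise and are retried (sleeping 0.1s between tries).
print(flaky())  # "ok" on the third attempt
```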
|
cdgriffith/Reusables
|
reusables/file_operations.py
|
extract
|
def extract(archive_file, path=".", delete_on_success=False,
enable_rar=False):
"""
Automatically detect archive type and extract all files to specified path.
.. code:: python
import os
os.listdir(".")
# ['test_structure.zip']
reusables.extract("test_structure.zip")
os.listdir(".")
# [ 'test_structure', 'test_structure.zip']
:param archive_file: path to file to extract
:param path: location to extract to
:param delete_on_success: Will delete the original archive if set to True
:param enable_rar: include the rarfile import and extract
:return: path to extracted files
"""
if not os.path.exists(archive_file) or not os.path.getsize(archive_file):
logger.error("File {0} unextractable".format(archive_file))
raise OSError("File does not exist or has zero size")
arch = None
if zipfile.is_zipfile(archive_file):
logger.debug("File {0} detected as a zip file".format(archive_file))
arch = zipfile.ZipFile(archive_file)
elif tarfile.is_tarfile(archive_file):
logger.debug("File {0} detected as a tar file".format(archive_file))
arch = tarfile.open(archive_file)
elif enable_rar:
import rarfile
if rarfile.is_rarfile(archive_file):
logger.debug("File {0} detected as "
"a rar file".format(archive_file))
arch = rarfile.RarFile(archive_file)
if not arch:
raise TypeError("File is not a known archive")
logger.debug("Extracting files to {0}".format(path))
try:
arch.extractall(path=path)
finally:
arch.close()
if delete_on_success:
logger.debug("Archive {0} will now be deleted".format(archive_file))
os.unlink(archive_file)
return os.path.abspath(path)
|
python
|
def extract(archive_file, path=".", delete_on_success=False,
enable_rar=False):
"""
Automatically detect archive type and extract all files to specified path.
.. code:: python
import os
os.listdir(".")
# ['test_structure.zip']
reusables.extract("test_structure.zip")
os.listdir(".")
# [ 'test_structure', 'test_structure.zip']
:param archive_file: path to file to extract
:param path: location to extract to
:param delete_on_success: Will delete the original archive if set to True
:param enable_rar: include the rarfile import and extract
:return: path to extracted files
"""
if not os.path.exists(archive_file) or not os.path.getsize(archive_file):
logger.error("File {0} unextractable".format(archive_file))
raise OSError("File does not exist or has zero size")
arch = None
if zipfile.is_zipfile(archive_file):
logger.debug("File {0} detected as a zip file".format(archive_file))
arch = zipfile.ZipFile(archive_file)
elif tarfile.is_tarfile(archive_file):
logger.debug("File {0} detected as a tar file".format(archive_file))
arch = tarfile.open(archive_file)
elif enable_rar:
import rarfile
if rarfile.is_rarfile(archive_file):
logger.debug("File {0} detected as "
"a rar file".format(archive_file))
arch = rarfile.RarFile(archive_file)
if not arch:
raise TypeError("File is not a known archive")
logger.debug("Extracting files to {0}".format(path))
try:
arch.extractall(path=path)
finally:
arch.close()
if delete_on_success:
logger.debug("Archive {0} will now be deleted".format(archive_file))
os.unlink(archive_file)
return os.path.abspath(path)
|
[
"def",
"extract",
"(",
"archive_file",
",",
"path",
"=",
"\".\"",
",",
"delete_on_success",
"=",
"False",
",",
"enable_rar",
"=",
"False",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"archive_file",
")",
"or",
"not",
"os",
".",
"path",
".",
"getsize",
"(",
"archive_file",
")",
":",
"logger",
".",
"error",
"(",
"\"File {0} unextractable\"",
".",
"format",
"(",
"archive_file",
")",
")",
"raise",
"OSError",
"(",
"\"File does not exist or has zero size\"",
")",
"arch",
"=",
"None",
"if",
"zipfile",
".",
"is_zipfile",
"(",
"archive_file",
")",
":",
"logger",
".",
"debug",
"(",
"\"File {0} detected as a zip file\"",
".",
"format",
"(",
"archive_file",
")",
")",
"arch",
"=",
"zipfile",
".",
"ZipFile",
"(",
"archive_file",
")",
"elif",
"tarfile",
".",
"is_tarfile",
"(",
"archive_file",
")",
":",
"logger",
".",
"debug",
"(",
"\"File {0} detected as a tar file\"",
".",
"format",
"(",
"archive_file",
")",
")",
"arch",
"=",
"tarfile",
".",
"open",
"(",
"archive_file",
")",
"elif",
"enable_rar",
":",
"import",
"rarfile",
"if",
"rarfile",
".",
"is_rarfile",
"(",
"archive_file",
")",
":",
"logger",
".",
"debug",
"(",
"\"File {0} detected as \"",
"\"a rar file\"",
".",
"format",
"(",
"archive_file",
")",
")",
"arch",
"=",
"rarfile",
".",
"RarFile",
"(",
"archive_file",
")",
"if",
"not",
"arch",
":",
"raise",
"TypeError",
"(",
"\"File is not a known archive\"",
")",
"logger",
".",
"debug",
"(",
"\"Extracting files to {0}\"",
".",
"format",
"(",
"path",
")",
")",
"try",
":",
"arch",
".",
"extractall",
"(",
"path",
"=",
"path",
")",
"finally",
":",
"arch",
".",
"close",
"(",
")",
"if",
"delete_on_success",
":",
"logger",
".",
"debug",
"(",
"\"Archive {0} will now be deleted\"",
".",
"format",
"(",
"archive_file",
")",
")",
"os",
".",
"unlink",
"(",
"archive_file",
")",
"return",
"os",
".",
"path",
".",
"abspath",
"(",
"path",
")"
] |
Automatically detect archive type and extract all files to specified path.
.. code:: python
import os
os.listdir(".")
# ['test_structure.zip']
reusables.extract("test_structure.zip")
os.listdir(".")
# [ 'test_structure', 'test_structure.zip']
:param archive_file: path to file to extract
:param path: location to extract to
:param delete_on_success: Will delete the original archive if set to True
:param enable_rar: include the rarfile import and extract
:return: path to extracted files
|
[
"Automatically",
"detect",
"archive",
"type",
"and",
"extract",
"all",
"files",
"to",
"specified",
"path",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/file_operations.py#L35-L92
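A short, hedged sketch of the optional flags described above; the archive name is hypothetical, and the rarfile package must be installed separately before passing enable_rar=True.
.. code:: python
import reusables
# extract into ./restored, then remove the source archive
dest = reusables.extract("backup.tar.gz", path="restored",
                         delete_on_success=True)
print(dest)  # absolute path of the "restored" directory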
|
cdgriffith/Reusables
|
reusables/file_operations.py
|
archive
|
def archive(files_to_archive, name="archive.zip", archive_type=None,
overwrite=False, store=False, depth=None, err_non_exist=True,
allow_zip_64=True, **tarfile_kwargs):
""" Archive a list of files (or files inside a folder), can chose between
- zip
- tar
- gz (tar.gz, tgz)
- bz2 (tar.bz2)
.. code:: python
reusables.archive(['reusables', '.travis.yml'],
name="my_archive.bz2")
# 'C:\\Users\\Me\\Reusables\\my_archive.bz2'
:param files_to_archive: list of files and folders to archive
:param name: path and name of archive file
:param archive_type: auto-detects unless specified
:param overwrite: overwrite if archive exists
:param store: zipfile only, True will not compress files
:param depth: specify max depth for folders
:param err_non_exist: raise error if provided file does not exist
:param allow_zip_64: must be enabled for zip files larger than 2GB
:param tarfile_kwargs: extra args to pass to tarfile.open
:return: path to created archive
"""
if not isinstance(files_to_archive, (list, tuple)):
files_to_archive = [files_to_archive]
if not archive_type:
if name.lower().endswith("zip"):
archive_type = "zip"
elif name.lower().endswith("gz"):
archive_type = "gz"
elif name.lower().endswith("z2"):
archive_type = "bz2"
elif name.lower().endswith("tar"):
archive_type = "tar"
else:
err_msg = ("Could not determine archive "
"type based off {0}".format(name))
logger.error(err_msg)
raise ValueError(err_msg)
logger.debug("{0} file detected for {1}".format(archive_type, name))
elif archive_type not in ("tar", "gz", "bz2", "zip"):
err_msg = ("archive_type must be zip, gz, bz2,"
" or gz, was {0}".format(archive_type))
logger.error(err_msg)
raise ValueError(err_msg)
if not overwrite and os.path.exists(name):
err_msg = "File {0} exists and overwrite not specified".format(name)
logger.error(err_msg)
raise OSError(err_msg)
if archive_type == "zip":
arch = zipfile.ZipFile(name, 'w',
zipfile.ZIP_STORED if store else
zipfile.ZIP_DEFLATED,
allowZip64=allow_zip_64)
write = arch.write
elif archive_type in ("tar", "gz", "bz2"):
mode = archive_type if archive_type != "tar" else ""
arch = tarfile.open(name, 'w:{0}'.format(mode), **tarfile_kwargs)
write = arch.add
else:
raise ValueError("archive_type must be zip, gz, bz2, or gz")
try:
for file_path in files_to_archive:
if os.path.isfile(file_path):
if err_non_exist and not os.path.exists(file_path):
raise OSError("File {0} does not exist".format(file_path))
write(file_path)
elif os.path.isdir(file_path):
for nf in find_files(file_path, abspath=False, depth=depth):
write(nf)
except (Exception, KeyboardInterrupt) as err:
logger.exception("Could not archive {0}".format(files_to_archive))
try:
arch.close()
finally:
os.unlink(name)
raise err
else:
arch.close()
return os.path.abspath(name)
|
python
|
def archive(files_to_archive, name="archive.zip", archive_type=None,
overwrite=False, store=False, depth=None, err_non_exist=True,
allow_zip_64=True, **tarfile_kwargs):
""" Archive a list of files (or files inside a folder), can chose between
- zip
- tar
- gz (tar.gz, tgz)
- bz2 (tar.bz2)
.. code:: python
reusables.archive(['reusables', '.travis.yml'],
name="my_archive.bz2")
# 'C:\\Users\\Me\\Reusables\\my_archive.bz2'
:param files_to_archive: list of files and folders to archive
:param name: path and name of archive file
:param archive_type: auto-detects unless specified
:param overwrite: overwrite if archive exists
:param store: zipfile only, True will not compress files
:param depth: specify max depth for folders
:param err_non_exist: raise error if provided file does not exist
:param allow_zip_64: must be enabled for zip files larger than 2GB
:param tarfile_kwargs: extra args to pass to tarfile.open
:return: path to created archive
"""
if not isinstance(files_to_archive, (list, tuple)):
files_to_archive = [files_to_archive]
if not archive_type:
if name.lower().endswith("zip"):
archive_type = "zip"
elif name.lower().endswith("gz"):
archive_type = "gz"
elif name.lower().endswith("z2"):
archive_type = "bz2"
elif name.lower().endswith("tar"):
archive_type = "tar"
else:
err_msg = ("Could not determine archive "
"type based off {0}".format(name))
logger.error(err_msg)
raise ValueError(err_msg)
logger.debug("{0} file detected for {1}".format(archive_type, name))
elif archive_type not in ("tar", "gz", "bz2", "zip"):
err_msg = ("archive_type must be zip, gz, bz2,"
" or gz, was {0}".format(archive_type))
logger.error(err_msg)
raise ValueError(err_msg)
if not overwrite and os.path.exists(name):
err_msg = "File {0} exists and overwrite not specified".format(name)
logger.error(err_msg)
raise OSError(err_msg)
if archive_type == "zip":
arch = zipfile.ZipFile(name, 'w',
zipfile.ZIP_STORED if store else
zipfile.ZIP_DEFLATED,
allowZip64=allow_zip_64)
write = arch.write
elif archive_type in ("tar", "gz", "bz2"):
mode = archive_type if archive_type != "tar" else ""
arch = tarfile.open(name, 'w:{0}'.format(mode), **tarfile_kwargs)
write = arch.add
else:
raise ValueError("archive_type must be zip, gz, bz2, or gz")
try:
for file_path in files_to_archive:
if os.path.isfile(file_path):
if err_non_exist and not os.path.exists(file_path):
raise OSError("File {0} does not exist".format(file_path))
write(file_path)
elif os.path.isdir(file_path):
for nf in find_files(file_path, abspath=False, depth=depth):
write(nf)
except (Exception, KeyboardInterrupt) as err:
logger.exception("Could not archive {0}".format(files_to_archive))
try:
arch.close()
finally:
os.unlink(name)
raise err
else:
arch.close()
return os.path.abspath(name)
|
[
"def",
"archive",
"(",
"files_to_archive",
",",
"name",
"=",
"\"archive.zip\"",
",",
"archive_type",
"=",
"None",
",",
"overwrite",
"=",
"False",
",",
"store",
"=",
"False",
",",
"depth",
"=",
"None",
",",
"err_non_exist",
"=",
"True",
",",
"allow_zip_64",
"=",
"True",
",",
"*",
"*",
"tarfile_kwargs",
")",
":",
"if",
"not",
"isinstance",
"(",
"files_to_archive",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"files_to_archive",
"=",
"[",
"files_to_archive",
"]",
"if",
"not",
"archive_type",
":",
"if",
"name",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"\"zip\"",
")",
":",
"archive_type",
"=",
"\"zip\"",
"elif",
"name",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"\"gz\"",
")",
":",
"archive_type",
"=",
"\"gz\"",
"elif",
"name",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"\"z2\"",
")",
":",
"archive_type",
"=",
"\"bz2\"",
"elif",
"name",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"\"tar\"",
")",
":",
"archive_type",
"=",
"\"tar\"",
"else",
":",
"err_msg",
"=",
"(",
"\"Could not determine archive \"",
"\"type based off {0}\"",
".",
"format",
"(",
"name",
")",
")",
"logger",
".",
"error",
"(",
"err_msg",
")",
"raise",
"ValueError",
"(",
"err_msg",
")",
"logger",
".",
"debug",
"(",
"\"{0} file detected for {1}\"",
".",
"format",
"(",
"archive_type",
",",
"name",
")",
")",
"elif",
"archive_type",
"not",
"in",
"(",
"\"tar\"",
",",
"\"gz\"",
",",
"\"bz2\"",
",",
"\"zip\"",
")",
":",
"err_msg",
"=",
"(",
"\"archive_type must be zip, gz, bz2,\"",
"\" or gz, was {0}\"",
".",
"format",
"(",
"archive_type",
")",
")",
"logger",
".",
"error",
"(",
"err_msg",
")",
"raise",
"ValueError",
"(",
"err_msg",
")",
"if",
"not",
"overwrite",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"name",
")",
":",
"err_msg",
"=",
"\"File {0} exists and overwrite not specified\"",
".",
"format",
"(",
"name",
")",
"logger",
".",
"error",
"(",
"err_msg",
")",
"raise",
"OSError",
"(",
"err_msg",
")",
"if",
"archive_type",
"==",
"\"zip\"",
":",
"arch",
"=",
"zipfile",
".",
"ZipFile",
"(",
"name",
",",
"'w'",
",",
"zipfile",
".",
"ZIP_STORED",
"if",
"store",
"else",
"zipfile",
".",
"ZIP_DEFLATED",
",",
"allowZip64",
"=",
"allow_zip_64",
")",
"write",
"=",
"arch",
".",
"write",
"elif",
"archive_type",
"in",
"(",
"\"tar\"",
",",
"\"gz\"",
",",
"\"bz2\"",
")",
":",
"mode",
"=",
"archive_type",
"if",
"archive_type",
"!=",
"\"tar\"",
"else",
"\"\"",
"arch",
"=",
"tarfile",
".",
"open",
"(",
"name",
",",
"'w:{0}'",
".",
"format",
"(",
"mode",
")",
",",
"*",
"*",
"tarfile_kwargs",
")",
"write",
"=",
"arch",
".",
"add",
"else",
":",
"raise",
"ValueError",
"(",
"\"archive_type must be zip, gz, bz2, or gz\"",
")",
"try",
":",
"for",
"file_path",
"in",
"files_to_archive",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"file_path",
")",
":",
"if",
"err_non_exist",
"and",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"file_path",
")",
":",
"raise",
"OSError",
"(",
"\"File {0} does not exist\"",
".",
"format",
"(",
"file_path",
")",
")",
"write",
"(",
"file_path",
")",
"elif",
"os",
".",
"path",
".",
"isdir",
"(",
"file_path",
")",
":",
"for",
"nf",
"in",
"find_files",
"(",
"file_path",
",",
"abspath",
"=",
"False",
",",
"depth",
"=",
"depth",
")",
":",
"write",
"(",
"nf",
")",
"except",
"(",
"Exception",
",",
"KeyboardInterrupt",
")",
"as",
"err",
":",
"logger",
".",
"exception",
"(",
"\"Could not archive {0}\"",
".",
"format",
"(",
"files_to_archive",
")",
")",
"try",
":",
"arch",
".",
"close",
"(",
")",
"finally",
":",
"os",
".",
"unlink",
"(",
"name",
")",
"raise",
"err",
"else",
":",
"arch",
".",
"close",
"(",
")",
"return",
"os",
".",
"path",
".",
"abspath",
"(",
"name",
")"
] |
Archive a list of files (or files inside a folder), can choose between
- zip
- tar
- gz (tar.gz, tgz)
- bz2 (tar.bz2)
.. code:: python
reusables.archive(['reusables', '.travis.yml'],
name="my_archive.bz2")
# 'C:\\Users\\Me\\Reusables\\my_archive.bz2'
:param files_to_archive: list of files and folders to archive
:param name: path and name of archive file
:param archive_type: auto-detects unless specified
:param overwrite: overwrite if archive exists
:param store: zipfile only, True will not compress files
:param depth: specify max depth for folders
:param err_non_exist: raise error if provided file does not exist
:param allow_zip_64: must be enabled for zip files larger than 2GB
:param tarfile_kwargs: extra args to pass to tarfile.open
:return: path to created archive
|
[
"Archive",
"a",
"list",
"of",
"files",
"(",
"or",
"files",
"inside",
"a",
"folder",
")",
"can",
"chose",
"between"
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/file_operations.py#L95-L183
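A minimal sketch pairing archive with the extract helper above. The file names are hypothetical; archive_type is auto-detected from the .tar.gz suffix per the docstring.
.. code:: python
import reusables
bundle = reusables.archive(["reusables", "README.rst"],
                           name="bundle.tar.gz", overwrite=True)
reusables.extract(bundle, path="unpacked")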
|
cdgriffith/Reusables
|
reusables/file_operations.py
|
list_to_csv
|
def list_to_csv(my_list, csv_file):
"""
Save a matrix (list of lists) to a file as a CSV
.. code:: python
my_list = [["Name", "Location"],
["Chris", "South Pole"],
["Harry", "Depth of Winter"],
["Bob", "Skull"]]
reusables.list_to_csv(my_list, "example.csv")
example.csv
.. code:: csv
"Name","Location"
"Chris","South Pole"
"Harry","Depth of Winter"
"Bob","Skull"
:param my_list: list of lists to save to CSV
:param csv_file: File to save data to
"""
if PY3:
csv_handler = open(csv_file, 'w', newline='')
else:
csv_handler = open(csv_file, 'wb')
try:
writer = csv.writer(csv_handler, delimiter=',', quoting=csv.QUOTE_ALL)
writer.writerows(my_list)
finally:
csv_handler.close()
|
python
|
def list_to_csv(my_list, csv_file):
"""
Save a matrix (list of lists) to a file as a CSV
.. code:: python
my_list = [["Name", "Location"],
["Chris", "South Pole"],
["Harry", "Depth of Winter"],
["Bob", "Skull"]]
reusables.list_to_csv(my_list, "example.csv")
example.csv
.. code:: csv
"Name","Location"
"Chris","South Pole"
"Harry","Depth of Winter"
"Bob","Skull"
:param my_list: list of lists to save to CSV
:param csv_file: File to save data to
"""
if PY3:
csv_handler = open(csv_file, 'w', newline='')
else:
csv_handler = open(csv_file, 'wb')
try:
writer = csv.writer(csv_handler, delimiter=',', quoting=csv.QUOTE_ALL)
writer.writerows(my_list)
finally:
csv_handler.close()
|
[
"def",
"list_to_csv",
"(",
"my_list",
",",
"csv_file",
")",
":",
"if",
"PY3",
":",
"csv_handler",
"=",
"open",
"(",
"csv_file",
",",
"'w'",
",",
"newline",
"=",
"''",
")",
"else",
":",
"csv_handler",
"=",
"open",
"(",
"csv_file",
",",
"'wb'",
")",
"try",
":",
"writer",
"=",
"csv",
".",
"writer",
"(",
"csv_handler",
",",
"delimiter",
"=",
"','",
",",
"quoting",
"=",
"csv",
".",
"QUOTE_ALL",
")",
"writer",
".",
"writerows",
"(",
"my_list",
")",
"finally",
":",
"csv_handler",
".",
"close",
"(",
")"
] |
Save a matrix (list of lists) to a file as a CSV
.. code:: python
my_list = [["Name", "Location"],
["Chris", "South Pole"],
["Harry", "Depth of Winter"],
["Bob", "Skull"]]
reusables.list_to_csv(my_list, "example.csv")
example.csv
.. code:: csv
"Name","Location"
"Chris","South Pole"
"Harry","Depth of Winter"
"Bob","Skull"
:param my_list: list of lists to save to CSV
:param csv_file: File to save data to
|
[
"Save",
"a",
"matrix",
"(",
"list",
"of",
"lists",
")",
"to",
"a",
"file",
"as",
"a",
"CSV"
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/file_operations.py#L186-L220
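A round-trip sketch combining list_to_csv with csv_to_list (documented next); the file name is hypothetical. Values come back as strings, which is why the comparison holds.
.. code:: python
import reusables
rows = [["Name", "Location"], ["Chris", "South Pole"]]
reusables.list_to_csv(rows, "roundtrip.csv")
assert reusables.csv_to_list("roundtrip.csv") == rows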
|
cdgriffith/Reusables
|
reusables/file_operations.py
|
csv_to_list
|
def csv_to_list(csv_file):
"""
Open and transform a CSV file into a matrix (list of lists).
.. code:: python
reusables.csv_to_list("example.csv")
# [['Name', 'Location'],
# ['Chris', 'South Pole'],
# ['Harry', 'Depth of Winter'],
# ['Bob', 'Skull']]
:param csv_file: Path to CSV file as str
:return: list
"""
with open(csv_file, 'r' if PY3 else 'rb') as f:
return list(csv.reader(f))
|
python
|
def csv_to_list(csv_file):
"""
Open and transform a CSV file into a matrix (list of lists).
.. code:: python
reusables.csv_to_list("example.csv")
# [['Name', 'Location'],
# ['Chris', 'South Pole'],
# ['Harry', 'Depth of Winter'],
# ['Bob', 'Skull']]
:param csv_file: Path to CSV file as str
:return: list
"""
with open(csv_file, 'r' if PY3 else 'rb') as f:
return list(csv.reader(f))
|
[
"def",
"csv_to_list",
"(",
"csv_file",
")",
":",
"with",
"open",
"(",
"csv_file",
",",
"'r'",
"if",
"PY3",
"else",
"'rb'",
")",
"as",
"f",
":",
"return",
"list",
"(",
"csv",
".",
"reader",
"(",
"f",
")",
")"
] |
Open and transform a CSV file into a matrix (list of lists).
.. code:: python
reusables.csv_to_list("example.csv")
# [['Name', 'Location'],
# ['Chris', 'South Pole'],
# ['Harry', 'Depth of Winter'],
# ['Bob', 'Skull']]
:param csv_file: Path to CSV file as str
:return: list
|
[
"Open",
"and",
"transform",
"a",
"CSV",
"file",
"into",
"a",
"matrix",
"(",
"list",
"of",
"lists",
")",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/file_operations.py#L223-L239
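A small follow-on sketch turning the returned matrix into per-row dictionaries keyed by the header row; it assumes the hypothetical example.csv from the docstring exists.
.. code:: python
import reusables
rows = reusables.csv_to_list("example.csv")
header, data = rows[0], rows[1:]
records = [dict(zip(header, row)) for row in data]
# [{'Name': 'Chris', 'Location': 'South Pole'}, ...]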
|
cdgriffith/Reusables
|
reusables/file_operations.py
|
load_json
|
def load_json(json_file, **kwargs):
"""
Open and load data from a JSON file
.. code:: python
reusables.load_json("example.json")
# {u'key_1': u'val_1', u'key_for_dict': {u'sub_dict_key': 8}}
:param json_file: Path to JSON file as string
:param kwargs: Additional arguments for the json.load command
:return: Dictionary
"""
with open(json_file) as f:
return json.load(f, **kwargs)
|
python
|
def load_json(json_file, **kwargs):
"""
Open and load data from a JSON file
.. code:: python
reusables.load_json("example.json")
# {u'key_1': u'val_1', u'key_for_dict': {u'sub_dict_key': 8}}
:param json_file: Path to JSON file as string
:param kwargs: Additional arguments for the json.load command
:return: Dictionary
"""
with open(json_file) as f:
return json.load(f, **kwargs)
|
[
"def",
"load_json",
"(",
"json_file",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"open",
"(",
"json_file",
")",
"as",
"f",
":",
"return",
"json",
".",
"load",
"(",
"f",
",",
"*",
"*",
"kwargs",
")"
] |
Open and load data from a JSON file
.. code:: python
reusables.load_json("example.json")
# {u'key_1': u'val_1', u'key_for_dict': {u'sub_dict_key': 8}}
:param json_file: Path to JSON file as string
:param kwargs: Additional arguments for the json.load command
:return: Dictionary
|
[
"Open",
"and",
"load",
"data",
"from",
"a",
"JSON",
"file"
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/file_operations.py#L242-L256
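A hedged sketch of the kwargs pass-through: any keyword that json.load accepts can be forwarded. The file name is hypothetical; object_pairs_hook is a standard json argument.
.. code:: python
from collections import OrderedDict
import reusables
data = reusables.load_json("example.json",
                           object_pairs_hook=OrderedDict)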
|
cdgriffith/Reusables
|
reusables/file_operations.py
|
save_json
|
def save_json(data, json_file, indent=4, **kwargs):
"""
Takes a dictionary and saves it to a file as JSON
.. code:: python
my_dict = {"key_1": "val_1",
"key_for_dict": {"sub_dict_key": 8}}
reusables.save_json(my_dict,"example.json")
example.json
.. code::
{
"key_1": "val_1",
"key_for_dict": {
"sub_dict_key": 8
}
}
:param data: dictionary to save as JSON
:param json_file: Path to save file location as str
:param indent: Format the JSON file with this many spaces of indentation
:param kwargs: Additional arguments for the json.dump command
"""
with open(json_file, "w") as f:
json.dump(data, f, indent=indent, **kwargs)
|
python
|
def save_json(data, json_file, indent=4, **kwargs):
"""
Takes a dictionary and saves it to a file as JSON
.. code:: python
my_dict = {"key_1": "val_1",
"key_for_dict": {"sub_dict_key": 8}}
reusables.save_json(my_dict,"example.json")
example.json
.. code::
{
"key_1": "val_1",
"key_for_dict": {
"sub_dict_key": 8
}
}
:param data: dictionary to save as JSON
:param json_file: Path to save file location as str
:param indent: Format the JSON file with this many spaces of indentation
:param kwargs: Additional arguments for the json.dump command
"""
with open(json_file, "w") as f:
json.dump(data, f, indent=indent, **kwargs)
|
[
"def",
"save_json",
"(",
"data",
",",
"json_file",
",",
"indent",
"=",
"4",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"open",
"(",
"json_file",
",",
"\"w\"",
")",
"as",
"f",
":",
"json",
".",
"dump",
"(",
"data",
",",
"f",
",",
"indent",
"=",
"indent",
",",
"*",
"*",
"kwargs",
")"
] |
Takes a dictionary and saves it to a file as JSON
.. code:: python
my_dict = {"key_1": "val_1",
"key_for_dict": {"sub_dict_key": 8}}
reusables.save_json(my_dict,"example.json")
example.json
.. code::
{
"key_1": "val_1",
"key_for_dict": {
"sub_dict_key": 8
}
}
:param data: dictionary to save as JSON
:param json_file: Path to save file location as str
:param indent: Format the JSON file with this many spaces of indentation
:param kwargs: Additional arguments for the json.dump command
|
[
"Takes",
"a",
"dictionary",
"and",
"saves",
"it",
"to",
"a",
"file",
"as",
"JSON"
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/file_operations.py#L259-L287
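A round-trip sketch with load_json from above; sort_keys is simply forwarded to json.dump, and the file name is hypothetical.
.. code:: python
import reusables
payload = {"key_1": "val_1", "key_for_dict": {"sub_dict_key": 8}}
reusables.save_json(payload, "example.json", indent=2, sort_keys=True)
assert reusables.load_json("example.json") == payload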
|
cdgriffith/Reusables
|
reusables/file_operations.py
|
config_dict
|
def config_dict(config_file=None, auto_find=False, verify=True, **cfg_options):
"""
Return configuration options as dictionary. Accepts either a single
config file or a list of files. Auto find will search for all .cfg, .config
and .ini in the execution directory and package root (unsafe but handy).
.. code:: python
reusables.config_dict(os.path.join("test", "data", "test_config.ini"))
# {'General': {'example': 'A regular string'},
# 'Section 2': {'anint': '234',
# 'examplelist': '234,123,234,543',
# 'floatly': '4.4',
# 'my_bool': 'yes'}}
:param config_file: path or paths to the config file location(s)
:param auto_find: look for a config type file at this location or below
:param verify: make sure the file exists before trying to read
:param cfg_options: options to pass to the parser
:return: dictionary of the config files
"""
if not config_file:
config_file = []
cfg_parser = ConfigParser.ConfigParser(**cfg_options)
cfg_files = []
if config_file:
if not isinstance(config_file, (list, tuple)):
if isinstance(config_file, str):
cfg_files.append(config_file)
else:
raise TypeError("config_files must be a list or a string")
else:
cfg_files.extend(config_file)
if auto_find:
cfg_files.extend(find_files_list(
current_root if isinstance(auto_find, bool) else auto_find,
ext=(".cfg", ".config", ".ini")))
logger.info("config files to be used: {0}".format(cfg_files))
if verify:
cfg_parser.read([cfg for cfg in cfg_files if os.path.exists(cfg)])
else:
cfg_parser.read(cfg_files)
return dict((section, dict(cfg_parser.items(section)))
for section in cfg_parser.sections())
|
python
|
def config_dict(config_file=None, auto_find=False, verify=True, **cfg_options):
"""
Return configuration options as dictionary. Accepts either a single
config file or a list of files. Auto find will search for all .cfg, .config
and .ini in the execution directory and package root (unsafe but handy).
.. code:: python
reusables.config_dict(os.path.join("test", "data", "test_config.ini"))
# {'General': {'example': 'A regular string'},
# 'Section 2': {'anint': '234',
# 'examplelist': '234,123,234,543',
# 'floatly': '4.4',
# 'my_bool': 'yes'}}
:param config_file: path or paths to the config file location(s)
:param auto_find: look for a config type file at this location or below
:param verify: make sure the file exists before trying to read
:param cfg_options: options to pass to the parser
:return: dictionary of the config files
"""
if not config_file:
config_file = []
cfg_parser = ConfigParser.ConfigParser(**cfg_options)
cfg_files = []
if config_file:
if not isinstance(config_file, (list, tuple)):
if isinstance(config_file, str):
cfg_files.append(config_file)
else:
raise TypeError("config_files must be a list or a string")
else:
cfg_files.extend(config_file)
if auto_find:
cfg_files.extend(find_files_list(
current_root if isinstance(auto_find, bool) else auto_find,
ext=(".cfg", ".config", ".ini")))
logger.info("config files to be used: {0}".format(cfg_files))
if verify:
cfg_parser.read([cfg for cfg in cfg_files if os.path.exists(cfg)])
else:
cfg_parser.read(cfg_files)
return dict((section, dict(cfg_parser.items(section)))
for section in cfg_parser.sections())
|
[
"def",
"config_dict",
"(",
"config_file",
"=",
"None",
",",
"auto_find",
"=",
"False",
",",
"verify",
"=",
"True",
",",
"*",
"*",
"cfg_options",
")",
":",
"if",
"not",
"config_file",
":",
"config_file",
"=",
"[",
"]",
"cfg_parser",
"=",
"ConfigParser",
".",
"ConfigParser",
"(",
"*",
"*",
"cfg_options",
")",
"cfg_files",
"=",
"[",
"]",
"if",
"config_file",
":",
"if",
"not",
"isinstance",
"(",
"config_file",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"if",
"isinstance",
"(",
"config_file",
",",
"str",
")",
":",
"cfg_files",
".",
"append",
"(",
"config_file",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"config_files must be a list or a string\"",
")",
"else",
":",
"cfg_files",
".",
"extend",
"(",
"config_file",
")",
"if",
"auto_find",
":",
"cfg_files",
".",
"extend",
"(",
"find_files_list",
"(",
"current_root",
"if",
"isinstance",
"(",
"auto_find",
",",
"bool",
")",
"else",
"auto_find",
",",
"ext",
"=",
"(",
"\".cfg\"",
",",
"\".config\"",
",",
"\".ini\"",
")",
")",
")",
"logger",
".",
"info",
"(",
"\"config files to be used: {0}\"",
".",
"format",
"(",
"cfg_files",
")",
")",
"if",
"verify",
":",
"cfg_parser",
".",
"read",
"(",
"[",
"cfg",
"for",
"cfg",
"in",
"cfg_files",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"cfg",
")",
"]",
")",
"else",
":",
"cfg_parser",
".",
"read",
"(",
"cfg_files",
")",
"return",
"dict",
"(",
"(",
"section",
",",
"dict",
"(",
"cfg_parser",
".",
"items",
"(",
"section",
")",
")",
")",
"for",
"section",
"in",
"cfg_parser",
".",
"sections",
"(",
")",
")"
] |
Return configuration options as dictionary. Accepts either a single
config file or a list of files. Auto find will search for all .cfg, .config
and .ini in the execution directory and package root (unsafe but handy).
.. code:: python
reusables.config_dict(os.path.join("test", "data", "test_config.ini"))
# {'General': {'example': 'A regular string'},
# 'Section 2': {'anint': '234',
# 'examplelist': '234,123,234,543',
# 'floatly': '4.4',
# 'my_bool': 'yes'}}
:param config_file: path or paths to the config file location(s)
:param auto_find: look for a config type file at this location or below
:param verify: make sure the file exists before trying to read
:param cfg_options: options to pass to the parser
:return: dictionary of the config files
|
[
"Return",
"configuration",
"options",
"as",
"dictionary",
".",
"Accepts",
"either",
"a",
"single",
"config",
"file",
"or",
"a",
"list",
"of",
"files",
".",
"Auto",
"find",
"will",
"search",
"for",
"all",
".",
"cfg",
".",
"config",
"and",
".",
"ini",
"in",
"the",
"execution",
"directory",
"and",
"package",
"root",
"(",
"unsafe",
"but",
"handy",
")",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/file_operations.py#L290-L340
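A short sketch of auto_find with an explicit starting directory. Passing a string instead of True restricts the search root; the directory, section, and option names here are hypothetical, and note that all parsed values come back as strings.
.. code:: python
import reusables
settings = reusables.config_dict(auto_find="configs")
port = settings.get("Server", {}).get("port", "8080")  # still a str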
|
cdgriffith/Reusables
|
reusables/file_operations.py
|
config_namespace
|
def config_namespace(config_file=None, auto_find=False,
verify=True, **cfg_options):
"""
Return configuration options as a Namespace.
.. code:: python
reusables.config_namespace(os.path.join("test", "data",
"test_config.ini"))
# <Namespace: {'General': {'example': 'A regul...>
:param config_file: path or paths to the config file location(s)
:param auto_find: look for a config type file at this location or below
:param verify: make sure the file exists before trying to read
:param cfg_options: options to pass to the parser
:return: Namespace of the config files
"""
return ConfigNamespace(**config_dict(config_file, auto_find,
verify, **cfg_options))
|
python
|
def config_namespace(config_file=None, auto_find=False,
verify=True, **cfg_options):
"""
Return configuration options as a Namespace.
.. code:: python
reusables.config_namespace(os.path.join("test", "data",
"test_config.ini"))
# <Namespace: {'General': {'example': 'A regul...>
:param config_file: path or paths to the config file location(s)
:param auto_find: look for a config type file at this location or below
:param verify: make sure the file exists before trying to read
:param cfg_options: options to pass to the parser
:return: Namespace of the config files
"""
return ConfigNamespace(**config_dict(config_file, auto_find,
verify, **cfg_options))
|
[
"def",
"config_namespace",
"(",
"config_file",
"=",
"None",
",",
"auto_find",
"=",
"False",
",",
"verify",
"=",
"True",
",",
"*",
"*",
"cfg_options",
")",
":",
"return",
"ConfigNamespace",
"(",
"*",
"*",
"config_dict",
"(",
"config_file",
",",
"auto_find",
",",
"verify",
",",
"*",
"*",
"cfg_options",
")",
")"
] |
Return configuration options as a Namespace.
.. code:: python
reusables.config_namespace(os.path.join("test", "data",
"test_config.ini"))
# <Namespace: {'General': {'example': 'A regul...>
:param config_file: path or paths to the config file location(s)
:param auto_find: look for a config type file at this location or below
:param verify: make sure the file exists before trying to read
:param cfg_options: options to pass to the parser
:return: Namespace of the config files
|
[
"Return",
"configuration",
"options",
"as",
"a",
"Namespace",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/file_operations.py#L343-L362
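A sketch of dotted access on the result; attribute-style lookup on sections and options is assumed from the Namespace repr shown above, and only works for keys that are valid identifiers (so not "Section 2").
.. code:: python
import os
import reusables
cfg = reusables.config_namespace(os.path.join("test", "data",
                                              "test_config.ini"))
print(cfg.General.example)  # "A regular string"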
|
cdgriffith/Reusables
|
reusables/file_operations.py
|
_walk
|
def _walk(directory, enable_scandir=False, **kwargs):
"""
Internal function to return walk generator either from os or scandir
:param directory: directory to traverse
:param enable_scandir: on python < 3.5 enable external scandir package
:param kwargs: arguments to pass to walk function
:return: walk generator
"""
walk = os.walk
if python_version < (3, 5) and enable_scandir:
import scandir
walk = scandir.walk
return walk(directory, **kwargs)
|
python
|
def _walk(directory, enable_scandir=False, **kwargs):
"""
Internal function to return walk generator either from os or scandir
:param directory: directory to traverse
:param enable_scandir: on python < 3.5 enable external scandir package
:param kwargs: arguments to pass to walk function
:return: walk generator
"""
walk = os.walk
if python_version < (3, 5) and enable_scandir:
import scandir
walk = scandir.walk
return walk(directory, **kwargs)
|
[
"def",
"_walk",
"(",
"directory",
",",
"enable_scandir",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"walk",
"=",
"os",
".",
"walk",
"if",
"python_version",
"<",
"(",
"3",
",",
"5",
")",
"and",
"enable_scandir",
":",
"import",
"scandir",
"walk",
"=",
"scandir",
".",
"walk",
"return",
"walk",
"(",
"directory",
",",
"*",
"*",
"kwargs",
")"
] |
Internal function to return walk generator either from os or scandir
:param directory: directory to traverse
:param enable_scandir: on python < 3.5 enable external scandir package
:param kwargs: arguments to pass to walk function
:return: walk generator
|
[
"Internal",
"function",
"to",
"return",
"walk",
"generator",
"either",
"from",
"os",
"or",
"scandir"
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/file_operations.py#L365-L378
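Although _walk is internal, a sketch clarifies the dispatch: with enable_scandir on an interpreter older than 3.5 it swaps in the external scandir package, otherwise it is plain os.walk, so the tuple shape never changes. The import path follows the module location shown above.
.. code:: python
from reusables.file_operations import _walk
for root, dirs, files in _walk(".", enable_scandir=True):
    # same (root, dirs, files) tuples as os.walk on any version
    print(root, len(dirs), len(files))
    break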
|
cdgriffith/Reusables
|
reusables/file_operations.py
|
os_tree
|
def os_tree(directory, enable_scandir=False):
"""
Return a directory's contents as a dictionary hierarchy.
.. code:: python
reusables.os_tree(".")
# {'doc': {'build': {'doctrees': {},
# 'html': {'_sources': {}, '_static': {}}},
# 'source': {}},
# 'reusables': {'__pycache__': {}},
# 'test': {'__pycache__': {}, 'data': {}}}
:param directory: path to the directory to create the tree of.
:param enable_scandir: on python < 3.5 enable external scandir package
:return: dictionary of the directory
"""
if not os.path.exists(directory):
raise OSError("Directory does not exist")
if not os.path.isdir(directory):
raise OSError("Path is not a directory")
full_list = []
for root, dirs, files in _walk(directory, enable_scandir=enable_scandir):
full_list.extend([os.path.join(root, d).lstrip(directory) + os.sep
for d in dirs])
tree = {os.path.basename(directory): {}}
for item in full_list:
separated = item.split(os.sep)
is_dir = separated[-1:] == ['']
if is_dir:
separated = separated[:-1]
parent = tree[os.path.basename(directory)]
for index, path in enumerate(separated):
if path in parent:
parent = parent[path]
continue
else:
parent[path] = dict()
parent = parent[path]
return tree
|
python
|
def os_tree(directory, enable_scandir=False):
"""
Return a directory's contents as a dictionary hierarchy.
.. code:: python
reusables.os_tree(".")
# {'doc': {'build': {'doctrees': {},
# 'html': {'_sources': {}, '_static': {}}},
# 'source': {}},
# 'reusables': {'__pycache__': {}},
# 'test': {'__pycache__': {}, 'data': {}}}
:param directory: path to the directory to create the tree of.
:param enable_scandir: on python < 3.5 enable external scandir package
:return: dictionary of the directory
"""
if not os.path.exists(directory):
raise OSError("Directory does not exist")
if not os.path.isdir(directory):
raise OSError("Path is not a directory")
full_list = []
for root, dirs, files in _walk(directory, enable_scandir=enable_scandir):
full_list.extend([os.path.join(root, d).lstrip(directory) + os.sep
for d in dirs])
tree = {os.path.basename(directory): {}}
for item in full_list:
separated = item.split(os.sep)
is_dir = separated[-1:] == ['']
if is_dir:
separated = separated[:-1]
parent = tree[os.path.basename(directory)]
for index, path in enumerate(separated):
if path in parent:
parent = parent[path]
continue
else:
parent[path] = dict()
parent = parent[path]
return tree
|
[
"def",
"os_tree",
"(",
"directory",
",",
"enable_scandir",
"=",
"False",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"directory",
")",
":",
"raise",
"OSError",
"(",
"\"Directory does not exist\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"directory",
")",
":",
"raise",
"OSError",
"(",
"\"Path is not a directory\"",
")",
"full_list",
"=",
"[",
"]",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"_walk",
"(",
"directory",
",",
"enable_scandir",
"=",
"enable_scandir",
")",
":",
"full_list",
".",
"extend",
"(",
"[",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"d",
")",
".",
"lstrip",
"(",
"directory",
")",
"+",
"os",
".",
"sep",
"for",
"d",
"in",
"dirs",
"]",
")",
"tree",
"=",
"{",
"os",
".",
"path",
".",
"basename",
"(",
"directory",
")",
":",
"{",
"}",
"}",
"for",
"item",
"in",
"full_list",
":",
"separated",
"=",
"item",
".",
"split",
"(",
"os",
".",
"sep",
")",
"is_dir",
"=",
"separated",
"[",
"-",
"1",
":",
"]",
"==",
"[",
"''",
"]",
"if",
"is_dir",
":",
"separated",
"=",
"separated",
"[",
":",
"-",
"1",
"]",
"parent",
"=",
"tree",
"[",
"os",
".",
"path",
".",
"basename",
"(",
"directory",
")",
"]",
"for",
"index",
",",
"path",
"in",
"enumerate",
"(",
"separated",
")",
":",
"if",
"path",
"in",
"parent",
":",
"parent",
"=",
"parent",
"[",
"path",
"]",
"continue",
"else",
":",
"parent",
"[",
"path",
"]",
"=",
"dict",
"(",
")",
"parent",
"=",
"parent",
"[",
"path",
"]",
"return",
"tree"
] |
Return a directory's contents as a dictionary hierarchy.
.. code:: python
reusables.os_tree(".")
# {'doc': {'build': {'doctrees': {},
# 'html': {'_sources': {}, '_static': {}}},
# 'source': {}},
# 'reusables': {'__pycache__': {}},
# 'test': {'__pycache__': {}, 'data': {}}}
:param directory: path to the directory to create the tree of.
:param enable_scandir: on python < 3.5 enable external scandir package
:return: dictionary of the directory
|
[
"Return",
"a",
"directories",
"contents",
"as",
"a",
"dictionary",
"hierarchy",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/file_operations.py#L381-L422
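A small sketch pretty-printing the returned hierarchy; json is used here purely for display, and the directory name is hypothetical.
.. code:: python
import json
import reusables
tree = reusables.os_tree("test")
print(json.dumps(tree, indent=2, sort_keys=True))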
|
cdgriffith/Reusables
|
reusables/file_operations.py
|
file_hash
|
def file_hash(path, hash_type="md5", block_size=65536, hex_digest=True):
"""
Hash a given file with md5, or any other, and return the hex digest. You
can run `hashlib.algorithms_available` to see which are available on your
system (unless you have an archaic python version, you poor soul).
This function is designed to not be memory intensive.
.. code:: python
reusables.file_hash(test_structure.zip")
# '61e387de305201a2c915a4f4277d6663'
:param path: location of the file to hash
:param hash_type: string name of the hash to use
:param block_size: amount of bytes to add to hasher at a time
:param hex_digest: returned as hexdigest, false will return digest
:return: file's hash
"""
hashed = hashlib.new(hash_type)
with open(path, "rb") as infile:
buf = infile.read(block_size)
while len(buf) > 0:
hashed.update(buf)
buf = infile.read(block_size)
return hashed.hexdigest() if hex_digest else hashed.digest()
|
python
|
def file_hash(path, hash_type="md5", block_size=65536, hex_digest=True):
"""
Hash a given file with md5, or any other, and return the hex digest. You
can run `hashlib.algorithms_available` to see which are available on your
system (unless you have an archaic python version, you poor soul).
This function is designed to not be memory intensive.
.. code:: python
reusables.file_hash(test_structure.zip")
# '61e387de305201a2c915a4f4277d6663'
:param path: location of the file to hash
:param hash_type: string name of the hash to use
:param block_size: amount of bytes to add to hasher at a time
:param hex_digest: returned as hexdigest, false will return digest
:return: file's hash
"""
hashed = hashlib.new(hash_type)
with open(path, "rb") as infile:
buf = infile.read(block_size)
while len(buf) > 0:
hashed.update(buf)
buf = infile.read(block_size)
return hashed.hexdigest() if hex_digest else hashed.digest()
|
[
"def",
"file_hash",
"(",
"path",
",",
"hash_type",
"=",
"\"md5\"",
",",
"block_size",
"=",
"65536",
",",
"hex_digest",
"=",
"True",
")",
":",
"hashed",
"=",
"hashlib",
".",
"new",
"(",
"hash_type",
")",
"with",
"open",
"(",
"path",
",",
"\"rb\"",
")",
"as",
"infile",
":",
"buf",
"=",
"infile",
".",
"read",
"(",
"block_size",
")",
"while",
"len",
"(",
"buf",
")",
">",
"0",
":",
"hashed",
".",
"update",
"(",
"buf",
")",
"buf",
"=",
"infile",
".",
"read",
"(",
"block_size",
")",
"return",
"hashed",
".",
"hexdigest",
"(",
")",
"if",
"hex_digest",
"else",
"hashed",
".",
"digest",
"(",
")"
] |
Hash a given file with md5, or any other, and return the hex digest. You
can run `hashlib.algorithms_available` to see which are available on your
system (unless you have an archaic python version, you poor soul).
This function is designed to not be memory intensive.
.. code:: python
reusables.file_hash(test_structure.zip")
# '61e387de305201a2c915a4f4277d6663'
:param path: location of the file to hash
:param hash_type: string name of the hash to use
:param block_size: amount of bytes to add to hasher at a time
:param hex_digest: returned as hexdigest, false will return digest
:return: file's hash
|
[
"Hash",
"a",
"given",
"file",
"with",
"md5",
"or",
"any",
"other",
"and",
"return",
"the",
"hex",
"digest",
".",
"You",
"can",
"run",
"hashlib",
".",
"algorithms_available",
"to",
"see",
"which",
"are",
"available",
"on",
"your",
"system",
"unless",
"you",
"have",
"an",
"archaic",
"python",
"version",
"you",
"poor",
"soul",
")",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/file_operations.py#L425-L450
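A hedged sketch comparing several algorithms on one file; the file name is hypothetical, and each algorithm must appear in hashlib.algorithms_available on your system.
.. code:: python
import reusables
for algo in ("md5", "sha1", "sha256"):
    print(algo, reusables.file_hash("test_structure.zip", algo))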
|
cdgriffith/Reusables
|
reusables/file_operations.py
|
find_files
|
def find_files(directory=".", ext=None, name=None,
match_case=False, disable_glob=False, depth=None,
abspath=False, enable_scandir=False):
"""
Walk through a file directory and return an iterator of files
that match requirements. Will autodetect if name has glob as magic
characters.
Note: For the example below, you can use find_files_list to return as a
list, this is simply an easy way to show the output.
.. code:: python
list(reusables.find_files(name="ex", match_case=True))
# ['C:\\example.pdf',
# 'C:\\My_exam_score.txt']
list(reusables.find_files(name="*free*"))
# ['C:\\my_stuff\\Freedom_fight.pdf']
list(reusables.find_files(ext=".pdf"))
# ['C:\\Example.pdf',
# 'C:\\how_to_program.pdf',
# 'C:\\Hunks_and_Chicks.pdf']
list(reusables.find_files(name="*chris*"))
# ['C:\\Christmas_card.docx',
# 'C:\\chris_stuff.zip']
:param directory: Top location to recursively search for matching files
:param ext: Extensions of the file you are looking for
:param name: Part of the file name
:param match_case: If name or ext has to be a direct match or not
:param disable_glob: Do not look for globable names or use glob magic check
:param depth: How many directories down to search
:param abspath: Return files with their absolute paths
:param enable_scandir: on python < 3.5 enable external scandir package
:return: generator of all files in the specified directory
"""
if ext or not name:
disable_glob = True
if not disable_glob:
disable_glob = not glob.has_magic(name)
if ext and isinstance(ext, str):
ext = [ext]
elif ext and not isinstance(ext, (list, tuple)):
raise TypeError("extension must be either one extension or a list")
if abspath:
directory = os.path.abspath(directory)
starting_depth = directory.count(os.sep)
for root, dirs, files in _walk(directory, enable_scandir=enable_scandir):
if depth and root.count(os.sep) - starting_depth >= depth:
continue
if not disable_glob:
if match_case:
raise ValueError("Cannot use glob and match case, please "
"either disable glob or not set match_case")
glob_generator = glob.iglob(os.path.join(root, name))
for item in glob_generator:
yield item
continue
for file_name in files:
if ext:
for end in ext:
if file_name.lower().endswith(end.lower() if not
match_case else end):
break
else:
continue
if name:
if match_case and name not in file_name:
continue
elif name.lower() not in file_name.lower():
continue
yield os.path.join(root, file_name)
|
python
|
def find_files(directory=".", ext=None, name=None,
match_case=False, disable_glob=False, depth=None,
abspath=False, enable_scandir=False):
"""
Walk through a file directory and return an iterator of files
that match requirements. Will autodetect if name has glob as magic
characters.
Note: For the example below, you can use find_files_list to return as a
list, this is simply an easy way to show the output.
.. code:: python
list(reusables.find_files(name="ex", match_case=True))
# ['C:\\example.pdf',
# 'C:\\My_exam_score.txt']
list(reusables.find_files(name="*free*"))
# ['C:\\my_stuff\\Freedom_fight.pdf']
list(reusables.find_files(ext=".pdf"))
# ['C:\\Example.pdf',
# 'C:\\how_to_program.pdf',
# 'C:\\Hunks_and_Chicks.pdf']
list(reusables.find_files(name="*chris*"))
# ['C:\\Christmas_card.docx',
# 'C:\\chris_stuff.zip']
:param directory: Top location to recursively search for matching files
:param ext: Extensions of the file you are looking for
:param name: Part of the file name
:param match_case: If name or ext has to be a direct match or not
:param disable_glob: Do not look for globable names or use glob magic check
:param depth: How many directories down to search
:param abspath: Return files with their absolute paths
:param enable_scandir: on python < 3.5 enable external scandir package
:return: generator of all files in the specified directory
"""
if ext or not name:
disable_glob = True
if not disable_glob:
disable_glob = not glob.has_magic(name)
if ext and isinstance(ext, str):
ext = [ext]
elif ext and not isinstance(ext, (list, tuple)):
raise TypeError("extension must be either one extension or a list")
if abspath:
directory = os.path.abspath(directory)
starting_depth = directory.count(os.sep)
for root, dirs, files in _walk(directory, enable_scandir=enable_scandir):
if depth and root.count(os.sep) - starting_depth >= depth:
continue
if not disable_glob:
if match_case:
raise ValueError("Cannot use glob and match case, please "
"either disable glob or not set match_case")
glob_generator = glob.iglob(os.path.join(root, name))
for item in glob_generator:
yield item
continue
for file_name in files:
if ext:
for end in ext:
if file_name.lower().endswith(end.lower() if not
match_case else end):
break
else:
continue
if name:
if match_case and name not in file_name:
continue
elif name.lower() not in file_name.lower():
continue
yield os.path.join(root, file_name)
|
[
"def",
"find_files",
"(",
"directory",
"=",
"\".\"",
",",
"ext",
"=",
"None",
",",
"name",
"=",
"None",
",",
"match_case",
"=",
"False",
",",
"disable_glob",
"=",
"False",
",",
"depth",
"=",
"None",
",",
"abspath",
"=",
"False",
",",
"enable_scandir",
"=",
"False",
")",
":",
"if",
"ext",
"or",
"not",
"name",
":",
"disable_glob",
"=",
"True",
"if",
"not",
"disable_glob",
":",
"disable_glob",
"=",
"not",
"glob",
".",
"has_magic",
"(",
"name",
")",
"if",
"ext",
"and",
"isinstance",
"(",
"ext",
",",
"str",
")",
":",
"ext",
"=",
"[",
"ext",
"]",
"elif",
"ext",
"and",
"not",
"isinstance",
"(",
"ext",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"extension must be either one extension or a list\"",
")",
"if",
"abspath",
":",
"directory",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"directory",
")",
"starting_depth",
"=",
"directory",
".",
"count",
"(",
"os",
".",
"sep",
")",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"_walk",
"(",
"directory",
",",
"enable_scandir",
"=",
"enable_scandir",
")",
":",
"if",
"depth",
"and",
"root",
".",
"count",
"(",
"os",
".",
"sep",
")",
"-",
"starting_depth",
">=",
"depth",
":",
"continue",
"if",
"not",
"disable_glob",
":",
"if",
"match_case",
":",
"raise",
"ValueError",
"(",
"\"Cannot use glob and match case, please \"",
"\"either disable glob or not set match_case\"",
")",
"glob_generator",
"=",
"glob",
".",
"iglob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"name",
")",
")",
"for",
"item",
"in",
"glob_generator",
":",
"yield",
"item",
"continue",
"for",
"file_name",
"in",
"files",
":",
"if",
"ext",
":",
"for",
"end",
"in",
"ext",
":",
"if",
"file_name",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"end",
".",
"lower",
"(",
")",
"if",
"not",
"match_case",
"else",
"end",
")",
":",
"break",
"else",
":",
"continue",
"if",
"name",
":",
"if",
"match_case",
"and",
"name",
"not",
"in",
"file_name",
":",
"continue",
"elif",
"name",
".",
"lower",
"(",
")",
"not",
"in",
"file_name",
".",
"lower",
"(",
")",
":",
"continue",
"yield",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"file_name",
")"
] |
Walk through a file directory and return an iterator of files
that match requirements. Will autodetect if name has glob as magic
characters.
Note: For the example below, you can use find_files_list to return as a
list, this is simply an easy way to show the output.
.. code:: python
list(reusables.find_files(name="ex", match_case=True))
# ['C:\\example.pdf',
# 'C:\\My_exam_score.txt']
list(reusables.find_files(name="*free*"))
# ['C:\\my_stuff\\Freedom_fight.pdf']
list(reusables.find_files(ext=".pdf"))
# ['C:\\Example.pdf',
# 'C:\\how_to_program.pdf',
# 'C:\\Hunks_and_Chicks.pdf']
list(reusables.find_files(name="*chris*"))
# ['C:\\Christmas_card.docx',
# 'C:\\chris_stuff.zip']
:param directory: Top location to recursively search for matching files
:param ext: Extensions of the file you are looking for
:param name: Part of the file name
:param match_case: If name or ext has to be a direct match or not
:param disable_glob: Do not look for globable names or use glob magic check
:param depth: How many directories down to search
:param abspath: Return files with their absolute paths
:param enable_scandir: on python < 3.5 enable external scandir package
:return: generator of all files in the specified directory
|
[
"Walk",
"through",
"a",
"file",
"directory",
"and",
"return",
"an",
"iterator",
"of",
"files",
"that",
"match",
"requirements",
".",
"Will",
"autodetect",
"if",
"name",
"has",
"glob",
"as",
"magic",
"characters",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/file_operations.py#L463-L541
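A short sketch combining ext and depth, using the find_files_list helper that the docstring mentions (assumed to accept the same parameters) to materialize the generator.
.. code:: python
import reusables
# only .py and .cfg files, at most two directory levels deep
hits = reusables.find_files_list(".", ext=(".py", ".cfg"), depth=2,
                                 abspath=True)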
|
cdgriffith/Reusables
|
reusables/file_operations.py
|
remove_empty_directories
|
def remove_empty_directories(root_directory, dry_run=False, ignore_errors=True,
enable_scandir=False):
"""
Remove all empty folders from a path. Returns list of empty directories.
:param root_directory: base directory to start at
:param dry_run: just return a list of what would be removed
:param ignore_errors: Permissions are a pain, just ignore if you're blocked
:param enable_scandir: on python < 3.5 enable external scandir package
:return: list of removed directories
"""
listdir = os.listdir
if python_version < (3, 5) and enable_scandir:
import scandir as _scandir
def listdir(directory):
return list(_scandir.scandir(directory))
directory_list = []
for root, directories, files in _walk(root_directory,
enable_scandir=enable_scandir,
topdown=False):
if (not directories and not files and os.path.exists(root) and
root != root_directory and os.path.isdir(root)):
directory_list.append(root)
if not dry_run:
try:
os.rmdir(root)
except OSError as err:
if ignore_errors:
logger.info("{0} could not be deleted".format(root))
else:
raise err
elif directories and not files:
for directory in directories:
directory = join_paths(root, directory, strict=True)
if (os.path.exists(directory) and os.path.isdir(directory) and
not listdir(directory)):
directory_list.append(directory)
if not dry_run:
try:
os.rmdir(directory)
except OSError as err:
if ignore_errors:
logger.info("{0} could not be deleted".format(
directory))
else:
raise err
return directory_list
|
python
|
def remove_empty_directories(root_directory, dry_run=False, ignore_errors=True,
enable_scandir=False):
"""
Remove all empty folders from a path. Returns list of empty directories.
:param root_directory: base directory to start at
:param dry_run: just return a list of what would be removed
:param ignore_errors: Permissions are a pain, just ignore if you're blocked
:param enable_scandir: on python < 3.5 enable external scandir package
:return: list of removed directories
"""
listdir = os.listdir
if python_version < (3, 5) and enable_scandir:
import scandir as _scandir
def listdir(directory):
return list(_scandir.scandir(directory))
directory_list = []
for root, directories, files in _walk(root_directory,
enable_scandir=enable_scandir,
topdown=False):
if (not directories and not files and os.path.exists(root) and
root != root_directory and os.path.isdir(root)):
directory_list.append(root)
if not dry_run:
try:
os.rmdir(root)
except OSError as err:
if ignore_errors:
logger.info("{0} could not be deleted".format(root))
else:
raise err
elif directories and not files:
for directory in directories:
directory = join_paths(root, directory, strict=True)
if (os.path.exists(directory) and os.path.isdir(directory) and
not listdir(directory)):
directory_list.append(directory)
if not dry_run:
try:
os.rmdir(directory)
except OSError as err:
if ignore_errors:
logger.info("{0} could not be deleted".format(
directory))
else:
raise err
return directory_list
|
[
"def",
"remove_empty_directories",
"(",
"root_directory",
",",
"dry_run",
"=",
"False",
",",
"ignore_errors",
"=",
"True",
",",
"enable_scandir",
"=",
"False",
")",
":",
"listdir",
"=",
"os",
".",
"listdir",
"if",
"python_version",
"<",
"(",
"3",
",",
"5",
")",
"and",
"enable_scandir",
":",
"import",
"scandir",
"as",
"_scandir",
"def",
"listdir",
"(",
"directory",
")",
":",
"return",
"list",
"(",
"_scandir",
".",
"scandir",
"(",
"directory",
")",
")",
"directory_list",
"=",
"[",
"]",
"for",
"root",
",",
"directories",
",",
"files",
"in",
"_walk",
"(",
"root_directory",
",",
"enable_scandir",
"=",
"enable_scandir",
",",
"topdown",
"=",
"False",
")",
":",
"if",
"(",
"not",
"directories",
"and",
"not",
"files",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"root",
")",
"and",
"root",
"!=",
"root_directory",
"and",
"os",
".",
"path",
".",
"isdir",
"(",
"root",
")",
")",
":",
"directory_list",
".",
"append",
"(",
"root",
")",
"if",
"not",
"dry_run",
":",
"try",
":",
"os",
".",
"rmdir",
"(",
"root",
")",
"except",
"OSError",
"as",
"err",
":",
"if",
"ignore_errors",
":",
"logger",
".",
"info",
"(",
"\"{0} could not be deleted\"",
".",
"format",
"(",
"root",
")",
")",
"else",
":",
"raise",
"err",
"elif",
"directories",
"and",
"not",
"files",
":",
"for",
"directory",
"in",
"directories",
":",
"directory",
"=",
"join_paths",
"(",
"root",
",",
"directory",
",",
"strict",
"=",
"True",
")",
"if",
"(",
"os",
".",
"path",
".",
"exists",
"(",
"directory",
")",
"and",
"os",
".",
"path",
".",
"isdir",
"(",
"directory",
")",
"and",
"not",
"listdir",
"(",
"directory",
")",
")",
":",
"directory_list",
".",
"append",
"(",
"directory",
")",
"if",
"not",
"dry_run",
":",
"try",
":",
"os",
".",
"rmdir",
"(",
"directory",
")",
"except",
"OSError",
"as",
"err",
":",
"if",
"ignore_errors",
":",
"logger",
".",
"info",
"(",
"\"{0} could not be deleted\"",
".",
"format",
"(",
"directory",
")",
")",
"else",
":",
"raise",
"err",
"return",
"directory_list"
] |
Remove all empty folders from a path. Returns list of empty directories.
:param root_directory: base directory to start at
:param dry_run: just return a list of what would be removed
:param ignore_errors: Permissions are a pain, just ignore if you're blocked
:param enable_scandir: on python < 3.5 enable external scandir package
:return: list of removed directories
|
[
"Remove",
"all",
"empty",
"folders",
"from",
"a",
"path",
".",
"Returns",
"list",
"of",
"empty",
"directories",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/file_operations.py#L544-L592
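A dry-run-first sketch, which the dry_run parameter above is designed for; nothing is deleted until the flag is dropped. The directory name is hypothetical.
.. code:: python
import reusables
would_remove = reusables.remove_empty_directories("test", dry_run=True)
if would_remove:
    reusables.remove_empty_directories("test")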
|
cdgriffith/Reusables
|
reusables/file_operations.py
|
remove_empty_files
|
def remove_empty_files(root_directory, dry_run=False, ignore_errors=True,
enable_scandir=False):
"""
Remove all empty files from a path. Returns list of the empty files removed.
:param root_directory: base directory to start at
:param dry_run: just return a list of what would be removed
:param ignore_errors: Permissions are a pain, just ignore if you're blocked
:param enable_scandir: on python < 3.5 enable external scandir package
:return: list of removed files
"""
file_list = []
for root, directories, files in _walk(root_directory,
enable_scandir=enable_scandir):
for file_name in files:
file_path = join_paths(root, file_name, strict=True)
if os.path.isfile(file_path) and not os.path.getsize(file_path):
if file_hash(file_path) == variables.hashes.empty_file.md5:
file_list.append(file_path)
file_list = sorted(set(file_list))
if not dry_run:
for afile in file_list:
try:
os.unlink(afile)
except OSError as err:
if ignore_errors:
logger.info("File {0} could not be deleted".format(afile))
else:
raise err
return file_list
|
python
|
def remove_empty_files(root_directory, dry_run=False, ignore_errors=True,
enable_scandir=False):
"""
Remove all empty files from a path. Returns list of the empty files removed.
:param root_directory: base directory to start at
:param dry_run: just return a list of what would be removed
:param ignore_errors: Permissions are a pain, just ignore if you're blocked
:param enable_scandir: on python < 3.5 enable external scandir package
:return: list of removed files
"""
file_list = []
for root, directories, files in _walk(root_directory,
enable_scandir=enable_scandir):
for file_name in files:
file_path = join_paths(root, file_name, strict=True)
if os.path.isfile(file_path) and not os.path.getsize(file_path):
if file_hash(file_path) == variables.hashes.empty_file.md5:
file_list.append(file_path)
file_list = sorted(set(file_list))
if not dry_run:
for afile in file_list:
try:
os.unlink(afile)
except OSError as err:
if ignore_errors:
logger.info("File {0} could not be deleted".format(afile))
else:
raise err
return file_list
|
[
"def",
"remove_empty_files",
"(",
"root_directory",
",",
"dry_run",
"=",
"False",
",",
"ignore_errors",
"=",
"True",
",",
"enable_scandir",
"=",
"False",
")",
":",
"file_list",
"=",
"[",
"]",
"for",
"root",
",",
"directories",
",",
"files",
"in",
"_walk",
"(",
"root_directory",
",",
"enable_scandir",
"=",
"enable_scandir",
")",
":",
"for",
"file_name",
"in",
"files",
":",
"file_path",
"=",
"join_paths",
"(",
"root",
",",
"file_name",
",",
"strict",
"=",
"True",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"file_path",
")",
"and",
"not",
"os",
".",
"path",
".",
"getsize",
"(",
"file_path",
")",
":",
"if",
"file_hash",
"(",
"file_path",
")",
"==",
"variables",
".",
"hashes",
".",
"empty_file",
".",
"md5",
":",
"file_list",
".",
"append",
"(",
"file_path",
")",
"file_list",
"=",
"sorted",
"(",
"set",
"(",
"file_list",
")",
")",
"if",
"not",
"dry_run",
":",
"for",
"afile",
"in",
"file_list",
":",
"try",
":",
"os",
".",
"unlink",
"(",
"afile",
")",
"except",
"OSError",
"as",
"err",
":",
"if",
"ignore_errors",
":",
"logger",
".",
"info",
"(",
"\"File {0} could not be deleted\"",
".",
"format",
"(",
"afile",
")",
")",
"else",
":",
"raise",
"err",
"return",
"file_list"
] |
Remove all empty files from a path. Returns list of the empty files removed.
:param root_directory: base directory to start at
:param dry_run: just return a list of what would be removed
:param ignore_errors: Permissions are a pain, just ignore if you're blocked
:param enable_scandir: on python < 3.5 enable external scandir package
:return: list of removed files
|
[
"Remove",
"all",
"empty",
"files",
"from",
"a",
"path",
".",
"Returns",
"list",
"of",
"the",
"empty",
"files",
"removed",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/file_operations.py#L595-L627
|
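The same dry-run pattern works for remove_empty_files (hypothetical path, same top-level-exposure assumption):

    import reusables

    # Report zero-byte files that would be deleted, without touching them
    empties = reusables.remove_empty_files("/tmp/example_tree", dry_run=True)
    print(empties)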
cdgriffith/Reusables
|
reusables/file_operations.py
|
dup_finder
|
def dup_finder(file_path, directory=".", enable_scandir=False):
"""
Check a directory for duplicates of the specified file. This is meant
for a single file only, for checking a directory for dups, use
directory_duplicates.
This is designed to be as fast as possible by doing lighter checks
before progressing to
more extensive ones, in order they are:
1. File size
2. First twenty bytes
3. Full SHA256 compare
.. code:: python
list(reusables.dup_finder(
"test_structure\\files_2\\empty_file"))
# ['C:\\Reusables\\test\\data\\fake_dir',
# 'C:\\Reusables\\test\\data\\test_structure\\Files\\empty_file_1',
# 'C:\\Reusables\\test\\data\\test_structure\\Files\\empty_file_2',
# 'C:\\Reusables\\test\\data\\test_structure\\files_2\\empty_file']
:param file_path: Path to file to check for duplicates of
:param directory: Directory to dig recursively into to look for duplicates
:param enable_scandir: on python < 3.5 enable external scandir package
    :return: generator of duplicate file paths
"""
size = os.path.getsize(file_path)
if size == 0:
for empty_file in remove_empty_files(directory, dry_run=True):
yield empty_file
else:
with open(file_path, 'rb') as f:
first_twenty = f.read(20)
file_sha256 = file_hash(file_path, "sha256")
for root, directories, files in _walk(directory,
enable_scandir=enable_scandir):
for each_file in files:
test_file = os.path.join(root, each_file)
if os.path.getsize(test_file) == size:
try:
with open(test_file, 'rb') as f:
test_first_twenty = f.read(20)
except OSError:
logger.warning("Could not open file to compare - "
"{0}".format(test_file))
else:
if first_twenty == test_first_twenty:
if file_hash(test_file, "sha256") == file_sha256:
yield os.path.abspath(test_file)
|
python
|
def dup_finder(file_path, directory=".", enable_scandir=False):
"""
Check a directory for duplicates of the specified file. This is meant
for a single file only, for checking a directory for dups, use
directory_duplicates.
This is designed to be as fast as possible by doing lighter checks
before progressing to
more extensive ones, in order they are:
1. File size
2. First twenty bytes
3. Full SHA256 compare
.. code:: python
list(reusables.dup_finder(
"test_structure\\files_2\\empty_file"))
# ['C:\\Reusables\\test\\data\\fake_dir',
# 'C:\\Reusables\\test\\data\\test_structure\\Files\\empty_file_1',
# 'C:\\Reusables\\test\\data\\test_structure\\Files\\empty_file_2',
# 'C:\\Reusables\\test\\data\\test_structure\\files_2\\empty_file']
:param file_path: Path to file to check for duplicates of
:param directory: Directory to dig recursively into to look for duplicates
:param enable_scandir: on python < 3.5 enable external scandir package
    :return: generator of duplicate file paths
"""
size = os.path.getsize(file_path)
if size == 0:
for empty_file in remove_empty_files(directory, dry_run=True):
yield empty_file
else:
with open(file_path, 'rb') as f:
first_twenty = f.read(20)
file_sha256 = file_hash(file_path, "sha256")
for root, directories, files in _walk(directory,
enable_scandir=enable_scandir):
for each_file in files:
test_file = os.path.join(root, each_file)
if os.path.getsize(test_file) == size:
try:
with open(test_file, 'rb') as f:
test_first_twenty = f.read(20)
except OSError:
logger.warning("Could not open file to compare - "
"{0}".format(test_file))
else:
if first_twenty == test_first_twenty:
if file_hash(test_file, "sha256") == file_sha256:
yield os.path.abspath(test_file)
|
[
"def",
"dup_finder",
"(",
"file_path",
",",
"directory",
"=",
"\".\"",
",",
"enable_scandir",
"=",
"False",
")",
":",
"size",
"=",
"os",
".",
"path",
".",
"getsize",
"(",
"file_path",
")",
"if",
"size",
"==",
"0",
":",
"for",
"empty_file",
"in",
"remove_empty_files",
"(",
"directory",
",",
"dry_run",
"=",
"True",
")",
":",
"yield",
"empty_file",
"else",
":",
"with",
"open",
"(",
"file_path",
",",
"'rb'",
")",
"as",
"f",
":",
"first_twenty",
"=",
"f",
".",
"read",
"(",
"20",
")",
"file_sha256",
"=",
"file_hash",
"(",
"file_path",
",",
"\"sha256\"",
")",
"for",
"root",
",",
"directories",
",",
"files",
"in",
"_walk",
"(",
"directory",
",",
"enable_scandir",
"=",
"enable_scandir",
")",
":",
"for",
"each_file",
"in",
"files",
":",
"test_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"each_file",
")",
"if",
"os",
".",
"path",
".",
"getsize",
"(",
"test_file",
")",
"==",
"size",
":",
"try",
":",
"with",
"open",
"(",
"test_file",
",",
"'rb'",
")",
"as",
"f",
":",
"test_first_twenty",
"=",
"f",
".",
"read",
"(",
"20",
")",
"except",
"OSError",
":",
"logger",
".",
"warning",
"(",
"\"Could not open file to compare - \"",
"\"{0}\"",
".",
"format",
"(",
"test_file",
")",
")",
"else",
":",
"if",
"first_twenty",
"==",
"test_first_twenty",
":",
"if",
"file_hash",
"(",
"test_file",
",",
"\"sha256\"",
")",
"==",
"file_sha256",
":",
"yield",
"os",
".",
"path",
".",
"abspath",
"(",
"test_file",
")"
] |
Check a directory for duplicates of the specified file. This is meant
for a single file only, for checking a directory for dups, use
directory_duplicates.
This is designed to be as fast as possible by doing lighter checks
before progressing to
more extensive ones, in order they are:
1. File size
2. First twenty bytes
3. Full SHA256 compare
.. code:: python
list(reusables.dup_finder(
"test_structure\\files_2\\empty_file"))
# ['C:\\Reusables\\test\\data\\fake_dir',
# 'C:\\Reusables\\test\\data\\test_structure\\Files\\empty_file_1',
# 'C:\\Reusables\\test\\data\\test_structure\\Files\\empty_file_2',
# 'C:\\Reusables\\test\\data\\test_structure\\files_2\\empty_file']
:param file_path: Path to file to check for duplicates of
:param directory: Directory to dig recursively into to look for duplicates
:param enable_scandir: on python < 3.5 enable external scandir package
:return: generator of duplicate file paths
|
[
"Check",
"a",
"directory",
"for",
"duplicates",
"of",
"the",
"specified",
"file",
".",
"This",
"is",
"meant",
"for",
"a",
"single",
"file",
"only",
"for",
"checking",
"a",
"directory",
"for",
"dups",
"use",
"directory_duplicates",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/file_operations.py#L630-L681
|
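Because dup_finder is a generator, it can be consumed lazily; a sketch with hypothetical paths:

    import reusables

    # Stop at the first duplicate instead of scanning the whole tree
    for dup in reusables.dup_finder("report.pdf", directory="/data/archive"):
        print("duplicate found:", dup)
        break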
cdgriffith/Reusables
|
reusables/file_operations.py
|
directory_duplicates
|
def directory_duplicates(directory, hash_type='md5', **kwargs):
"""
Find all duplicates in a directory. Will return a list, in that list
are lists of duplicate files.
    .. code:: python
dups = reusables.directory_duplicates('C:\\Users\\Me\\Pictures')
print(len(dups))
# 56
print(dups)
# [['C:\\Users\\Me\\Pictures\\IMG_20161127.jpg',
# 'C:\\Users\\Me\\Pictures\\Phone\\IMG_20161127.jpg'], ...
:param directory: Directory to search
:param hash_type: Type of hash to perform
:param kwargs: Arguments to pass to find_files to narrow file types
:return: list of lists of dups"""
size_map, hash_map = defaultdict(list), defaultdict(list)
for item in find_files(directory, **kwargs):
file_size = os.path.getsize(item)
size_map[file_size].append(item)
for possible_dups in (v for v in size_map.values() if len(v) > 1):
for each_item in possible_dups:
item_hash = file_hash(each_item, hash_type=hash_type)
hash_map[item_hash].append(each_item)
return [v for v in hash_map.values() if len(v) > 1]
|
python
|
def directory_duplicates(directory, hash_type='md5', **kwargs):
"""
Find all duplicates in a directory. Will return a list, in that list
are lists of duplicate files.
    .. code:: python
dups = reusables.directory_duplicates('C:\\Users\\Me\\Pictures')
print(len(dups))
# 56
print(dups)
# [['C:\\Users\\Me\\Pictures\\IMG_20161127.jpg',
# 'C:\\Users\\Me\\Pictures\\Phone\\IMG_20161127.jpg'], ...
:param directory: Directory to search
:param hash_type: Type of hash to perform
:param kwargs: Arguments to pass to find_files to narrow file types
:return: list of lists of dups"""
size_map, hash_map = defaultdict(list), defaultdict(list)
for item in find_files(directory, **kwargs):
file_size = os.path.getsize(item)
size_map[file_size].append(item)
for possible_dups in (v for v in size_map.values() if len(v) > 1):
for each_item in possible_dups:
item_hash = file_hash(each_item, hash_type=hash_type)
hash_map[item_hash].append(each_item)
return [v for v in hash_map.values() if len(v) > 1]
|
[
"def",
"directory_duplicates",
"(",
"directory",
",",
"hash_type",
"=",
"'md5'",
",",
"*",
"*",
"kwargs",
")",
":",
"size_map",
",",
"hash_map",
"=",
"defaultdict",
"(",
"list",
")",
",",
"defaultdict",
"(",
"list",
")",
"for",
"item",
"in",
"find_files",
"(",
"directory",
",",
"*",
"*",
"kwargs",
")",
":",
"file_size",
"=",
"os",
".",
"path",
".",
"getsize",
"(",
"item",
")",
"size_map",
"[",
"file_size",
"]",
".",
"append",
"(",
"item",
")",
"for",
"possible_dups",
"in",
"(",
"v",
"for",
"v",
"in",
"size_map",
".",
"values",
"(",
")",
"if",
"len",
"(",
"v",
")",
">",
"1",
")",
":",
"for",
"each_item",
"in",
"possible_dups",
":",
"item_hash",
"=",
"file_hash",
"(",
"each_item",
",",
"hash_type",
"=",
"hash_type",
")",
"hash_map",
"[",
"item_hash",
"]",
".",
"append",
"(",
"each_item",
")",
"return",
"[",
"v",
"for",
"v",
"in",
"hash_map",
".",
"values",
"(",
")",
"if",
"len",
"(",
"v",
")",
">",
"1",
"]"
] |
Find all duplicates in a directory. Will return a list, in that list
are lists of duplicate files.
.. code:: python
dups = reusables.directory_duplicates('C:\\Users\\Me\\Pictures')
print(len(dups))
# 56
print(dups)
# [['C:\\Users\\Me\\Pictures\\IMG_20161127.jpg',
# 'C:\\Users\\Me\\Pictures\\Phone\\IMG_20161127.jpg'], ...
:param directory: Directory to search
:param hash_type: Type of hash to perform
:param kwargs: Arguments to pass to find_files to narrow file types
:return: list of lists of dups
|
[
"Find",
"all",
"duplicates",
"in",
"a",
"directory",
".",
"Will",
"return",
"a",
"list",
"in",
"that",
"list",
"are",
"lists",
"of",
"duplicate",
"files",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/file_operations.py#L684-L715
|
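A directory_duplicates sketch; the ext keyword is an assumption, passed through **kwargs to find_files as the signature above allows:

    import reusables

    # Each inner list is one group of byte-identical files
    for group in reusables.directory_duplicates("/data/photos", ext=".jpg"):
        print(group)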
cdgriffith/Reusables
|
reusables/file_operations.py
|
join_paths
|
def join_paths(*paths, **kwargs):
"""
Join multiple paths together and return the absolute path of them. If 'safe'
is specified, this function will 'clean' the path with the 'safe_path'
    function. This will clean root declarations from the path
after the first item.
Would like to do 'safe=False' instead of '**kwargs' but stupider versions
of python *cough 2.6* don't like that after '*paths'.
    .. code:: python
reusables.join_paths("var", "\\log", "/test")
'C:\\Users\\Me\\var\\log\\test'
os.path.join("var", "\\log", "/test")
'/test'
:param paths: paths to join together
    :param kwargs: 'safe', make them into a safe path if True
:return: abspath as string
"""
path = os.path.abspath(paths[0])
for next_path in paths[1:]:
path = os.path.join(path, next_path.lstrip("\\").lstrip("/").strip())
    path = path.rstrip(os.sep)
return path if not kwargs.get('safe') else safe_path(path)
|
python
|
def join_paths(*paths, **kwargs):
"""
Join multiple paths together and return the absolute path of them. If 'safe'
is specified, this function will 'clean' the path with the 'safe_path'
    function. This will clean root declarations from the path
after the first item.
Would like to do 'safe=False' instead of '**kwargs' but stupider versions
of python *cough 2.6* don't like that after '*paths'.
    .. code:: python
reusables.join_paths("var", "\\log", "/test")
'C:\\Users\\Me\\var\\log\\test'
os.path.join("var", "\\log", "/test")
'/test'
:param paths: paths to join together
    :param kwargs: 'safe', make them into a safe path if True
:return: abspath as string
"""
path = os.path.abspath(paths[0])
for next_path in paths[1:]:
path = os.path.join(path, next_path.lstrip("\\").lstrip("/").strip())
    path = path.rstrip(os.sep)
return path if not kwargs.get('safe') else safe_path(path)
|
[
"def",
"join_paths",
"(",
"*",
"paths",
",",
"*",
"*",
"kwargs",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"paths",
"[",
"0",
"]",
")",
"for",
"next_path",
"in",
"paths",
"[",
"1",
":",
"]",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"next_path",
".",
"lstrip",
"(",
"\"\\\\\"",
")",
".",
"lstrip",
"(",
"\"/\"",
")",
".",
"strip",
"(",
")",
")",
"path",
".",
"rstrip",
"(",
"os",
".",
"sep",
")",
"return",
"path",
"if",
"not",
"kwargs",
".",
"get",
"(",
"'safe'",
")",
"else",
"safe_path",
"(",
"path",
")"
] |
Join multiple paths together and return the absolute path of them. If 'safe'
is specified, this function will 'clean' the path with the 'safe_path'
function. This will clean root declarations from the path
after the first item.
Would like to do 'safe=False' instead of '**kwargs' but stupider versions
of python *cough 2.6* don't like that after '*paths'.
.. code:: python
reusables.join_paths("var", "\\log", "/test")
'C:\\Users\\Me\\var\\log\\test'
os.path.join("var", "\\log", "/test")
'/test'
:param paths: paths to join together
:param kwargs: 'safe', make them into a safe path if True
:return: abspath as string
|
[
"Join",
"multiple",
"paths",
"together",
"and",
"return",
"the",
"absolute",
"path",
"of",
"them",
".",
"If",
"safe",
"is",
"specified",
"this",
"function",
"will",
"clean",
"the",
"path",
"with",
"the",
"safe_path",
"function",
".",
"This",
"will",
"clean",
"root",
"decelerations",
"from",
"the",
"path",
"after",
"the",
"first",
"item",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/file_operations.py#L728-L755
|
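A quick contrast with os.path.join, echoing the docstring example (the absolute prefix in the result depends on your working directory):

    import os
    import reusables

    print(os.path.join("var", "/log", "test"))          # '/log/test' on *nix
    print(reusables.join_paths("var", "/log", "test"))  # <cwd>/var/log/test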
cdgriffith/Reusables
|
reusables/file_operations.py
|
join_here
|
def join_here(*paths, **kwargs):
"""
Join any path or paths as a sub directory of the current file's directory.
.. code:: python
reusables.join_here("Makefile")
# 'C:\\Reusables\\Makefile'
:param paths: paths to join together
:param kwargs: 'strict', do not strip os.sep
    :param kwargs: 'safe', make them into a safe path if True
:return: abspath as string
"""
path = os.path.abspath(".")
for next_path in paths:
next_path = next_path.lstrip("\\").lstrip("/").strip() if not \
kwargs.get('strict') else next_path
path = os.path.abspath(os.path.join(path, next_path))
return path if not kwargs.get('safe') else safe_path(path)
|
python
|
def join_here(*paths, **kwargs):
"""
Join any path or paths as a sub directory of the current file's directory.
.. code:: python
reusables.join_here("Makefile")
# 'C:\\Reusables\\Makefile'
:param paths: paths to join together
:param kwargs: 'strict', do not strip os.sep
    :param kwargs: 'safe', make them into a safe path if True
:return: abspath as string
"""
path = os.path.abspath(".")
for next_path in paths:
next_path = next_path.lstrip("\\").lstrip("/").strip() if not \
kwargs.get('strict') else next_path
path = os.path.abspath(os.path.join(path, next_path))
return path if not kwargs.get('safe') else safe_path(path)
|
[
"def",
"join_here",
"(",
"*",
"paths",
",",
"*",
"*",
"kwargs",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"\".\"",
")",
"for",
"next_path",
"in",
"paths",
":",
"next_path",
"=",
"next_path",
".",
"lstrip",
"(",
"\"\\\\\"",
")",
".",
"lstrip",
"(",
"\"/\"",
")",
".",
"strip",
"(",
")",
"if",
"not",
"kwargs",
".",
"get",
"(",
"'strict'",
")",
"else",
"next_path",
"path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"next_path",
")",
")",
"return",
"path",
"if",
"not",
"kwargs",
".",
"get",
"(",
"'safe'",
")",
"else",
"safe_path",
"(",
"path",
")"
] |
Join any path or paths as a sub directory of the current file's directory.
.. code:: python
reusables.join_here("Makefile")
# 'C:\\Reusables\\Makefile'
:param paths: paths to join together
:param kwargs: 'strict', do not strip os.sep
:param kwargs: 'safe', make them into a safe path if True
:return: abspath as string
|
[
"Join",
"any",
"path",
"or",
"paths",
"as",
"a",
"sub",
"directory",
"of",
"the",
"current",
"file",
"s",
"directory",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/file_operations.py#L758-L777
|
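A join_here sketch; note that the body above builds from os.path.abspath("."), so "here" is the working directory at call time (file names are illustrative):

    import reusables

    makefile = reusables.join_here("Makefile")
    settings = reusables.join_here("conf", "settings.ini")  # multiple parts nest
    print(makefile, settings)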
cdgriffith/Reusables
|
reusables/file_operations.py
|
check_filename
|
def check_filename(filename):
"""
Returns a boolean stating if the filename is safe to use or not. Note that
this does not test for "legal" names accepted, but a more restricted set of:
Letters, numbers, spaces, hyphens, underscores and periods.
:param filename: name of a file as a string
:return: boolean if it is a safe file name
"""
if not isinstance(filename, str):
raise TypeError("filename must be a string")
if regex.path.linux.filename.search(filename):
return True
return False
|
python
|
def check_filename(filename):
"""
Returns a boolean stating if the filename is safe to use or not. Note that
this does not test for "legal" names accepted, but a more restricted set of:
Letters, numbers, spaces, hyphens, underscores and periods.
:param filename: name of a file as a string
:return: boolean if it is a safe file name
"""
if not isinstance(filename, str):
raise TypeError("filename must be a string")
if regex.path.linux.filename.search(filename):
return True
return False
|
[
"def",
"check_filename",
"(",
"filename",
")",
":",
"if",
"not",
"isinstance",
"(",
"filename",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"\"filename must be a string\"",
")",
"if",
"regex",
".",
"path",
".",
"linux",
".",
"filename",
".",
"search",
"(",
"filename",
")",
":",
"return",
"True",
"return",
"False"
] |
Returns a boolean stating if the filename is safe to use or not. Note that
this does not test for "legal" names accepted, but a more restricted set of:
Letters, numbers, spaces, hyphens, underscores and periods.
:param filename: name of a file as a string
:return: boolean if it is a safe file name
|
[
"Returns",
"a",
"boolean",
"stating",
"if",
"the",
"filename",
"is",
"safe",
"to",
"use",
"or",
"not",
".",
"Note",
"that",
"this",
"does",
"not",
"test",
"for",
"legal",
"names",
"accepted",
"but",
"a",
"more",
"restricted",
"set",
"of",
":",
"Letters",
"numbers",
"spaces",
"hyphens",
"underscores",
"and",
"periods",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/file_operations.py#L780-L793
|
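A check_filename sketch; the regex.path.linux.filename pattern itself is not shown in this excerpt, so the results below are expectations based on the documented character set rather than verified output:

    import reusables

    print(reusables.check_filename("report 2016-01.txt"))  # expected: True
    print(reusables.check_filename("bad|name?.txt"))       # expected: False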
cdgriffith/Reusables
|
reusables/file_operations.py
|
safe_filename
|
def safe_filename(filename, replacement="_"):
"""
Replace unsafe filename characters with underscores. Note that this does not
test for "legal" names accepted, but a more restricted set of:
Letters, numbers, spaces, hyphens, underscores and periods.
:param filename: name of a file as a string
:param replacement: character to use as a replacement of bad characters
:return: safe filename string
"""
if not isinstance(filename, str):
raise TypeError("filename must be a string")
if regex.path.linux.filename.search(filename):
return filename
safe_name = ""
for char in filename:
safe_name += char if regex.path.linux.filename.search(char) \
else replacement
return safe_name
|
python
|
def safe_filename(filename, replacement="_"):
"""
Replace unsafe filename characters with underscores. Note that this does not
test for "legal" names accepted, but a more restricted set of:
Letters, numbers, spaces, hyphens, underscores and periods.
:param filename: name of a file as a string
:param replacement: character to use as a replacement of bad characters
:return: safe filename string
"""
if not isinstance(filename, str):
raise TypeError("filename must be a string")
if regex.path.linux.filename.search(filename):
return filename
safe_name = ""
for char in filename:
safe_name += char if regex.path.linux.filename.search(char) \
else replacement
return safe_name
|
[
"def",
"safe_filename",
"(",
"filename",
",",
"replacement",
"=",
"\"_\"",
")",
":",
"if",
"not",
"isinstance",
"(",
"filename",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"\"filename must be a string\"",
")",
"if",
"regex",
".",
"path",
".",
"linux",
".",
"filename",
".",
"search",
"(",
"filename",
")",
":",
"return",
"filename",
"safe_name",
"=",
"\"\"",
"for",
"char",
"in",
"filename",
":",
"safe_name",
"+=",
"char",
"if",
"regex",
".",
"path",
".",
"linux",
".",
"filename",
".",
"search",
"(",
"char",
")",
"else",
"replacement",
"return",
"safe_name"
] |
Replace unsafe filename characters with underscores. Note that this does not
test for "legal" names accepted, but a more restricted set of:
Letters, numbers, spaces, hyphens, underscores and periods.
:param filename: name of a file as a string
:param replacement: character to use as a replacement of bad characters
:return: safe filename string
|
[
"Replace",
"unsafe",
"filename",
"characters",
"with",
"underscores",
".",
"Note",
"that",
"this",
"does",
"not",
"test",
"for",
"legal",
"names",
"accepted",
"but",
"a",
"more",
"restricted",
"set",
"of",
":",
"Letters",
"numbers",
"spaces",
"hyphens",
"underscores",
"and",
"periods",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/file_operations.py#L796-L814
|
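And the companion safe_filename, which substitutes the replacement character for anything outside that set (expected output is an assumption, as above):

    import reusables

    print(reusables.safe_filename("bad|name?.txt"))  # expected: 'bad_name_.txt'
    print(reusables.safe_filename("ok-name.txt"))    # unchanged when already safe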
cdgriffith/Reusables
|
reusables/file_operations.py
|
safe_path
|
def safe_path(path, replacement="_"):
"""
Replace unsafe path characters with underscores. Do NOT use this
    with existing paths that cannot be modified, this is to help generate
new, clean paths.
Supports windows and *nix systems.
:param path: path as a string
:param replacement: character to use in place of bad characters
:return: a safer path
"""
if not isinstance(path, str):
raise TypeError("path must be a string")
if os.sep not in path:
return safe_filename(path, replacement=replacement)
filename = safe_filename(os.path.basename(path))
dirname = os.path.dirname(path)
safe_dirname = ""
regexp = regex.path.windows.safe if win_based else regex.path.linux.safe
if win_based and dirname.find(":\\") == 1 and dirname[0].isalpha():
safe_dirname = dirname[0:3]
dirname = dirname[3:]
if regexp.search(dirname) and check_filename(filename):
return path
else:
for char in dirname:
safe_dirname += char if regexp.search(char) else replacement
sanitized_path = os.path.normpath("{path}{sep}{filename}".format(
path=safe_dirname,
sep=os.sep if not safe_dirname.endswith(os.sep) else "",
filename=filename))
if (not filename and
path.endswith(os.sep) and
not sanitized_path.endswith(os.sep)):
sanitized_path += os.sep
return sanitized_path
|
python
|
def safe_path(path, replacement="_"):
"""
Replace unsafe path characters with underscores. Do NOT use this
    with existing paths that cannot be modified, this is to help generate
new, clean paths.
Supports windows and *nix systems.
:param path: path as a string
:param replacement: character to use in place of bad characters
:return: a safer path
"""
if not isinstance(path, str):
raise TypeError("path must be a string")
if os.sep not in path:
return safe_filename(path, replacement=replacement)
filename = safe_filename(os.path.basename(path))
dirname = os.path.dirname(path)
safe_dirname = ""
regexp = regex.path.windows.safe if win_based else regex.path.linux.safe
if win_based and dirname.find(":\\") == 1 and dirname[0].isalpha():
safe_dirname = dirname[0:3]
dirname = dirname[3:]
if regexp.search(dirname) and check_filename(filename):
return path
else:
for char in dirname:
safe_dirname += char if regexp.search(char) else replacement
sanitized_path = os.path.normpath("{path}{sep}{filename}".format(
path=safe_dirname,
sep=os.sep if not safe_dirname.endswith(os.sep) else "",
filename=filename))
if (not filename and
path.endswith(os.sep) and
not sanitized_path.endswith(os.sep)):
sanitized_path += os.sep
return sanitized_path
|
[
"def",
"safe_path",
"(",
"path",
",",
"replacement",
"=",
"\"_\"",
")",
":",
"if",
"not",
"isinstance",
"(",
"path",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"\"path must be a string\"",
")",
"if",
"os",
".",
"sep",
"not",
"in",
"path",
":",
"return",
"safe_filename",
"(",
"path",
",",
"replacement",
"=",
"replacement",
")",
"filename",
"=",
"safe_filename",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"path",
")",
")",
"dirname",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
"safe_dirname",
"=",
"\"\"",
"regexp",
"=",
"regex",
".",
"path",
".",
"windows",
".",
"safe",
"if",
"win_based",
"else",
"regex",
".",
"path",
".",
"linux",
".",
"safe",
"if",
"win_based",
"and",
"dirname",
".",
"find",
"(",
"\":\\\\\"",
")",
"==",
"1",
"and",
"dirname",
"[",
"0",
"]",
".",
"isalpha",
"(",
")",
":",
"safe_dirname",
"=",
"dirname",
"[",
"0",
":",
"3",
"]",
"dirname",
"=",
"dirname",
"[",
"3",
":",
"]",
"if",
"regexp",
".",
"search",
"(",
"dirname",
")",
"and",
"check_filename",
"(",
"filename",
")",
":",
"return",
"path",
"else",
":",
"for",
"char",
"in",
"dirname",
":",
"safe_dirname",
"+=",
"char",
"if",
"regexp",
".",
"search",
"(",
"char",
")",
"else",
"replacement",
"sanitized_path",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"\"{path}{sep}{filename}\"",
".",
"format",
"(",
"path",
"=",
"safe_dirname",
",",
"sep",
"=",
"os",
".",
"sep",
"if",
"not",
"safe_dirname",
".",
"endswith",
"(",
"os",
".",
"sep",
")",
"else",
"\"\"",
",",
"filename",
"=",
"filename",
")",
")",
"if",
"(",
"not",
"filename",
"and",
"path",
".",
"endswith",
"(",
"os",
".",
"sep",
")",
"and",
"not",
"sanitized_path",
".",
"endswith",
"(",
"os",
".",
"sep",
")",
")",
":",
"sanitized_path",
"+=",
"os",
".",
"sep",
"return",
"sanitized_path"
] |
Replace unsafe path characters with underscores. Do NOT use this
with existing paths that cannot be modified, this is to help generate
new, clean paths.
Supports windows and *nix systems.
:param path: path as a string
:param replacement: character to use in place of bad characters
:return: a safer path
|
[
"Replace",
"unsafe",
"path",
"characters",
"with",
"underscores",
".",
"Do",
"NOT",
"use",
"this",
"with",
"existing",
"paths",
"that",
"cannot",
"be",
"modified",
"this",
"to",
"to",
"help",
"generate",
"new",
"clean",
"paths",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/file_operations.py#L817-L853
|
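safe_path extends the same cleaning to full paths, preserving a Windows drive prefix such as C:\ when present; a *nix-flavored sketch (expected output assumed):

    import reusables

    print(reusables.safe_path("/tmp/bad|dir/file?.txt"))
    # expected: '/tmp/bad_dir/file_.txt'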
cdgriffith/Reusables
|
reusables/tasker.py
|
Tasker.change_task_size
|
def change_task_size(self, size):
"""Blocking request to change number of running tasks"""
self._pause.value = True
self.log.debug("About to change task size to {0}".format(size))
try:
size = int(size)
except ValueError:
self.log.error("Cannot change task size, non integer size provided")
return False
if size < 0:
self.log.error("Cannot change task size, less than 0 size provided")
return False
    old_max_tasks = self.max_tasks
    self.max_tasks = size
    if size < old_max_tasks:
        diff = old_max_tasks - size
self.log.debug("Reducing size offset by {0}".format(diff))
while True:
self._update_tasks()
if len(self.free_tasks) >= diff:
for i in range(diff):
task_id = self.free_tasks.pop(0)
del self.current_tasks[task_id]
break
time.sleep(0.5)
if not size:
self._reset_and_pause()
return True
    elif size > old_max_tasks:
        diff = size - old_max_tasks
for i in range(diff):
task_id = str(uuid.uuid4())
self.current_tasks[task_id] = {}
self.free_tasks.append(task_id)
self._pause.value = False
self.log.debug("Task size changed to {0}".format(size))
return True
|
python
|
def change_task_size(self, size):
"""Blocking request to change number of running tasks"""
self._pause.value = True
self.log.debug("About to change task size to {0}".format(size))
try:
size = int(size)
except ValueError:
self.log.error("Cannot change task size, non integer size provided")
return False
if size < 0:
self.log.error("Cannot change task size, less than 0 size provided")
return False
    old_max_tasks = self.max_tasks
    self.max_tasks = size
    if size < old_max_tasks:
        diff = old_max_tasks - size
self.log.debug("Reducing size offset by {0}".format(diff))
while True:
self._update_tasks()
if len(self.free_tasks) >= diff:
for i in range(diff):
task_id = self.free_tasks.pop(0)
del self.current_tasks[task_id]
break
time.sleep(0.5)
if not size:
self._reset_and_pause()
return True
    elif size > old_max_tasks:
        diff = size - old_max_tasks
for i in range(diff):
task_id = str(uuid.uuid4())
self.current_tasks[task_id] = {}
self.free_tasks.append(task_id)
self._pause.value = False
self.log.debug("Task size changed to {0}".format(size))
return True
|
[
"def",
"change_task_size",
"(",
"self",
",",
"size",
")",
":",
"self",
".",
"_pause",
".",
"value",
"=",
"True",
"self",
".",
"log",
".",
"debug",
"(",
"\"About to change task size to {0}\"",
".",
"format",
"(",
"size",
")",
")",
"try",
":",
"size",
"=",
"int",
"(",
"size",
")",
"except",
"ValueError",
":",
"self",
".",
"log",
".",
"error",
"(",
"\"Cannot change task size, non integer size provided\"",
")",
"return",
"False",
"if",
"size",
"<",
"0",
":",
"self",
".",
"log",
".",
"error",
"(",
"\"Cannot change task size, less than 0 size provided\"",
")",
"return",
"False",
"self",
".",
"max_tasks",
"=",
"size",
"if",
"size",
"<",
"self",
".",
"max_tasks",
":",
"diff",
"=",
"self",
".",
"max_tasks",
"-",
"size",
"self",
".",
"log",
".",
"debug",
"(",
"\"Reducing size offset by {0}\"",
".",
"format",
"(",
"diff",
")",
")",
"while",
"True",
":",
"self",
".",
"_update_tasks",
"(",
")",
"if",
"len",
"(",
"self",
".",
"free_tasks",
")",
">=",
"diff",
":",
"for",
"i",
"in",
"range",
"(",
"diff",
")",
":",
"task_id",
"=",
"self",
".",
"free_tasks",
".",
"pop",
"(",
"0",
")",
"del",
"self",
".",
"current_tasks",
"[",
"task_id",
"]",
"break",
"time",
".",
"sleep",
"(",
"0.5",
")",
"if",
"not",
"size",
":",
"self",
".",
"_reset_and_pause",
"(",
")",
"return",
"True",
"elif",
"size",
">",
"self",
".",
"max_tasks",
":",
"diff",
"=",
"size",
"-",
"self",
".",
"max_tasks",
"for",
"i",
"in",
"range",
"(",
"diff",
")",
":",
"task_id",
"=",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"self",
".",
"current_tasks",
"[",
"task_id",
"]",
"=",
"{",
"}",
"self",
".",
"free_tasks",
".",
"append",
"(",
"task_id",
")",
"self",
".",
"_pause",
".",
"value",
"=",
"False",
"self",
".",
"log",
".",
"debug",
"(",
"\"Task size changed to {0}\"",
".",
"format",
"(",
"size",
")",
")",
"return",
"True"
] |
Blocking request to change number of running tasks
|
[
"Blocking",
"request",
"to",
"change",
"number",
"of",
"running",
"tasks"
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/tasker.py#L128-L163
|
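A hedged resizing sketch; Tasker's constructor is not shown in this excerpt, so MyTasker stands in for whatever subclass you actually have (purely hypothetical):

    my_tasker = MyTasker()          # hypothetical Tasker subclass
    my_tasker.change_task_size(8)   # blocks until the pool is resized; returns True
    my_tasker.change_task_size(-1)  # logs an error and returns False
    my_tasker.change_task_size(0)   # waits for workers to free up, then resets and pauses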
cdgriffith/Reusables
|
reusables/tasker.py
|
Tasker.stop
|
def stop(self):
"""Hard stop the server and sub process"""
self._end.value = True
if self.background_process:
try:
self.background_process.terminate()
except Exception:
pass
for task_id, values in self.current_tasks.items():
try:
values['proc'].terminate()
except Exception:
pass
|
python
|
def stop(self):
"""Hard stop the server and sub process"""
self._end.value = True
if self.background_process:
try:
self.background_process.terminate()
except Exception:
pass
for task_id, values in self.current_tasks.items():
try:
values['proc'].terminate()
except Exception:
pass
|
[
"def",
"stop",
"(",
"self",
")",
":",
"self",
".",
"_end",
".",
"value",
"=",
"True",
"if",
"self",
".",
"background_process",
":",
"try",
":",
"self",
".",
"background_process",
".",
"terminate",
"(",
")",
"except",
"Exception",
":",
"pass",
"for",
"task_id",
",",
"values",
"in",
"self",
".",
"current_tasks",
".",
"items",
"(",
")",
":",
"try",
":",
"values",
"[",
"'proc'",
"]",
".",
"terminate",
"(",
")",
"except",
"Exception",
":",
"pass"
] |
Hard stop the server and sub process
|
[
"Hard",
"stop",
"the",
"server",
"and",
"sub",
"process"
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/tasker.py#L165-L177
|
cdgriffith/Reusables
|
reusables/tasker.py
|
Tasker.get_state
|
def get_state(self):
"""Get general information about the state of the class"""
return {"started": (True if self.background_process and
self.background_process.is_alive() else False),
"paused": self._pause.value,
"stopped": self._end.value,
"tasks": len(self.current_tasks),
"busy_tasks": len(self.busy_tasks),
"free_tasks": len(self.free_tasks)}
|
python
|
def get_state(self):
"""Get general information about the state of the class"""
return {"started": (True if self.background_process and
self.background_process.is_alive() else False),
"paused": self._pause.value,
"stopped": self._end.value,
"tasks": len(self.current_tasks),
"busy_tasks": len(self.busy_tasks),
"free_tasks": len(self.free_tasks)}
|
[
"def",
"get_state",
"(",
"self",
")",
":",
"return",
"{",
"\"started\"",
":",
"(",
"True",
"if",
"self",
".",
"background_process",
"and",
"self",
".",
"background_process",
".",
"is_alive",
"(",
")",
"else",
"False",
")",
",",
"\"paused\"",
":",
"self",
".",
"_pause",
".",
"value",
",",
"\"stopped\"",
":",
"self",
".",
"_end",
".",
"value",
",",
"\"tasks\"",
":",
"len",
"(",
"self",
".",
"current_tasks",
")",
",",
"\"busy_tasks\"",
":",
"len",
"(",
"self",
".",
"busy_tasks",
")",
",",
"\"free_tasks\"",
":",
"len",
"(",
"self",
".",
"free_tasks",
")",
"}"
] |
Get general information about the state of the class
|
[
"Get",
"general",
"information",
"about",
"the",
"state",
"of",
"the",
"class"
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/tasker.py#L187-L195
|
cdgriffith/Reusables
|
reusables/tasker.py
|
Tasker.main_loop
|
def main_loop(self, stop_at_empty=False):
"""Blocking function that can be run directly, if so would probably
want to specify 'stop_at_empty' to true, or have a separate process
adding items to the queue. """
try:
while True:
self.hook_pre_command()
self._check_command_queue()
if self.run_until and self.run_until < datetime.datetime.now():
self.log.info("Time limit reached")
break
if self._end.value:
break
if self._pause.value:
time.sleep(.5)
continue
self.hook_post_command()
self._update_tasks()
task_id = self._free_task()
if task_id:
try:
task = self.task_queue.get(timeout=.1)
except queue.Empty:
if stop_at_empty:
break
self._return_task(task_id)
else:
self.hook_pre_task()
self.log.debug("Starting task on {0}".format(task_id))
try:
self._start_task(task_id, task)
except Exception as err:
self.log.exception("Could not start task {0} -"
" {1}".format(task_id, err))
else:
self.hook_post_task()
finally:
self.log.info("Ending main loop")
|
python
|
def main_loop(self, stop_at_empty=False):
"""Blocking function that can be run directly, if so would probably
want to specify 'stop_at_empty' to true, or have a separate process
adding items to the queue. """
try:
while True:
self.hook_pre_command()
self._check_command_queue()
if self.run_until and self.run_until < datetime.datetime.now():
self.log.info("Time limit reached")
break
if self._end.value:
break
if self._pause.value:
time.sleep(.5)
continue
self.hook_post_command()
self._update_tasks()
task_id = self._free_task()
if task_id:
try:
task = self.task_queue.get(timeout=.1)
except queue.Empty:
if stop_at_empty:
break
self._return_task(task_id)
else:
self.hook_pre_task()
self.log.debug("Starting task on {0}".format(task_id))
try:
self._start_task(task_id, task)
except Exception as err:
self.log.exception("Could not start task {0} -"
" {1}".format(task_id, err))
else:
self.hook_post_task()
finally:
self.log.info("Ending main loop")
|
[
"def",
"main_loop",
"(",
"self",
",",
"stop_at_empty",
"=",
"False",
")",
":",
"try",
":",
"while",
"True",
":",
"self",
".",
"hook_pre_command",
"(",
")",
"self",
".",
"_check_command_queue",
"(",
")",
"if",
"self",
".",
"run_until",
"and",
"self",
".",
"run_until",
"<",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"Time limit reached\"",
")",
"break",
"if",
"self",
".",
"_end",
".",
"value",
":",
"break",
"if",
"self",
".",
"_pause",
".",
"value",
":",
"time",
".",
"sleep",
"(",
".5",
")",
"continue",
"self",
".",
"hook_post_command",
"(",
")",
"self",
".",
"_update_tasks",
"(",
")",
"task_id",
"=",
"self",
".",
"_free_task",
"(",
")",
"if",
"task_id",
":",
"try",
":",
"task",
"=",
"self",
".",
"task_queue",
".",
"get",
"(",
"timeout",
"=",
".1",
")",
"except",
"queue",
".",
"Empty",
":",
"if",
"stop_at_empty",
":",
"break",
"self",
".",
"_return_task",
"(",
"task_id",
")",
"else",
":",
"self",
".",
"hook_pre_task",
"(",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"Starting task on {0}\"",
".",
"format",
"(",
"task_id",
")",
")",
"try",
":",
"self",
".",
"_start_task",
"(",
"task_id",
",",
"task",
")",
"except",
"Exception",
"as",
"err",
":",
"self",
".",
"log",
".",
"exception",
"(",
"\"Could not start task {0} -\"",
"\" {1}\"",
".",
"format",
"(",
"task_id",
",",
"err",
")",
")",
"else",
":",
"self",
".",
"hook_post_task",
"(",
")",
"finally",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"Ending main loop\"",
")"
] |
Blocking function that can be run directly; if so you would probably
want to set 'stop_at_empty' to True, or have a separate process
adding items to the queue.
|
[
"Blocking",
"function",
"that",
"can",
"be",
"run",
"directly",
"if",
"so",
"would",
"probably",
"want",
"to",
"specify",
"stop_at_empty",
"to",
"true",
"or",
"have",
"a",
"separate",
"process",
"adding",
"items",
"to",
"the",
"queue",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/tasker.py#L232-L269
|
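main_loop blocks the caller, so a common foreground pattern is to pre-load the queue and let the loop drain it (hypothetical subclass again; task_queue.put mirrors the task_queue.get call in the body above):

    my_tasker = MyTasker()               # hypothetical Tasker subclass
    for job in range(10):
        my_tasker.task_queue.put(job)    # enqueue work items
    my_tasker.main_loop(stop_at_empty=True)  # returns once the queue is empty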
cdgriffith/Reusables
|
reusables/tasker.py
|
Tasker.run
|
def run(self):
"""Start the main loop as a background process. *nix only"""
if win_based:
raise NotImplementedError("Please run main_loop, "
"backgrounding not supported on Windows")
self.background_process = mp.Process(target=self.main_loop)
self.background_process.start()
|
python
|
def run(self):
"""Start the main loop as a background process. *nix only"""
if win_based:
raise NotImplementedError("Please run main_loop, "
"backgrounding not supported on Windows")
self.background_process = mp.Process(target=self.main_loop)
self.background_process.start()
|
[
"def",
"run",
"(",
"self",
")",
":",
"if",
"win_based",
":",
"raise",
"NotImplementedError",
"(",
"\"Please run main_loop, \"",
"\"backgrounding not supported on Windows\"",
")",
"self",
".",
"background_process",
"=",
"mp",
".",
"Process",
"(",
"target",
"=",
"self",
".",
"main_loop",
")",
"self",
".",
"background_process",
".",
"start",
"(",
")"
] |
Start the main loop as a background process. *nix only
|
[
"Start",
"the",
"main",
"loop",
"as",
"a",
"background",
"process",
".",
"*",
"nix",
"only"
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/tasker.py#L271-L277
|
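On *nix the loop can instead run in the background; the lifecycle then pairs run() with get_state() and stop() (hypothetical subclass as before):

    my_tasker = MyTasker()        # hypothetical Tasker subclass
    my_tasker.run()               # raises NotImplementedError on Windows
    print(my_tasker.get_state())  # e.g. {'started': True, 'paused': False, ...}
    my_tasker.stop()              # hard stop: terminates the background process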
cdgriffith/Reusables
|
reusables/cli.py
|
cmd
|
def cmd(command, ignore_stderr=False, raise_on_return=False, timeout=None,
encoding="utf-8"):
""" Run a shell command and have it automatically decoded and printed
:param command: Command to run as str
:param ignore_stderr: To not print stderr
:param raise_on_return: Run CompletedProcess.check_returncode()
    :param timeout: timeout to pass to communicate (Python 3 only)
:param encoding: How the output should be decoded
"""
result = run(command, timeout=timeout, shell=True)
if raise_on_return:
result.check_returncode()
print(result.stdout.decode(encoding))
if not ignore_stderr and result.stderr:
print(result.stderr.decode(encoding))
|
python
|
def cmd(command, ignore_stderr=False, raise_on_return=False, timeout=None,
encoding="utf-8"):
""" Run a shell command and have it automatically decoded and printed
:param command: Command to run as str
:param ignore_stderr: To not print stderr
:param raise_on_return: Run CompletedProcess.check_returncode()
    :param timeout: timeout to pass to communicate (Python 3 only)
:param encoding: How the output should be decoded
"""
result = run(command, timeout=timeout, shell=True)
if raise_on_return:
result.check_returncode()
print(result.stdout.decode(encoding))
if not ignore_stderr and result.stderr:
print(result.stderr.decode(encoding))
|
[
"def",
"cmd",
"(",
"command",
",",
"ignore_stderr",
"=",
"False",
",",
"raise_on_return",
"=",
"False",
",",
"timeout",
"=",
"None",
",",
"encoding",
"=",
"\"utf-8\"",
")",
":",
"result",
"=",
"run",
"(",
"command",
",",
"timeout",
"=",
"timeout",
",",
"shell",
"=",
"True",
")",
"if",
"raise_on_return",
":",
"result",
".",
"check_returncode",
"(",
")",
"print",
"(",
"result",
".",
"stdout",
".",
"decode",
"(",
"encoding",
")",
")",
"if",
"not",
"ignore_stderr",
"and",
"result",
".",
"stderr",
":",
"print",
"(",
"result",
".",
"stderr",
".",
"decode",
"(",
"encoding",
")",
")"
] |
Run a shell command and have it automatically decoded and printed
:param command: Command to run as str
:param ignore_stderr: To not print stderr
:param raise_on_return: Run CompletedProcess.check_returncode()
:param timeout: timeout to pass to communicate (Python 3 only)
:param encoding: How the output should be decoded
|
[
"Run",
"a",
"shell",
"command",
"and",
"have",
"it",
"automatically",
"decoded",
"and",
"printed"
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/cli.py#L29-L44
|
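A one-line cmd sketch (the command is illustrative; assumes top-level exposure):

    import reusables

    # Prints decoded stdout, and stderr too unless ignore_stderr=True
    reusables.cmd("echo hello", raise_on_return=True)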
cdgriffith/Reusables
|
reusables/cli.py
|
pushd
|
def pushd(directory):
"""Change working directories in style and stay organized!
:param directory: Where do you want to go and remember?
:return: saved directory stack
"""
directory = os.path.expanduser(directory)
_saved_paths.insert(0, os.path.abspath(os.getcwd()))
os.chdir(directory)
return [directory] + _saved_paths
|
python
|
def pushd(directory):
"""Change working directories in style and stay organized!
:param directory: Where do you want to go and remember?
:return: saved directory stack
"""
directory = os.path.expanduser(directory)
_saved_paths.insert(0, os.path.abspath(os.getcwd()))
os.chdir(directory)
return [directory] + _saved_paths
|
[
"def",
"pushd",
"(",
"directory",
")",
":",
"directory",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"directory",
")",
"_saved_paths",
".",
"insert",
"(",
"0",
",",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"getcwd",
"(",
")",
")",
")",
"os",
".",
"chdir",
"(",
"directory",
")",
"return",
"[",
"directory",
"]",
"+",
"_saved_paths"
] |
Change working directories in style and stay organized!
:param directory: Where do you want to go and remember?
:return: saved directory stack
|
[
"Change",
"working",
"directories",
"in",
"style",
"and",
"stay",
"organized!"
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/cli.py#L47-L56
|
cdgriffith/Reusables
|
reusables/cli.py
|
popd
|
def popd():
"""Go back to where you once were.
:return: saved directory stack
"""
try:
directory = _saved_paths.pop(0)
except IndexError:
return [os.getcwd()]
os.chdir(directory)
return [directory] + _saved_paths
|
python
|
def popd():
"""Go back to where you once were.
:return: saved directory stack
"""
try:
directory = _saved_paths.pop(0)
except IndexError:
return [os.getcwd()]
os.chdir(directory)
return [directory] + _saved_paths
|
[
"def",
"popd",
"(",
")",
":",
"try",
":",
"directory",
"=",
"_saved_paths",
".",
"pop",
"(",
"0",
")",
"except",
"IndexError",
":",
"return",
"[",
"os",
".",
"getcwd",
"(",
")",
"]",
"os",
".",
"chdir",
"(",
"directory",
")",
"return",
"[",
"directory",
"]",
"+",
"_saved_paths"
] |
Go back to where you once were.
:return: saved directory stack
|
[
"Go",
"back",
"to",
"where",
"you",
"once",
"were",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/cli.py#L59-L69
|
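pushd and popd work as a pair, mirroring their shell namesakes (the directory is illustrative):

    import os
    import reusables

    reusables.pushd("~/projects")  # remember where we were, then chdir
    print(os.getcwd())
    reusables.popd()               # hop back to the remembered directory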
cdgriffith/Reusables
|
reusables/cli.py
|
ls
|
def ls(params="", directory=".", printed=True):
"""Know the best python implantation of ls? It's just to subprocess ls...
(uses dir on windows).
:param params: options to pass to ls or dir
:param directory: if not this directory
:param printed: If you're using this, you probably wanted it just printed
:return: if not printed, you can parse it yourself
"""
command = "{0} {1} {2}".format("ls" if not win_based else "dir",
params, directory)
response = run(command, shell=True) # Shell required for windows
response.check_returncode()
if printed:
print(response.stdout.decode("utf-8"))
else:
return response.stdout
|
python
|
def ls(params="", directory=".", printed=True):
"""Know the best python implantation of ls? It's just to subprocess ls...
(uses dir on windows).
:param params: options to pass to ls or dir
:param directory: if not this directory
:param printed: If you're using this, you probably wanted it just printed
:return: if not printed, you can parse it yourself
"""
command = "{0} {1} {2}".format("ls" if not win_based else "dir",
params, directory)
response = run(command, shell=True) # Shell required for windows
response.check_returncode()
if printed:
print(response.stdout.decode("utf-8"))
else:
return response.stdout
|
[
"def",
"ls",
"(",
"params",
"=",
"\"\"",
",",
"directory",
"=",
"\".\"",
",",
"printed",
"=",
"True",
")",
":",
"command",
"=",
"\"{0} {1} {2}\"",
".",
"format",
"(",
"\"ls\"",
"if",
"not",
"win_based",
"else",
"\"dir\"",
",",
"params",
",",
"directory",
")",
"response",
"=",
"run",
"(",
"command",
",",
"shell",
"=",
"True",
")",
"# Shell required for windows",
"response",
".",
"check_returncode",
"(",
")",
"if",
"printed",
":",
"print",
"(",
"response",
".",
"stdout",
".",
"decode",
"(",
"\"utf-8\"",
")",
")",
"else",
":",
"return",
"response",
".",
"stdout"
] |
Know the best python implementation of ls? It's just to subprocess ls...
(uses dir on windows).
:param params: options to pass to ls or dir
:param directory: if not this directory
:param printed: If you're using this, you probably wanted it just printed
:return: if not printed, you can parse it yourself
|
[
"Know",
"the",
"best",
"python",
"implantation",
"of",
"ls?",
"It",
"s",
"just",
"to",
"subprocess",
"ls",
"...",
"(",
"uses",
"dir",
"on",
"windows",
")",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/cli.py#L85-L101
|
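An ls sketch; on Windows the same call shells out to dir:

    import reusables

    reusables.ls("-la")  # print the current directory listing
    raw = reusables.ls(directory="/tmp", printed=False)  # raw bytes to parse yourself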
cdgriffith/Reusables
|
reusables/cli.py
|
find
|
def find(name=None, ext=None, directory=".", match_case=False,
disable_glob=False, depth=None):
""" Designed for the interactive interpreter by making default order
of find_files faster.
:param name: Part of the file name
:param ext: Extensions of the file you are looking for
:param directory: Top location to recursively search for matching files
:param match_case: If name has to be a direct match or not
:param disable_glob: Do not look for globable names or use glob magic check
:param depth: How many directories down to search
:return: list of all files in the specified directory
"""
return find_files_list(directory=directory, ext=ext, name=name,
match_case=match_case, disable_glob=disable_glob,
depth=depth)
|
python
|
def find(name=None, ext=None, directory=".", match_case=False,
disable_glob=False, depth=None):
""" Designed for the interactive interpreter by making default order
of find_files faster.
:param name: Part of the file name
:param ext: Extensions of the file you are looking for
:param directory: Top location to recursively search for matching files
:param match_case: If name has to be a direct match or not
:param disable_glob: Do not look for globable names or use glob magic check
:param depth: How many directories down to search
:return: list of all files in the specified directory
"""
return find_files_list(directory=directory, ext=ext, name=name,
match_case=match_case, disable_glob=disable_glob,
depth=depth)
|
[
"def",
"find",
"(",
"name",
"=",
"None",
",",
"ext",
"=",
"None",
",",
"directory",
"=",
"\".\"",
",",
"match_case",
"=",
"False",
",",
"disable_glob",
"=",
"False",
",",
"depth",
"=",
"None",
")",
":",
"return",
"find_files_list",
"(",
"directory",
"=",
"directory",
",",
"ext",
"=",
"ext",
",",
"name",
"=",
"name",
",",
"match_case",
"=",
"match_case",
",",
"disable_glob",
"=",
"disable_glob",
",",
"depth",
"=",
"depth",
")"
] |
Designed for the interactive interpreter by making the default
argument order of find_files faster to use.
:param name: Part of the file name
:param ext: Extensions of the file you are looking for
:param directory: Top location to recursively search for matching files
:param match_case: If name has to be a direct match or not
:param disable_glob: Do not look for globable names or use glob magic check
:param depth: How many directories down to search
:return: list of all files in the specified directory
|
[
"Designed",
"for",
"the",
"interactive",
"interpreter",
"by",
"making",
"default",
"order",
"of",
"find_files",
"faster",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/cli.py#L104-L119
|
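A couple of illustrative find calls (extensions and names are examples only):

    import reusables

    print(reusables.find(ext=".py"))             # every .py under the cwd
    print(reusables.find(name="test", depth=2))  # name fragment, limited depth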
cdgriffith/Reusables
|
reusables/cli.py
|
head
|
def head(file_path, lines=10, encoding="utf-8", printed=True,
errors='strict'):
"""
Read the first N lines of a file, defaults to 10
:param file_path: Path to file to read
:param lines: Number of lines to read in
:param encoding: defaults to utf-8 to decode as, will fail on binary
    :param printed: Automatically print the lines instead of returning them
:param errors: Decoding errors: 'strict', 'ignore' or 'replace'
:return: if printed is false, the lines are returned as a list
"""
data = []
with open(file_path, "rb") as f:
for _ in range(lines):
try:
if python_version >= (2, 7):
data.append(next(f).decode(encoding, errors=errors))
else:
data.append(next(f).decode(encoding))
except StopIteration:
break
if printed:
print("".join(data))
else:
return data
|
python
|
def head(file_path, lines=10, encoding="utf-8", printed=True,
errors='strict'):
"""
Read the first N lines of a file, defaults to 10
:param file_path: Path to file to read
:param lines: Number of lines to read in
:param encoding: defaults to utf-8 to decode as, will fail on binary
    :param printed: Automatically print the lines instead of returning them
:param errors: Decoding errors: 'strict', 'ignore' or 'replace'
:return: if printed is false, the lines are returned as a list
"""
data = []
with open(file_path, "rb") as f:
for _ in range(lines):
try:
if python_version >= (2, 7):
data.append(next(f).decode(encoding, errors=errors))
else:
data.append(next(f).decode(encoding))
except StopIteration:
break
if printed:
print("".join(data))
else:
return data
|
[
"def",
"head",
"(",
"file_path",
",",
"lines",
"=",
"10",
",",
"encoding",
"=",
"\"utf-8\"",
",",
"printed",
"=",
"True",
",",
"errors",
"=",
"'strict'",
")",
":",
"data",
"=",
"[",
"]",
"with",
"open",
"(",
"file_path",
",",
"\"rb\"",
")",
"as",
"f",
":",
"for",
"_",
"in",
"range",
"(",
"lines",
")",
":",
"try",
":",
"if",
"python_version",
">=",
"(",
"2",
",",
"7",
")",
":",
"data",
".",
"append",
"(",
"next",
"(",
"f",
")",
".",
"decode",
"(",
"encoding",
",",
"errors",
"=",
"errors",
")",
")",
"else",
":",
"data",
".",
"append",
"(",
"next",
"(",
"f",
")",
".",
"decode",
"(",
"encoding",
")",
")",
"except",
"StopIteration",
":",
"break",
"if",
"printed",
":",
"print",
"(",
"\"\"",
".",
"join",
"(",
"data",
")",
")",
"else",
":",
"return",
"data"
] |
Read the first N lines of a file, defaults to 10
:param file_path: Path to file to read
:param lines: Number of lines to read in
:param encoding: defaults to utf-8 to decode as, will fail on binary
:param printed: Automatically print the lines instead of returning them
:param errors: Decoding errors: 'strict', 'ignore' or 'replace'
:return: if printed is false, the lines are returned as a list
|
[
"Read",
"the",
"first",
"N",
"lines",
"of",
"a",
"file",
"defaults",
"to",
"10"
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/cli.py#L122-L147
|
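A head sketch on a hypothetical file:

    import reusables

    reusables.head("server.log", lines=5)                # print the first 5 lines
    first = reusables.head("server.log", printed=False)  # or keep them as a list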
cdgriffith/Reusables
|
reusables/cli.py
|
tail
|
def tail(file_path, lines=10, encoding="utf-8",
printed=True, errors='strict'):
"""
A really silly way to get the last N lines, defaults to 10.
:param file_path: Path to file to read
:param lines: Number of lines to read in
:param encoding: defaults to utf-8 to decode as, will fail on binary
    :param printed: Automatically print the lines instead of returning them
:param errors: Decoding errors: 'strict', 'ignore' or 'replace'
    :return: if printed is false, the lines are returned as a deque
"""
data = deque()
with open(file_path, "rb") as f:
for line in f:
if python_version >= (2, 7):
data.append(line.decode(encoding, errors=errors))
else:
data.append(line.decode(encoding))
if len(data) > lines:
data.popleft()
if printed:
print("".join(data))
else:
return data
|
python
|
def tail(file_path, lines=10, encoding="utf-8",
printed=True, errors='strict'):
"""
A really silly way to get the last N lines, defaults to 10.
:param file_path: Path to file to read
:param lines: Number of lines to read in
:param encoding: defaults to utf-8 to decode as, will fail on binary
    :param printed: Automatically print the lines instead of returning them
:param errors: Decoding errors: 'strict', 'ignore' or 'replace'
    :return: if printed is false, the lines are returned as a deque
"""
data = deque()
with open(file_path, "rb") as f:
for line in f:
if python_version >= (2, 7):
data.append(line.decode(encoding, errors=errors))
else:
data.append(line.decode(encoding))
if len(data) > lines:
data.popleft()
if printed:
print("".join(data))
else:
return data
|
[
"def",
"tail",
"(",
"file_path",
",",
"lines",
"=",
"10",
",",
"encoding",
"=",
"\"utf-8\"",
",",
"printed",
"=",
"True",
",",
"errors",
"=",
"'strict'",
")",
":",
"data",
"=",
"deque",
"(",
")",
"with",
"open",
"(",
"file_path",
",",
"\"rb\"",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"if",
"python_version",
">=",
"(",
"2",
",",
"7",
")",
":",
"data",
".",
"append",
"(",
"line",
".",
"decode",
"(",
"encoding",
",",
"errors",
"=",
"errors",
")",
")",
"else",
":",
"data",
".",
"append",
"(",
"line",
".",
"decode",
"(",
"encoding",
")",
")",
"if",
"len",
"(",
"data",
")",
">",
"lines",
":",
"data",
".",
"popleft",
"(",
")",
"if",
"printed",
":",
"print",
"(",
"\"\"",
".",
"join",
"(",
"data",
")",
")",
"else",
":",
"return",
"data"
] |
A really silly way to get the last N lines, defaults to 10.
:param file_path: Path to file to read
:param lines: Number of lines to read in
:param encoding: defaults to utf-8 to decode as, will fail on binary
:param printed: Automatically print the lines instead of returning them
:param errors: Decoding errors: 'strict', 'ignore' or 'replace'
:return: if printed is false, the lines are returned as a deque
|
[
"A",
"really",
"silly",
"way",
"to",
"get",
"the",
"last",
"N",
"lines",
"defaults",
"to",
"10",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/cli.py#L175-L201
|
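And the matching tail call; the non-printed form hands back the deque built internally:

    import reusables

    reusables.tail("server.log", lines=20)              # print the last 20 lines
    last = reusables.tail("server.log", printed=False)  # deque of decoded lines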
cdgriffith/Reusables
|
reusables/cli.py
|
cp
|
def cp(src, dst, overwrite=False):
"""
Copy files to a new location.
:param src: list (or string) of paths of files to copy
:param dst: file or folder to copy item(s) to
:param overwrite: IF the file already exists, should I overwrite it?
"""
if not isinstance(src, list):
src = [src]
dst = os.path.expanduser(dst)
dst_folder = os.path.isdir(dst)
if len(src) > 1 and not dst_folder:
raise OSError("Cannot copy multiple item to same file")
for item in src:
source = os.path.expanduser(item)
destination = (dst if not dst_folder else
os.path.join(dst, os.path.basename(source)))
if not overwrite and os.path.exists(destination):
_logger.warning("Not replacing {0} with {1}, overwrite not enabled"
"".format(destination, source))
continue
shutil.copy(source, destination)
|
python
|
def cp(src, dst, overwrite=False):
"""
Copy files to a new location.
:param src: list (or string) of paths of files to copy
:param dst: file or folder to copy item(s) to
:param overwrite: If the file already exists, should I overwrite it?
"""
if not isinstance(src, list):
src = [src]
dst = os.path.expanduser(dst)
dst_folder = os.path.isdir(dst)
if len(src) > 1 and not dst_folder:
raise OSError("Cannot copy multiple item to same file")
for item in src:
source = os.path.expanduser(item)
destination = (dst if not dst_folder else
os.path.join(dst, os.path.basename(source)))
if not overwrite and os.path.exists(destination):
_logger.warning("Not replacing {0} with {1}, overwrite not enabled"
"".format(destination, source))
continue
shutil.copy(source, destination)
|
[
"def",
"cp",
"(",
"src",
",",
"dst",
",",
"overwrite",
"=",
"False",
")",
":",
"if",
"not",
"isinstance",
"(",
"src",
",",
"list",
")",
":",
"src",
"=",
"[",
"src",
"]",
"dst",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"dst",
")",
"dst_folder",
"=",
"os",
".",
"path",
".",
"isdir",
"(",
"dst",
")",
"if",
"len",
"(",
"src",
")",
">",
"1",
"and",
"not",
"dst_folder",
":",
"raise",
"OSError",
"(",
"\"Cannot copy multiple item to same file\"",
")",
"for",
"item",
"in",
"src",
":",
"source",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"item",
")",
"destination",
"=",
"(",
"dst",
"if",
"not",
"dst_folder",
"else",
"os",
".",
"path",
".",
"join",
"(",
"dst",
",",
"os",
".",
"path",
".",
"basename",
"(",
"source",
")",
")",
")",
"if",
"not",
"overwrite",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"destination",
")",
":",
"_logger",
".",
"warning",
"(",
"\"Not replacing {0} with {1}, overwrite not enabled\"",
"\"\"",
".",
"format",
"(",
"destination",
",",
"source",
")",
")",
"continue",
"shutil",
".",
"copy",
"(",
"source",
",",
"destination",
")"
] |
Copy files to a new location.
:param src: list (or string) of paths of files to copy
:param dst: file or folder to copy item(s) to
:param overwrite: If the file already exists, should I overwrite it?
|
[
"Copy",
"files",
"to",
"a",
"new",
"location",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/cli.py#L204-L231
|
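A hedged usage sketch for cp above; the paths are illustrative. With overwrite left at False an existing destination is skipped with a warning rather than replaced, and copying multiple sources requires dst to be an existing directory.

import reusables

# Single file into a directory: the basename is preserved.
reusables.cp("~/notes.txt", "/tmp/backup")

# Several files at once; dst must be a directory in this case.
reusables.cp(["~/a.txt", "~/b.txt"], "/tmp/backup", overwrite=True)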
cdgriffith/Reusables
|
reusables/string_manipulation.py
|
cut
|
def cut(string, characters=2, trailing="normal"):
"""
Split a string into a list of N characters each.
.. code:: python
reusables.cut("abcdefghi")
# ['ab', 'cd', 'ef', 'gh', 'i']
trailing gives you the following options:
* normal: leaves remaining characters in their own last position
* remove: return the list without the remainder characters
* combine: add the remainder characters to the previous set
* error: raise an IndexError if there are remaining characters
.. code:: python
reusables.cut("abcdefghi", 2, "error")
# Traceback (most recent call last):
# ...
# IndexError: String of length 9 not divisible by 2 to cut
reusables.cut("abcdefghi", 2, "remove")
# ['ab', 'cd', 'ef', 'gh']
reusables.cut("abcdefghi", 2, "combine")
# ['ab', 'cd', 'ef', 'ghi']
:param string: string to modify
:param characters: how many characters to split it into
:param trailing: "normal", "remove", "combine", or "error"
:return: list of the cut string
"""
split_str = [string[i:i + characters] for
i in range(0, len(string), characters)]
if trailing != "normal" and len(split_str[-1]) != characters:
if trailing.lower() == "remove":
return split_str[:-1]
if trailing.lower() == "combine" and len(split_str) >= 2:
return split_str[:-2] + [split_str[-2] + split_str[-1]]
if trailing.lower() == "error":
raise IndexError("String of length {0} not divisible by {1} to"
" cut".format(len(string), characters))
return split_str
|
python
|
def cut(string, characters=2, trailing="normal"):
"""
Split a string into a list of N characters each.
.. code:: python
reusables.cut("abcdefghi")
# ['ab', 'cd', 'ef', 'gh', 'i']
trailing gives you the following options:
* normal: leaves remaining characters in their own last position
* remove: return the list without the remainder characters
* combine: add the remainder characters to the previous set
* error: raise an IndexError if there are remaining characters
.. code:: python
reusables.cut("abcdefghi", 2, "error")
# Traceback (most recent call last):
# ...
# IndexError: String of length 9 not divisible by 2 to cut
reusables.cut("abcdefghi", 2, "remove")
# ['ab', 'cd', 'ef', 'gh']
reusables.cut("abcdefghi", 2, "combine")
# ['ab', 'cd', 'ef', 'ghi']
:param string: string to modify
:param characters: how many characters to split it into
:param trailing: "normal", "remove", "combine", or "error"
:return: list of the cut string
"""
split_str = [string[i:i + characters] for
i in range(0, len(string), characters)]
if trailing != "normal" and len(split_str[-1]) != characters:
if trailing.lower() == "remove":
return split_str[:-1]
if trailing.lower() == "combine" and len(split_str) >= 2:
return split_str[:-2] + [split_str[-2] + split_str[-1]]
if trailing.lower() == "error":
raise IndexError("String of length {0} not divisible by {1} to"
" cut".format(len(string), characters))
return split_str
|
[
"def",
"cut",
"(",
"string",
",",
"characters",
"=",
"2",
",",
"trailing",
"=",
"\"normal\"",
")",
":",
"split_str",
"=",
"[",
"string",
"[",
"i",
":",
"i",
"+",
"characters",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"string",
")",
",",
"characters",
")",
"]",
"if",
"trailing",
"!=",
"\"normal\"",
"and",
"len",
"(",
"split_str",
"[",
"-",
"1",
"]",
")",
"!=",
"characters",
":",
"if",
"trailing",
".",
"lower",
"(",
")",
"==",
"\"remove\"",
":",
"return",
"split_str",
"[",
":",
"-",
"1",
"]",
"if",
"trailing",
".",
"lower",
"(",
")",
"==",
"\"combine\"",
"and",
"len",
"(",
"split_str",
")",
">=",
"2",
":",
"return",
"split_str",
"[",
":",
"-",
"2",
"]",
"+",
"[",
"split_str",
"[",
"-",
"2",
"]",
"+",
"split_str",
"[",
"-",
"1",
"]",
"]",
"if",
"trailing",
".",
"lower",
"(",
")",
"==",
"\"error\"",
":",
"raise",
"IndexError",
"(",
"\"String of length {0} not divisible by {1} to\"",
"\" cut\"",
".",
"format",
"(",
"len",
"(",
"string",
")",
",",
"characters",
")",
")",
"return",
"split_str"
] |
Split a string into a list of N characters each.
.. code:: python
reusables.cut("abcdefghi")
# ['ab', 'cd', 'ef', 'gh', 'i']
trailing gives you the following options:
* normal: leaves remaining characters in their own last position
* remove: return the list without the remainder characters
* combine: add the remainder characters to the previous set
* error: raise an IndexError if there are remaining characters
.. code:: python
reusables.cut("abcdefghi", 2, "error")
# Traceback (most recent call last):
# ...
# IndexError: String of length 9 not divisible by 2 to cut
reusables.cut("abcdefghi", 2, "remove")
# ['ab', 'cd', 'ef', 'gh']
reusables.cut("abcdefghi", 2, "combine")
# ['ab', 'cd', 'ef', 'ghi']
:param string: string to modify
:param characters: how many characters to split it into
:param trailing: "normal", "remove", "combine", or "error"
:return: list of the cut string
|
[
"Split",
"a",
"string",
"into",
"a",
"list",
"of",
"N",
"characters",
"each",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/string_manipulation.py#L24-L69
|
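The docstring examples above run as-is; the only addition in this sketch is catching the IndexError that the "error" mode raises (assumes reusables is importable).

import reusables

print(reusables.cut("abcdefghi"))                # ['ab', 'cd', 'ef', 'gh', 'i']
print(reusables.cut("abcdefghi", 2, "remove"))   # ['ab', 'cd', 'ef', 'gh']
print(reusables.cut("abcdefghi", 2, "combine"))  # ['ab', 'cd', 'ef', 'ghi']
try:
    reusables.cut("abcdefghi", 2, "error")
except IndexError as err:
    print(err)  # String of length 9 not divisible by 2 to cut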
cdgriffith/Reusables
|
reusables/string_manipulation.py
|
int_to_roman
|
def int_to_roman(integer):
"""
Convert an integer into a string of roman numbers.
.. code:: python
reusables.int_to_roman(445)
# 'CDXLV'
:param integer:
:return: roman string
"""
if not isinstance(integer, int):
raise ValueError("Input integer must be of type int")
output = []
while integer > 0:
for r, i in sorted(_roman_dict.items(),
key=lambda x: x[1], reverse=True):
while integer >= i:
output.append(r)
integer -= i
return "".join(output)
|
python
|
def int_to_roman(integer):
"""
Convert an integer into a string of roman numbers.
.. code:: python
reusables.int_to_roman(445)
# 'CDXLV'
:param integer:
:return: roman string
"""
if not isinstance(integer, int):
raise ValueError("Input integer must be of type int")
output = []
while integer > 0:
for r, i in sorted(_roman_dict.items(),
key=lambda x: x[1], reverse=True):
while integer >= i:
output.append(r)
integer -= i
return "".join(output)
|
[
"def",
"int_to_roman",
"(",
"integer",
")",
":",
"if",
"not",
"isinstance",
"(",
"integer",
",",
"int",
")",
":",
"raise",
"ValueError",
"(",
"\"Input integer must be of type int\"",
")",
"output",
"=",
"[",
"]",
"while",
"integer",
">",
"0",
":",
"for",
"r",
",",
"i",
"in",
"sorted",
"(",
"_roman_dict",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"1",
"]",
",",
"reverse",
"=",
"True",
")",
":",
"while",
"integer",
">=",
"i",
":",
"output",
".",
"append",
"(",
"r",
")",
"integer",
"-=",
"i",
"return",
"\"\"",
".",
"join",
"(",
"output",
")"
] |
Convert an integer into a string of roman numbers.
.. code:: python
reusables.int_to_roman(445)
# 'CDXLV'
:param integer:
:return: roman string
|
[
"Convert",
"an",
"integer",
"into",
"a",
"string",
"of",
"roman",
"numbers",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/string_manipulation.py#L72-L94
|
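A short sketch of int_to_roman. The 2024 result assumes _roman_dict carries the usual subtractive pairs (IV, IX, XL, XC, CD, CM), which the companion roman_to_int below also relies on when it looks up two-letter combinations.

import reusables

print(reusables.int_to_roman(445))    # 'CDXLV'
print(reusables.int_to_roman(2024))   # 'MMXXIV'
# Non-int input raises:
# reusables.int_to_roman("445")  ->  ValueError: Input integer must be of type int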
cdgriffith/Reusables
|
reusables/string_manipulation.py
|
roman_to_int
|
def roman_to_int(roman_string):
"""
Converts a string of roman numbers into an integer.
.. code:: python
reusables.roman_to_int("XXXVI")
# 36
:param roman_string: XVI or similar
:return: parsed integer
"""
roman_string = roman_string.upper().strip()
if "IIII" in roman_string:
raise ValueError("Malformed roman string")
value = 0
skip_one = False
last_number = None
for i, letter in enumerate(roman_string):
if letter not in _roman_dict:
raise ValueError("Malformed roman string")
if skip_one:
skip_one = False
continue
if i < (len(roman_string) - 1):
double_check = letter + roman_string[i + 1]
if double_check in _roman_dict:
if last_number and _roman_dict[double_check] > last_number:
raise ValueError("Malformed roman string")
last_number = _roman_dict[double_check]
value += _roman_dict[double_check]
skip_one = True
continue
if last_number and _roman_dict[letter] > last_number:
raise ValueError("Malformed roman string")
last_number = _roman_dict[letter]
value += _roman_dict[letter]
return value
|
python
|
def roman_to_int(roman_string):
"""
Converts a string of roman numbers into an integer.
.. code:: python
reusables.roman_to_int("XXXVI")
# 36
:param roman_string: XVI or similar
:return: parsed integer
"""
roman_string = roman_string.upper().strip()
if "IIII" in roman_string:
raise ValueError("Malformed roman string")
value = 0
skip_one = False
last_number = None
for i, letter in enumerate(roman_string):
if letter not in _roman_dict:
raise ValueError("Malformed roman string")
if skip_one:
skip_one = False
continue
if i < (len(roman_string) - 1):
double_check = letter + roman_string[i + 1]
if double_check in _roman_dict:
if last_number and _roman_dict[double_check] > last_number:
raise ValueError("Malformed roman string")
last_number = _roman_dict[double_check]
value += _roman_dict[double_check]
skip_one = True
continue
if last_number and _roman_dict[letter] > last_number:
raise ValueError("Malformed roman string")
last_number = _roman_dict[letter]
value += _roman_dict[letter]
return value
|
[
"def",
"roman_to_int",
"(",
"roman_string",
")",
":",
"roman_string",
"=",
"roman_string",
".",
"upper",
"(",
")",
".",
"strip",
"(",
")",
"if",
"\"IIII\"",
"in",
"roman_string",
":",
"raise",
"ValueError",
"(",
"\"Malformed roman string\"",
")",
"value",
"=",
"0",
"skip_one",
"=",
"False",
"last_number",
"=",
"None",
"for",
"i",
",",
"letter",
"in",
"enumerate",
"(",
"roman_string",
")",
":",
"if",
"letter",
"not",
"in",
"_roman_dict",
":",
"raise",
"ValueError",
"(",
"\"Malformed roman string\"",
")",
"if",
"skip_one",
":",
"skip_one",
"=",
"False",
"continue",
"if",
"i",
"<",
"(",
"len",
"(",
"roman_string",
")",
"-",
"1",
")",
":",
"double_check",
"=",
"letter",
"+",
"roman_string",
"[",
"i",
"+",
"1",
"]",
"if",
"double_check",
"in",
"_roman_dict",
":",
"if",
"last_number",
"and",
"_roman_dict",
"[",
"double_check",
"]",
">",
"last_number",
":",
"raise",
"ValueError",
"(",
"\"Malformed roman string\"",
")",
"last_number",
"=",
"_roman_dict",
"[",
"double_check",
"]",
"value",
"+=",
"_roman_dict",
"[",
"double_check",
"]",
"skip_one",
"=",
"True",
"continue",
"if",
"last_number",
"and",
"_roman_dict",
"[",
"letter",
"]",
">",
"last_number",
":",
"raise",
"ValueError",
"(",
"\"Malformed roman string\"",
")",
"last_number",
"=",
"_roman_dict",
"[",
"letter",
"]",
"value",
"+=",
"_roman_dict",
"[",
"letter",
"]",
"return",
"value"
] |
Converts a string of roman numbers into an integer.
.. code:: python
reusables.roman_to_int("XXXVI")
# 36
:param roman_string: XVI or similar
:return: parsed integer
|
[
"Converts",
"a",
"string",
"of",
"roman",
"numbers",
"into",
"an",
"integer",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/string_manipulation.py#L97-L135
|
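A sketch pairing roman_to_int with int_to_roman as a round-trip check; input is upper-cased and stripped first, so lowercase works too.

import reusables

print(reusables.roman_to_int("xxxvi"))    # 36
print(reusables.roman_to_int("MCMXCIX"))  # 1999
for n in (1, 36, 445, 1999):
    assert reusables.roman_to_int(reusables.int_to_roman(n)) == n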
cdgriffith/Reusables
|
reusables/string_manipulation.py
|
int_to_words
|
def int_to_words(number, european=False):
"""
Converts an integer or float to words.
.. code:: python
reusables.int_to_words(445)
# 'four hundred forty-five'
reusables.int_to_words(1.45)
# 'one and forty-five hundredths'
:param number: String, integer, or float to convert to words. The decimal
can only be up to three places long, and max number allowed is 999
decillion.
:param european: If the string uses the european style formatting, i.e.
decimal points instead of commas and commas instead of decimal points,
set this parameter to True
:return: The translated string
"""
def ones(n):
return "" if n == 0 else _numbers[n]
def tens(n):
teen = int("{0}{1}".format(n[0], n[1]))
if n[0] == 0:
return ones(n[1])
if teen in _numbers:
return _numbers[teen]
else:
ten = _numbers[int("{0}0".format(n[0]))]
one = _numbers[n[1]]
return "{0}-{1}".format(ten, one)
def hundreds(n):
if n[0] == 0:
return tens(n[1:])
else:
t = tens(n[1:])
return "{0} hundred {1}".format(_numbers[n[0]], "" if not t else t)
def comma_separated(list_of_strings):
if len(list_of_strings) > 1:
return "{0} ".format("" if len(list_of_strings) == 2
else ",").join(list_of_strings)
else:
return list_of_strings[0]
def while_loop(list_of_numbers, final_list):
index = 0
group_set = int(len(list_of_numbers) / 3)
while group_set != 0:
value = hundreds(list_of_numbers[index:index + 3])
if value:
final_list.append("{0} {1}".format(value, _places[group_set])
if _places[group_set] else value)
group_set -= 1
index += 3
number_list = []
decimal_list = []
decimal = ''
number = str(number)
group_delimiter, point_delimiter = (",", ".") \
if not european else (".", ",")
if point_delimiter in number:
decimal = number.split(point_delimiter)[1]
number = number.split(point_delimiter)[0].replace(
group_delimiter, "")
elif group_delimiter in number:
number = number.replace(group_delimiter, "")
if not number.isdigit():
raise ValueError("Number is not numeric")
if decimal and not decimal.isdigit():
raise ValueError("Decimal is not numeric")
if int(number) == 0:
number_list.append("zero")
r = len(number) % 3
d_r = len(decimal) % 3
number = number.zfill(len(number) + 3 - r if r else 0)
f_decimal = decimal.zfill(len(decimal) + 3 - d_r if d_r else 0)
d = [int(x) for x in f_decimal]
n = [int(x) for x in number]
while_loop(n, number_list)
if decimal and int(decimal) != 0:
while_loop(d, decimal_list)
if decimal_list:
name = ''
if len(decimal) % 3 == 1:
name = 'ten'
elif len(decimal) % 3 == 2:
name = 'hundred'
place = int((str(len(decimal) / 3).split(".")[0]))
number_list.append("and {0} {1}{2}{3}ths".format(
comma_separated(decimal_list), name,
"-" if name and _places[place+1] else "", _places[place+1]))
return comma_separated(number_list)
|
python
|
def int_to_words(number, european=False):
"""
Converts an integer or float to words.
.. code:: python
reusables.int_to_words(445)
# 'four hundred forty-five'
reusables.int_to_words(1.45)
# 'one and forty-five hundredths'
:param number: String, integer, or float to convert to words. The decimal
can only be up to three places long, and max number allowed is 999
decillion.
:param european: If the string uses the european style formatting, i.e.
decimal points instead of commas and commas instead of decimal points,
set this parameter to True
:return: The translated string
"""
def ones(n):
return "" if n == 0 else _numbers[n]
def tens(n):
teen = int("{0}{1}".format(n[0], n[1]))
if n[0] == 0:
return ones(n[1])
if teen in _numbers:
return _numbers[teen]
else:
ten = _numbers[int("{0}0".format(n[0]))]
one = _numbers[n[1]]
return "{0}-{1}".format(ten, one)
def hundreds(n):
if n[0] == 0:
return tens(n[1:])
else:
t = tens(n[1:])
return "{0} hundred {1}".format(_numbers[n[0]], "" if not t else t)
def comma_separated(list_of_strings):
if len(list_of_strings) > 1:
return "{0} ".format("" if len(list_of_strings) == 2
else ",").join(list_of_strings)
else:
return list_of_strings[0]
def while_loop(list_of_numbers, final_list):
index = 0
group_set = int(len(list_of_numbers) / 3)
while group_set != 0:
value = hundreds(list_of_numbers[index:index + 3])
if value:
final_list.append("{0} {1}".format(value, _places[group_set])
if _places[group_set] else value)
group_set -= 1
index += 3
number_list = []
decimal_list = []
decimal = ''
number = str(number)
group_delimiter, point_delimiter = (",", ".") \
if not european else (".", ",")
if point_delimiter in number:
decimal = number.split(point_delimiter)[1]
number = number.split(point_delimiter)[0].replace(
group_delimiter, "")
elif group_delimiter in number:
number = number.replace(group_delimiter, "")
if not number.isdigit():
raise ValueError("Number is not numeric")
if decimal and not decimal.isdigit():
raise ValueError("Decimal is not numeric")
if int(number) == 0:
number_list.append("zero")
r = len(number) % 3
d_r = len(decimal) % 3
number = number.zfill(len(number) + 3 - r if r else 0)
f_decimal = decimal.zfill(len(decimal) + 3 - d_r if d_r else 0)
d = [int(x) for x in f_decimal]
n = [int(x) for x in number]
while_loop(n, number_list)
if decimal and int(decimal) != 0:
while_loop(d, decimal_list)
if decimal_list:
name = ''
if len(decimal) % 3 == 1:
name = 'ten'
elif len(decimal) % 3 == 2:
name = 'hundred'
place = int((str(len(decimal) / 3).split(".")[0]))
number_list.append("and {0} {1}{2}{3}ths".format(
comma_separated(decimal_list), name,
"-" if name and _places[place+1] else "", _places[place+1]))
return comma_separated(number_list)
|
[
"def",
"int_to_words",
"(",
"number",
",",
"european",
"=",
"False",
")",
":",
"def",
"ones",
"(",
"n",
")",
":",
"return",
"\"\"",
"if",
"n",
"==",
"0",
"else",
"_numbers",
"[",
"n",
"]",
"def",
"tens",
"(",
"n",
")",
":",
"teen",
"=",
"int",
"(",
"\"{0}{1}\"",
".",
"format",
"(",
"n",
"[",
"0",
"]",
",",
"n",
"[",
"1",
"]",
")",
")",
"if",
"n",
"[",
"0",
"]",
"==",
"0",
":",
"return",
"ones",
"(",
"n",
"[",
"1",
"]",
")",
"if",
"teen",
"in",
"_numbers",
":",
"return",
"_numbers",
"[",
"teen",
"]",
"else",
":",
"ten",
"=",
"_numbers",
"[",
"int",
"(",
"\"{0}0\"",
".",
"format",
"(",
"n",
"[",
"0",
"]",
")",
")",
"]",
"one",
"=",
"_numbers",
"[",
"n",
"[",
"1",
"]",
"]",
"return",
"\"{0}-{1}\"",
".",
"format",
"(",
"ten",
",",
"one",
")",
"def",
"hundreds",
"(",
"n",
")",
":",
"if",
"n",
"[",
"0",
"]",
"==",
"0",
":",
"return",
"tens",
"(",
"n",
"[",
"1",
":",
"]",
")",
"else",
":",
"t",
"=",
"tens",
"(",
"n",
"[",
"1",
":",
"]",
")",
"return",
"\"{0} hundred {1}\"",
".",
"format",
"(",
"_numbers",
"[",
"n",
"[",
"0",
"]",
"]",
",",
"\"\"",
"if",
"not",
"t",
"else",
"t",
")",
"def",
"comma_separated",
"(",
"list_of_strings",
")",
":",
"if",
"len",
"(",
"list_of_strings",
")",
">",
"1",
":",
"return",
"\"{0} \"",
".",
"format",
"(",
"\"\"",
"if",
"len",
"(",
"list_of_strings",
")",
"==",
"2",
"else",
"\",\"",
")",
".",
"join",
"(",
"list_of_strings",
")",
"else",
":",
"return",
"list_of_strings",
"[",
"0",
"]",
"def",
"while_loop",
"(",
"list_of_numbers",
",",
"final_list",
")",
":",
"index",
"=",
"0",
"group_set",
"=",
"int",
"(",
"len",
"(",
"list_of_numbers",
")",
"/",
"3",
")",
"while",
"group_set",
"!=",
"0",
":",
"value",
"=",
"hundreds",
"(",
"list_of_numbers",
"[",
"index",
":",
"index",
"+",
"3",
"]",
")",
"if",
"value",
":",
"final_list",
".",
"append",
"(",
"\"{0} {1}\"",
".",
"format",
"(",
"value",
",",
"_places",
"[",
"group_set",
"]",
")",
"if",
"_places",
"[",
"group_set",
"]",
"else",
"value",
")",
"group_set",
"-=",
"1",
"index",
"+=",
"3",
"number_list",
"=",
"[",
"]",
"decimal_list",
"=",
"[",
"]",
"decimal",
"=",
"''",
"number",
"=",
"str",
"(",
"number",
")",
"group_delimiter",
",",
"point_delimiter",
"=",
"(",
"\",\"",
",",
"\".\"",
")",
"if",
"not",
"european",
"else",
"(",
"\".\"",
",",
"\",\"",
")",
"if",
"point_delimiter",
"in",
"number",
":",
"decimal",
"=",
"number",
".",
"split",
"(",
"point_delimiter",
")",
"[",
"1",
"]",
"number",
"=",
"number",
".",
"split",
"(",
"point_delimiter",
")",
"[",
"0",
"]",
".",
"replace",
"(",
"group_delimiter",
",",
"\"\"",
")",
"elif",
"group_delimiter",
"in",
"number",
":",
"number",
"=",
"number",
".",
"replace",
"(",
"group_delimiter",
",",
"\"\"",
")",
"if",
"not",
"number",
".",
"isdigit",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"Number is not numeric\"",
")",
"if",
"decimal",
"and",
"not",
"decimal",
".",
"isdigit",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"Decimal is not numeric\"",
")",
"if",
"int",
"(",
"number",
")",
"==",
"0",
":",
"number_list",
".",
"append",
"(",
"\"zero\"",
")",
"r",
"=",
"len",
"(",
"number",
")",
"%",
"3",
"d_r",
"=",
"len",
"(",
"decimal",
")",
"%",
"3",
"number",
"=",
"number",
".",
"zfill",
"(",
"len",
"(",
"number",
")",
"+",
"3",
"-",
"r",
"if",
"r",
"else",
"0",
")",
"f_decimal",
"=",
"decimal",
".",
"zfill",
"(",
"len",
"(",
"decimal",
")",
"+",
"3",
"-",
"d_r",
"if",
"d_r",
"else",
"0",
")",
"d",
"=",
"[",
"int",
"(",
"x",
")",
"for",
"x",
"in",
"f_decimal",
"]",
"n",
"=",
"[",
"int",
"(",
"x",
")",
"for",
"x",
"in",
"number",
"]",
"while_loop",
"(",
"n",
",",
"number_list",
")",
"if",
"decimal",
"and",
"int",
"(",
"decimal",
")",
"!=",
"0",
":",
"while_loop",
"(",
"d",
",",
"decimal_list",
")",
"if",
"decimal_list",
":",
"name",
"=",
"''",
"if",
"len",
"(",
"decimal",
")",
"%",
"3",
"==",
"1",
":",
"name",
"=",
"'ten'",
"elif",
"len",
"(",
"decimal",
")",
"%",
"3",
"==",
"2",
":",
"name",
"=",
"'hundred'",
"place",
"=",
"int",
"(",
"(",
"str",
"(",
"len",
"(",
"decimal",
")",
"/",
"3",
")",
".",
"split",
"(",
"\".\"",
")",
"[",
"0",
"]",
")",
")",
"number_list",
".",
"append",
"(",
"\"and {0} {1}{2}{3}ths\"",
".",
"format",
"(",
"comma_separated",
"(",
"decimal_list",
")",
",",
"name",
",",
"\"-\"",
"if",
"name",
"and",
"_places",
"[",
"place",
"+",
"1",
"]",
"else",
"\"\"",
",",
"_places",
"[",
"place",
"+",
"1",
"]",
")",
")",
"return",
"comma_separated",
"(",
"number_list",
")"
] |
Converts an integer or float to words.
.. code:: python
reusables.int_to_words(445)
# 'four hundred forty-five'
reusables.int_to_words(1.45)
# 'one and forty-five hundredths'
:param number: String, integer, or float to convert to words. The decimal
can only be up to three places long, and max number allowed is 999
decillion.
:param european: If the string uses the european style formatting, i.e.
decimal points instead of commas and commas instead of decimal points,
set this parameter to True
:return: The translated string
|
[
"Converts",
"an",
"integer",
"or",
"float",
"to",
"words",
"."
] |
train
|
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/string_manipulation.py#L138-L247
|
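A hedged sketch of int_to_words. The first three calls follow directly from the docstring and code above; the last shows european=True swapping the roles of '.' and ',' (the input value is made up, and the exact wording of its output depends on the _places table).

import reusables

print(reusables.int_to_words(445))    # 'four hundred forty-five'
print(reusables.int_to_words(1.45))   # 'one and forty-five hundredths'
print(reusables.int_to_words(0))      # 'zero'
# Decimal comma, with '.' as the thousands separator:
print(reusables.int_to_words("1.234,5", european=True))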
aicenter/roadmap-processing
|
roadmaptools/osmfilter.py
|
filter_osm_file
|
def filter_osm_file():
""" Downloads (and compiles) osmfilter tool from web and
calls that osmfilter to filter out only the road elements.
"""
print_info('Filtering OSM file...')
start_time = time.time()
if check_osmfilter():
# params = '--keep="highway=motorway =motorway_link =trunk =trunk_link =primary =primary_link =secondary' \
# ' =secondary_link =tertiary =tertiary_link =unclassified =unclassified_link =residential =residential_link' \
# ' =living_street" --drop="access=no"'
params = config.osm_filter_params
command = './osmfilter' if platform.system() == 'Linux' else 'osmfilter.exe'
if platform.system() == 'Linux':
filter_command = '%s "%s" %s | pv > "%s"' % (command, config.osm_map_filename, params,
config.filtered_osm_filename)
else:
filter_command = '%s "%s" %s > "%s"' % (
command, config.osm_map_filename, params, config.filtered_osm_filename)
os.system(filter_command)
else:
print_info('Osmfilter not available. Exiting.')
exit(1)
print_info('Filtering finished. (%.2f secs)' % (time.time() - start_time))
|
python
|
def filter_osm_file():
""" Downloads (and compiles) osmfilter tool from web and
calls that osmfilter to filter out only the road elements.
"""
print_info('Filtering OSM file...')
start_time = time.time()
if check_osmfilter():
# params = '--keep="highway=motorway =motorway_link =trunk =trunk_link =primary =primary_link =secondary' \
# ' =secondary_link =tertiary =tertiary_link =unclassified =unclassified_link =residential =residential_link' \
# ' =living_street" --drop="access=no"'
params = config.osm_filter_params
command = './osmfilter' if platform.system() == 'Linux' else 'osmfilter.exe'
if platform.system() == 'Linux':
filter_command = '%s "%s" %s | pv > "%s"' % (command, config.osm_map_filename, params,
config.filtered_osm_filename)
else:
filter_command = '%s "%s" %s > "%s"' % (
command, config.osm_map_filename, params, config.filtered_osm_filename)
os.system(filter_command)
else:
print_info('Osmfilter not available. Exiting.')
exit(1)
print_info('Filtering finished. (%.2f secs)' % (time.time() - start_time))
|
[
"def",
"filter_osm_file",
"(",
")",
":",
"print_info",
"(",
"'Filtering OSM file...'",
")",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"if",
"check_osmfilter",
"(",
")",
":",
"# params = '--keep=\"highway=motorway =motorway_link =trunk =trunk_link =primary =primary_link =secondary' \\\r",
"# \t\t ' =secondary_link =tertiary =tertiary_link =unclassified =unclassified_link =residential =residential_link' \\\r",
"# \t\t ' =living_street\" --drop=\"access=no\"'\r",
"params",
"=",
"config",
".",
"osm_filter_params",
"command",
"=",
"'./osmfilter'",
"if",
"platform",
".",
"system",
"(",
")",
"==",
"'Linux'",
"else",
"'osmfilter.exe'",
"if",
"platform",
".",
"system",
"(",
")",
"==",
"'Linux'",
":",
"filter_command",
"=",
"'%s \"%s\" %s | pv > \"%s\"'",
"%",
"(",
"command",
",",
"config",
".",
"osm_map_filename",
",",
"params",
",",
"config",
".",
"filtered_osm_filename",
")",
"else",
":",
"filter_command",
"=",
"'%s \"%s\" %s > \"%s\"'",
"%",
"(",
"command",
",",
"config",
".",
"osm_map_filename",
",",
"params",
",",
"config",
".",
"filtered_osm_filename",
")",
"os",
".",
"system",
"(",
"filter_command",
")",
"else",
":",
"print_info",
"(",
"'Osmfilter not available. Exiting.'",
")",
"exit",
"(",
"1",
")",
"print_info",
"(",
"'Filtering finished. (%.2f secs)'",
"%",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
")",
")"
] |
Downloads (and compiles) osmfilter tool from web and
calls that osmfilter to filter out only the road elements.
|
[
"Downloads",
"(",
"and",
"compiles",
")",
"osmfilter",
"tool",
"from",
"web",
"and",
"calls",
"that",
"osmfilter",
"to",
"only",
"filter",
"out",
"only",
"the",
"road",
"elements",
"."
] |
train
|
https://github.com/aicenter/roadmap-processing/blob/d9fb6e0b3bc1f11302a9e2ac62ee6db9484e2018/roadmaptools/osmfilter.py#L10-L37
|
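A sketch of the shell pipeline the Linux branch assembles; the file names and keep/drop parameters below are illustrative stand-ins for the values read from roadmaptools' config module. Since the pieces are interpolated unquoted into os.system, the config values have to be trusted.

osm_map_filename = "map.osm"                # hypothetical
filtered_osm_filename = "map-filtered.osm"  # hypothetical
params = '--keep="highway=primary =secondary" --drop="access=no"'
command = './osmfilter'
print('%s "%s" %s | pv > "%s"' % (command, osm_map_filename, params,
                                  filtered_osm_filename))
# ./osmfilter "map.osm" --keep="highway=primary =secondary" --drop="access=no" | pv > "map-filtered.osm"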
Koed00/django-rq-jobs
|
django_rq_jobs/models.py
|
task_list
|
def task_list():
"""
Scans the modules set in RQ_JOBS_MODULE for RQ jobs decorated with @task
Compiles a readable list for Job model task choices
"""
try:
jobs_module = settings.RQ_JOBS_MODULE
except AttributeError:
raise ImproperlyConfigured(_("You have to define RQ_JOBS_MODULE in settings.py"))
if isinstance(jobs_module, string_types):
jobs_modules = (jobs_module,)
elif isinstance(jobs_module, (tuple, list)):
jobs_modules = jobs_module
else:
raise ImproperlyConfigured(_("RQ_JOBS_MODULE must be a string or a tuple"))
choices = []
for module in jobs_modules:
try:
tasks = importlib.import_module(module)
except ImportError:
raise ImproperlyConfigured(_("Can not find module {}").format(module))
module_choices = [('%s.%s' % (module, x), underscore_to_camelcase(x)) for x, y in list(tasks.__dict__.items())
if type(y) == FunctionType and hasattr(y, 'delay')]
choices.extend(module_choices)
choices.sort(key=lambda tup: tup[1])
return choices
|
python
|
def task_list():
"""
Scans the modules set in RQ_JOBS_MODULE for RQ jobs decorated with @task
Compiles a readable list for Job model task choices
"""
try:
jobs_module = settings.RQ_JOBS_MODULE
except AttributeError:
raise ImproperlyConfigured(_("You have to define RQ_JOBS_MODULE in settings.py"))
if isinstance(jobs_module, string_types):
jobs_modules = (jobs_module,)
elif isinstance(jobs_module, (tuple, list)):
jobs_modules = jobs_module
else:
raise ImproperlyConfigured(_("RQ_JOBS_MODULE must be a string or a tuple"))
choices = []
for module in jobs_modules:
try:
tasks = importlib.import_module(module)
except ImportError:
raise ImproperlyConfigured(_("Can not find module {}").format(module))
module_choices = [('%s.%s' % (module, x), underscore_to_camelcase(x)) for x, y in list(tasks.__dict__.items())
if type(y) == FunctionType and hasattr(y, 'delay')]
choices.extend(module_choices)
choices.sort(key=lambda tup: tup[1])
return choices
|
[
"def",
"task_list",
"(",
")",
":",
"try",
":",
"jobs_module",
"=",
"settings",
".",
"RQ_JOBS_MODULE",
"except",
"AttributeError",
":",
"raise",
"ImproperlyConfigured",
"(",
"_",
"(",
"\"You have to define RQ_JOBS_MODULE in settings.py\"",
")",
")",
"if",
"isinstance",
"(",
"jobs_module",
",",
"string_types",
")",
":",
"jobs_modules",
"=",
"(",
"jobs_module",
",",
")",
"elif",
"isinstance",
"(",
"jobs_module",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"jobs_modules",
"=",
"jobs_module",
"else",
":",
"raise",
"ImproperlyConfigured",
"(",
"_",
"(",
"\"RQ_JOBS_MODULE must be a string or a tuple\"",
")",
")",
"choices",
"=",
"[",
"]",
"for",
"module",
"in",
"jobs_modules",
":",
"try",
":",
"tasks",
"=",
"importlib",
".",
"import_module",
"(",
"module",
")",
"except",
"ImportError",
":",
"raise",
"ImproperlyConfigured",
"(",
"_",
"(",
"\"Can not find module {}\"",
")",
".",
"format",
"(",
"module",
")",
")",
"module_choices",
"=",
"[",
"(",
"'%s.%s'",
"%",
"(",
"module",
",",
"x",
")",
",",
"underscore_to_camelcase",
"(",
"x",
")",
")",
"for",
"x",
",",
"y",
"in",
"list",
"(",
"tasks",
".",
"__dict__",
".",
"items",
"(",
")",
")",
"if",
"type",
"(",
"y",
")",
"==",
"FunctionType",
"and",
"hasattr",
"(",
"y",
",",
"'delay'",
")",
"]",
"choices",
".",
"extend",
"(",
"module_choices",
")",
"choices",
".",
"sort",
"(",
"key",
"=",
"lambda",
"tup",
":",
"tup",
"[",
"1",
"]",
")",
"return",
"choices"
] |
Scans the modules set in RQ_JOBS_MODULE for RQ jobs decorated with @task
Compiles a readable list for Job model task choices
|
[
"Scans",
"the",
"modules",
"set",
"in",
"RQ_JOBS_MODULES",
"for",
"RQ",
"jobs",
"decorated",
"with"
] |
train
|
https://github.com/Koed00/django-rq-jobs/blob/b25ffd15c91858406494ae0c29babf00c268db18/django_rq_jobs/models.py#L33-L65
|
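A hedged sketch of the layout task_list() expects; the module and function names are hypothetical. Discovery keys on a module-level function exposing a delay attribute, which django-rq's @job decorator provides.

# settings.py
RQ_JOBS_MODULE = 'myapp.tasks'

# myapp/tasks.py
from django_rq import job

@job
def nightly_cleanup():
    pass

# task_list() then returns choices such as
# [('myapp.tasks.nightly_cleanup', <label via underscore_to_camelcase>)]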
Koed00/django-rq-jobs
|
django_rq_jobs/models.py
|
Job.rq_job
|
def rq_job(self):
"""The last RQ Job this ran on"""
if not self.rq_id or not self.rq_origin:
return
try:
return RQJob.fetch(self.rq_id, connection=get_connection(self.rq_origin))
except NoSuchJobError:
return
|
python
|
def rq_job(self):
"""The last RQ Job this ran on"""
if not self.rq_id or not self.rq_origin:
return
try:
return RQJob.fetch(self.rq_id, connection=get_connection(self.rq_origin))
except NoSuchJobError:
return
|
[
"def",
"rq_job",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"rq_id",
"or",
"not",
"self",
".",
"rq_origin",
":",
"return",
"try",
":",
"return",
"RQJob",
".",
"fetch",
"(",
"self",
".",
"rq_id",
",",
"connection",
"=",
"get_connection",
"(",
"self",
".",
"rq_origin",
")",
")",
"except",
"NoSuchJobError",
":",
"return"
] |
The last RQ Job this ran on
|
[
"The",
"last",
"RQ",
"Job",
"this",
"ran",
"on"
] |
train
|
https://github.com/Koed00/django-rq-jobs/blob/b25ffd15c91858406494ae0c29babf00c268db18/django_rq_jobs/models.py#L95-L102
|
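A hedged sketch of reading the property above. It deliberately returns None both when no RQ id/origin is stored and when RQ has already expired the job (NoSuchJobError is swallowed), so callers must handle None.

from django_rq_jobs.models import Job

job = Job.objects.first()       # hypothetical existing record
rq_job = job.rq_job
if rq_job is not None:
    print(rq_job.get_status())  # e.g. 'queued' or 'finished'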
Koed00/django-rq-jobs
|
django_rq_jobs/models.py
|
Job.rq_link
|
def rq_link(self):
"""Link to Django-RQ status page for this job"""
if self.rq_job:
url = reverse('rq_job_detail',
kwargs={'job_id': self.rq_id, 'queue_index': queue_index_by_name(self.rq_origin)})
return '<a href="{}">{}</a>'.format(url, self.rq_id)
|
python
|
def rq_link(self):
"""Link to Django-RQ status page for this job"""
if self.rq_job:
url = reverse('rq_job_detail',
kwargs={'job_id': self.rq_id, 'queue_index': queue_index_by_name(self.rq_origin)})
return '<a href="{}">{}</a>'.format(url, self.rq_id)
|
[
"def",
"rq_link",
"(",
"self",
")",
":",
"if",
"self",
".",
"rq_job",
":",
"url",
"=",
"reverse",
"(",
"'rq_job_detail'",
",",
"kwargs",
"=",
"{",
"'job_id'",
":",
"self",
".",
"rq_id",
",",
"'queue_index'",
":",
"queue_index_by_name",
"(",
"self",
".",
"rq_origin",
")",
"}",
")",
"return",
"'<a href=\"{}\">{}</a>'",
".",
"format",
"(",
"url",
",",
"self",
".",
"rq_id",
")"
] |
Link to Django-RQ status page for this job
|
[
"Link",
"to",
"Django",
"-",
"RQ",
"status",
"page",
"for",
"this",
"job"
] |
train
|
https://github.com/Koed00/django-rq-jobs/blob/b25ffd15c91858406494ae0c29babf00c268db18/django_rq_jobs/models.py#L109-L114
|
Koed00/django-rq-jobs
|
django_rq_jobs/models.py
|
Job.rq_task
|
def rq_task(self):
"""
The function to call for this task.
Config errors are caught by tasks_list() already.
"""
task_path = self.task.split('.')
module_name = '.'.join(task_path[:-1])
task_name = task_path[-1]
module = importlib.import_module(module_name)
return getattr(module, task_name)
|
python
|
def rq_task(self):
"""
The function to call for this task.
Config errors are caught by tasks_list() already.
"""
task_path = self.task.split('.')
module_name = '.'.join(task_path[:-1])
task_name = task_path[-1]
module = importlib.import_module(module_name)
return getattr(module, task_name)
|
[
"def",
"rq_task",
"(",
"self",
")",
":",
"task_path",
"=",
"self",
".",
"task",
".",
"split",
"(",
"'.'",
")",
"module_name",
"=",
"'.'",
".",
"join",
"(",
"task_path",
"[",
":",
"-",
"1",
"]",
")",
"task_name",
"=",
"task_path",
"[",
"-",
"1",
"]",
"module",
"=",
"importlib",
".",
"import_module",
"(",
"module_name",
")",
"return",
"getattr",
"(",
"module",
",",
"task_name",
")"
] |
The function to call for this task.
Config errors are caught by tasks_list() already.
|
[
"The",
"function",
"to",
"call",
"for",
"this",
"task",
".",
"Config",
"errors",
"are",
"caught",
"by",
"tasks_list",
"()",
"already",
"."
] |
train
|
https://github.com/Koed00/django-rq-jobs/blob/b25ffd15c91858406494ae0c29babf00c268db18/django_rq_jobs/models.py#L117-L127
|
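The dotted-path handling above reduces to plain stdlib calls; a tiny stand-alone illustration using a real importable path instead of a task name:

import importlib

task_path = 'os.path.join'.split('.')
module_name = '.'.join(task_path[:-1])   # 'os.path'
task_name = task_path[-1]                # 'join'
func = getattr(importlib.import_module(module_name), task_name)
print(func('a', 'b'))                    # 'a/b' on POSIX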
Koed00/django-rq-jobs
|
django_rq_jobs/management/commands/rqjobs.py
|
fix_module
|
def fix_module(job):
"""
Fix for tasks without a module. Provides backwards compatibility with < 0.1.5
"""
modules = settings.RQ_JOBS_MODULE
if not type(modules) == tuple:
modules = [modules]
for module in modules:
try:
module_match = importlib.import_module(module)
if hasattr(module_match, job.task):
job.task = '{}.{}'.format(module, job.task)
break
except ImportError:
continue
return job
|
python
|
def fix_module(job):
"""
Fix for tasks without a module. Provides backwards compatibility with < 0.1.5
"""
modules = settings.RQ_JOBS_MODULE
if not type(modules) == tuple:
modules = [modules]
for module in modules:
try:
module_match = importlib.import_module(module)
if hasattr(module_match, job.task):
job.task = '{}.{}'.format(module, job.task)
break
except ImportError:
continue
return job
|
[
"def",
"fix_module",
"(",
"job",
")",
":",
"modules",
"=",
"settings",
".",
"RQ_JOBS_MODULE",
"if",
"not",
"type",
"(",
"modules",
")",
"==",
"tuple",
":",
"modules",
"=",
"[",
"modules",
"]",
"for",
"module",
"in",
"modules",
":",
"try",
":",
"module_match",
"=",
"importlib",
".",
"import_module",
"(",
"module",
")",
"if",
"hasattr",
"(",
"module_match",
",",
"job",
".",
"task",
")",
":",
"job",
".",
"task",
"=",
"'{}.{}'",
".",
"format",
"(",
"module",
",",
"job",
".",
"task",
")",
"break",
"except",
"ImportError",
":",
"continue",
"return",
"job"
] |
Fix for tasks without a module. Provides backwards compatibility with < 0.1.5
|
[
"Fix",
"for",
"tasks",
"without",
"a",
"module",
".",
"Provides",
"backwards",
"compatibility",
"with",
"<",
"0",
".",
"1",
".",
"5"
] |
train
|
https://github.com/Koed00/django-rq-jobs/blob/b25ffd15c91858406494ae0c29babf00c268db18/django_rq_jobs/management/commands/rqjobs.py#L59-L74
|
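A before/after illustration of the compatibility shim (values hypothetical): a pre-0.1.5 Job row storing a bare task name gets rewritten to a dotted path as soon as some module in RQ_JOBS_MODULE defines that attribute.

# Before: job.task == 'nightly_cleanup'
# With settings.RQ_JOBS_MODULE = 'myapp.tasks' and myapp.tasks defining
# nightly_cleanup, fix_module(job) leaves:
#     job.task == 'myapp.tasks.nightly_cleanup'
# Modules that fail to import are skipped rather than raising.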
Python-Tools/aioorm
|
aioorm/database.py
|
drop_model_tables
|
async def drop_model_tables(models, **drop_table_kwargs):
"""Drop tables for all given models (in the right order)."""
for m in reversed(sort_models_topologically(models)):
await m.drop_table(**drop_table_kwargs)
|
python
|
async def drop_model_tables(models, **drop_table_kwargs):
"""Drop tables for all given models (in the right order)."""
for m in reversed(sort_models_topologically(models)):
await m.drop_table(**drop_table_kwargs)
|
[
"async",
"def",
"drop_model_tables",
"(",
"models",
",",
"*",
"*",
"drop_table_kwargs",
")",
":",
"for",
"m",
"in",
"reversed",
"(",
"sort_models_topologically",
"(",
"models",
")",
")",
":",
"await",
"m",
".",
"drop_table",
"(",
"*",
"*",
"drop_table_kwargs",
")"
] |
Drop tables for all given models (in the right order).
|
[
"Drop",
"tables",
"for",
"all",
"given",
"models",
"(",
"in",
"the",
"right",
"order",
")",
"."
] |
train
|
https://github.com/Python-Tools/aioorm/blob/f305e253ce748cda91b8bc9ec9c6b56e0e7681f7/aioorm/database.py#L244-L247
|
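A hedged usage sketch inside an event loop; User and Tweet are hypothetical aioorm models, and fail_silently is the usual peewee drop_table keyword, assumed here to be forwarded unchanged through drop_table_kwargs.

import asyncio

async def reset_schema():
    # Children are dropped before parents thanks to the reversed
    # topological sort.
    await drop_model_tables([User, Tweet], fail_silently=True)

asyncio.get_event_loop().run_until_complete(reset_schema())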
Python-Tools/aioorm
|
aioorm/shortcuts.py
|
model_to_dict
|
async def model_to_dict(model, recurse=True, backrefs=False, only=None,
exclude=None, seen=None, extra_attrs=None,
fields_from_query=None, max_depth=None):
"""
Convert a model instance (and any related objects) to a dictionary.
:param bool recurse: Whether foreign-keys should be recursed.
:param bool backrefs: Whether lists of related objects should be recursed.
:param only: A list (or set) of field instances indicating which fields
should be included.
:param exclude: A list (or set) of field instances that should be
excluded from the dictionary.
:param list extra_attrs: Names of model instance attributes or methods
that should be included.
:param SelectQuery fields_from_query: Query that was source of model. Take
fields explicitly selected by the query and serialize them.
:param int max_depth: Maximum depth to recurse, value <= 0 means no max.
"""
max_depth = -1 if max_depth is None else max_depth
if max_depth == 0:
recurse = False
only = _clone_set(only)
extra_attrs = _clone_set(extra_attrs)
if fields_from_query is not None:
for item in fields_from_query._select:
if isinstance(item, Field):
only.add(item)
elif isinstance(item, Node) and item._alias:
extra_attrs.add(item._alias)
data = {}
exclude = _clone_set(exclude)
seen = _clone_set(seen)
exclude |= seen
model_class = type(model)
for field in model._meta.declared_fields:
if field in exclude or (only and (field not in only)):
continue
field_data = model._data.get(field.name)
if isinstance(field, ForeignKeyField) and recurse:
if field_data:
seen.add(field)
rel_obj = getattr(model, field.name)
if iscoroutine(rel_obj):
rel_obj = await rel_obj
field_data = await model_to_dict(
rel_obj,
recurse=recurse,
backrefs=backrefs,
only=only,
exclude=exclude,
seen=seen,
max_depth=max_depth - 1)
else:
field_data = None
data[field.name] = field_data
if extra_attrs:
for attr_name in extra_attrs:
attr = getattr(model, attr_name)
if callable(attr):
data[attr_name] = attr()
else:
data[attr_name] = attr
if backrefs and recurse:
for related_name, foreign_key in model._meta.reverse_rel.items():
descriptor = getattr(model_class, related_name)
if descriptor in exclude or foreign_key in exclude:
continue
if only and (descriptor not in only) and (foreign_key not in only):
continue
accum = []
exclude.add(foreign_key)
related_query = getattr(
model,
related_name + '_prefetch',
getattr(model, related_name))
async for rel_obj in related_query:
accum.append(await model_to_dict(
rel_obj,
recurse=recurse,
backrefs=backrefs,
only=only,
exclude=exclude,
max_depth=max_depth - 1))
data[related_name] = accum
return data
|
python
|
async def model_to_dict(model, recurse=True, backrefs=False, only=None,
exclude=None, seen=None, extra_attrs=None,
fields_from_query=None, max_depth=None):
"""
Convert a model instance (and any related objects) to a dictionary.
:param bool recurse: Whether foreign-keys should be recursed.
:param bool backrefs: Whether lists of related objects should be recursed.
:param only: A list (or set) of field instances indicating which fields
should be included.
:param exclude: A list (or set) of field instances that should be
excluded from the dictionary.
:param list extra_attrs: Names of model instance attributes or methods
that should be included.
:param SelectQuery fields_from_query: Query that was source of model. Take
fields explicitly selected by the query and serialize them.
:param int max_depth: Maximum depth to recurse, value <= 0 means no max.
"""
max_depth = -1 if max_depth is None else max_depth
if max_depth == 0:
recurse = False
only = _clone_set(only)
extra_attrs = _clone_set(extra_attrs)
if fields_from_query is not None:
for item in fields_from_query._select:
if isinstance(item, Field):
only.add(item)
elif isinstance(item, Node) and item._alias:
extra_attrs.add(item._alias)
data = {}
exclude = _clone_set(exclude)
seen = _clone_set(seen)
exclude |= seen
model_class = type(model)
for field in model._meta.declared_fields:
if field in exclude or (only and (field not in only)):
continue
field_data = model._data.get(field.name)
if isinstance(field, ForeignKeyField) and recurse:
if field_data:
seen.add(field)
rel_obj = getattr(model, field.name)
if iscoroutine(rel_obj):
rel_obj = await rel_obj
field_data = await model_to_dict(
rel_obj,
recurse=recurse,
backrefs=backrefs,
only=only,
exclude=exclude,
seen=seen,
max_depth=max_depth - 1)
else:
field_data = None
data[field.name] = field_data
if extra_attrs:
for attr_name in extra_attrs:
attr = getattr(model, attr_name)
if callable(attr):
data[attr_name] = attr()
else:
data[attr_name] = attr
if backrefs and recurse:
for related_name, foreign_key in model._meta.reverse_rel.items():
descriptor = getattr(model_class, related_name)
if descriptor in exclude or foreign_key in exclude:
continue
if only and (descriptor not in only) and (foreign_key not in only):
continue
accum = []
exclude.add(foreign_key)
related_query = getattr(
model,
related_name + '_prefetch',
getattr(model, related_name))
async for rel_obj in related_query:
accum.append(await model_to_dict(
rel_obj,
recurse=recurse,
backrefs=backrefs,
only=only,
exclude=exclude,
max_depth=max_depth - 1))
data[related_name] = accum
return data
|
[
"async",
"def",
"model_to_dict",
"(",
"model",
",",
"recurse",
"=",
"True",
",",
"backrefs",
"=",
"False",
",",
"only",
"=",
"None",
",",
"exclude",
"=",
"None",
",",
"seen",
"=",
"None",
",",
"extra_attrs",
"=",
"None",
",",
"fields_from_query",
"=",
"None",
",",
"max_depth",
"=",
"None",
")",
":",
"max_depth",
"=",
"-",
"1",
"if",
"max_depth",
"is",
"None",
"else",
"max_depth",
"if",
"max_depth",
"==",
"0",
":",
"recurse",
"=",
"False",
"only",
"=",
"_clone_set",
"(",
"only",
")",
"extra_attrs",
"=",
"_clone_set",
"(",
"extra_attrs",
")",
"if",
"fields_from_query",
"is",
"not",
"None",
":",
"for",
"item",
"in",
"fields_from_query",
".",
"_select",
":",
"if",
"isinstance",
"(",
"item",
",",
"Field",
")",
":",
"only",
".",
"add",
"(",
"item",
")",
"elif",
"isinstance",
"(",
"item",
",",
"Node",
")",
"and",
"item",
".",
"_alias",
":",
"extra_attrs",
".",
"add",
"(",
"item",
".",
"_alias",
")",
"data",
"=",
"{",
"}",
"exclude",
"=",
"_clone_set",
"(",
"exclude",
")",
"seen",
"=",
"_clone_set",
"(",
"seen",
")",
"exclude",
"|=",
"seen",
"model_class",
"=",
"type",
"(",
"model",
")",
"for",
"field",
"in",
"model",
".",
"_meta",
".",
"declared_fields",
":",
"if",
"field",
"in",
"exclude",
"or",
"(",
"only",
"and",
"(",
"field",
"not",
"in",
"only",
")",
")",
":",
"continue",
"field_data",
"=",
"model",
".",
"_data",
".",
"get",
"(",
"field",
".",
"name",
")",
"if",
"isinstance",
"(",
"field",
",",
"ForeignKeyField",
")",
"and",
"recurse",
":",
"if",
"field_data",
":",
"seen",
".",
"add",
"(",
"field",
")",
"rel_obj",
"=",
"getattr",
"(",
"model",
",",
"field",
".",
"name",
")",
"if",
"iscoroutine",
"(",
"rel_obj",
")",
":",
"rel_obj",
"=",
"await",
"rel_obj",
"field_data",
"=",
"await",
"model_to_dict",
"(",
"rel_obj",
",",
"recurse",
"=",
"recurse",
",",
"backrefs",
"=",
"backrefs",
",",
"only",
"=",
"only",
",",
"exclude",
"=",
"exclude",
",",
"seen",
"=",
"seen",
",",
"max_depth",
"=",
"max_depth",
"-",
"1",
")",
"else",
":",
"field_data",
"=",
"None",
"data",
"[",
"field",
".",
"name",
"]",
"=",
"field_data",
"if",
"extra_attrs",
":",
"for",
"attr_name",
"in",
"extra_attrs",
":",
"attr",
"=",
"getattr",
"(",
"model",
",",
"attr_name",
")",
"if",
"callable",
"(",
"attr",
")",
":",
"data",
"[",
"attr_name",
"]",
"=",
"attr",
"(",
")",
"else",
":",
"data",
"[",
"attr_name",
"]",
"=",
"attr",
"if",
"backrefs",
"and",
"recurse",
":",
"for",
"related_name",
",",
"foreign_key",
"in",
"model",
".",
"_meta",
".",
"reverse_rel",
".",
"items",
"(",
")",
":",
"descriptor",
"=",
"getattr",
"(",
"model_class",
",",
"related_name",
")",
"if",
"descriptor",
"in",
"exclude",
"or",
"foreign_key",
"in",
"exclude",
":",
"continue",
"if",
"only",
"and",
"(",
"descriptor",
"not",
"in",
"only",
")",
"and",
"(",
"foreign_key",
"not",
"in",
"only",
")",
":",
"continue",
"accum",
"=",
"[",
"]",
"exclude",
".",
"add",
"(",
"foreign_key",
")",
"related_query",
"=",
"getattr",
"(",
"model",
",",
"related_name",
"+",
"'_prefetch'",
",",
"getattr",
"(",
"model",
",",
"related_name",
")",
")",
"async",
"for",
"rel_obj",
"in",
"related_query",
":",
"accum",
".",
"append",
"(",
"await",
"model_to_dict",
"(",
"rel_obj",
",",
"recurse",
"=",
"recurse",
",",
"backrefs",
"=",
"backrefs",
",",
"only",
"=",
"only",
",",
"exclude",
"=",
"exclude",
",",
"max_depth",
"=",
"max_depth",
"-",
"1",
")",
")",
"data",
"[",
"related_name",
"]",
"=",
"accum",
"return",
"data"
] |
Convert a model instance (and any related objects) to a dictionary.
:param bool recurse: Whether foreign-keys should be recursed.
:param bool backrefs: Whether lists of related objects should be recursed.
:param only: A list (or set) of field instances indicating which fields
should be included.
:param exclude: A list (or set) of field instances that should be
excluded from the dictionary.
:param list extra_attrs: Names of model instance attributes or methods
that should be included.
:param SelectQuery fields_from_query: Query that was source of model. Take
fields explicitly selected by the query and serialize them.
:param int max_depth: Maximum depth to recurse, value <= 0 means no max.
|
[
"Convert",
"a",
"model",
"instance",
"(",
"and",
"any",
"related",
"objects",
")",
"to",
"a",
"dictionary",
".",
":",
"param",
"bool",
"recurse",
":",
"Whether",
"foreign",
"-",
"keys",
"should",
"be",
"recursed",
".",
":",
"param",
"bool",
"backrefs",
":",
"Whether",
"lists",
"of",
"related",
"objects",
"should",
"be",
"recursed",
".",
":",
"param",
"only",
":",
"A",
"list",
"(",
"or",
"set",
")",
"of",
"field",
"instances",
"indicating",
"which",
"fields",
"should",
"be",
"included",
".",
":",
"param",
"exclude",
":",
"A",
"list",
"(",
"or",
"set",
")",
"of",
"field",
"instances",
"that",
"should",
"be",
"excluded",
"from",
"the",
"dictionary",
".",
":",
"param",
"list",
"extra_attrs",
":",
"Names",
"of",
"model",
"instance",
"attributes",
"or",
"methods",
"that",
"should",
"be",
"included",
".",
":",
"param",
"SelectQuery",
"fields_from_query",
":",
"Query",
"that",
"was",
"source",
"of",
"model",
".",
"Take",
"fields",
"explicitly",
"selected",
"by",
"the",
"query",
"and",
"serialize",
"them",
".",
":",
"param",
"int",
"max_depth",
":",
"Maximum",
"depth",
"to",
"recurse",
"value",
"<",
"=",
"0",
"means",
"no",
"max",
"."
] |
train
|
https://github.com/Python-Tools/aioorm/blob/f305e253ce748cda91b8bc9ec9c6b56e0e7681f7/aioorm/shortcuts.py#L6-L101
|
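A hedged sketch of the serializer above; Tweet and User are hypothetical related models. With recurse=False foreign keys stay as raw stored ids, max_depth=1 follows them one hop, and backrefs=True additionally inlines reverse relations such as a user's tweets.

async def dump(tweet):
    flat = await model_to_dict(tweet, recurse=False)              # FK ids only
    deep = await model_to_dict(tweet, recurse=True, max_depth=1)  # one FK hop
    full = await model_to_dict(tweet, recurse=True, backrefs=True)
    return flat, deep, full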
Python-Tools/aioorm
|
aioorm/utils/csv_utils/csv_loader.py
|
RowConverter.extract_rows
|
async def extract_rows(self, file_or_name, **reader_kwargs):
"""
Extract `self.sample_size` rows from the CSV file and analyze their
data-types.
:param str file_or_name: A string filename or a file handle.
:param reader_kwargs: Arbitrary parameters to pass to the CSV reader.
:returns: A 2-tuple containing a list of headers and list of rows
read from the CSV file.
"""
rows = []
rows_to_read = self.sample_size
async with self.get_reader(file_or_name, **reader_kwargs) as reader:
if self.has_header:
rows_to_read += 1
for i in range(self.sample_size):
try:
row = await reader.__anext__()
except AttributeError as te:
row = next(reader)
except:
raise
rows.append(row)
if self.has_header:
header, rows = rows[0], rows[1:]
else:
header = ['field_%d' % i for i in range(len(rows[0]))]
return header, rows
|
python
|
async def extract_rows(self, file_or_name, **reader_kwargs):
"""
Extract `self.sample_size` rows from the CSV file and analyze their
data-types.
:param str file_or_name: A string filename or a file handle.
:param reader_kwargs: Arbitrary parameters to pass to the CSV reader.
:returns: A 2-tuple containing a list of headers and list of rows
read from the CSV file.
"""
rows = []
rows_to_read = self.sample_size
async with self.get_reader(file_or_name, **reader_kwargs) as reader:
if self.has_header:
rows_to_read += 1
for i in range(self.sample_size):
try:
row = await reader.__anext__()
except AttributeError as te:
row = next(reader)
except:
raise
rows.append(row)
if self.has_header:
header, rows = rows[0], rows[1:]
else:
header = ['field_%d' % i for i in range(len(rows[0]))]
return header, rows
|
[
"async",
"def",
"extract_rows",
"(",
"self",
",",
"file_or_name",
",",
"*",
"*",
"reader_kwargs",
")",
":",
"rows",
"=",
"[",
"]",
"rows_to_read",
"=",
"self",
".",
"sample_size",
"async",
"with",
"self",
".",
"get_reader",
"(",
"file_or_name",
",",
"*",
"*",
"reader_kwargs",
")",
"as",
"reader",
":",
"if",
"self",
".",
"has_header",
":",
"rows_to_read",
"+=",
"1",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"sample_size",
")",
":",
"try",
":",
"row",
"=",
"await",
"reader",
".",
"__anext__",
"(",
")",
"except",
"AttributeError",
"as",
"te",
":",
"row",
"=",
"next",
"(",
"reader",
")",
"except",
":",
"raise",
"rows",
".",
"append",
"(",
"row",
")",
"if",
"self",
".",
"has_header",
":",
"header",
",",
"rows",
"=",
"rows",
"[",
"0",
"]",
",",
"rows",
"[",
"1",
":",
"]",
"else",
":",
"header",
"=",
"[",
"'field_%d'",
"%",
"i",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"rows",
"[",
"0",
"]",
")",
")",
"]",
"return",
"header",
",",
"rows"
] |
Extract `self.sample_size` rows from the CSV file and analyze their
data-types.
:param str file_or_name: A string filename or a file handle.
:param reader_kwargs: Arbitrary parameters to pass to the CSV reader.
:returns: A 2-tuple containing a list of headers and list of rows
read from the CSV file.
|
[
"Extract",
"self",
".",
"sample_size",
"rows",
"from",
"the",
"CSV",
"file",
"and",
"analyze",
"their",
"data",
"-",
"types",
".",
":",
"param",
"str",
"file_or_name",
":",
"A",
"string",
"filename",
"or",
"a",
"file",
"handle",
".",
":",
"param",
"reader_kwargs",
":",
"Arbitrary",
"parameters",
"to",
"pass",
"to",
"the",
"CSV",
"reader",
".",
":",
"returns",
":",
"A",
"2",
"-",
"tuple",
"containing",
"a",
"list",
"of",
"headers",
"and",
"list",
"of",
"rows",
"read",
"from",
"the",
"CSV",
"file",
"."
] |
train
|
https://github.com/Python-Tools/aioorm/blob/f305e253ce748cda91b8bc9ec9c6b56e0e7681f7/aioorm/utils/csv_utils/csv_loader.py#L123-L150
|
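A hedged usage sketch (the file name is illustrative; a path or an open handle both work). One quirk worth noting in the code above: rows_to_read is incremented for the header but the loop still iterates self.sample_size, so with has_header=True the sample effectively loses one data row.

async def peek(converter):
    header, rows = await converter.extract_rows('data.csv')
    print(header)     # e.g. ['id', 'name'] or ['field_0', 'field_1']
    print(len(rows))  # at most sample_size (minus the header row)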
Python-Tools/aioorm
|
aioorm/utils/csv_utils/csv_loader.py
|
RowConverter.get_checks
|
def get_checks(self):
"""Return a list of functions to use when testing values."""
return [
self.is_date,
self.is_datetime,
self.is_integer,
self.is_float,
self.default]
|
python
|
def get_checks(self):
"""Return a list of functions to use when testing values."""
return [
self.is_date,
self.is_datetime,
self.is_integer,
self.is_float,
self.default]
|
[
"def",
"get_checks",
"(",
"self",
")",
":",
"return",
"[",
"self",
".",
"is_date",
",",
"self",
".",
"is_datetime",
",",
"self",
".",
"is_integer",
",",
"self",
".",
"is_float",
",",
"self",
".",
"default",
"]"
] |
Return a list of functions to use when testing values.
|
[
"Return",
"a",
"list",
"of",
"functions",
"to",
"use",
"when",
"testing",
"values",
"."
] |
train
|
https://github.com/Python-Tools/aioorm/blob/f305e253ce748cda91b8bc9ec9c6b56e0e7681f7/aioorm/utils/csv_utils/csv_loader.py#L152-L159
|
Python-Tools/aioorm
|
aioorm/utils/csv_utils/csv_loader.py
|
RowConverter.analyze
|
def analyze(self, rows):
"""
Analyze the given rows and try to determine the type of value stored.
:param list rows: A list-of-lists containing one or more rows from a
csv file.
:returns: A list of peewee Field objects for each column in the CSV.
"""
transposed = zip(*rows)
checks = self.get_checks()
column_types = []
for i, column in enumerate(transposed):
# Remove any empty values.
col_vals = [val for val in column if val != '']
for check in checks:
results = set(check(val) for val in col_vals)
if all(results):
column_types.append(check.field())
break
return column_types
|
python
|
def analyze(self, rows):
"""
Analyze the given rows and try to determine the type of value stored.
:param list rows: A list-of-lists containing one or more rows from a
csv file.
:returns: A list of peewee Field objects for each column in the CSV.
"""
transposed = zip(*rows)
checks = self.get_checks()
column_types = []
for i, column in enumerate(transposed):
# Remove any empty values.
col_vals = [val for val in column if val != '']
for check in checks:
results = set(check(val) for val in col_vals)
if all(results):
column_types.append(check.field())
break
return column_types
|
[
"def",
"analyze",
"(",
"self",
",",
"rows",
")",
":",
"transposed",
"=",
"zip",
"(",
"*",
"rows",
")",
"checks",
"=",
"self",
".",
"get_checks",
"(",
")",
"column_types",
"=",
"[",
"]",
"for",
"i",
",",
"column",
"in",
"enumerate",
"(",
"transposed",
")",
":",
"# Remove any empty values.",
"col_vals",
"=",
"[",
"val",
"for",
"val",
"in",
"column",
"if",
"val",
"!=",
"''",
"]",
"for",
"check",
"in",
"checks",
":",
"results",
"=",
"set",
"(",
"check",
"(",
"val",
")",
"for",
"val",
"in",
"col_vals",
")",
"if",
"all",
"(",
"results",
")",
":",
"column_types",
".",
"append",
"(",
"check",
".",
"field",
"(",
")",
")",
"break",
"return",
"column_types"
] |
Analyze the given rows and try to determine the type of value stored.
:param list rows: A list-of-lists containing one or more rows from a
csv file.
:returns: A list of peewee Field objects for each column in the CSV.
|
[
"Analyze",
"the",
"given",
"rows",
"and",
"try",
"to",
"determine",
"the",
"type",
"of",
"value",
"stored",
".",
":",
"param",
"list",
"rows",
":",
"A",
"list",
"-",
"of",
"-",
"lists",
"containing",
"one",
"or",
"more",
"rows",
"from",
"a",
"csv",
"file",
".",
":",
"returns",
":",
"A",
"list",
"of",
"peewee",
"Field",
"objects",
"for",
"each",
"column",
"in",
"the",
"CSV",
"."
] |
train
|
https://github.com/Python-Tools/aioorm/blob/f305e253ce748cda91b8bc9ec9c6b56e0e7681f7/aioorm/utils/csv_utils/csv_loader.py#L161-L180
|
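A small illustration of the sniffing order established by get_checks() (date, datetime, integer, float, then the default). Each check must accept every non-empty value in a column before its Field is chosen; the exact Field classes returned depend on the check implementations.

rows = [['2019-01-01', '42', '3.14', 'spam'],
        ['2019-01-02', '7',  '2.72', 'eggs']]
column_types = converter.analyze(rows)  # converter: a RowConverter instance
# Expected: a date field, an integer field, a float field and the
# default (text) field, one per column.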
BetterWorks/django-bleachfields
|
bleachfields/bleachfield.py
|
BleachField.clean_text
|
def clean_text(self, text):
'''Clean text using bleach.'''
if text is None:
return ''
text = re.sub(ILLEGAL_CHARACTERS_RE, '', text)
if '<' in text or '&lt;' in text:
text = clean(text, tags=self.tags, strip=self.strip)
return unescape(text)
|
python
|
def clean_text(self, text):
'''Clean text using bleach.'''
if text is None:
return ''
text = re.sub(ILLEGAL_CHARACTERS_RE, '', text)
if '<' in text or '&lt;' in text:
text = clean(text, tags=self.tags, strip=self.strip)
return unescape(text)
|
[
"def",
"clean_text",
"(",
"self",
",",
"text",
")",
":",
"if",
"text",
"is",
"None",
":",
"return",
"''",
"text",
"=",
"re",
".",
"sub",
"(",
"ILLEGAL_CHARACTERS_RE",
",",
"''",
",",
"text",
")",
"if",
"'<'",
"in",
"text",
"or",
"'<'",
"in",
"text",
":",
"text",
"=",
"clean",
"(",
"text",
",",
"tags",
"=",
"self",
".",
"tags",
",",
"strip",
"=",
"self",
".",
"strip",
")",
"return",
"unescape",
"(",
"text",
")"
] |
Clean text using bleach.
|
[
"Clean",
"text",
"using",
"bleach",
"."
] |
train
|
https://github.com/BetterWorks/django-bleachfields/blob/6b49aad6daa8c1357af31a2f7941352561d04cd6/bleachfields/bleachfield.py#L26-L34
|
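A hedged usage sketch of the same sanitisation flow. bleach.clean and html.unescape are real APIs, but the ILLEGAL_CHARACTERS_RE pattern and the tag whitelist below are illustrative placeholders, not the module's own definitions:

import re
from html import unescape
from bleach import clean

# Illustrative control-character pattern; the real module defines its own.
ILLEGAL_CHARACTERS_RE = re.compile(r'[\x00-\x08\x0b\x0c\x0e-\x1f]')

def demo_clean_text(text, tags=frozenset({'b', 'i'}), strip=True):
    if text is None:
        return ''
    text = ILLEGAL_CHARACTERS_RE.sub('', text)
    if '<' in text or '&lt;' in text:          # raw or entity-escaped markup
        text = clean(text, tags=tags, strip=strip)
    return unescape(text)

# Disallowed tags are stripped (strip=True), whitelisted <b> is kept:
print(demo_clean_text('<script>x</script><b>ok</b>'))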
nwilming/ocupy
|
ocupy/loader.py
|
LoadFromDisk.path
|
def path(self, category = None, image = None, feature = None):
"""
Constructs the path to categories, images and features.
This path function assumes that the following storage scheme is used on
the hard disk to access categories, images and features:
- categories: /impath/category
- images: /impath/category/category_image.png
- features: /ftrpath/category/feature/category_image.mat
The path function is called to query the location of categories, images
and features before they are loaded. Thus, if your features are organized
in a different way, you can simply replace this method such that it returns
appropriate paths and the LoadFromDisk loader will use your naming
scheme.
"""
filename = None
if not category is None:
filename = join(self.impath, str(category))
if not image is None:
assert not category is None, "The category has to be given if the image is given"
filename = join(filename,
'%s_%s.png' % (str(category), str(image)))
if not feature is None:
assert category != None and image != None, "If a feature name is given the category and image also have to be given."
filename = join(self.ftrpath, str(category), feature,
'%s_%s.mat' % (str(category), str(image)))
return filename
|
python
|
def path(self, category = None, image = None, feature = None):
"""
Constructs the path to categories, images and features.
This path function assumes that the following storage scheme is used on
the hard disk to access categories, images and features:
- categories: /impath/category
- images: /impath/category/category_image.png
- features: /ftrpath/category/feature/category_image.mat
The path function is called to query the location of categories, images
and features before they are loaded. Thus, if your features are organized
in a different way, you can simply replace this method such that it returns
appropriate paths and the LoadFromDisk loader will use your naming
scheme.
"""
filename = None
if not category is None:
filename = join(self.impath, str(category))
if not image is None:
assert not category is None, "The category has to be given if the image is given"
filename = join(filename,
'%s_%s.png' % (str(category), str(image)))
if not feature is None:
assert category != None and image != None, "If a feature name is given the category and image also have to be given."
filename = join(self.ftrpath, str(category), feature,
'%s_%s.mat' % (str(category), str(image)))
return filename
|
[
"def",
"path",
"(",
"self",
",",
"category",
"=",
"None",
",",
"image",
"=",
"None",
",",
"feature",
"=",
"None",
")",
":",
"filename",
"=",
"None",
"if",
"not",
"category",
"is",
"None",
":",
"filename",
"=",
"join",
"(",
"self",
".",
"impath",
",",
"str",
"(",
"category",
")",
")",
"if",
"not",
"image",
"is",
"None",
":",
"assert",
"not",
"category",
"is",
"None",
",",
"\"The category has to be given if the image is given\"",
"filename",
"=",
"join",
"(",
"filename",
",",
"'%s_%s.png'",
"%",
"(",
"str",
"(",
"category",
")",
",",
"str",
"(",
"image",
")",
")",
")",
"if",
"not",
"feature",
"is",
"None",
":",
"assert",
"category",
"!=",
"None",
"and",
"image",
"!=",
"None",
",",
"\"If a feature name is given the category and image also have to be given.\"",
"filename",
"=",
"join",
"(",
"self",
".",
"ftrpath",
",",
"str",
"(",
"category",
")",
",",
"feature",
",",
"'%s_%s.mat'",
"%",
"(",
"str",
"(",
"category",
")",
",",
"str",
"(",
"image",
")",
")",
")",
"return",
"filename"
] |
Constructs the path to categories, images and features.
This path function assumes that the following storage scheme is used on
the hard disk to access categories, images and features:
- categories: /impath/category
- images: /impath/category/category_image.png
- features: /ftrpath/category/feature/category_image.mat
The path function is called to query the location of categories, images
and features before they are loaded. Thus, if your features are organized
in a different way, you can simply replace this method such that it returns
appropriate paths and the LoadFromDisk loader will use your naming
scheme.
|
[
"Constructs",
"the",
"path",
"to",
"categories",
"images",
"and",
"features",
"."
] |
train
|
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/loader.py#L168-L195
|
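A standalone restatement of the storage scheme above, with hypothetical root directories (impath/ftrpath values are made up for illustration):

from os.path import join

impath, ftrpath = '/data/images', '/data/features'   # hypothetical roots

def demo_path(category=None, image=None, feature=None):
    # Mirrors LoadFromDisk.path's scheme without the class.
    filename = None
    if category is not None:
        filename = join(impath, str(category))
    if image is not None:
        filename = join(filename, '%s_%s.png' % (category, image))
    if feature is not None:
        filename = join(ftrpath, str(category), feature,
                        '%s_%s.mat' % (category, image))
    return filename

print(demo_path('faces'))                  # /data/images/faces
print(demo_path('faces', 12))              # /data/images/faces/faces_12.png
print(demo_path('faces', 12, 'saliency'))  # /data/features/faces/saliency/faces_12.mat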
nwilming/ocupy
|
ocupy/loader.py
|
LoadFromDisk.get_image
|
def get_image(self, cat, img):
""" Loads an image from disk. """
filename = self.path(cat, img)
data = []
if filename.endswith('mat'):
data = loadmat(filename)['output']
else:
data = imread(filename)
if self.size is not None:
return imresize(data, self.size)
else:
return data
|
python
|
def get_image(self, cat, img):
""" Loads an image from disk. """
filename = self.path(cat, img)
data = []
if filename.endswith('mat'):
data = loadmat(filename)['output']
else:
data = imread(filename)
if self.size is not None:
return imresize(data, self.size)
else:
return data
|
[
"def",
"get_image",
"(",
"self",
",",
"cat",
",",
"img",
")",
":",
"filename",
"=",
"self",
".",
"path",
"(",
"cat",
",",
"img",
")",
"data",
"=",
"[",
"]",
"if",
"filename",
".",
"endswith",
"(",
"'mat'",
")",
":",
"data",
"=",
"loadmat",
"(",
"filename",
")",
"[",
"'output'",
"]",
"else",
":",
"data",
"=",
"imread",
"(",
"filename",
")",
"if",
"self",
".",
"size",
"is",
"not",
"None",
":",
"return",
"imresize",
"(",
"data",
",",
"self",
".",
"size",
")",
"else",
":",
"return",
"data"
] |
Loads an image from disk.
|
[
"Loads",
"an",
"image",
"from",
"disk",
"."
] |
train
|
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/loader.py#L197-L208
|
nwilming/ocupy
|
ocupy/loader.py
|
LoadFromDisk.get_feature
|
def get_feature(self, cat, img, feature):
"""
Load a feature from disk.
"""
filename = self.path(cat, img, feature)
data = loadmat(filename)
name = [k for k in list(data.keys()) if not k.startswith('__')]
if self.size is not None:
return imresize(data[name.pop()], self.size)
return data[name.pop()]
|
python
|
def get_feature(self, cat, img, feature):
"""
Load a feature from disk.
"""
filename = self.path(cat, img, feature)
data = loadmat(filename)
name = [k for k in list(data.keys()) if not k.startswith('__')]
if self.size is not None:
return imresize(data[name.pop()], self.size)
return data[name.pop()]
|
[
"def",
"get_feature",
"(",
"self",
",",
"cat",
",",
"img",
",",
"feature",
")",
":",
"filename",
"=",
"self",
".",
"path",
"(",
"cat",
",",
"img",
",",
"feature",
")",
"data",
"=",
"loadmat",
"(",
"filename",
")",
"name",
"=",
"[",
"k",
"for",
"k",
"in",
"list",
"(",
"data",
".",
"keys",
"(",
")",
")",
"if",
"not",
"k",
".",
"startswith",
"(",
"'__'",
")",
"]",
"if",
"self",
".",
"size",
"is",
"not",
"None",
":",
"return",
"imresize",
"(",
"data",
"[",
"name",
".",
"pop",
"(",
")",
"]",
",",
"self",
".",
"size",
")",
"return",
"data",
"[",
"name",
".",
"pop",
"(",
")",
"]"
] |
Load a feature from disk.
|
[
"Load",
"a",
"feature",
"from",
"disk",
"."
] |
train
|
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/loader.py#L210-L219
|
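A sketch of the '__'-prefixed key filtering that get_feature relies on: scipy's loadmat returns housekeeping entries (__header__, __version__, __globals__) alongside the stored variable, so the real variable name has to be recovered. The dict below stands in for loadmat(filename):

data = {'__header__': b'...', '__version__': '1.0',
        '__globals__': [], 'saliency': [[0.1, 0.9]]}   # stand-in for loadmat output
name = [k for k in data.keys() if not k.startswith('__')]
print(data[name.pop()])   # [[0.1, 0.9]]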
nwilming/ocupy
|
ocupy/loader.py
|
SaveToDisk.save_image
|
def save_image(self, cat, img, data):
"""Saves a new image."""
filename = self.path(cat, img)
mkdir(filename)
if type(data) == np.ndarray:
data = Image.fromarray(data).convert('RGB')
data.save(filename)
|
python
|
def save_image(self, cat, img, data):
"""Saves a new image."""
filename = self.path(cat, img)
mkdir(filename)
if type(data) == np.ndarray:
data = Image.fromarray(data).convert('RGB')
data.save(filename)
|
[
"def",
"save_image",
"(",
"self",
",",
"cat",
",",
"img",
",",
"data",
")",
":",
"filename",
"=",
"self",
".",
"path",
"(",
"cat",
",",
"img",
")",
"mkdir",
"(",
"filename",
")",
"if",
"type",
"(",
"data",
")",
"==",
"np",
".",
"ndarray",
":",
"data",
"=",
"Image",
".",
"fromarray",
"(",
"data",
")",
".",
"convert",
"(",
"'RGB'",
")",
"data",
".",
"save",
"(",
"filename",
")"
] |
Saves a new image.
|
[
"Saves",
"a",
"new",
"image",
"."
] |
train
|
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/loader.py#L243-L249
|
nwilming/ocupy
|
ocupy/loader.py
|
SaveToDisk.save_feature
|
def save_feature(self, cat, img, feature, data):
"""Saves a new feature."""
filename = self.path(cat, img, feature)
mkdir(filename)
savemat(filename, {'output':data})
|
python
|
def save_feature(self, cat, img, feature, data):
"""Saves a new feature."""
filename = self.path(cat, img, feature)
mkdir(filename)
savemat(filename, {'output':data})
|
[
"def",
"save_feature",
"(",
"self",
",",
"cat",
",",
"img",
",",
"feature",
",",
"data",
")",
":",
"filename",
"=",
"self",
".",
"path",
"(",
"cat",
",",
"img",
",",
"feature",
")",
"mkdir",
"(",
"filename",
")",
"savemat",
"(",
"filename",
",",
"{",
"'output'",
":",
"data",
"}",
")"
] |
Saves a new feature.
|
[
"Saves",
"a",
"new",
"feature",
"."
] |
train
|
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/loader.py#L251-L255
|
nwilming/ocupy
|
ocupy/datamat_tools.py
|
factorise_field
|
def factorise_field(dm, field_name, boundary_char = None, parameter_name=None):
"""This removes a common beginning from the data of the fields, placing
the common element in a parameter and the different endings in the fields.
if parameter_name is None, then it will be <field_name>_common.
So far, it's probably only useful for the file_name.
TODO: remove field entirely if no unique elements exist.
"""
old_data = dm.field(field_name)
if isinstance(old_data[0], str):
(new_data, common) = factorise_strings(old_data, boundary_char)
new_data = array(new_data)
else:
raise NotImplementedError('factorising of fields not implemented for anything but string/unicode objects')
if len(common) > 0:
dm.__dict__[field_name] = new_data
if parameter_name is None:
parameter_name = field_name + '_common'
dm.add_parameter(parameter_name, common)
|
python
|
def factorise_field(dm, field_name, boundary_char = None, parameter_name=None):
"""This removes a common beginning from the data of the fields, placing
the common element in a parameter and the different endings in the fields.
if parameter_name is None, then it will be <field_name>_common.
So far, it's probably only useful for the file_name.
TODO: remove field entirely if no unique elements exist.
"""
old_data = dm.field(field_name)
if isinstance(old_data[0], str):
(new_data, common) = factorise_strings(old_data, boundary_char)
new_data = array(new_data)
else:
raise NotImplementedError('factorising of fields not implemented for anything but string/unicode objects')
if len(common) > 0:
dm.__dict__[field_name] = new_data
if parameter_name is None:
parameter_name = field_name + '_common'
dm.add_parameter(parameter_name, common)
|
[
"def",
"factorise_field",
"(",
"dm",
",",
"field_name",
",",
"boundary_char",
"=",
"None",
",",
"parameter_name",
"=",
"None",
")",
":",
"old_data",
"=",
"dm",
".",
"field",
"(",
"field_name",
")",
"if",
"isinstance",
"(",
"old_data",
"[",
"0",
"]",
",",
"str",
")",
"or",
"isinstance",
"(",
"old_data",
"[",
"0",
"]",
",",
"str",
")",
":",
"(",
"new_data",
",",
"common",
")",
"=",
"factorise_strings",
"(",
"old_data",
",",
"boundary_char",
")",
"new_data",
"=",
"array",
"(",
"new_data",
")",
"else",
":",
"raise",
"NotImplementedError",
"(",
"'factorising of fields not implemented for anything but string/unicode objects'",
")",
"if",
"len",
"(",
"common",
")",
">",
"0",
":",
"dm",
".",
"__dict__",
"[",
"field_name",
"]",
"=",
"new_data",
"if",
"parameter_name",
"is",
"None",
":",
"parameter_name",
"=",
"field_name",
"+",
"'_common'",
"dm",
".",
"add_parameter",
"(",
"parameter_name",
",",
"common",
")"
] |
This removes a common beginning from the data of the fields, placing
the common element in a parameter and the different endings in the fields.
if parameter_name is None, then it will be <field_name>_common.
So far, it's probably only useful for the file_name.
TODO: remove field entirely if no unique elements exist.
|
[
"This",
"removes",
"a",
"common",
"beginning",
"from",
"the",
"data",
"of",
"the",
"fields",
"placing",
"the",
"common",
"element",
"in",
"a",
"parameter",
"and",
"the",
"different",
"endings",
"in",
"the",
"fields",
".",
"if",
"parameter_name",
"is",
"None",
"then",
"it",
"will",
"be",
"<field_name",
">",
"_common",
".",
"So",
"far",
"it",
"s",
"probably",
"only",
"useful",
"for",
"the",
"file_name",
".",
"TODO",
":",
"remove",
"field",
"entirely",
"if",
"no",
"unique",
"elements",
"exist",
"."
] |
train
|
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/datamat_tools.py#L11-L34
|
nwilming/ocupy
|
ocupy/utils.py
|
randsample
|
def randsample(vec, nr_samples, with_replacement = False):
"""
Draws nr_samples random samples from vec.
"""
if not with_replacement:
return np.random.permutation(vec)[0:nr_samples]
else:
return np.asarray(vec)[np.random.randint(0, len(vec), nr_samples)]
|
python
|
def randsample(vec, nr_samples, with_replacement = False):
"""
Draws nr_samples random samples from vec.
"""
if not with_replacement:
return np.random.permutation(vec)[0:nr_samples]
else:
return np.asarray(vec)[np.random.randint(0, len(vec), nr_samples)]
|
[
"def",
"randsample",
"(",
"vec",
",",
"nr_samples",
",",
"with_replacement",
"=",
"False",
")",
":",
"if",
"not",
"with_replacement",
":",
"return",
"np",
".",
"random",
".",
"permutation",
"(",
"vec",
")",
"[",
"0",
":",
"nr_samples",
"]",
"else",
":",
"return",
"np",
".",
"asarray",
"(",
"vec",
")",
"[",
"np",
".",
"random",
".",
"randint",
"(",
"0",
",",
"len",
"(",
"vec",
")",
",",
"nr_samples",
")",
"]"
] |
Draws nr_samples random samples from vec.
|
[
"Draws",
"nr_samples",
"random",
"samples",
"from",
"vec",
"."
] |
train
|
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/utils.py#L63-L70
|
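The two branches of randsample correspond directly to these NumPy idioms (a small illustrative run):

import numpy as np

vec = np.arange(10)
# Without replacement: shuffle, then take the first n (all distinct).
print(np.random.permutation(vec)[0:3])
# With replacement: n independent index draws (repeats possible).
print(np.asarray(vec)[np.random.randint(0, len(vec), 3)])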
nwilming/ocupy
|
ocupy/utils.py
|
calc_resize_factor
|
def calc_resize_factor(prediction, image_size):
"""
Calculates how much prediction.shape and image_size differ.
"""
resize_factor_x = prediction.shape[1] / float(image_size[1])
resize_factor_y = prediction.shape[0] / float(image_size[0])
if abs(resize_factor_x - resize_factor_y) > 1.0/image_size[1] :
raise RuntimeError("""The aspect ratio of the fixations does not
match with the prediction: %f vs. %f"""
%(resize_factor_y, resize_factor_x))
return (resize_factor_y, resize_factor_x)
|
python
|
def calc_resize_factor(prediction, image_size):
"""
Calculates how much prediction.shape and image_size differ.
"""
resize_factor_x = prediction.shape[1] / float(image_size[1])
resize_factor_y = prediction.shape[0] / float(image_size[0])
if abs(resize_factor_x - resize_factor_y) > 1.0/image_size[1] :
raise RuntimeError("""The aspect ratio of the fixations does not
match with the prediction: %f vs. %f"""
%(resize_factor_y, resize_factor_x))
return (resize_factor_y, resize_factor_x)
|
[
"def",
"calc_resize_factor",
"(",
"prediction",
",",
"image_size",
")",
":",
"resize_factor_x",
"=",
"prediction",
".",
"shape",
"[",
"1",
"]",
"/",
"float",
"(",
"image_size",
"[",
"1",
"]",
")",
"resize_factor_y",
"=",
"prediction",
".",
"shape",
"[",
"0",
"]",
"/",
"float",
"(",
"image_size",
"[",
"0",
"]",
")",
"if",
"abs",
"(",
"resize_factor_x",
"-",
"resize_factor_y",
")",
">",
"1.0",
"/",
"image_size",
"[",
"1",
"]",
":",
"raise",
"RuntimeError",
"(",
"\"\"\"The aspect ratio of the fixations does not\n match with the prediction: %f vs. %f\"\"\"",
"%",
"(",
"resize_factor_y",
",",
"resize_factor_x",
")",
")",
"return",
"(",
"resize_factor_y",
",",
"resize_factor_x",
")"
] |
Calculates how much prediction.shape and image_size differ.
|
[
"Calculates",
"how",
"much",
"prediction",
".",
"shape",
"and",
"image_size",
"differ",
"."
] |
train
|
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/utils.py#L78-L88
|
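A worked example of the tolerance check, with hypothetical sizes:

import numpy as np

prediction = np.zeros((384, 512))    # (rows, cols) of the model output
image_size = (768, 1024)             # (rows, cols) of the stimulus
rf_x = prediction.shape[1] / float(image_size[1])   # 0.5
rf_y = prediction.shape[0] / float(image_size[0])   # 0.5
# |0.5 - 0.5| <= 1/1024, so calc_resize_factor returns (0.5, 0.5);
# a (384, 500) prediction would instead raise the aspect-ratio RuntimeError.
print(rf_y, rf_x)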
nwilming/ocupy
|
ocupy/utils.py
|
dict_2_mat
|
def dict_2_mat(data, fill = True):
"""
Creates a NumPy array from a dictionary with only integers as keys and
NumPy arrays as values. Dimension 0 of the resulting array is formed from
data.keys(). Missing values in keys can be filled up with np.nan (default)
or ignored.
Parameters
----------
data : dict
a dictionary with integers as keys and array-likes of the same shape
as values
fill : boolean
flag specifying if the resulting matrix will keep a correspondence
between dictionary keys and matrix indices by filling up missing keys
with matrices of NaNs. Defaults to True
Returns
-------
numpy array with one more dimension than the values of the input dict
"""
if any([type(k) != int for k in list(data.keys())]):
raise RuntimeError("Dictionary cannot be converted to matrix, " +
"not all keys are ints")
base_shape = np.array(list(data.values())[0]).shape
result_shape = list(base_shape)
if fill:
result_shape.insert(0, max(data.keys()) + 1)
else:
result_shape.insert(0, len(list(data.keys())))
result = np.empty(result_shape) + np.nan
for (i, (k, v)) in enumerate(data.items()):
v = np.array(v)
if v.shape != base_shape:
raise RuntimeError("Dictionary cannot be converted to matrix, " +
"not all values have same dimensions")
result[fill and [k][0] or [i][0]] = v
return result
|
python
|
def dict_2_mat(data, fill = True):
"""
Creates a NumPy array from a dictionary with only integers as keys and
NumPy arrays as values. Dimension 0 of the resulting array is formed from
data.keys(). Missing values in keys can be filled up with np.nan (default)
or ignored.
Parameters
----------
data : dict
a dictionary with integers as keys and array-likes of the same shape
as values
fill : boolean
flag specifying if the resulting matrix will keep a correspondence
between dictionary keys and matrix indices by filling up missing keys
with matrices of NaNs. Defaults to True
Returns
-------
numpy array with one more dimension than the values of the input dict
"""
if any([type(k) != int for k in list(data.keys())]):
raise RuntimeError("Dictionary cannot be converted to matrix, " +
"not all keys are ints")
base_shape = np.array(list(data.values())[0]).shape
result_shape = list(base_shape)
if fill:
result_shape.insert(0, max(data.keys()) + 1)
else:
result_shape.insert(0, len(list(data.keys())))
result = np.empty(result_shape) + np.nan
for (i, (k, v)) in enumerate(data.items()):
v = np.array(v)
if v.shape != base_shape:
raise RuntimeError("Dictionary cannot be converted to matrix, " +
"not all values have same dimensions")
result[fill and [k][0] or [i][0]] = v
return result
|
[
"def",
"dict_2_mat",
"(",
"data",
",",
"fill",
"=",
"True",
")",
":",
"if",
"any",
"(",
"[",
"type",
"(",
"k",
")",
"!=",
"int",
"for",
"k",
"in",
"list",
"(",
"data",
".",
"keys",
"(",
")",
")",
"]",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Dictionary cannot be converted to matrix, \"",
"+",
"\"not all keys are ints\"",
")",
"base_shape",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"data",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
")",
".",
"shape",
"result_shape",
"=",
"list",
"(",
"base_shape",
")",
"if",
"fill",
":",
"result_shape",
".",
"insert",
"(",
"0",
",",
"max",
"(",
"data",
".",
"keys",
"(",
")",
")",
"+",
"1",
")",
"else",
":",
"result_shape",
".",
"insert",
"(",
"0",
",",
"len",
"(",
"list",
"(",
"data",
".",
"keys",
"(",
")",
")",
")",
")",
"result",
"=",
"np",
".",
"empty",
"(",
"result_shape",
")",
"+",
"np",
".",
"nan",
"for",
"(",
"i",
",",
"(",
"k",
",",
"v",
")",
")",
"in",
"enumerate",
"(",
"data",
".",
"items",
"(",
")",
")",
":",
"v",
"=",
"np",
".",
"array",
"(",
"v",
")",
"if",
"v",
".",
"shape",
"!=",
"base_shape",
":",
"raise",
"RuntimeError",
"(",
"\"Dictionary cannot be converted to matrix, \"",
"+",
"\"not all values have same dimensions\"",
")",
"result",
"[",
"fill",
"and",
"[",
"k",
"]",
"[",
"0",
"]",
"or",
"[",
"i",
"]",
"[",
"0",
"]",
"]",
"=",
"v",
"return",
"result"
] |
Creates a NumPy array from a dictionary with only integers as keys and
NumPy arrays as values. Dimension 0 of the resulting array is formed from
data.keys(). Missing values in keys can be filled up with np.nan (default)
or ignored.
Parameters
----------
data : dict
a dictionary with integers as keys and array-likes of the same shape
as values
fill : boolean
flag specifying if the resulting matrix will keep a correspondence
between dictionary keys and matrix indices by filling up missing keys
with matrices of NaNs. Defaults to True
Returns
-------
numpy array with one more dimension than the values of the input dict
|
[
"Creates",
"a",
"NumPy",
"array",
"from",
"a",
"dictionary",
"with",
"only",
"integers",
"as",
"keys",
"and",
"NumPy",
"arrays",
"as",
"values",
".",
"Dimension",
"0",
"of",
"the",
"resulting",
"array",
"is",
"formed",
"from",
"data",
".",
"keys",
"()",
".",
"Missing",
"values",
"in",
"keys",
"can",
"be",
"filled",
"up",
"with",
"np",
".",
"nan",
"(",
"default",
")",
"or",
"ignored",
"."
] |
train
|
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/utils.py#L90-L128
|
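A small worked example, assuming dict_2_mat above is in scope. Note that `fill and [k][0] or [i][0]` is the old `and/or` ternary idiom; wrapping k and i in single-element lists keeps it correct when k or i is 0 (a bare `fill and k or i` would misfire for k == 0):

import numpy as np

data = {0: [1.0, 2.0], 2: [3.0, 4.0]}        # key 1 is missing
print(dict_2_mat(data))          # [[ 1.  2.] [nan nan] [ 3.  4.]] -- gap filled with NaN
print(dict_2_mat(data, False))   # [[1. 2.] [3. 4.]]               -- rows packed in order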
nwilming/ocupy
|
ocupy/utils.py
|
dict_fun
|
def dict_fun(data, function):
"""
Apply a function to all values in a dictionary, return a dictionary with
results.
Parameters
----------
data : dict
a dictionary whose values are adequate input to the second argument
of this function.
function : function
a function that takes one argument
Returns
-------
a dictionary with the same keys as data, such that
result[key] = function(data[key])
"""
return dict((k, function(v)) for k, v in list(data.items()))
|
python
|
def dict_fun(data, function):
"""
Apply a function to all values in a dictionary, return a dictionary with
results.
Parameters
----------
data : dict
a dictionary whose values are adequate input to the second argument
of this function.
function : function
a function that takes one argument
Returns
-------
a dictionary with the same keys as data, such that
result[key] = function(data[key])
"""
return dict((k, function(v)) for k, v in list(data.items()))
|
[
"def",
"dict_fun",
"(",
"data",
",",
"function",
")",
":",
"return",
"dict",
"(",
"(",
"k",
",",
"function",
"(",
"v",
")",
")",
"for",
"k",
",",
"v",
"in",
"list",
"(",
"data",
".",
"items",
"(",
")",
")",
")"
] |
Apply a function to all values in a dictionary, return a dictionary with
results.
Parameters
----------
data : dict
a dictionary whose values are adequate input to the second argument
of this function.
function : function
a function that takes one argument
Returns
-------
a dictionary with the same keys as data, such that
result[key] = function(data[key])
|
[
"Apply",
"a",
"function",
"to",
"all",
"values",
"in",
"a",
"dictionary",
"return",
"a",
"dictionary",
"with",
"results",
"."
] |
train
|
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/utils.py#L130-L148
|
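Typical use, assuming the dict_fun above is in scope:

import numpy as np

print(dict_fun({'a': [1, 2, 3], 'b': [4]}, len))    # {'a': 3, 'b': 1}
print(dict_fun({'x': np.arange(4)}, np.mean))       # {'x': 1.5}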
nwilming/ocupy
|
ocupy/utils.py
|
snip_string_middle
|
def snip_string_middle(string, max_len=20, snip_string='...'):
"""
>>> snip_string_middle('this is long', 8)
'th...ong'
>>> snip_string_middle('this is long', 12)
'this is long'
>>> snip_string_middle('this is long', 8, '~')
'thi~long'
"""
#warn('use snip_string() instead', DeprecationWarning)
if len(string) <= max_len:
new_string = string
else:
visible_len = (max_len - len(snip_string))
start_len = visible_len//2
end_len = visible_len-start_len
new_string = string[0:start_len]+ snip_string + string[-end_len:]
return new_string
|
python
|
def snip_string_middle(string, max_len=20, snip_string='...'):
"""
>>> snip_string_middle('this is long', 8)
'th...ong'
>>> snip_string_middle('this is long', 12)
'this is long'
>>> snip_string_middle('this is long', 8, '~')
'thi~long'
"""
#warn('use snip_string() instead', DeprecationWarning)
if len(string) <= max_len:
new_string = string
else:
visible_len = (max_len - len(snip_string))
start_len = visible_len//2
end_len = visible_len-start_len
new_string = string[0:start_len]+ snip_string + string[-end_len:]
return new_string
|
[
"def",
"snip_string_middle",
"(",
"string",
",",
"max_len",
"=",
"20",
",",
"snip_string",
"=",
"'...'",
")",
":",
"#warn('use snip_string() instead', DeprecationWarning)",
"if",
"len",
"(",
"string",
")",
"<=",
"max_len",
":",
"new_string",
"=",
"string",
"else",
":",
"visible_len",
"=",
"(",
"max_len",
"-",
"len",
"(",
"snip_string",
")",
")",
"start_len",
"=",
"visible_len",
"//",
"2",
"end_len",
"=",
"visible_len",
"-",
"start_len",
"new_string",
"=",
"string",
"[",
"0",
":",
"start_len",
"]",
"+",
"snip_string",
"+",
"string",
"[",
"-",
"end_len",
":",
"]",
"return",
"new_string"
] |
>>> snip_string_middle('this is long', 8)
'th...ong'
>>> snip_string_middle('this is long', 12)
'this is long'
>>> snip_string_middle('this is long', 8, '~')
'thi~long'
|
[
">>>",
"snip_string_middle",
"(",
"this",
"is",
"long",
"8",
")",
"th",
"...",
"ong",
">>>",
"snip_string_middle",
"(",
"this",
"is",
"long",
"12",
")",
"this",
"is",
"long",
">>>",
"snip_string_middle",
"(",
"this",
"is",
"long",
"8",
"~",
")",
"thi~long"
] |
train
|
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/utils.py#L150-L171
|
nwilming/ocupy
|
ocupy/utils.py
|
snip_string
|
def snip_string(string, max_len=20, snip_string='...', snip_point=0.5):
"""
Snips a string so that it is no longer than max_len, replacing deleted
characters with the snip_string.
The snip is done at snip_point, which is a fraction between 0 and 1,
indicating relatively where along the string to snip. snip_point of
0.5 would be the middle.
>>> snip_string('this is long', 8)
'this ...'
>>> snip_string('this is long', 8, snip_point=0.5)
'th...ong'
>>> snip_string('this is long', 12)
'this is long'
>>> snip_string('this is long', 8, '~')
'this is~'
>>> snip_string('this is long', 8, '~', 0.5)
'thi~long'
"""
if len(string) <= max_len:
new_string = string
else:
visible_len = (max_len - len(snip_string))
start_len = int(visible_len*snip_point)
end_len = visible_len-start_len
new_string = string[0:start_len]+ snip_string
if end_len > 0:
new_string += string[-end_len:]
return new_string
|
python
|
def snip_string(string, max_len=20, snip_string='...', snip_point=0.5):
"""
Snips a string so that it is no longer than max_len, replacing deleted
characters with the snip_string.
The snip is done at snip_point, which is a fraction between 0 and 1,
indicating relatively where along the string to snip. snip_point of
0.5 would be the middle.
>>> snip_string('this is long', 8)
'this ...'
>>> snip_string('this is long', 8, snip_point=0.5)
'th...ong'
>>> snip_string('this is long', 12)
'this is long'
>>> snip_string('this is long', 8, '~')
'this is~'
>>> snip_string('this is long', 8, '~', 0.5)
'thi~long'
"""
if len(string) <= max_len:
new_string = string
else:
visible_len = (max_len - len(snip_string))
start_len = int(visible_len*snip_point)
end_len = visible_len-start_len
new_string = string[0:start_len]+ snip_string
if end_len > 0:
new_string += string[-end_len:]
return new_string
|
[
"def",
"snip_string",
"(",
"string",
",",
"max_len",
"=",
"20",
",",
"snip_string",
"=",
"'...'",
",",
"snip_point",
"=",
"0.5",
")",
":",
"if",
"len",
"(",
"string",
")",
"<=",
"max_len",
":",
"new_string",
"=",
"string",
"else",
":",
"visible_len",
"=",
"(",
"max_len",
"-",
"len",
"(",
"snip_string",
")",
")",
"start_len",
"=",
"int",
"(",
"visible_len",
"*",
"snip_point",
")",
"end_len",
"=",
"visible_len",
"-",
"start_len",
"new_string",
"=",
"string",
"[",
"0",
":",
"start_len",
"]",
"+",
"snip_string",
"if",
"end_len",
">",
"0",
":",
"new_string",
"+=",
"string",
"[",
"-",
"end_len",
":",
"]",
"return",
"new_string"
] |
Snips a string so that it is no longer than max_len, replacing deleted
characters with the snip_string.
The snip is done at snip_point, which is a fraction between 0 and 1,
indicating relatively where along the string to snip. snip_point of
0.5 would be the middle.
>>> snip_string('this is long', 8)
'this ...'
>>> snip_string('this is long', 8, snip_point=0.5)
'th...ong'
>>> snip_string('this is long', 12)
'this is long'
>>> snip_string('this is long', 8, '~')
'this is~'
>>> snip_string('this is long', 8, '~', 0.5)
'thi~long'
|
[
"Snips",
"a",
"string",
"so",
"that",
"it",
"is",
"no",
"longer",
"than",
"max_len",
"replacing",
"deleted",
"characters",
"with",
"the",
"snip_string",
".",
"The",
"snip",
"is",
"done",
"at",
"snip_point",
"which",
"is",
"a",
"fraction",
"between",
"0",
"and",
"1",
"indicating",
"relatively",
"where",
"along",
"the",
"string",
"to",
"snip",
".",
"snip_point",
"of",
"0",
".",
"5",
"would",
"be",
"the",
"middle",
".",
">>>",
"snip_string",
"(",
"this",
"is",
"long",
"8",
")",
"this",
"...",
">>>",
"snip_string",
"(",
"this",
"is",
"long",
"8",
"snip_point",
"=",
"0",
".",
"5",
")",
"th",
"...",
"ong",
">>>",
"snip_string",
"(",
"this",
"is",
"long",
"12",
")",
"this",
"is",
"long",
">>>",
"snip_string",
"(",
"this",
"is",
"long",
"8",
"~",
")",
"this",
"is~",
">>>",
"snip_string",
"(",
"this",
"is",
"long",
"8",
"~",
"0",
".",
"5",
")",
"thi~long"
] |
train
|
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/utils.py#L173-L203
|
nwilming/ocupy
|
ocupy/utils.py
|
find_common_beginning
|
def find_common_beginning(string_list, boundary_char = None):
"""Given a list of strings, finds finds the longest string that is common
to the *beginning* of all strings in the list.
boundary_char defines a boundary that must be preserved, so that the
common string removed must end with this char.
"""
common=''
# by definition there is nothing common to 1 item...
if len(string_list) > 1:
shortestLen = min([len(el) for el in string_list])
for idx in range(shortestLen):
chars = [s[idx] for s in string_list]
if chars.count(chars[0]) != len(chars): # test if any chars differ
break
common+=chars[0]
if boundary_char is not None:
try:
end_idx = common.rindex(boundary_char)
common = common[0:end_idx+1]
except ValueError:
common = ''
return common
|
python
|
def find_common_beginning(string_list, boundary_char = None):
"""Given a list of strings, finds finds the longest string that is common
to the *beginning* of all strings in the list.
boundary_char defines a boundary that must be preserved, so that the
common string removed must end with this char.
"""
common=''
# by definition there is nothing common to 1 item...
if len(string_list) > 1:
shortestLen = min([len(el) for el in string_list])
for idx in range(shortestLen):
chars = [s[idx] for s in string_list]
if chars.count(chars[0]) != len(chars): # test if any chars differ
break
common+=chars[0]
if boundary_char is not None:
try:
end_idx = common.rindex(boundary_char)
common = common[0:end_idx+1]
except ValueError:
common = ''
return common
|
[
"def",
"find_common_beginning",
"(",
"string_list",
",",
"boundary_char",
"=",
"None",
")",
":",
"common",
"=",
"''",
"# by definition there is nothing common to 1 item...",
"if",
"len",
"(",
"string_list",
")",
">",
"1",
":",
"shortestLen",
"=",
"min",
"(",
"[",
"len",
"(",
"el",
")",
"for",
"el",
"in",
"string_list",
"]",
")",
"for",
"idx",
"in",
"range",
"(",
"shortestLen",
")",
":",
"chars",
"=",
"[",
"s",
"[",
"idx",
"]",
"for",
"s",
"in",
"string_list",
"]",
"if",
"chars",
".",
"count",
"(",
"chars",
"[",
"0",
"]",
")",
"!=",
"len",
"(",
"chars",
")",
":",
"# test if any chars differ",
"break",
"common",
"+=",
"chars",
"[",
"0",
"]",
"if",
"boundary_char",
"is",
"not",
"None",
":",
"try",
":",
"end_idx",
"=",
"common",
".",
"rindex",
"(",
"boundary_char",
")",
"common",
"=",
"common",
"[",
"0",
":",
"end_idx",
"+",
"1",
"]",
"except",
"ValueError",
":",
"common",
"=",
"''",
"return",
"common"
] |
Given a list of strings, finds the longest string that is common
to the *beginning* of all strings in the list.
boundary_char defines a boundary that must be preserved, so that the
common string removed must end with this char.
|
[
"Given",
"a",
"list",
"of",
"strings",
"finds",
"finds",
"the",
"longest",
"string",
"that",
"is",
"common",
"to",
"the",
"*",
"beginning",
"*",
"of",
"all",
"strings",
"in",
"the",
"list",
".",
"boundary_char",
"defines",
"a",
"boundary",
"that",
"must",
"be",
"preserved",
"so",
"that",
"the",
"common",
"string",
"removed",
"must",
"end",
"with",
"this",
"char",
"."
] |
train
|
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/utils.py#L205-L233
|
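Illustrative calls (the function above must be in scope); the third call shows the boundary_char rule discarding a prefix that contains no boundary character:

print(find_common_beginning(['de.uos.nbp.senhance', 'de.uos.nbp.heartFelt']))
# 'de.uos.nbp.'  -- raw character-wise common prefix
print(find_common_beginning(['/some/deep/dir/a', '/some/deep/other/b'], '/'))
# '/some/deep/'  -- already ends at the last '/'
print(find_common_beginning(['abc/def', 'abd/xyz'], '/'))
# ''             -- common prefix 'ab' holds no '/', so it is dropped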
nwilming/ocupy
|
ocupy/utils.py
|
factorise_strings
|
def factorise_strings (string_list, boundary_char=None):
"""Given a list of strings, finds the longest string that is common
to the *beginning* of all strings in the list and
returns a new list whose elements lack this common beginning.
boundary_char defines a boundary that must be preserved, so that the
common string removed must end with this char.
>>> cmn='something/to/begin with?'
>>> blah=[cmn+'yes',cmn+'no',cmn+'?maybe']
>>> (blee, bleecmn) = factorise_strings(blah)
>>> blee
['yes', 'no', '?maybe']
>>> bleecmn == cmn
True
>>> blah = ['de.uos.nbp.senhance', 'de.uos.nbp.heartFelt']
>>> (blee, bleecmn) = factorise_strings(blah, '.')
>>> blee
['senhance', 'heartFelt']
>>> bleecmn
'de.uos.nbp.'
>>> blah = ['/some/deep/dir/subdir', '/some/deep/other/dir', '/some/deep/other/dir2']
>>> (blee, bleecmn) = factorise_strings(blah, '/')
>>> blee
['dir/subdir', 'other/dir', 'other/dir2']
>>> bleecmn
'/some/deep/'
>>> blah = ['/net/store/nbp/heartFelt/data/ecg/emotive_interoception/p20/2012-01-27T09.01.14-ecg.csv', '/net/store/nbp/heartFelt/data/ecg/emotive_interoception/p21/2012-01-27T11.03.08-ecg.csv', '/net/store/nbp/heartFelt/data/ecg/emotive_interoception/p23/2012-01-31T12.02.55-ecg.csv']
>>> (blee, bleecmn) = factorise_strings(blah, '/')
>>> bleecmn
'/net/store/nbp/heartFelt/data/ecg/emotive_interoception/'
rmuil 2012/02/01
"""
cmn = find_common_beginning(string_list, boundary_char)
new_list = [el[len(cmn):] for el in string_list]
return (new_list, cmn)
|
python
|
def factorise_strings (string_list, boundary_char=None):
"""Given a list of strings, finds the longest string that is common
to the *beginning* of all strings in the list and
returns a new list whose elements lack this common beginning.
boundary_char defines a boundary that must be preserved, so that the
common string removed must end with this char.
>>> cmn='something/to/begin with?'
>>> blah=[cmn+'yes',cmn+'no',cmn+'?maybe']
>>> (blee, bleecmn) = factorise_strings(blah)
>>> blee
['yes', 'no', '?maybe']
>>> bleecmn == cmn
True
>>> blah = ['de.uos.nbp.senhance', 'de.uos.nbp.heartFelt']
>>> (blee, bleecmn) = factorise_strings(blah, '.')
>>> blee
['senhance', 'heartFelt']
>>> bleecmn
'de.uos.nbp.'
>>> blah = ['/some/deep/dir/subdir', '/some/deep/other/dir', '/some/deep/other/dir2']
>>> (blee, bleecmn) = factorise_strings(blah, '/')
>>> blee
['dir/subdir', 'other/dir', 'other/dir2']
>>> bleecmn
'/some/deep/'
>>> blah = ['/net/store/nbp/heartFelt/data/ecg/emotive_interoception/p20/2012-01-27T09.01.14-ecg.csv', '/net/store/nbp/heartFelt/data/ecg/emotive_interoception/p21/2012-01-27T11.03.08-ecg.csv', '/net/store/nbp/heartFelt/data/ecg/emotive_interoception/p23/2012-01-31T12.02.55-ecg.csv']
>>> (blee, bleecmn) = factorise_strings(blah, '/')
>>> bleecmn
'/net/store/nbp/heartFelt/data/ecg/emotive_interoception/'
rmuil 2012/02/01
"""
cmn = find_common_beginning(string_list, boundary_char)
new_list = [el[len(cmn):] for el in string_list]
return (new_list, cmn)
|
[
"def",
"factorise_strings",
"(",
"string_list",
",",
"boundary_char",
"=",
"None",
")",
":",
"cmn",
"=",
"find_common_beginning",
"(",
"string_list",
",",
"boundary_char",
")",
"new_list",
"=",
"[",
"el",
"[",
"len",
"(",
"cmn",
")",
":",
"]",
"for",
"el",
"in",
"string_list",
"]",
"return",
"(",
"new_list",
",",
"cmn",
")"
] |
Given a list of strings, finds the longest string that is common
to the *beginning* of all strings in the list and
returns a new list whose elements lack this common beginning.
boundary_char defines a boundary that must be preserved, so that the
common string removed must end with this char.
>>> cmn='something/to/begin with?'
>>> blah=[cmn+'yes',cmn+'no',cmn+'?maybe']
>>> (blee, bleecmn) = factorise_strings(blah)
>>> blee
['yes', 'no', '?maybe']
>>> bleecmn == cmn
True
>>> blah = ['de.uos.nbp.senhance', 'de.uos.nbp.heartFelt']
>>> (blee, bleecmn) = factorise_strings(blah, '.')
>>> blee
['senhance', 'heartFelt']
>>> bleecmn
'de.uos.nbp.'
>>> blah = ['/some/deep/dir/subdir', '/some/deep/other/dir', '/some/deep/other/dir2']
>>> (blee, bleecmn) = factorise_strings(blah, '/')
>>> blee
['dir/subdir', 'other/dir', 'other/dir2']
>>> bleecmn
'/some/deep/'
>>> blah = ['/net/store/nbp/heartFelt/data/ecg/emotive_interoception/p20/2012-01-27T09.01.14-ecg.csv', '/net/store/nbp/heartFelt/data/ecg/emotive_interoception/p21/2012-01-27T11.03.08-ecg.csv', '/net/store/nbp/heartFelt/data/ecg/emotive_interoception/p23/2012-01-31T12.02.55-ecg.csv']
>>> (blee, bleecmn) = factorise_strings(blah, '/')
>>> bleecmn
'/net/store/nbp/heartFelt/data/ecg/emotive_interoception/'
rmuil 2012/02/01
|
[
"Given",
"a",
"list",
"of",
"strings",
"finds",
"the",
"longest",
"string",
"that",
"is",
"common",
"to",
"the",
"*",
"beginning",
"*",
"of",
"all",
"strings",
"in",
"the",
"list",
"and",
"returns",
"a",
"new",
"list",
"whose",
"elements",
"lack",
"this",
"common",
"beginning",
".",
"boundary_char",
"defines",
"a",
"boundary",
"that",
"must",
"be",
"preserved",
"so",
"that",
"the",
"common",
"string",
"removed",
"must",
"end",
"with",
"this",
"char",
".",
">>>",
"cmn",
"=",
"something",
"/",
"to",
"/",
"begin",
"with?",
">>>",
"blah",
"=",
"[",
"cmn",
"+",
"yes",
"cmn",
"+",
"no",
"cmn",
"+",
"?maybe",
"]",
">>>",
"(",
"blee",
"bleecmn",
")",
"=",
"factorise_strings",
"(",
"blah",
")",
">>>",
"blee",
"[",
"yes",
"no",
"?maybe",
"]",
">>>",
"bleecmn",
"==",
"cmn",
"True",
">>>",
"blah",
"=",
"[",
"de",
".",
"uos",
".",
"nbp",
".",
"senhance",
"de",
".",
"uos",
".",
"nbp",
".",
"heartFelt",
"]",
">>>",
"(",
"blee",
"bleecmn",
")",
"=",
"factorise_strings",
"(",
"blah",
".",
")",
">>>",
"blee",
"[",
"senhance",
"heartFelt",
"]",
">>>",
"bleecmn",
"de",
".",
"uos",
".",
"nbp",
".",
">>>",
"blah",
"=",
"[",
"/",
"some",
"/",
"deep",
"/",
"dir",
"/",
"subdir",
"/",
"some",
"/",
"deep",
"/",
"other",
"/",
"dir",
"/",
"some",
"/",
"deep",
"/",
"other",
"/",
"dir2",
"]",
">>>",
"(",
"blee",
"bleecmn",
")",
"=",
"factorise_strings",
"(",
"blah",
"/",
")",
">>>",
"blee",
"[",
"dir",
"/",
"subdir",
"other",
"/",
"dir",
"other",
"/",
"dir2",
"]",
">>>",
"bleecmn",
"/",
"some",
"/",
"deep",
"/",
">>>",
"blah",
"=",
"[",
"/",
"net",
"/",
"store",
"/",
"nbp",
"/",
"heartFelt",
"/",
"data",
"/",
"ecg",
"/",
"emotive_interoception",
"/",
"p20",
"/",
"2012",
"-",
"01",
"-",
"27T09",
".",
"01",
".",
"14",
"-",
"ecg",
".",
"csv",
"/",
"net",
"/",
"store",
"/",
"nbp",
"/",
"heartFelt",
"/",
"data",
"/",
"ecg",
"/",
"emotive_interoception",
"/",
"p21",
"/",
"2012",
"-",
"01",
"-",
"27T11",
".",
"03",
".",
"08",
"-",
"ecg",
".",
"csv",
"/",
"net",
"/",
"store",
"/",
"nbp",
"/",
"heartFelt",
"/",
"data",
"/",
"ecg",
"/",
"emotive_interoception",
"/",
"p23",
"/",
"2012",
"-",
"01",
"-",
"31T12",
".",
"02",
".",
"55",
"-",
"ecg",
".",
"csv",
"]",
">>>",
"(",
"blee",
"bleecmn",
")",
"=",
"factorise_strings",
"(",
"blah",
"/",
")",
">>>",
"bleecmn",
"/",
"net",
"/",
"store",
"/",
"nbp",
"/",
"heartFelt",
"/",
"data",
"/",
"ecg",
"/",
"emotive_interoception",
"/",
"rmuil",
"2012",
"/",
"02",
"/",
"01"
] |
train
|
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/utils.py#L235-L277
|
nwilming/ocupy
|
ocupy/datamat.py
|
load
|
def load(path, variable='Datamat'):
"""
Load datamat at path.
Parameters:
path : string
Absolute path of the file to load from.
"""
f = h5py.File(path,'r')
try:
dm = fromhdf5(f[variable])
finally:
f.close()
return dm
|
python
|
def load(path, variable='Datamat'):
"""
Load datamat at path.
Parameters:
path : string
Absolute path of the file to load from.
"""
f = h5py.File(path,'r')
try:
dm = fromhdf5(f[variable])
finally:
f.close()
return dm
|
[
"def",
"load",
"(",
"path",
",",
"variable",
"=",
"'Datamat'",
")",
":",
"f",
"=",
"h5py",
".",
"File",
"(",
"path",
",",
"'r'",
")",
"try",
":",
"dm",
"=",
"fromhdf5",
"(",
"f",
"[",
"variable",
"]",
")",
"finally",
":",
"f",
".",
"close",
"(",
")",
"return",
"dm"
] |
Load datamat at path.
Parameters:
path : string
Absolute path of the file to load from.
|
[
"Load",
"datamat",
"at",
"path",
"."
] |
train
|
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/datamat.py#L528-L541
|
nwilming/ocupy
|
ocupy/datamat.py
|
VectorFactory
|
def VectorFactory(fields, parameters, categories = None):
'''
Creates a datamat from a dictionary that contains lists/arrays as values.
Input:
fields: Dictionary
The values will be used as fields of the datamat and the keys
as field names.
parameters: Dictionary
A dictionary whose values are added as parameters. Keys are used
for parameter names.
'''
fm = Datamat(categories = categories)
fm._fields = list(fields.keys())
for (field, value) in list(fields.items()):
try:
fm.__dict__[field] = np.asarray(value)
except ValueError:
fm.__dict__[field] = np.asarray(value, dtype=np.object)
fm._parameters = parameters
for (field, value) in list(parameters.items()):
fm.__dict__[field] = value
fm._num_fix = len(fm.__dict__[list(fields.keys())[0]])
return fm
|
python
|
def VectorFactory(fields, parameters, categories = None):
'''
Creates a datamat from a dictionary that contains lists/arrays as values.
Input:
fields: Dictionary
The values will be used as fields of the datamat and the keys
as field names.
parameters: Dictionary
A dictionary whose values are added as parameters. Keys are used
for parameter names.
'''
fm = Datamat(categories = categories)
fm._fields = list(fields.keys())
for (field, value) in list(fields.items()):
try:
fm.__dict__[field] = np.asarray(value)
except ValueError:
fm.__dict__[field] = np.asarray(value, dtype=np.object)
fm._parameters = parameters
for (field, value) in list(parameters.items()):
fm.__dict__[field] = value
fm._num_fix = len(fm.__dict__[list(fields.keys())[0]])
return fm
|
[
"def",
"VectorFactory",
"(",
"fields",
",",
"parameters",
",",
"categories",
"=",
"None",
")",
":",
"fm",
"=",
"Datamat",
"(",
"categories",
"=",
"categories",
")",
"fm",
".",
"_fields",
"=",
"list",
"(",
"fields",
".",
"keys",
"(",
")",
")",
"for",
"(",
"field",
",",
"value",
")",
"in",
"list",
"(",
"fields",
".",
"items",
"(",
")",
")",
":",
"try",
":",
"fm",
".",
"__dict__",
"[",
"field",
"]",
"=",
"np",
".",
"asarray",
"(",
"value",
")",
"except",
"ValueError",
":",
"fm",
".",
"__dict__",
"[",
"field",
"]",
"=",
"np",
".",
"asarray",
"(",
"value",
",",
"dtype",
"=",
"np",
".",
"object",
")",
"fm",
".",
"_parameters",
"=",
"parameters",
"for",
"(",
"field",
",",
"value",
")",
"in",
"list",
"(",
"parameters",
".",
"items",
"(",
")",
")",
":",
"fm",
".",
"__dict__",
"[",
"field",
"]",
"=",
"value",
"fm",
".",
"_num_fix",
"=",
"len",
"(",
"fm",
".",
"__dict__",
"[",
"list",
"(",
"fields",
".",
"keys",
"(",
")",
")",
"[",
"0",
"]",
"]",
")",
"return",
"fm"
] |
Creates a datamat from a dictionary that contains lists/arrays as values.
Input:
fields: Dictionary
The values will be used as fields of the datamat and the keys
as field names.
parameters: Dictionary
A dictionary whose values are added as parameters. Keys are used
for parameter names.
|
[
"Creates",
"a",
"datamat",
"from",
"a",
"dictionary",
"that",
"contains",
"lists",
"/",
"arrays",
"as",
"values",
"."
] |
train
|
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/datamat.py#L560-L584
|
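A hedged construction example (the field and parameter names are hypothetical). One caveat worth noting: np.object is a deprecated alias that was removed in NumPy 1.24, so on current NumPy the fallback line would need dtype=object instead:

import numpy as np

fields = {'x': [0.1, 0.2, 0.3], 'category': [7, 7, 9]}
parameters = {'subject': 's01'}
dm = VectorFactory(fields, parameters)
# dm.x        -> array([0.1, 0.2, 0.3])
# dm.subject  -> 's01'
# dm._num_fix -> 3  (the length of the first field)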
nwilming/ocupy
|
ocupy/datamat.py
|
Datamat.filter
|
def filter(self, index): #@ReservedAssignment
"""
Filters a datamat by different aspects.
This function is a device to filter the datamat by certain logical
conditions. It takes as input a logical array (contains only True
or False for every datapoint) and kicks out all datapoints for which
the array says False. The logical array can conveniently be created
with numpy::
>>> print(np.unique(fm.category))
[2 9]
>>> fm_filtered = fm[ fm.category == 9 ]
>>> print(np.unique(fm_filtered.category))
[9]
Parameters:
index : array
Array-like that contains True for every element that
passes the filter; else contains False
Returns:
datamat : Datamat Instance
"""
return Datamat(categories=self._categories, datamat=self, index=index)
|
python
|
def filter(self, index): #@ReservedAssignment
"""
Filters a datamat by different aspects.
This function is a device to filter the datamat by certain logical
conditions. It takes as input a logical array (contains only True
or False for every datapoint) and kicks out all datapoints for which
the array says False. The logical array can conveniently be created
with numpy::
>>> print(np.unique(fm.category))
[2 9]
>>> fm_filtered = fm[ fm.category == 9 ]
>>> print(np.unique(fm_filtered.category))
[9]
Parameters:
index : array
Array-like that contains True for every element that
passes the filter; else contains False
Returns:
datamat : Datamat Instance
"""
return Datamat(categories=self._categories, datamat=self, index=index)
|
[
"def",
"filter",
"(",
"self",
",",
"index",
")",
":",
"#@ReservedAssignment",
"return",
"Datamat",
"(",
"categories",
"=",
"self",
".",
"_categories",
",",
"datamat",
"=",
"self",
",",
"index",
"=",
"index",
")"
] |
Filters a datamat by different aspects.
This function is a device to filter the datamat by certain logical
conditions. It takes as input a logical array (contains only True
or False for every datapoint) and kicks out all datapoints for which
the array says False. The logical array can conveniently be created
with numpy::
>>> print(np.unique(fm.category))
[2 9]
>>> fm_filtered = fm[ fm.category == 9 ]
>>> print(np.unique(fm_filtered.category))
[9]
Parameters:
index : array
Array-like that contains True for every element that
passes the filter; else contains False
Returns:
datamat : Datamat Instance
|
[
"Filters",
"a",
"datamat",
"by",
"different",
"aspects",
"."
] |
train
|
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/datamat.py#L140-L163
|
nwilming/ocupy
|
ocupy/datamat.py
|
Datamat.copy
|
def copy(self):
"""
Returns a copy of the datamat.
"""
return self.filter(np.ones(self._num_fix).astype(bool))
|
python
|
def copy(self):
"""
Returns a copy of the datamat.
"""
return self.filter(np.ones(self._num_fix).astype(bool))
|
[
"def",
"copy",
"(",
"self",
")",
":",
"return",
"self",
".",
"filter",
"(",
"np",
".",
"ones",
"(",
"self",
".",
"_num_fix",
")",
".",
"astype",
"(",
"bool",
")",
")"
] |
Returns a copy of the datamat.
|
[
"Returns",
"a",
"copy",
"of",
"the",
"datamat",
"."
] |
train
|
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/datamat.py#L165-L169
|
nwilming/ocupy
|
ocupy/datamat.py
|
Datamat.save
|
def save(self, path):
"""
Saves Datamat to path.
Parameters:
path : string
Absolute path of the file to save to.
"""
f = h5py.File(path, 'w')
try:
fm_group = f.create_group('Datamat')
for field in self.fieldnames():
try:
fm_group.create_dataset(field, data = self.__dict__[field])
except (TypeError,) as e:
# Assuming field is an object array that contains dicts which
# contain numpy arrays as values
sub_group = fm_group.create_group(field)
for i, d in enumerate(self.__dict__[field]):
index_group = sub_group.create_group(str(i))
print((field, d))
for key, value in list(d.items()):
index_group.create_dataset(key, data=value)
for param in self.parameters():
fm_group.attrs[param]=self.__dict__[param]
finally:
f.close()
|
python
|
def save(self, path):
"""
Saves Datamat to path.
Parameters:
path : string
Absolute path of the file to save to.
"""
f = h5py.File(path, 'w')
try:
fm_group = f.create_group('Datamat')
for field in self.fieldnames():
try:
fm_group.create_dataset(field, data = self.__dict__[field])
except (TypeError,) as e:
# Assuming field is an object array that contains dicts which
# contain numpy arrays as values
sub_group = fm_group.create_group(field)
for i, d in enumerate(self.__dict__[field]):
index_group = sub_group.create_group(str(i))
print((field, d))
for key, value in list(d.items()):
index_group.create_dataset(key, data=value)
for param in self.parameters():
fm_group.attrs[param]=self.__dict__[param]
finally:
f.close()
|
[
"def",
"save",
"(",
"self",
",",
"path",
")",
":",
"f",
"=",
"h5py",
".",
"File",
"(",
"path",
",",
"'w'",
")",
"try",
":",
"fm_group",
"=",
"f",
".",
"create_group",
"(",
"'Datamat'",
")",
"for",
"field",
"in",
"self",
".",
"fieldnames",
"(",
")",
":",
"try",
":",
"fm_group",
".",
"create_dataset",
"(",
"field",
",",
"data",
"=",
"self",
".",
"__dict__",
"[",
"field",
"]",
")",
"except",
"(",
"TypeError",
",",
")",
"as",
"e",
":",
"# Assuming field is an object array that contains dicts which",
"# contain numpy arrays as values",
"sub_group",
"=",
"fm_group",
".",
"create_group",
"(",
"field",
")",
"for",
"i",
",",
"d",
"in",
"enumerate",
"(",
"self",
".",
"__dict__",
"[",
"field",
"]",
")",
":",
"index_group",
"=",
"sub_group",
".",
"create_group",
"(",
"str",
"(",
"i",
")",
")",
"print",
"(",
"(",
"field",
",",
"d",
")",
")",
"for",
"key",
",",
"value",
"in",
"list",
"(",
"d",
".",
"items",
"(",
")",
")",
":",
"index_group",
".",
"create_dataset",
"(",
"key",
",",
"data",
"=",
"value",
")",
"for",
"param",
"in",
"self",
".",
"parameters",
"(",
")",
":",
"fm_group",
".",
"attrs",
"[",
"param",
"]",
"=",
"self",
".",
"__dict__",
"[",
"param",
"]",
"finally",
":",
"f",
".",
"close",
"(",
")"
] |
Saves Datamat to path.
Parameters:
path : string
Absolute path of the file to save to.
|
[
"Saves",
"Datamat",
"to",
"path",
"."
] |
train
|
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/datamat.py#L190-L217
|
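A miniature h5py round trip mirroring the layout that save() and load() use (one 'Datamat' group, one dataset per field, parameters stored as group attributes); the file name and field name are illustrative:

import h5py
import numpy as np

with h5py.File('demo.h5', 'w') as f:            # write side, as in save()
    grp = f.create_group('Datamat')
    grp.create_dataset('x', data=np.arange(3))  # a field
    grp.attrs['subject'] = 's01'                # a parameter

with h5py.File('demo.h5', 'r') as f:            # read side, as in load()
    grp = f['Datamat']
    print(grp['x'][:], dict(grp.attrs))         # [0 1 2] {'subject': 's01'}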
nwilming/ocupy
|
ocupy/datamat.py
|
Datamat.set_param
|
def set_param(self, key, value):
"""
Set the value of a parameter.
"""
self.__dict__[key] = value
self._parameters[key] = value
|
python
|
def set_param(self, key, value):
"""
Set the value of a parameter.
"""
self.__dict__[key] = value
self._parameters[key] = value
|
[
"def",
"set_param",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"self",
".",
"__dict__",
"[",
"key",
"]",
"=",
"value",
"self",
".",
"_parameters",
"[",
"key",
"]",
"=",
"value"
] |
Set the value of a parameter.
|
[
"Set",
"the",
"value",
"of",
"a",
"parameter",
"."
] |
train
|
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/datamat.py#L234-L239
|
nwilming/ocupy
|
ocupy/datamat.py
|
Datamat.by_field
|
def by_field(self, field):
"""
Returns an iterator that iterates over unique values of field
Parameters:
field : string
Filters the datamat for every unique value in field and yields
the filtered datamat.
Returns:
datamat : Datamat that is filtered according to one of the unique
values in 'field'.
"""
for value in np.unique(self.__dict__[field]):
yield self.filter(self.__dict__[field] == value)
|
python
|
def by_field(self, field):
"""
Returns an iterator that iterates over unique values of field
Parameters:
field : string
Filters the datamat for every unique value in field and yields
the filtered datamat.
Returns:
datamat : Datamat that is filtered according to one of the unique
values in 'field'.
"""
for value in np.unique(self.__dict__[field]):
yield self.filter(self.__dict__[field] == value)
|
[
"def",
"by_field",
"(",
"self",
",",
"field",
")",
":",
"for",
"value",
"in",
"np",
".",
"unique",
"(",
"self",
".",
"__dict__",
"[",
"field",
"]",
")",
":",
"yield",
"self",
".",
"filter",
"(",
"self",
".",
"__dict__",
"[",
"field",
"]",
"==",
"value",
")"
] |
Returns an iterator that iterates over unique values of field
Parameters:
field : string
Filters the datamat for every unique value in field and yields
the filtered datamat.
Returns:
datamat : Datamat that is filtered according to one of the unique
values in 'field'.
|
[
"Returns",
"an",
"iterator",
"that",
"iterates",
"over",
"unique",
"values",
"of",
"field"
] |
train
|
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/datamat.py#L241-L254
|
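A typical consumption pattern for the generator, assuming a datamat dm with hypothetical 'subject' and 'x' fields:

for sub_dm in dm.by_field('subject'):
    # each sub_dm contains exactly one unique 'subject' value
    print(sub_dm.subject[0], sub_dm.x.mean())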
nwilming/ocupy
|
ocupy/datamat.py
|
Datamat.by_cat
|
def by_cat(self):
"""
Iterates over categories and returns a filtered datamat.
If a categories object is attached, the images object for the given
category is returned as well (else None is returned).
Returns:
(datamat, categories) : A tuple that contains first the filtered
datamat (has only one category) and second the associated
categories object (if it is available, None otherwise)
"""
for value in np.unique(self.category):
cat_fm = self.filter(self.category == value)
if self._categories:
yield (cat_fm, self._categories[value])
else:
yield (cat_fm, None)
|
python
|
def by_cat(self):
"""
Iterates over categories and returns a filtered datamat.
If a categories object is attached, the images object for the given
category is returned as well (else None is returned).
Returns:
(datamat, categories) : A tuple that contains first the filtered
datamat (has only one category) and second the associated
categories object (if it is available, None otherwise)
"""
for value in np.unique(self.category):
cat_fm = self.filter(self.category == value)
if self._categories:
yield (cat_fm, self._categories[value])
else:
yield (cat_fm, None)
|
[
"def",
"by_cat",
"(",
"self",
")",
":",
"for",
"value",
"in",
"np",
".",
"unique",
"(",
"self",
".",
"category",
")",
":",
"cat_fm",
"=",
"self",
".",
"filter",
"(",
"self",
".",
"category",
"==",
"value",
")",
"if",
"self",
".",
"_categories",
":",
"yield",
"(",
"cat_fm",
",",
"self",
".",
"_categories",
"[",
"value",
"]",
")",
"else",
":",
"yield",
"(",
"cat_fm",
",",
"None",
")"
] |
Iterates over categories and returns a filtered datamat.
If a categories object is attached, the images object for the given
category is returned as well (else None is returned).
Returns:
(datamat, categories) : A tuple that contains first the filtered
datamat (has only one category) and second the associated
categories object (if it is available, None otherwise)
|
[
"Iterates",
"over",
"categories",
"and",
"returns",
"a",
"filtered",
"datamat",
"."
] |
train
|
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/datamat.py#L256-L273
|
nwilming/ocupy
|
ocupy/datamat.py
|
Datamat.by_filenumber
|
def by_filenumber(self):
"""
Iterates over file numbers and returns a filtered datamat.
If a categories object is attached, the images object for the given
category is returned as well (else None is returned).
Returns:
(datamat, categories) : A tuple that contains first the filtered
datamat (has only one filenumber) and second the associated
categories object (if it is available, None otherwise)
"""
for value in np.unique(self.filenumber):
file_fm = self.filter(self.filenumber == value)
if self._categories:
yield (file_fm, self._categories[self.category[0]][value])
else:
yield (file_fm, None)
|
python
|
def by_filenumber(self):
"""
Iterates over file numbers and returns a filtered datamat.
If a categories object is attached, the images object for the given
category is returned as well (else None is returned).
Returns:
(datamat, categories) : A tuple that contains first the filtered
datamat (has only one filenumber) and second the associated
categories object (if it is available, None otherwise)
"""
for value in np.unique(self.filenumber):
file_fm = self.filter(self.filenumber == value)
if self._categories:
yield (file_fm, self._categories[self.category[0]][value])
else:
yield (file_fm, None)
|
[
"def",
"by_filenumber",
"(",
"self",
")",
":",
"for",
"value",
"in",
"np",
".",
"unique",
"(",
"self",
".",
"filenumber",
")",
":",
"file_fm",
"=",
"self",
".",
"filter",
"(",
"self",
".",
"filenumber",
"==",
"value",
")",
"if",
"self",
".",
"_categories",
":",
"yield",
"(",
"file_fm",
",",
"self",
".",
"_categories",
"[",
"self",
".",
"category",
"[",
"0",
"]",
"]",
"[",
"value",
"]",
")",
"else",
":",
"yield",
"(",
"file_fm",
",",
"None",
")"
] |
Iterates over file numbers and returns a filtered datamat.
If a categories object is attached, the images object for the given
category is returned as well (else None is returned).
Returns:
(datamat, categories) : A tuple that contains first the filtered
datamat (has only one filenumber) and second the associated
categories object (if it is available, None otherwise)
|
[
"Iterates",
"over",
"categories",
"and",
"returns",
"a",
"filtered",
"datamat",
"."
] |
train
|
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/datamat.py#L275-L292
|
nwilming/ocupy
|
ocupy/datamat.py
|
Datamat.add_field
|
def add_field(self, name, data):
"""
Add a new field to the datamat.
Parameters:
name : string
Name of the new field
data : list
Data for the new field, must be same length as all other fields.
"""
if name in self._fields:
raise ValueError
if not len(data) == self._num_fix:
raise ValueError
self._fields.append(name)
self.__dict__[name] = data
|
python
|
def add_field(self, name, data):
"""
Add a new field to the datamat.
Parameters:
name : string
Name of the new field
data : list
Data for the new field, must be same length as all other fields.
"""
if name in self._fields:
raise ValueError
if not len(data) == self._num_fix:
raise ValueError
self._fields.append(name)
self.__dict__[name] = data
|
[
"def",
"add_field",
"(",
"self",
",",
"name",
",",
"data",
")",
":",
"if",
"name",
"in",
"self",
".",
"_fields",
":",
"raise",
"ValueError",
"if",
"not",
"len",
"(",
"data",
")",
"==",
"self",
".",
"_num_fix",
":",
"raise",
"ValueError",
"self",
".",
"_fields",
".",
"append",
"(",
"name",
")",
"self",
".",
"__dict__",
"[",
"name",
"]",
"=",
"data"
] |
Add a new field to the datamat.
Parameters:
name : string
Name of the new field
data : list
Data for the new field, must be same length as all other fields.
|
[
"Add",
"a",
"new",
"field",
"to",
"the",
"datamat",
"."
] |
train
|
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/datamat.py#L294-L309
|
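A minimal sketch for Datamat.add_field; dm is an assumed Datamat instance and the field name is hypothetical:

import numpy as np

rt = np.zeros(len(dm))             # must have one entry per element
dm.add_field('reaction_time', rt)
# re-adding 'reaction_time', or passing data of the wrong length,
# raises ValueError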
nwilming/ocupy
|
ocupy/datamat.py
|
Datamat.add_field_like
|
def add_field_like(self, name, like_array):
"""
Add a new field to the Datamat with the dtype of the
like_array and the shape of the like_array except for the first
dimension which will be instead the field-length of this Datamat.
"""
new_shape = list(like_array.shape)
new_shape[0] = len(self)
new_data = ma.empty(new_shape, like_array.dtype)
new_data.mask = True
self.add_field(name, new_data)
|
python
|
def add_field_like(self, name, like_array):
"""
Add a new field to the Datamat with the dtype of the
like_array and the shape of the like_array except for the first
dimension which will be instead the field-length of this Datamat.
"""
new_shape = list(like_array.shape)
new_shape[0] = len(self)
new_data = ma.empty(new_shape, like_array.dtype)
new_data.mask = True
self.add_field(name, new_data)
|
[
"def",
"add_field_like",
"(",
"self",
",",
"name",
",",
"like_array",
")",
":",
"new_shape",
"=",
"list",
"(",
"like_array",
".",
"shape",
")",
"new_shape",
"[",
"0",
"]",
"=",
"len",
"(",
"self",
")",
"new_data",
"=",
"ma",
".",
"empty",
"(",
"new_shape",
",",
"like_array",
".",
"dtype",
")",
"new_data",
".",
"mask",
"=",
"True",
"self",
".",
"add_field",
"(",
"name",
",",
"new_data",
")"
] |
Add a new field to the Datamat with the dtype of the
like_array and the shape of the like_array except for the first
dimension which will be instead the field-length of this Datamat.
|
[
"Add",
"a",
"new",
"field",
"to",
"the",
"Datamat",
"with",
"the",
"dtype",
"of",
"the",
"like_array",
"and",
"the",
"shape",
"of",
"the",
"like_array",
"except",
"for",
"the",
"first",
"dimension",
"which",
"will",
"be",
"instead",
"the",
"field",
"-",
"length",
"of",
"this",
"Datamat",
"."
] |
train
|
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/datamat.py#L311-L321
|
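A minimal sketch for Datamat.add_field_like: only the dtype and trailing shape of like_array matter, the first dimension becomes len(dm), and the new field starts fully masked. dm and the field name are hypothetical:

import numpy as np

template = np.empty((1, 2))              # shape[1:] == (2,), dtype float
dm.add_field_like('gaze_xy', template)   # dm.gaze_xy has shape (len(dm), 2)
dm.gaze_xy[0] = (120.0, 64.0)            # assigning unmasks the first row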
nwilming/ocupy
|
ocupy/datamat.py
|
Datamat.annotate
|
def annotate (self, src_dm, data_field, key_field, take_first=True):
"""
Adds a new field (data_field) to the Datamat with data from the
corresponding field of another Datamat (src_dm).
This is accomplished through the use of a key_field, which is
used to determine how the data is copied.
This operation corresponds loosely to an SQL join operation.
The two Datamats are essentially aligned by the unique values
of key_field so that each block element of the new field of the target
Datamat will consist of those elements of src_dm's data_field
where the corresponding element in key_field matches.
If 'take_first' is not true, and there is not
only a single corresponding element (typical usage case) then the
target element value will be
a sequence (array) of all the matching elements.
The target Datamat (self) must not have a field name data_field
already, and both Datamats must have key_field.
The new field in the target Datamat will be a masked array to handle
non-existent data.
TODO: Make example more generic, remove interoceptive reference
TODO: Make standalone test
Examples:
>>> dm_intero = load_interoception_files ('test-ecg.csv', silent=True)
>>> dm_emotiv = load_emotivestimuli_files ('test-bpm.csv', silent=True)
    >>> len(dm_intero)
4
>>> unique(dm_intero.subject_id)
['p05', 'p06']
    >>> len(dm_emotiv)
3
>>> unique(dm_emotiv.subject_id)
['p04', 'p05', 'p06']
>>> 'interospective_awareness' in dm_intero.fieldnames()
True
>>> unique(dm_intero.interospective_awareness) == [0.5555, 0.6666]
True
>>> 'interospective_awareness' in dm_emotiv.fieldnames()
False
    >>> dm_emotiv.annotate(dm_intero, 'interospective_awareness', 'subject_id')
>>> 'interospective_awareness' in dm_emotiv.fieldnames()
True
>>> unique(dm_emotiv.interospective_awareness) == [NaN, 0.5555, 0.6666]
False
"""
if key_field not in self._fields or key_field not in src_dm._fields:
raise AttributeError('key field (%s) must exist in both Datamats'%(key_field))
if data_field not in src_dm._fields:
raise AttributeError('data field (%s) must exist in source Datamat' % (data_field))
if data_field in self._fields:
raise AttributeError('data field (%s) already exists in target Datamat' % (data_field))
#Create a mapping of key_field value to data value.
data_to_copy = dict([(x.field(key_field)[0], x.field(data_field)) for x in src_dm.by_field(key_field)])
data_element = list(data_to_copy.values())[0]
#Create the new data array of correct size.
# We use a masked array because it is possible that for some elements
# of the target Datamat, there exist simply no data in the source
# Datamat. NaNs are fine as indication of this for floats, but if the
# field happens to hold booleans or integers or something else, NaN
# does not work.
new_shape = [len(self)] + list(data_element.shape)
new_data = ma.empty(new_shape, data_element.dtype)
new_data.mask=True
if np.issubdtype(new_data.dtype, np.float):
new_data.fill(np.NaN) #For backwards compatibility, if mask not used
#Now we copy the data. If the data to copy contains only a single value,
# it is added to the target as a scalar (single value).
# Otherwise, it is copied as is, i.e. as a sequence.
for (key, val) in list(data_to_copy.items()):
if take_first:
new_data[self.field(key_field) == key] = val[0]
else:
new_data[self.field(key_field) == key] = val
self.add_field(data_field, new_data)
|
python
|
def annotate (self, src_dm, data_field, key_field, take_first=True):
"""
Adds a new field (data_field) to the Datamat with data from the
corresponding field of another Datamat (src_dm).
This is accomplished through the use of a key_field, which is
used to determine how the data is copied.
This operation corresponds loosely to an SQL join operation.
The two Datamats are essentially aligned by the unique values
of key_field so that each block element of the new field of the target
Datamat will consist of those elements of src_dm's data_field
where the corresponding element in key_field matches.
If 'take_first' is not true, and there is not
only a single corresponding element (typical usage case) then the
target element value will be
a sequence (array) of all the matching elements.
The target Datamat (self) must not have a field name data_field
already, and both Datamats must have key_field.
The new field in the target Datamat will be a masked array to handle
non-existent data.
TODO: Make example more generic, remove interoceptive reference
TODO: Make standalone test
Examples:
>>> dm_intero = load_interoception_files ('test-ecg.csv', silent=True)
>>> dm_emotiv = load_emotivestimuli_files ('test-bpm.csv', silent=True)
    >>> len(dm_intero)
4
>>> unique(dm_intero.subject_id)
['p05', 'p06']
    >>> len(dm_emotiv)
3
>>> unique(dm_emotiv.subject_id)
['p04', 'p05', 'p06']
>>> 'interospective_awareness' in dm_intero.fieldnames()
True
>>> unique(dm_intero.interospective_awareness) == [0.5555, 0.6666]
True
>>> 'interospective_awareness' in dm_emotiv.fieldnames()
False
    >>> dm_emotiv.annotate(dm_intero, 'interospective_awareness', 'subject_id')
>>> 'interospective_awareness' in dm_emotiv.fieldnames()
True
>>> unique(dm_emotiv.interospective_awareness) == [NaN, 0.5555, 0.6666]
False
"""
if key_field not in self._fields or key_field not in src_dm._fields:
raise AttributeError('key field (%s) must exist in both Datamats'%(key_field))
if data_field not in src_dm._fields:
raise AttributeError('data field (%s) must exist in source Datamat' % (data_field))
if data_field in self._fields:
raise AttributeError('data field (%s) already exists in target Datamat' % (data_field))
#Create a mapping of key_field value to data value.
data_to_copy = dict([(x.field(key_field)[0], x.field(data_field)) for x in src_dm.by_field(key_field)])
data_element = list(data_to_copy.values())[0]
#Create the new data array of correct size.
# We use a masked array because it is possible that for some elements
# of the target Datamat, there exist simply no data in the source
# Datamat. NaNs are fine as indication of this for floats, but if the
# field happens to hold booleans or integers or something else, NaN
# does not work.
new_shape = [len(self)] + list(data_element.shape)
new_data = ma.empty(new_shape, data_element.dtype)
new_data.mask=True
if np.issubdtype(new_data.dtype, np.float):
new_data.fill(np.NaN) #For backwards compatibility, if mask not used
#Now we copy the data. If the data to copy contains only a single value,
# it is added to the target as a scalar (single value).
# Otherwise, it is copied as is, i.e. as a sequence.
for (key, val) in list(data_to_copy.items()):
if take_first:
new_data[self.field(key_field) == key] = val[0]
else:
new_data[self.field(key_field) == key] = val
self.add_field(data_field, new_data)
|
[
"def",
"annotate",
"(",
"self",
",",
"src_dm",
",",
"data_field",
",",
"key_field",
",",
"take_first",
"=",
"True",
")",
":",
"if",
"key_field",
"not",
"in",
"self",
".",
"_fields",
"or",
"key_field",
"not",
"in",
"src_dm",
".",
"_fields",
":",
"raise",
"AttributeError",
"(",
"'key field (%s) must exist in both Datamats'",
"%",
"(",
"key_field",
")",
")",
"if",
"data_field",
"not",
"in",
"src_dm",
".",
"_fields",
":",
"raise",
"AttributeError",
"(",
"'data field (%s) must exist in source Datamat'",
"%",
"(",
"data_field",
")",
")",
"if",
"data_field",
"in",
"self",
".",
"_fields",
":",
"raise",
"AttributeError",
"(",
"'data field (%s) already exists in target Datamat'",
"%",
"(",
"data_field",
")",
")",
"#Create a mapping of key_field value to data value.",
"data_to_copy",
"=",
"dict",
"(",
"[",
"(",
"x",
".",
"field",
"(",
"key_field",
")",
"[",
"0",
"]",
",",
"x",
".",
"field",
"(",
"data_field",
")",
")",
"for",
"x",
"in",
"src_dm",
".",
"by_field",
"(",
"key_field",
")",
"]",
")",
"data_element",
"=",
"list",
"(",
"data_to_copy",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
"#Create the new data array of correct size.",
"# We use a masked array because it is possible that for some elements",
"# of the target Datamat, there exist simply no data in the source",
"# Datamat. NaNs are fine as indication of this for floats, but if the",
"# field happens to hold booleans or integers or something else, NaN",
"# does not work.",
"new_shape",
"=",
"[",
"len",
"(",
"self",
")",
"]",
"+",
"list",
"(",
"data_element",
".",
"shape",
")",
"new_data",
"=",
"ma",
".",
"empty",
"(",
"new_shape",
",",
"data_element",
".",
"dtype",
")",
"new_data",
".",
"mask",
"=",
"True",
"if",
"np",
".",
"issubdtype",
"(",
"new_data",
".",
"dtype",
",",
"np",
".",
"float",
")",
":",
"new_data",
".",
"fill",
"(",
"np",
".",
"NaN",
")",
"#For backwards compatibility, if mask not used",
"#Now we copy the data. If the data to copy contains only a single value,",
"# it is added to the target as a scalar (single value).",
"# Otherwise, it is copied as is, i.e. as a sequence.",
"for",
"(",
"key",
",",
"val",
")",
"in",
"list",
"(",
"data_to_copy",
".",
"items",
"(",
")",
")",
":",
"if",
"take_first",
":",
"new_data",
"[",
"self",
".",
"field",
"(",
"key_field",
")",
"==",
"key",
"]",
"=",
"val",
"[",
"0",
"]",
"else",
":",
"new_data",
"[",
"self",
".",
"field",
"(",
"key_field",
")",
"==",
"key",
"]",
"=",
"val",
"self",
".",
"add_field",
"(",
"data_field",
",",
"new_data",
")"
] |
Adds a new field (data_field) to the Datamat with data from the
corresponding field of another Datamat (src_dm).
This is accomplished through the use of a key_field, which is
used to determine how the data is copied.
This operation corresponds loosely to an SQL join operation.
The two Datamats are essentially aligned by the unique values
of key_field so that each block element of the new field of the target
Datamat will consist of those elements of src_dm's data_field
where the corresponding element in key_field matches.
If 'take_first' is not true, and there is not
only a single corresponding element (typical usage case) then the
target element value will be
a sequence (array) of all the matching elements.
The target Datamat (self) must not have a field name data_field
already, and both Datamats must have key_field.
The new field in the target Datamat will be a masked array to handle
non-existent data.
TODO: Make example more generic, remove interoceptive reference
TODO: Make standalone test
Examples:
>>> dm_intero = load_interoception_files ('test-ecg.csv', silent=True)
>>> dm_emotiv = load_emotivestimuli_files ('test-bpm.csv', silent=True)
>>> len(dm_intero)
4
>>> unique(dm_intero.subject_id)
['p05', 'p06']
>>> len(dm_emotiv)
3
>>> unique(dm_emotiv.subject_id)
['p04', 'p05', 'p06']
>>> 'interospective_awareness' in dm_intero.fieldnames()
True
>>> unique(dm_intero.interospective_awareness) == [0.5555, 0.6666]
True
>>> 'interospective_awareness' in dm_emotiv.fieldnames()
False
>>> dm_emotiv.annotate(dm_intero, 'interospective_awareness', 'subject_id')
>>> 'interospective_awareness' in dm_emotiv.fieldnames()
True
>>> unique(dm_emotiv.interospective_awareness) == [NaN, 0.5555, 0.6666]
False
|
[
"Adds",
"a",
"new",
"field",
"(",
"data_field",
")",
"to",
"the",
"Datamat",
"with",
"data",
"from",
"the",
"corresponding",
"field",
"of",
"another",
"Datamat",
"(",
"src_dm",
")",
"."
] |
train
|
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/datamat.py#L323-L408
|
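A minimal sketch for Datamat.annotate, reusing the names from the record's own doctest (dm_intero, dm_emotiv, subject_id), which are assumed to exist:

# copy per-subject values from dm_intero into a new masked field of
# dm_emotiv, aligned on the shared subject_id key; unmatched rows stay masked
dm_emotiv.annotate(dm_intero, 'interospective_awareness', 'subject_id')
# take_first=True (the default) stores one scalar per row; take_first=False
# stores the full sequence of matching source elements instead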
nwilming/ocupy
|
ocupy/datamat.py
|
Datamat.rm_field
|
def rm_field(self, name):
"""
Remove a field from the datamat.
Parameters:
name : string
Name of the field to be removed
"""
if not name in self._fields:
raise ValueError
self._fields.remove(name)
del self.__dict__[name]
|
python
|
def rm_field(self, name):
"""
Remove a field from the datamat.
Parameters:
name : string
Name of the field to be removed
"""
if not name in self._fields:
raise ValueError
self._fields.remove(name)
del self.__dict__[name]
|
[
"def",
"rm_field",
"(",
"self",
",",
"name",
")",
":",
"if",
"not",
"name",
"in",
"self",
".",
"_fields",
":",
"raise",
"ValueError",
"self",
".",
"_fields",
".",
"remove",
"(",
"name",
")",
"del",
"self",
".",
"__dict__",
"[",
"name",
"]"
] |
Remove a field from the datamat.
Parameters:
name : string
Name of the field to be removed
|
[
"Remove",
"a",
"field",
"from",
"the",
"datamat",
"."
] |
train
|
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/datamat.py#L410-L421
|
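A minimal sketch for Datamat.rm_field, continuing the hypothetical field from above; note the method raises a bare ValueError when the field is absent:

dm.rm_field('reaction_time')    # removes the field and its attribute
# calling dm.rm_field('reaction_time') again would raise ValueError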
nwilming/ocupy
|
ocupy/datamat.py
|
Datamat.add_parameter
|
def add_parameter(self, name, value):
"""
Adds a parameter to the existing Datamat.
    Fails if a parameter with the same name already exists or if the name is
    otherwise in this object's __dict__ dictionary.
"""
if name in self._parameters:
raise ValueError("'%s' is already a parameter" % (name))
elif name in self.__dict__:
raise ValueError("'%s' conflicts with the Datamat name-space" % (name))
self.__dict__[name] = value
self._parameters[name] = self.__dict__[name]
|
python
|
def add_parameter(self, name, value):
"""
Adds a parameter to the existing Datamat.
    Fails if a parameter with the same name already exists or if the name is
    otherwise in this object's __dict__ dictionary.
"""
if name in self._parameters:
raise ValueError("'%s' is already a parameter" % (name))
elif name in self.__dict__:
raise ValueError("'%s' conflicts with the Datamat name-space" % (name))
self.__dict__[name] = value
self._parameters[name] = self.__dict__[name]
|
[
"def",
"add_parameter",
"(",
"self",
",",
"name",
",",
"value",
")",
":",
"if",
"name",
"in",
"self",
".",
"_parameters",
":",
"raise",
"ValueError",
"(",
"\"'%s' is already a parameter\"",
"%",
"(",
"name",
")",
")",
"elif",
"name",
"in",
"self",
".",
"__dict__",
":",
"raise",
"ValueError",
"(",
"\"'%s' conflicts with the Datamat name-space\"",
"%",
"(",
"name",
")",
")",
"self",
".",
"__dict__",
"[",
"name",
"]",
"=",
"value",
"self",
".",
"_parameters",
"[",
"name",
"]",
"=",
"self",
".",
"__dict__",
"[",
"name",
"]"
] |
Adds a parameter to the existing Datamat.
Fails if a parameter with the same name already exists or if the name is
otherwise in this object's __dict__ dictionary.
|
[
"Adds",
"a",
"parameter",
"to",
"the",
"existing",
"Datamat",
"."
] |
train
|
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/datamat.py#L423-L436
|
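A minimal sketch for Datamat.add_parameter; the parameter name and value are hypothetical:

dm.add_parameter('screen_distance_cm', 60)
print(dm.screen_distance_cm)    # the parameter is exposed as an attribute
# re-adding the same name, or a name that shadows an existing attribute,
# raises ValueError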
nwilming/ocupy
|
ocupy/datamat.py
|
Datamat.rm_parameter
|
def rm_parameter(self, name):
"""
    Removes a parameter from the existing Datamat.
Fails if parameter doesn't exist.
"""
if name not in self._parameters:
raise ValueError("no '%s' parameter found" % (name))
del self._parameters[name]
del self.__dict__[name]
|
python
|
def rm_parameter(self, name):
"""
    Removes a parameter from the existing Datamat.
Fails if parameter doesn't exist.
"""
if name not in self._parameters:
raise ValueError("no '%s' parameter found" % (name))
del self._parameters[name]
del self.__dict__[name]
|
[
"def",
"rm_parameter",
"(",
"self",
",",
"name",
")",
":",
"if",
"name",
"not",
"in",
"self",
".",
"_parameters",
":",
"raise",
"ValueError",
"(",
"\"no '%s' parameter found\"",
"%",
"(",
"name",
")",
")",
"del",
"self",
".",
"_parameters",
"[",
"name",
"]",
"del",
"self",
".",
"__dict__",
"[",
"name",
"]"
] |
Removes a parameter from the existing Datamat.
Fails if parameter doesn't exist.
|
[
"Removes",
"a",
"parameter",
"to",
"the",
"existing",
"Datamat",
"."
] |
train
|
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/datamat.py#L438-L448
|
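A minimal sketch for Datamat.rm_parameter, continuing the hypothetical parameter above:

dm.rm_parameter('screen_distance_cm')   # removes dict entry and attribute
# an unknown name raises ValueError("no '...' parameter found")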
nwilming/ocupy
|
ocupy/datamat.py
|
Datamat.parameter_to_field
|
def parameter_to_field(self, name):
"""
Promotes a parameter to a field by creating a new array of same
size as the other existing fields, filling it with the current
value of the parameter, and then removing that parameter.
"""
if name not in self._parameters:
raise ValueError("no '%s' parameter found" % (name))
if self._fields.count(name) > 0:
raise ValueError("field with name '%s' already exists" % (name))
data = np.array([self._parameters[name]]*self._num_fix)
self.rm_parameter(name)
self.add_field(name, data)
|
python
|
def parameter_to_field(self, name):
"""
Promotes a parameter to a field by creating a new array of same
size as the other existing fields, filling it with the current
value of the parameter, and then removing that parameter.
"""
if name not in self._parameters:
raise ValueError("no '%s' parameter found" % (name))
if self._fields.count(name) > 0:
raise ValueError("field with name '%s' already exists" % (name))
data = np.array([self._parameters[name]]*self._num_fix)
self.rm_parameter(name)
self.add_field(name, data)
|
[
"def",
"parameter_to_field",
"(",
"self",
",",
"name",
")",
":",
"if",
"name",
"not",
"in",
"self",
".",
"_parameters",
":",
"raise",
"ValueError",
"(",
"\"no '%s' parameter found\"",
"%",
"(",
"name",
")",
")",
"if",
"self",
".",
"_fields",
".",
"count",
"(",
"name",
")",
">",
"0",
":",
"raise",
"ValueError",
"(",
"\"field with name '%s' already exists\"",
"%",
"(",
"name",
")",
")",
"data",
"=",
"np",
".",
"array",
"(",
"[",
"self",
".",
"_parameters",
"[",
"name",
"]",
"]",
"*",
"self",
".",
"_num_fix",
")",
"self",
".",
"rm_parameter",
"(",
"name",
")",
"self",
".",
"add_field",
"(",
"name",
",",
"data",
")"
] |
Promotes a parameter to a field by creating a new array of same
size as the other existing fields, filling it with the current
value of the parameter, and then removing that parameter.
|
[
"Promotes",
"a",
"parameter",
"to",
"a",
"field",
"by",
"creating",
"a",
"new",
"array",
"of",
"same",
"size",
"as",
"the",
"other",
"existing",
"fields",
"filling",
"it",
"with",
"the",
"current",
"value",
"of",
"the",
"parameter",
"and",
"then",
"removing",
"that",
"parameter",
"."
] |
train
|
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/datamat.py#L450-L464
|
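A minimal sketch for Datamat.parameter_to_field: the scalar parameter value is broadcast into a per-element array and the parameter itself is removed:

dm.add_parameter('session', 1)
dm.parameter_to_field('session')
# dm.session is now an array of length len(dm) filled with 1, and
# 'session' no longer appears among the parameters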
nwilming/ocupy
|
ocupy/datamat.py
|
Datamat.join
|
def join(self, fm_new, minimal_subset=True):
"""
Adds content of a new Datamat to this Datamat.
If a parameter of the Datamats is not equal or does not exist
in one, it is promoted to a field.
If the two Datamats have different fields then the elements for the
Datamats that did not have the field will be NaN, unless
'minimal_subset' is true, in which case the mismatching fields will
simply be deleted.
Parameters
fm_new : instance of Datamat
This Datamat is added to the current one.
minimal_subset : if true, remove fields which don't exist in both,
    instead of using NaNs for missing elements (defaults to True)
Capacity to use superset of fields added by rmuil 2012/01/30
"""
# Check if parameters are equal. If not, promote them to fields.
'''
for (nm, val) in fm_new._parameters.items():
if self._parameters.has_key(nm):
if (val != self._parameters[nm]):
self.parameter_to_field(nm)
fm_new.parameter_to_field(nm)
else:
fm_new.parameter_to_field(nm)
'''
# Deal with mismatch in the fields
# First those in self that do not exist in new...
orig_fields = self._fields[:]
for field in orig_fields:
if not field in fm_new._fields:
if minimal_subset:
self.rm_field(field)
else:
warnings.warn("This option is deprecated. Clean and Filter your data before it is joined.", DeprecationWarning)
fm_new.add_field_like(field, self.field(field))
# ... then those in the new that do not exist in self.
orig_fields = fm_new._fields[:]
for field in orig_fields:
if not field in self._fields:
if minimal_subset:
fm_new.rm_field(field)
else:
warnings.warn("This option is deprecated. Clean and Filter your data before it is joined.", DeprecationWarning)
self.add_field_like(field, fm_new.field(field))
if 'SUBJECTINDEX' in self._fields[:]:
if fm_new.SUBJECTINDEX[0] in self.SUBJECTINDEX:
fm_new.SUBJECTINDEX[:] = self.SUBJECTINDEX.max()+1
# Concatenate fields
for field in self._fields:
self.__dict__[field] = ma.hstack((self.__dict__[field],
fm_new.__dict__[field]))
# Update _num_fix
self._num_fix += fm_new._num_fix
|
python
|
def join(self, fm_new, minimal_subset=True):
"""
Adds content of a new Datamat to this Datamat.
If a parameter of the Datamats is not equal or does not exist
in one, it is promoted to a field.
If the two Datamats have different fields then the elements for the
Datamats that did not have the field will be NaN, unless
'minimal_subset' is true, in which case the mismatching fields will
simply be deleted.
Parameters
fm_new : instance of Datamat
This Datamat is added to the current one.
minimal_subset : if true, remove fields which don't exist in both,
    instead of using NaNs for missing elements (defaults to True)
Capacity to use superset of fields added by rmuil 2012/01/30
"""
# Check if parameters are equal. If not, promote them to fields.
'''
for (nm, val) in fm_new._parameters.items():
if self._parameters.has_key(nm):
if (val != self._parameters[nm]):
self.parameter_to_field(nm)
fm_new.parameter_to_field(nm)
else:
fm_new.parameter_to_field(nm)
'''
# Deal with mismatch in the fields
# First those in self that do not exist in new...
orig_fields = self._fields[:]
for field in orig_fields:
if not field in fm_new._fields:
if minimal_subset:
self.rm_field(field)
else:
warnings.warn("This option is deprecated. Clean and Filter your data before it is joined.", DeprecationWarning)
fm_new.add_field_like(field, self.field(field))
# ... then those in the new that do not exist in self.
orig_fields = fm_new._fields[:]
for field in orig_fields:
if not field in self._fields:
if minimal_subset:
fm_new.rm_field(field)
else:
warnings.warn("This option is deprecated. Clean and Filter your data before it is joined.", DeprecationWarning)
self.add_field_like(field, fm_new.field(field))
if 'SUBJECTINDEX' in self._fields[:]:
if fm_new.SUBJECTINDEX[0] in self.SUBJECTINDEX:
fm_new.SUBJECTINDEX[:] = self.SUBJECTINDEX.max()+1
# Concatenate fields
for field in self._fields:
self.__dict__[field] = ma.hstack((self.__dict__[field],
fm_new.__dict__[field]))
# Update _num_fix
self._num_fix += fm_new._num_fix
|
[
"def",
"join",
"(",
"self",
",",
"fm_new",
",",
"minimal_subset",
"=",
"True",
")",
":",
"# Check if parameters are equal. If not, promote them to fields.",
"'''\n for (nm, val) in fm_new._parameters.items():\n if self._parameters.has_key(nm):\n if (val != self._parameters[nm]):\n self.parameter_to_field(nm)\n fm_new.parameter_to_field(nm)\n else:\n fm_new.parameter_to_field(nm)\n '''",
"# Deal with mismatch in the fields",
"# First those in self that do not exist in new...",
"orig_fields",
"=",
"self",
".",
"_fields",
"[",
":",
"]",
"for",
"field",
"in",
"orig_fields",
":",
"if",
"not",
"field",
"in",
"fm_new",
".",
"_fields",
":",
"if",
"minimal_subset",
":",
"self",
".",
"rm_field",
"(",
"field",
")",
"else",
":",
"warnings",
".",
"warn",
"(",
"\"This option is deprecated. Clean and Filter your data before it is joined.\"",
",",
"DeprecationWarning",
")",
"fm_new",
".",
"add_field_like",
"(",
"field",
",",
"self",
".",
"field",
"(",
"field",
")",
")",
"# ... then those in the new that do not exist in self.",
"orig_fields",
"=",
"fm_new",
".",
"_fields",
"[",
":",
"]",
"for",
"field",
"in",
"orig_fields",
":",
"if",
"not",
"field",
"in",
"self",
".",
"_fields",
":",
"if",
"minimal_subset",
":",
"fm_new",
".",
"rm_field",
"(",
"field",
")",
"else",
":",
"warnings",
".",
"warn",
"(",
"\"This option is deprecated. Clean and Filter your data before it is joined.\"",
",",
"DeprecationWarning",
")",
"self",
".",
"add_field_like",
"(",
"field",
",",
"fm_new",
".",
"field",
"(",
"field",
")",
")",
"if",
"'SUBJECTINDEX'",
"in",
"self",
".",
"_fields",
"[",
":",
"]",
":",
"if",
"fm_new",
".",
"SUBJECTINDEX",
"[",
"0",
"]",
"in",
"self",
".",
"SUBJECTINDEX",
":",
"fm_new",
".",
"SUBJECTINDEX",
"[",
":",
"]",
"=",
"self",
".",
"SUBJECTINDEX",
".",
"max",
"(",
")",
"+",
"1",
"# Concatenate fields",
"for",
"field",
"in",
"self",
".",
"_fields",
":",
"self",
".",
"__dict__",
"[",
"field",
"]",
"=",
"ma",
".",
"hstack",
"(",
"(",
"self",
".",
"__dict__",
"[",
"field",
"]",
",",
"fm_new",
".",
"__dict__",
"[",
"field",
"]",
")",
")",
"# Update _num_fix",
"self",
".",
"_num_fix",
"+=",
"fm_new",
".",
"_num_fix"
] |
Adds content of a new Datamat to this Datamat.
If a parameter of the Datamats is not equal or does not exist
in one, it is promoted to a field.
If the two Datamats have different fields then the elements for the
Datamats that did not have the field will be NaN, unless
'minimal_subset' is true, in which case the mismatching fields will
simply be deleted.
Parameters
fm_new : instance of Datamat
This Datamat is added to the current one.
minimal_subset : if true, remove fields which don't exist in both,
instead of using NaNs for missing elements (defaults to True)
Capacity to use superset of fields added by rmuil 2012/01/30
|
[
"Adds",
"content",
"of",
"a",
"new",
"Datamat",
"to",
"this",
"Datamat",
"."
] |
train
|
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/datamat.py#L466-L526
|
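A minimal sketch for Datamat.join; dm_a and dm_b are hypothetical Datamats. Note that in this version the parameter promotion described in the docstring is commented out, and the default minimal_subset=True silently drops fields that do not exist in both Datamats:

dm_a.join(dm_b)   # dm_a is extended in place; dm_b may be modified too
# shared fields are now hstack-ed masked arrays of the combined length,
# and dm_a's element count reflects both Datamats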
nwilming/ocupy
|
ocupy/simulator.py
|
makeAngLenHist
|
def makeAngLenHist(ad, ld, fm = None, collapse=True, fit=spline_base.fit2d):
"""
Histograms and performs a spline fit on the given data,
usually angle and length differences.
Parameters:
ad : array
The data to be histogrammed along the x-axis.
May range from -180 to 180.
ld : array
The data to be histogrammed along the y-axis.
May range from -36 to 36.
collapse : boolean
    If true, the histogram will be collapsed along
    x = 0, and thus contain only positive angle
    differences. Else, the histogrammed data will
    include negative values on the x-axis.
fit : function or None, optional
The function to use in order to fit the data.
If no fit should be applied, set to None
fm : fixmat or None, optional
If given, the angle and length differences are calculated
from the fixmat and the previous parameters are overwritten.
"""
if fm:
ad,ld = anglendiff(fm, roll=2)
ad, ld = ad[0], ld[0]
ld = ld[~np.isnan(ld)]
ad = reshift(ad[~np.isnan(ad)])
if collapse:
e_y = np.linspace(-36.5, 36.5, 74)
e_x = np.linspace(0, 180, 181)
H = makeHist(abs(ad), ld, fit=fit, bins=[e_y, e_x])
H = H/H.sum()
return H
else:
e_x = np.linspace(-180, 180, 361)
e_y = np.linspace(-36.5, 36.5, 74)
return makeHist(ad, ld, fit=fit, bins=[e_y, e_x])
|
python
|
def makeAngLenHist(ad, ld, fm = None, collapse=True, fit=spline_base.fit2d):
"""
Histograms and performs a spline fit on the given data,
usually angle and length differences.
Parameters:
ad : array
The data to be histogrammed along the x-axis.
May range from -180 to 180.
ld : array
The data to be histogrammed along the y-axis.
May range from -36 to 36.
collapse : boolean
    If true, the histogram will be collapsed along
    x = 0, and thus contain only positive angle
    differences. Else, the histogrammed data will
    include negative values on the x-axis.
fit : function or None, optional
The function to use in order to fit the data.
If no fit should be applied, set to None
fm : fixmat or None, optional
If given, the angle and length differences are calculated
from the fixmat and the previous parameters are overwritten.
"""
if fm:
ad,ld = anglendiff(fm, roll=2)
ad, ld = ad[0], ld[0]
ld = ld[~np.isnan(ld)]
ad = reshift(ad[~np.isnan(ad)])
if collapse:
e_y = np.linspace(-36.5, 36.5, 74)
e_x = np.linspace(0, 180, 181)
H = makeHist(abs(ad), ld, fit=fit, bins=[e_y, e_x])
H = H/H.sum()
return H
else:
e_x = np.linspace(-180, 180, 361)
e_y = np.linspace(-36.5, 36.5, 74)
return makeHist(ad, ld, fit=fit, bins=[e_y, e_x])
|
[
"def",
"makeAngLenHist",
"(",
"ad",
",",
"ld",
",",
"fm",
"=",
"None",
",",
"collapse",
"=",
"True",
",",
"fit",
"=",
"spline_base",
".",
"fit2d",
")",
":",
"if",
"fm",
":",
"ad",
",",
"ld",
"=",
"anglendiff",
"(",
"fm",
",",
"roll",
"=",
"2",
")",
"ad",
",",
"ld",
"=",
"ad",
"[",
"0",
"]",
",",
"ld",
"[",
"0",
"]",
"ld",
"=",
"ld",
"[",
"~",
"np",
".",
"isnan",
"(",
"ld",
")",
"]",
"ad",
"=",
"reshift",
"(",
"ad",
"[",
"~",
"np",
".",
"isnan",
"(",
"ad",
")",
"]",
")",
"if",
"collapse",
":",
"e_y",
"=",
"np",
".",
"linspace",
"(",
"-",
"36.5",
",",
"36.5",
",",
"74",
")",
"e_x",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"180",
",",
"181",
")",
"H",
"=",
"makeHist",
"(",
"abs",
"(",
"ad",
")",
",",
"ld",
",",
"fit",
"=",
"fit",
",",
"bins",
"=",
"[",
"e_y",
",",
"e_x",
"]",
")",
"H",
"=",
"H",
"/",
"H",
".",
"sum",
"(",
")",
"return",
"H",
"else",
":",
"e_x",
"=",
"np",
".",
"linspace",
"(",
"-",
"180",
",",
"180",
",",
"361",
")",
"e_y",
"=",
"np",
".",
"linspace",
"(",
"-",
"36.5",
",",
"36.5",
",",
"74",
")",
"return",
"makeHist",
"(",
"ad",
",",
"ld",
",",
"fit",
"=",
"fit",
",",
"bins",
"=",
"[",
"e_y",
",",
"e_x",
"]",
")"
] |
Histograms and performs a spline fit on the given data,
usually angle and length differences.
Parameters:
ad : array
The data to be histogrammed along the x-axis.
May range from -180 to 180.
ld : array
The data to be histogrammed along the y-axis.
May range from -36 to 36.
collapse : boolean
If true, the histogram will be collapsed along
x = 0, and thus contain only positive angle
differences. Else, the histogrammed data will
include negative values on the x-axis.
fit : function or None, optional
The function to use in order to fit the data.
If no fit should be applied, set to None
fm : fixmat or None, optional
If given, the angle and length differences are calculated
from the fixmat and the previous parameters are overwritten.
|
[
"Histograms",
"and",
"performs",
"a",
"spline",
"fit",
"on",
"the",
"given",
"data",
"usually",
"angle",
"and",
"length",
"differences",
".",
"Parameters",
":",
"ad",
":",
"array",
"The",
"data",
"to",
"be",
"histogrammed",
"along",
"the",
"x",
"-",
"axis",
".",
"May",
"range",
"from",
"-",
"180",
"to",
"180",
".",
"ld",
":",
"array",
"The",
"data",
"to",
"be",
"histogrammed",
"along",
"the",
"y",
"-",
"axis",
".",
"May",
"range",
"from",
"-",
"36",
"to",
"36",
".",
"collapse",
":",
"boolean",
"If",
"true",
"the",
"histogrammed",
"data",
"will",
"include",
"negative",
"values",
"on",
"the",
"x",
"-",
"axis",
".",
"Else",
"the",
"histogram",
"will",
"be",
"collapsed",
"along",
"x",
"=",
"0",
"and",
"thus",
"contain",
"only",
"positive",
"angle",
"differences",
"fit",
":",
"function",
"or",
"None",
"optional",
"The",
"function",
"to",
"use",
"in",
"order",
"to",
"fit",
"the",
"data",
".",
"If",
"no",
"fit",
"should",
"be",
"applied",
"set",
"to",
"None",
"fm",
":",
"fixmat",
"or",
"None",
"optional",
"If",
"given",
"the",
"angle",
"and",
"length",
"differences",
"are",
"calculated",
"from",
"the",
"fixmat",
"and",
"the",
"previous",
"parameters",
"are",
"overwritten",
"."
] |
train
|
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/simulator.py#L329-L372
|
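A minimal sketch for makeAngLenHist; fm is a hypothetical fixmat. When fm is given, the function computes the angle and length differences itself, so the ad and ld arguments are effectively ignored:

H = makeAngLenHist(None, None, fm=fm)   # collapsed, normalized 2D histogram
# or compute the differences explicitly and skip the spline fit:
ad, ld = anglendiff(fm, roll=2)
H2 = makeAngLenHist(ad[0], ld[0], collapse=False, fit=None)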