def add_months(months, timestamp=None):
"""Add a number of months to a timestamp (defaults to the current UTC time)"""
# Default to "now" at call time; a datetime.datetime.utcnow() default argument
# would be evaluated only once, when the function is defined
if timestamp is None:
timestamp = datetime.datetime.utcnow()
month = timestamp.month
new_month = month + months
years = 0
while new_month < 1:
new_month += 12
years -= 1
while new_month > 12:
new_month -= 12
years += 1
# month = timestamp.month
year = timestamp.year + years
try:
return datetime.datetime(year, new_month, timestamp.day, timestamp.hour, timestamp.minute, timestamp.second)
except ValueError:
# This means that the day exceeds the last day of the month, i.e. it is 30th March, and we are finding the day
# 1 month ago, and it is trying to return 30th February
if months > 0:
# We are adding, so use the first day of the next month
new_month += 1
if new_month > 12:
new_month -= 12
year += 1
return datetime.datetime(year, new_month, 1, timestamp.hour, timestamp.minute, timestamp.second)
else:
# We are subtracting - use the last day of the same month
new_day = calendar.monthrange(year, new_month)[1]
return datetime.datetime(year, new_month, new_day, timestamp.hour, timestamp.minute, timestamp.second) |
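A minimal usage sketch for add_months, assuming the function above is in scope together with the datetime and calendar imports it relies on; the 31 January case exercises the month-end handling branches.

import datetime

start = datetime.datetime(2021, 1, 31, 12, 0, 0)
# Adding: "31 February" does not exist, so the result rolls forward to 1 March
print(add_months(1, start))                            # 2021-03-01 12:00:00
# Subtracting: the day clamps to the last day of the target month
print(add_months(-1, datetime.datetime(2021, 3, 31)))  # 2021-02-28 00:00:00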
def add_months_to_date(months, date):
"""Add a number of months to a date"""
month = date.month
new_month = month + months
years = 0
while new_month < 1:
new_month += 12
years -= 1
while new_month > 12:
new_month -= 12
years += 1
# month = timestamp.month
year = date.year + years
try:
return datetime.date(year, new_month, date.day)
except ValueError:
# This means that the day exceeds the last day of the month, i.e. it is 30th March, and we are finding the day
# 1 month ago, and it is trying to return 30th February
if months > 0:
# We are adding, so use the first day of the next month
new_month += 1
if new_month > 12:
new_month -= 12
year += 1
return datetime.date(year, new_month, 1)
else:
# We are subtracting - use the last day of the same month
new_day = calendar.monthrange(year, new_month)[1]
return datetime.date(year, new_month, new_day) |
def unix_time(dt=None, as_int=False):
"""Generate a unix style timestamp (in seconds)"""
if dt is None:
dt = datetime.datetime.utcnow()
if type(dt) is datetime.date:
dt = date_to_datetime(dt)
epoch = datetime.datetime.utcfromtimestamp(0)
delta = dt - epoch
if as_int:
return int(delta.total_seconds())
return delta.total_seconds() |
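A quick sketch of unix_time, assuming the datetime import used above; the returned value is the number of seconds between the argument and the UTC epoch.

import datetime

print(unix_time(datetime.datetime(1970, 1, 2), as_int=True))  # 86400, exactly one day after the epoch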
def is_christmas_period():
"""Is this the christmas period?"""
now = datetime.date.today()
if now.month != 12:
return False
if now.day < 15:
return False
if now.day > 27:
return False
return True |
def get_end_of_day(timestamp):
"""
Given a date or a datetime, return a datetime at 23:59:59 on that day
"""
return datetime.datetime(timestamp.year, timestamp.month, timestamp.day, 23, 59, 59) |
def transform(self, X):
'''
:param X: features.
'''
inverse_transformer = self.dict_vectorizer_
if self.feature_selection:
inverse_transformer = self.clone_dict_vectorizer_
return inverse_transformer.inverse_transform(
self.transformer.transform(
self.dict_vectorizer_.transform(X))) |
def use_music_service(self, service_name, api_key):
"""
Sets the current music service to service_name.
:param str service_name: Name of the music service
:param str api_key: Optional API key if necessary
"""
try:
self.current_music = self.music_services[service_name]
except KeyError:
if service_name == 'youtube':
self.music_services['youtube'] = Youtube()
self.current_music = self.music_services['youtube']
elif service_name == 'soundcloud':
self.music_services['soundcloud'] = Soundcloud(api_key=api_key)
self.current_music = self.music_services['soundcloud']
else:
log.error('Music service name is not recognized.') |
def use_storage_service(self, service_name, custom_path):
"""
Sets the current storage service to service_name and runs the connect method on the service.
:param str service_name: Name of the storage service
:param str custom_path: Custom path where to download tracks for local storage (optional, and must already exist, use absolute paths only)
"""
try:
self.current_storage = self.storage_services[service_name]
except KeyError:
if service_name == 'google drive':
self.storage_services['google drive'] = GoogleDrive()
self.current_storage = self.storage_services['google drive']
self.current_storage.connect()
elif service_name == 'dropbox':
log.error('Dropbox is not supported yet.')
elif service_name == 'local':
self.storage_services['local'] = LocalStorage(custom_path=custom_path)
self.current_storage = self.storage_services['local']
self.current_storage.connect()
else:
log.error('Storage service name is not recognized.') |
def from_csv(self, label_column='labels'):
'''
Read dataset from csv.
'''
df = pd.read_csv(self.path, header=0)
X = df.loc[:, df.columns != label_column].to_dict('records')
X = map_dict_list(X, if_func=lambda k, v: v and math.isfinite(v))
y = list(df[label_column].values)
return X, y |
def from_json(self):
'''
Reads dataset from json.
'''
with gzip.open('%s.gz' % self.path,
'rt') if self.gz else open(self.path) as file:
return list(map(list, zip(*json.load(file))))[::-1] |
def to_json(self, X, y):
'''
Writes dataset to json.
:param X: dataset as list of dict.
:param y: labels.
'''
with gzip.open('%s.gz' % self.path, 'wt') if self.gz else open(
self.path, 'w') as file:
json.dump(list(zip(y, X)), file) |
def filter_by_label(X, y, ref_label, reverse=False):
'''
Select items with label from dataset.
:param X: dataset
:param y: labels
:param ref_label: reference label
:param bool reverse: if False, keep items whose label equals ref_label; if True, eliminate them
'''
check_reference_label(y, ref_label)
return list(zip(*filter(lambda t: (not reverse) == (t[1] == ref_label),
zip(X, y)))) |
def average_by_label(X, y, ref_label):
'''
Calculates the average dictionary from a list of dictionaries for the given label
:param List[Dict] X: dataset
:param list y: labels
:param ref_label: reference label
'''
# TODO: consider to delete defaultdict
return defaultdict(float,
pd.DataFrame.from_records(
filter_by_label(X, y, ref_label)[0]
).mean().to_dict()) |
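A small illustration of filter_by_label and average_by_label together, assuming the module's own imports (pandas, collections.defaultdict) and the check_reference_label helper defined below are available.

X = [{'a': 1.0, 'b': 2.0}, {'a': 3.0, 'b': 4.0}, {'a': 10.0, 'b': 0.0}]
y = ['pos', 'pos', 'neg']

X_pos, y_pos = filter_by_label(X, y, 'pos')
print(len(X_pos))                            # 2 items carry the 'pos' label
print(dict(average_by_label(X, y, 'pos')))   # {'a': 2.0, 'b': 3.0}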
def map_dict(d, key_func=None, value_func=None, if_func=None):
'''
:param dict d: dictionary
:param func key_func: func which will run on key.
:param func value_func: func which will run on values.
:param func if_func: predicate run on each (key, value) pair; items where it returns False are dropped.
'''
key_func = key_func or (lambda k, v: k)
value_func = value_func or (lambda k, v: v)
if_func = if_func or (lambda k, v: True)
return {
key_func(*k_v): value_func(*k_v)
for k_v in d.items() if if_func(*k_v)
} |
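A usage sketch for map_dict: the if_func predicate filters items while key_func and value_func rewrite the surviving pairs.

d = {'a': 1, 'b': 0, 'c': 3}
# Keep only items with a positive value and double the values that remain
print(map_dict(d, value_func=lambda k, v: v * 2, if_func=lambda k, v: v > 0))
# {'a': 2, 'c': 6}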
def map_dict_list(ds, key_func=None, value_func=None, if_func=None):
'''
:param List[Dict] ds: list of dict
:param func key_func: func which will run on key.
:param func value_func: func which will run on values.
'''
return [map_dict(d, key_func, value_func, if_func) for d in ds] |
def check_reference_label(y, ref_label):
'''
:param list y: label
:param ref_label: reference label
'''
set_y = set(y)
if ref_label not in set_y:
raise ValueError('The reference label is not in the dataset. '
"Reference label: '%s' "
'Labels in dataset: %s' % (ref_label, set_y)) |
def feature_importance_report(X,
y,
threshold=0.001,
correcting_multiple_hypotesis=True,
method='fdr_bh',
alpha=0.1,
sort_by='pval'):
'''
Provide significance for features in the dataset with ANOVA, using multiple hypothesis testing
:param X: List of dict with key as feature names and values as features
:param y: Labels
:param threshold: Low-variance threshold to eliminate low-variance features
:param correcting_multiple_hypotesis: corrects p-values with multiple hypothesis testing
:param method: method of multiple hypothesis testing
:param alpha: alpha of multiple hypothesis testing
:param sort_by: sorts output dataframe by pval or F
:return: DataFrame with F and pval for each feature with their average values
'''
df = variance_threshold_on_df(
pd.DataFrame.from_records(X), threshold=threshold)
F, pvals = f_classif(df.values, y)
if correcting_multiple_hypotesis:
_, pvals, _, _ = multipletests(pvals, alpha=alpha, method=method)
df['labels'] = y
df_mean = df.groupby('labels').mean().T
df_mean['F'] = F
df_mean['pval'] = pvals
return df_mean.sort_values(sort_by, ascending=True) |
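A toy call of feature_importance_report, assuming the module's dependencies (pandas, sklearn's f_classif, statsmodels' multipletests and the variance_threshold_on_df helper) are importable; here f1 separates the two classes while f2 does not, so f1 should come back with the smaller p-value.

X = [{'f1': 1.0, 'f2': 0.1}, {'f1': 1.1, 'f2': 0.3},
     {'f1': 5.0, 'f2': 0.2}, {'f1': 5.2, 'f2': 0.25}]
y = ['a', 'a', 'b', 'b']

report = feature_importance_report(X, y)
print(report[['F', 'pval']])  # one row per surviving feature, sorted by p-value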
def restore_data(self, data_dict):
"""
Restore the data dict - update the flask session and this object
"""
session[self._base_key] = data_dict
self._data_dict = session[self._base_key] |
def _mergedict(a, b):
"""Recusively merge the 2 dicts.
Destructive on argument 'a'.
"""
for p, d1 in b.items():
if p in a:
if not isinstance(d1, dict):
continue
_mergedict(a[p], d1)
else:
a[p] = d1
return a |
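A short example of _mergedict's behaviour, assuming the function above: keys only present in b are copied across, nested dicts are merged recursively, and leaf values already present in a are kept.

defaults = {'server': {'host': 'localhost', 'port': 80}, 'debug': False}
overrides = {'server': {'port': 8080, 'tls': True}, 'extra': 1}
print(_mergedict(defaults, overrides))
# {'server': {'host': 'localhost', 'port': 80, 'tls': True}, 'debug': False, 'extra': 1}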
def multi(dispatch_fn, default=None):
"""A decorator for a function to dispatch on.
The value returned by the dispatch function is used to look up the
implementation function based on its dispatch key.
The original dispatch function remains available on the returned wrapper as ``__dispatch_fn__``.
"""
def _inner(*args, **kwargs):
dispatch_value = dispatch_fn(*args, **kwargs)
f = _inner.__multi__.get(dispatch_value, _inner.__multi_default__)
if f is None:
raise Exception(
f"No implementation of {dispatch_fn.__name__} "
f"for dispatch value {dispatch_value}"
)
return f(*args, **kwargs)
_inner.__multi__ = {}
_inner.__multi_default__ = default
_inner.__dispatch_fn__ = dispatch_fn
return _inner |
def method(dispatch_fn, dispatch_key=None):
"""A decorator for a function implementing dispatch_fn for dispatch_key.
If no dispatch_key is specified, the function is registered as the
default implementation.
"""
def apply_decorator(fn):
if dispatch_key is None:
# Default case
dispatch_fn.__multi_default__ = fn
else:
dispatch_fn.__multi__[dispatch_key] = fn
return fn
return apply_decorator |
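One possible way to wire multi and method together (a sketch, not taken from the source): the dispatch function returns a key, and each implementation is registered against a key. The implementation functions get their own names here because method returns the implementation rather than the dispatcher.

@multi
def area(shape):
    return shape.get('type')

@method(area, 'circle')
def _circle_area(shape):
    return 3.14159 * shape['radius'] ** 2

@method(area, 'square')
def _square_area(shape):
    return shape['side'] ** 2

print(area({'type': 'circle', 'radius': 2}))  # 12.56636
print(area({'type': 'square', 'side': 3}))    # 9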
def find_blocks():
"""
Auto-discover INSTALLED_APPS registered_blocks.py modules and fail
silently when not present. This forces an import on them thereby
registering their blocks.
This is a near 1-to-1 copy of how django's admin application registers
models.
"""
for app in settings.INSTALLED_APPS:
mod = import_module(app)
# Attempt to import the app's registered_blocks module.
try:
before_import_block_registry = copy.copy(
block_registry._registry
)
import_module('{}.registered_blocks'.format(app))
except:
# Reset the block_registry to the state before the last
# import as this import will have to reoccur on the next request
# and this could raise NotRegistered and AlreadyRegistered
# exceptions (see django ticket #8245).
block_registry._registry = before_import_block_registry
# Decide whether to bubble up this error. If the app just
# doesn't have a stuff module, we can ignore the error
# attempting to import it, otherwise we want it to bubble up.
if module_has_submodule(mod, 'registered_blocks'):
raise |
def _verify_block(self, block_type, block):
"""
Verifies a block prior to registration.
"""
if block_type in self._registry:
raise AlreadyRegistered(
"A block has already been registered to the {} `block_type` "
"in the registry. Either unregister that block before trying "
"to register this block under a different `block_type`".format(
block_type
)
)
if not isinstance(block, Block):
raise InvalidBlock(
"The block you tried register to {} is invalid. Only "
"instances of `wagtail.wagtailcore.blocks.Block` may be "
"registered with the the block_registry.".format(block_type)
) |
def register_block(self, block_type, block):
"""
Registers `block` to `block_type` in the registry.
"""
self._verify_block(block_type, block)
self._registry[block_type] = block |
def unregister_block(self, block_type):
"""
Unregisters the block associated with `block_type` from the registry.
If no block is registered to `block_type`, NotRegistered will be raised.
"""
if block_type not in self._registry:
raise NotRegistered(
'There is no block registered as "{}" with the '
'RegisteredBlockStreamFieldRegistry registry.'.format(
block_type
)
)
else:
del self._registry[block_type] |
def convert_to_mp3(file_name, delete_queue):
"""
Converts the file associated with the file_name passed into a MP3 file.
:param str file_name: Filename of the original file in local storage
:param Queue delete_queue: Delete queue to add the original file to after conversion is done
:return str: Filename of the new file in local storage
"""
file = os.path.splitext(file_name)
if file[1] == '.mp3':
log.info(f"{file_name} is already a MP3 file, no conversion needed.")
return file_name
new_file_name = file[0] + '.mp3'
ff = FFmpeg(
inputs={file_name: None},
outputs={new_file_name: None}
)
log.info(f"Conversion for {file_name} has started")
start_time = time()
try:
ff.run(stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
except FFRuntimeError:
os.remove(new_file_name)
ff.run(stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
end_time = time()
log.info(f"Conversion for {file_name} has finished in {end_time - start_time} seconds")
delete_queue.put(file_name)
return new_file_name |
def delete_local_file(file_name):
"""
Deletes the file associated with the file_name passed from local storage.
:param str file_name: Filename of the file to be deleted
:return str: Filename of the file that was just deleted
"""
try:
os.remove(file_name)
log.info(f"Deletion for {file_name} has finished")
return file_name
except OSError:
pass |
def cli(*args, **kwargs):
"""
General-purpose automation tool.
See `GitHub <https://github.com/littlemo/mohand>`_ for details
"""
log.debug('cli: {} {}'.format(args, kwargs))
# Update the config values in env with the options passed in from the command line
env.update(kwargs) |
def _is_package(path):
"""
Determine whether the given path is a Python package
:param str path: the path to check
:return: True if path is a Python package, otherwise False
:rtype: bool
"""
def _exists(s):
return os.path.exists(os.path.join(path, s))
return (
os.path.isdir(path) and
(_exists('__init__.py') or _exists('__init__.pyc'))
) |
def find_handfile(names=None):
"""
Try to locate the ``handfile``, either from an explicit setting or by searching parent directories level by level
:param str names: optional file name(s) to search for, mainly for debugging; defaults to the value passed in from the command line
:return: absolute path of the ``handfile``, or None if it cannot be found
:rtype: str
"""
# If not explicitly specified, use the value from env
names = names or [env.handfile]
# If the first name has no ``.py`` extension, append a ``.py`` variant to names as another candidate
if not names[0].endswith('.py'):
names += [names[0] + '.py']
# Does name contain a path component?
if os.path.dirname(names[0]):
# If so, expand the home-directory marker and test whether the file exists
for name in names:
expanded = os.path.expanduser(name)
if os.path.exists(expanded):
if name.endswith('.py') or _is_package(expanded):
return os.path.abspath(expanded)
else:
# Otherwise, search upwards level by level towards the root path
path = '.'
# Stop before reaching the filesystem root
while os.path.split(os.path.abspath(path))[1]:
for name in names:
joined = os.path.join(path, name)
if os.path.exists(joined):
if name.endswith('.py') or _is_package(joined):
return os.path.abspath(joined)
path = os.path.join('..', path)
return None |
def get_commands_from_module(imported):
"""
Collect all ``click.core.Command`` objects from the given ``imported`` module
:param module imported: the imported Python module
:return: the module docstring, and a dict of the objects judged to be CLI commands
:rtype: (str, dict(str, object))
"""
# Honour <module>.__all__ if it is present
imported_vars = vars(imported)
if "__all__" in imported_vars:
imported_vars = [
(name, imported_vars[name]) for name in
imported_vars if name in imported_vars["__all__"]]
else:
imported_vars = imported_vars.items()
cmd_dict = extract_commands(imported_vars)
return imported.__doc__, cmd_dict |
def extract_commands(imported_vars):
"""
Extract command ( ``click.core.Command`` ) objects from the given variable list
:param dict_items imported_vars: the (name, value) items of a dict
:return: a dict of the objects judged to be CLI commands
:rtype: dict(str, object)
"""
commands = dict()
for tup in imported_vars:
name, obj = tup
if is_command_object(obj):
commands.setdefault(name, obj)
return commands |
def load_handfile(path, importer=None):
"""
Import the given ``handfile`` path and return (docstring, callables).
That is, the handfile module's ``__doc__`` attribute (a string) and a ``{'name': callable}``
dict containing all callables which pass mohand's command test
:param str path: path of the handfile to import
:param function importer: optional import function, defaults to ``__import__``
:return: the module docstring, and a dict of the objects judged to be CLI commands
:rtype: (str, dict(str, object))
"""
if importer is None:
importer = __import__
# Split into directory and file name
directory, handfile = os.path.split(path)
# If the directory is not on ``PYTHONPATH``, add it so that our import works
added_to_path = False
index = None
if directory not in sys.path:
sys.path.insert(0, directory)
added_to_path = True
# If the directory is already on ``PYTHONPATH``, temporarily move it to the front;
# otherwise another ``handfile`` could be imported ahead of the one we want
else:
i = sys.path.index(directory)
if i != 0:
# Save the index so the original position can be restored later
index = i
# Insert at the front, then remove the original entry
sys.path.insert(0, directory)
del sys.path[i + 1]
# Perform the import (with the .py extension stripped)
sys_byte_code_bak = sys.dont_write_bytecode
sys.dont_write_bytecode = True
imported = importer(os.path.splitext(handfile)[0])
sys.dont_write_bytecode = sys_byte_code_bak
# Remove the path we added to ``PYTHONPATH`` ourselves
# (purely for tidiness, to avoid polluting ``PYTHONPATH``)
if added_to_path:
del sys.path[0]
# Put the path we moved back in its original position
if index is not None:
sys.path.insert(index + 1, directory)
del sys.path[0]
# Actually load the Commands
docstring, commands = get_commands_from_module(imported)
return docstring, commands |
def reasonable_desired_version(self, desired_version, allow_equal=False,
allow_patch_skip=False):
"""
Determine whether the desired version is a reasonable next version.
Parameters
----------
desired_version: str
the proposed next version name
"""
try:
desired_version = desired_version.base_version
except:
pass
(new_major, new_minor, new_patch) = \
map(int, desired_version.split('.'))
tag_versions = self._versions_from_tags()
if not tag_versions:
# no tags yet, and legal version is legal!
return ""
max_version = max(self._versions_from_tags()).base_version
(old_major, old_minor, old_patch) = \
map(int, str(max_version).split('.'))
update_str = str(max_version) + " -> " + str(desired_version)
v_desired = vers.Version(desired_version)
v_max = vers.Version(max_version)
if allow_equal and v_desired == v_max:
return ""
if v_desired < v_max:
return ("Bad update: New version doesn't increase on last tag: "
+ update_str + "\n")
bad_update = skipped_version((old_major, old_minor, old_patch),
(new_major, new_minor, new_patch),
allow_patch_skip)
msg = ""
if bad_update:
msg = ("Bad update: Did you skip a version from "
+ update_str + "?\n")
return msg |
def handle_ssl_redirect():
"""
Check if a route needs SSL and redirect to HTTPS if so. Also redirects back to HTTP for non-SSL routes. Static routes
are served over both HTTP and HTTPS
:return: A response to be returned or None
"""
if request.endpoint and request.endpoint not in ['static', 'filemanager.static']:
needs_ssl = False
ssl_enabled = False
view_function = current_app.view_functions[request.endpoint]
if request.endpoint.startswith('admin.') or \
(hasattr(view_function, 'ssl_required') and view_function.ssl_required):
needs_ssl = True
ssl_enabled = True
if hasattr(view_function, 'ssl_allowed') and view_function.ssl_allowed:
ssl_enabled = True
if (hasattr(view_function, 'ssl_disabled') and view_function.ssl_disabled):
needs_ssl = False
ssl_enabled = False
if current_app.config['SSL_ENABLED']:
if needs_ssl and not request.is_secure:
log.debug('Redirecting to https: %s' % request.endpoint)
return redirect(request.url.replace("http://", "https://"))
elif not ssl_enabled and request.is_secure:
log.debug('Redirecting to http: %s' % request.endpoint)
return redirect(request.url.replace("https://", "http://"))
elif needs_ssl:
log.info('Not redirecting to HTTPS for endpoint %s as SSL_ENABLED is set to False' % request.endpoint) |
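A sketch of how this hook might be attached to a Flask app (an assumption about usage, not shown in the source): registering it with before_request lets the returned redirect short-circuit normal view dispatch.

from flask import Flask

app = Flask(__name__)
app.config['SSL_ENABLED'] = True

# Any response returned from a before_request handler (here, the redirect)
# is sent to the client instead of calling the view.
app.before_request(handle_ssl_redirect)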
def init(app):
"""
Initialise this library. The following config variables need to be in your Flask config:
REDIS_HOST: The host of the Redis server
REDIS_PORT: The port of the Redis server
REDIS_PASSWORD: The password used to connect to Redis or None
REDIS_GLOBAL_KEY_PREFIX: A short string unique to your application i.e. 'MYAPP'. This will be turned into
a prefix like '~~MYAPP~~:' and will be used to allow multiple applications to share
a single redis server
REDIS_LOCK_TIMEOUT: An integer with the number of seconds to wait before automatically releasing a lock.
A good number is 60 * 5 for 5 minutes. This stops locks from being held indefinitely
if something goes wrong, but bear in mind this can also cause concurrency issues if
you have a locking process that takes longer than this timeout!
"""
global connection, LOCK_TIMEOUT, GLOBAL_KEY_PREFIX
host = app.config['REDIS_HOST']
port = app.config['REDIS_PORT']
password = app.config['REDIS_PASSWORD']
GLOBAL_KEY_PREFIX = '~~{}~~:'.format(app.config['REDIS_GLOBAL_KEY_PREFIX'])
LOCK_TIMEOUT = app.config['REDIS_LOCK_TIMEOUT']
connection = redis.StrictRedis(host=host, port=port, password=password) |
def get_enable_celery_error_reporting_function(site_name, from_address):
"""
Use this to enable error reporting. You need to put the following in your tasks.py or wherever you
want to create your celery instance:
celery = Celery(__name__)
enable_celery_email_logging = get_enable_celery_error_reporting_function('My Website [LIVE]', 'errors@mywebsite.com')
after_setup_logger.connect(enable_celery_email_logging)
after_setup_task_logger.connect(enable_celery_email_logging)
"""
def enable_celery_email_logging(sender, signal, logger, loglevel, logfile, format, colorize, **kwargs):
from celery import current_app
log.info('>> Initialising Celery task error reporting for logger {}'.format(logger.name))
send_errors = current_app.conf['CELERY_SEND_TASK_ERROR_EMAILS']
send_warnings = current_app.conf['CELERY_SEND_TASK_WARNING_EMAILS']
if send_errors or send_warnings:
error_email_subject = '{} Celery ERROR!'.format(site_name)
celery_handler = CeleryEmailHandler(from_address, current_app.conf['ADMIN_EMAILS'],
error_email_subject)
if send_warnings:
celery_handler.setLevel(logging.WARNING)
else:
celery_handler.setLevel(logging.ERROR)
logger.addHandler(celery_handler)
return enable_celery_email_logging |
def init_celery(app, celery):
"""
Initialise Celery and set up logging
:param app: Flask app
:param celery: Celery instance
"""
celery.conf.update(app.config)
TaskBase = celery.Task
class ContextTask(TaskBase):
abstract = True
def __call__(self, *args, **kwargs):
with app.app_context():
return TaskBase.__call__(self, *args, **kwargs)
celery.Task = ContextTask
return celery |
def queue_email(to_addresses, from_address, subject, body, commit=True, html=True, session=None):
"""
Add a mail to the queue to be sent.
WARNING: Commits by default!
:param to_addresses: The names and addresses to send the email to, i.e. "Steve<steve@fig14.com>, info@fig14.com"
:param from_address: Who the email is from i.e. "Stephen Brown <s@fig14.com>"
:param subject: The email subject
:param body: The html / text body of the email
:param commit: Whether to commit to the database
:param html: Is this a html email?
:param session: The sqlalchemy session or None to use db.session
"""
from models import QueuedEmail
if session is None:
session = _db.session
log.info('Queuing mail to %s: %s' % (to_addresses, subject))
queued_email = QueuedEmail(html, to_addresses, from_address, subject, body, STATUS_QUEUED)
session.add(queued_email)
session.commit()
return queued_email |
def parse_accept(header_value):
"""Parse an HTTP accept-like header.
:param str header_value: the header value to parse
:return: a :class:`list` of :class:`.ContentType` instances
in decreasing quality order. Each instance is augmented
with the associated quality as a ``float`` property
named ``quality``.
``Accept`` is a class of headers that contain a list of values
and an associated preference value. The ever present `Accept`_
header is a perfect example. It is a list of content types and
an optional parameter named ``q`` that indicates the relative
weight of a particular type. The most basic example is::
Accept: audio/*;q=0.2, audio/basic
Which states that I prefer the ``audio/basic`` content type
but will accept other ``audio`` sub-types with an 80% mark down.
.. _Accept: http://tools.ietf.org/html/rfc7231#section-5.3.2
"""
next_explicit_q = decimal.ExtendedContext.next_plus(decimal.Decimal('5.0'))
headers = [parse_content_type(header)
for header in parse_list(header_value)]
for header in headers:
q = header.parameters.pop('q', None)
if q is None:
q = '1.0'
elif float(q) == 1.0:
q = float(next_explicit_q)
next_explicit_q = next_explicit_q.next_minus()
header.quality = float(q)
def ordering(left, right):
"""
Method for sorting the header values
:param mixed left:
:param mixed right:
:rtype: mixed
"""
if left.quality != right.quality:
return right.quality - left.quality
if left == right:
return 0
if left > right:
return -1
return 1
return sorted(headers, key=functools.cmp_to_key(ordering)) |
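A usage sketch for parse_accept using the header from the docstring, assuming the ContentType instances expose content_type and content_subtype attributes as constructed above; the explicit q=0.2 entry sorts after the implicit q=1.0 one, and each result carries the derived quality attribute.

for content_type in parse_accept('audio/*;q=0.2, audio/basic'):
    print(content_type.content_type, content_type.content_subtype, content_type.quality)
# audio basic 1.0
# audio * 0.2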
def parse_cache_control(header_value):
"""
Parse a `Cache-Control`_ header, returning a dictionary of key-value pairs.
Any of the ``Cache-Control`` parameters that do not have directives, such
as ``public`` or ``no-cache`` will be returned with a value of ``True``
if they are set in the header.
:param str header_value: ``Cache-Control`` header value to parse
:return: the parsed ``Cache-Control`` header values
:rtype: dict
.. _Cache-Control: https://tools.ietf.org/html/rfc7234#section-5.2
"""
directives = {}
for segment in parse_list(header_value):
name, sep, value = segment.partition('=')
if sep != '=':
directives[name] = None
elif sep and value:
value = _dequote(value.strip())
try:
directives[name] = int(value)
except ValueError:
directives[name] = value
# NB ``name='' is never valid and is ignored!
# convert parameterless boolean directives
for name in _CACHE_CONTROL_BOOL_DIRECTIVES:
if directives.get(name, '') is None:
directives[name] = True
return directives |
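A quick sketch of parse_cache_control, assuming 'public' is among the parameterless boolean directives: valued directives are parsed (as integers where possible) and bare boolean directives come back as True.

print(parse_cache_control('public, max-age=300, s-maxage=900'))
# {'public': True, 'max-age': 300, 's-maxage': 900}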
def parse_content_type(content_type, normalize_parameter_values=True):
"""Parse a content type like header.
:param str content_type: the string to parse as a content type
:param bool normalize_parameter_values:
setting this to ``False`` will enable strict RFC2045 compliance
in which content parameter values are case preserving.
:return: a :class:`~ietfparse.datastructures.ContentType` instance
"""
parts = _remove_comments(content_type).split(';')
content_type, content_subtype = parts.pop(0).split('/')
if '+' in content_subtype:
content_subtype, content_suffix = content_subtype.split('+')
else:
content_suffix = None
parameters = _parse_parameter_list(
parts, normalize_parameter_values=normalize_parameter_values)
return datastructures.ContentType(content_type, content_subtype,
dict(parameters),
content_suffix) |
def parse_forwarded(header_value, only_standard_parameters=False):
"""
Parse RFC7239 Forwarded header.
:param str header_value: value to parse
:keyword bool only_standard_parameters: if this keyword is specified
and given a *truthy* value, then a non-standard parameter name
will result in :exc:`~ietfparse.errors.StrictHeaderParsingFailure`
:return: an ordered :class:`list` of :class:`dict` instances
:raises: :exc:`ietfparse.errors.StrictHeaderParsingFailure` is
raised if `only_standard_parameters` is enabled and a non-standard
parameter name is encountered
This function parses a :rfc:`7239` HTTP header into a :class:`list`
of :class:`dict` instances with each instance containing the param
values. The list is ordered as received from left to right and the
parameter names are folded to lower case strings.
"""
result = []
for entry in parse_list(header_value):
param_tuples = _parse_parameter_list(entry.split(';'),
normalize_parameter_names=True,
normalize_parameter_values=False)
if only_standard_parameters:
for name, _ in param_tuples:
if name not in ('for', 'proto', 'by', 'host'):
raise errors.StrictHeaderParsingFailure('Forwarded',
header_value)
result.append(dict(param_tuples))
return result |
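A usage sketch for parse_forwarded with a value taken from RFC 7239; each comma-separated element becomes one dict with lower-cased parameter names and unmodified values.

print(parse_forwarded('for=192.0.2.60;proto=http;by=203.0.113.43'))
# [{'for': '192.0.2.60', 'proto': 'http', 'by': '203.0.113.43'}]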
def parse_link(header_value, strict=True):
"""
Parse a HTTP Link header.
:param str header_value: the header value to parse
:param bool strict: set this to ``False`` to disable semantic
checking. Syntactical errors will still raise an exception.
Use this if you want to receive all parameters.
:return: a sequence of :class:`~ietfparse.datastructures.LinkHeader`
instances
:raises ietfparse.errors.MalformedLinkValue:
if the specified `header_value` cannot be parsed
"""
sanitized = _remove_comments(header_value)
links = []
def parse_links(buf):
"""
Find quoted parts, these are allowed to contain commas
however, it is much easier to parse if they do not so
replace them with \000. Since the NUL byte is not allowed
to be there, we can replace it with a comma later on.
A similar trick is performed on semicolons with \001.
:param str buf: The link buffer
:return:
"""
quoted = re.findall('"([^"]*)"', buf)
for segment in quoted:
left, match, right = buf.partition(segment)
match = match.replace(',', '\000')
match = match.replace(';', '\001')
buf = ''.join([left, match, right])
while buf:
matched = re.match(r'<(?P<link>[^>]*)>\s*(?P<params>.*)', buf)
if matched:
groups = matched.groupdict()
params, _, buf = groups['params'].partition(',')
params = params.replace('\000', ',') # undo comma hackery
if params and not params.startswith(';'):
raise errors.MalformedLinkValue(
'Param list missing opening semicolon ')
yield (groups['link'].strip(),
[p.replace('\001', ';').strip()
for p in params[1:].split(';') if p])
buf = buf.strip()
else:
raise errors.MalformedLinkValue('Malformed link header', buf)
for target, param_list in parse_links(sanitized):
parser = _helpers.ParameterParser(strict=strict)
for name, value in _parse_parameter_list(param_list):
parser.add_value(name, value)
links.append(datastructures.LinkHeader(target=target,
parameters=parser.values))
return links |
def parse_list(value):
"""
Parse a comma-separated list header.
:param str value: header value to split into elements
:return: list of header elements as strings
"""
segments = _QUOTED_SEGMENT_RE.findall(value)
for segment in segments:
left, match, right = value.partition(segment)
value = ''.join([left, match.replace(',', '\000'), right])
return [_dequote(x.strip()).replace('\000', ',')
for x in value.split(',')] |
def _parse_parameter_list(parameter_list,
normalized_parameter_values=_DEF_PARAM_VALUE,
normalize_parameter_names=False,
normalize_parameter_values=True):
"""
Parse a named parameter list in the "common" format.
:param parameter_list: sequence of string values to parse
:keyword bool normalize_parameter_names: if specified and *truthy*
then parameter names will be case-folded to lower case
:keyword bool normalize_parameter_values: if omitted or specified
as *truthy*, then parameter values are case-folded to lower case
:keyword bool normalized_parameter_values: alternate way to spell
``normalize_parameter_values`` -- this one is deprecated
:return: a sequence containing the name to value pairs
The parsed values are normalized according to the keyword parameters
and returned as :class:`tuple` of name to value pairs preserving the
ordering from `parameter_list`. The values will have quotes removed
if they were present.
"""
if normalized_parameter_values is not _DEF_PARAM_VALUE: # pragma: no cover
warnings.warn('normalized_parameter_values keyword to '
'_parse_parameter_list is deprecated, use '
'normalize_parameter_values instead',
DeprecationWarning)
normalize_parameter_values = normalized_parameter_values
parameters = []
for param in parameter_list:
param = param.strip()
if param:
name, value = param.split('=')
if normalize_parameter_names:
name = name.lower()
if normalize_parameter_values:
value = value.lower()
parameters.append((name, _dequote(value.strip())))
return parameters |
def _parse_qualified_list(value):
"""
Parse a header value, returning a sorted list of values based upon
the quality rules specified in https://tools.ietf.org/html/rfc7231 for
the Accept-* headers.
:param str value: The value to parse into a list
:rtype: list
"""
found_wildcard = False
values, rejected_values = [], []
parsed = parse_list(value)
default = float(len(parsed) + 1)
highest = default + 1.0
for raw_str in parsed:
charset, _, parameter_str = raw_str.replace(' ', '').partition(';')
if charset == '*':
found_wildcard = True
continue
params = dict(_parse_parameter_list(parameter_str.split(';')))
quality = float(params.pop('q', default))
if quality < 0.001:
rejected_values.append(charset)
elif quality == 1.0:
values.append((highest + default, charset))
else:
values.append((quality, charset))
default -= 1.0
parsed = [value[1] for value in sorted(values, reverse=True)]
if found_wildcard:
parsed.append('*')
parsed.extend(rejected_values)
return parsed |
def parse_link_header(header_value, strict=True):
"""
Parse a HTTP Link header.
:param str header_value: the header value to parse
:param bool strict: set this to ``False`` to disable semantic
checking. Syntactical errors will still raise an exception.
Use this if you want to receive all parameters.
:return: a sequence of :class:`~ietfparse.datastructures.LinkHeader`
instances
:raises ietfparse.errors.MalformedLinkValue:
if the specified `header_value` cannot be parsed
.. deprecated:: 1.3.0
Use :func:`~ietfparse.headers.parse_link` instead.
"""
warnings.warn("deprecated", DeprecationWarning)
return parse_link(header_value, strict) |
def resize_image_to_fit(image, dest_w, dest_h):
"""
Resize the image to fit inside dest rectangle. Resultant image may be smaller than target
:param image: PIL.Image
:param dest_w: Target width
:param dest_h: Target height
:return: Scaled image
"""
dest_w = float(dest_w)
dest_h = float(dest_h)
dest_ratio = dest_w / dest_h
# Calculate the aspect ratio of the image
src_w = float(image.size[0])
src_h = float(image.size[1])
src_ratio = src_w / src_h
if src_ratio < dest_ratio:
# Image is relatively tall - scale so the height fits the target exactly
scale = dest_h / src_h
scaled_h = dest_h
scaled_w = src_w * scale
else:
# Image is relatively wide - scale so the width fits the target exactly
scale = dest_w / src_w
scaled_w = dest_w
scaled_h = src_h * scale
scaled_image = image.resize((int(scaled_w), int(scaled_h)), PIL.Image.ANTIALIAS)
return scaled_image |
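A minimal sketch of resize_image_to_fit, assuming PIL/Pillow is importable and old enough to still provide PIL.Image.ANTIALIAS (used by the functions above); the wide source image is bounded by the target width and keeps its aspect ratio.

import PIL.Image

src = PIL.Image.new('RGB', (400, 200), (255, 255, 255))
fitted = resize_image_to_fit(src, 100, 100)
print(fitted.size)  # (100, 50) -- width-bound, aspect ratio preserved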
def resize_crop_image(image, dest_w, dest_h, pad_when_tall=False):
"""
:param image: PIL.Image
:param dest_w: Target width
:param dest_h: Target height
:return: Scaled and cropped image
"""
# Now we need to resize it
dest_w = float(dest_w)
dest_h = float(dest_h)
dest_ratio = dest_w / dest_h
# Calculate the aspect ratio of the image
src_w = float(image.size[0])
src_h = float(image.size[1])
src_ratio = src_w / src_h
if src_ratio < dest_ratio:
# Image is tall and thin - we need to scale to the right width and then crop
scale = dest_w / src_w
scaled_w = dest_w
scaled_h = src_h * scale
# Cropping values
left = 0
right = dest_w
top = (scaled_h - dest_h) / 2.0
bottom = top + dest_h
else:
# Image is short and wide - we need to scale to the right height and then crop
scale = dest_h / src_h
scaled_h = dest_h
scaled_w = src_w * scale
# Cropping values
left = (scaled_w - dest_w) / 2.0
right = left + dest_w
top = 0
bottom = dest_h
if pad_when_tall:
# Now, for images that are really tall and thin, we start to have issues as we only show a small section of them
# (i.e. nasonex). To deal with this we will resize and pad in this situation
if (bottom - top) < (scaled_h * 0.66):
log.info('Image would crop too much - returning padded image instead')
return resize_pad_image(image, dest_w, dest_h)
if src_w > dest_w or src_h > dest_h:
# This means we are shrinking the image which is ok!
scaled_image = image.resize((int(scaled_w), int(scaled_h)), PIL.Image.ANTIALIAS)
cropped_image = scaled_image.crop((int(left), int(top), int(right), int(bottom)))
return cropped_image
elif scaled_w < src_w or scaled_h < src_h:
# Just crop is as we don't want to stretch the image
cropped_image = image.crop((int(left), int(top), int(right), int(bottom)))
return cropped_image
else:
return image |
def resize_pad_image(image, dest_w, dest_h, pad_with_transparent=False):
"""
Resize the image and pad to the correct aspect ratio.
:param image: PIL.Image
:param dest_w: Target width
:param dest_h: Target height
:param pad_with_transparent: If True, make additional padding transparent
:return: Scaled and padded image
"""
dest_w = float(dest_w)
dest_h = float(dest_h)
dest_ratio = dest_w / dest_h
# Calculate the aspect ratio of the image
src_w = float(image.size[0])
src_h = float(image.size[1])
src_ratio = src_w / src_h
if src_ratio < dest_ratio:
# Image is tall and thin - we need to scale to the right height and then pad
scale = dest_h / src_h
scaled_h = dest_h
scaled_w = src_w * scale
offset = (int((dest_w - scaled_w) / 2), 0)
else:
# Image is short and wide - we need to scale to the right width and then pad
scale = dest_w / src_w
scaled_w = dest_w
scaled_h = src_h * scale
offset = (0, int((dest_h - scaled_h) / 2))
scaled_image = image.resize((int(scaled_w), int(scaled_h)), PIL.Image.ANTIALIAS)
# Normally we will want to copy the source mode for the destination image, but in some
# cases the source image will use a palette-based mode (mode == 'P'), in which case we need to change
# the mode
mode = scaled_image.mode
log.debug('Padding image with mode: "{}"'.format(mode))
if pad_with_transparent and mode != 'RGBA':
old_mode = mode
mode = 'RGBA'
scaled_image = scaled_image.convert(mode)
log.debug('Changed mode from "{}" to "{}"'.format(old_mode, mode))
elif mode == 'P':
if 'transparency' in scaled_image.info:
mode = 'RGBA'
else:
mode = 'RGB'
scaled_image = scaled_image.convert(mode)
log.debug('Changed mode from "P" to "{}"'.format(mode))
if pad_with_transparent:
pad_colour = (255, 255, 255, 0)
else:
# Get the pixel colour for coordinate (0,0)
pixels = scaled_image.load()
pad_colour = pixels[0, 0]
padded_image = PIL.Image.new(mode, (int(dest_w), int(dest_h)), pad_colour)
padded_image.paste(scaled_image, offset)
return padded_image |
def resize_image_to_fit_width(image, dest_w):
"""
Resize an image to fit the passed-in width, keeping the aspect ratio the same
:param image: PIL.Image
:param dest_w: The desired width
"""
scale_factor = dest_w / image.size[0]
dest_h = image.size[1] * scale_factor
scaled_image = image.resize((int(dest_w), int(dest_h)), PIL.Image.ANTIALIAS)
return scaled_image |
def add_value(self, name, value):
"""
Add a new value to the list.
:param str name: name of the value that is being parsed
:param str value: value that is being parsed
:raises ietfparse.errors.MalformedLinkValue:
if *strict mode* is enabled and a validation error
is detected
This method implements most of the validation mentioned in
sections 5.3 and 5.4 of :rfc:`5988`. The ``_rfc_values``
dictionary contains the appropriate values for the attributes
that get special handling. If *strict mode* is enabled, then
only values that are acceptable will be added to ``_values``.
"""
try:
if self._rfc_values[name] is None:
self._rfc_values[name] = value
elif self.strict:
if name in ('media', 'type'):
raise errors.MalformedLinkValue(
'More than one {} parameter present'.format(name))
return
except KeyError:
pass
if self.strict and name in ('title', 'title*'):
return
self._values.append((name, value)) |
def values(self):
"""
The name/value mapping that was parsed.
:returns: a sequence of name/value pairs.
"""
values = self._values[:]
if self.strict:
if self._rfc_values['title*']:
values.append(('title*', self._rfc_values['title*']))
if self._rfc_values['title']:
values.append(('title', self._rfc_values['title*']))
elif self._rfc_values['title']:
values.append(('title', self._rfc_values['title']))
return values |
def download(self, url):
"""
Downloads a MP4 or WebM file that is associated with the video at the URL passed.
:param str url: URL of the video to be downloaded
:return str: Filename of the file in local storage
"""
try:
yt = YouTube(url)
except RegexMatchError:
log.error(f"Cannot download file at {url}")
else:
stream = yt.streams.first()
log.info(f"Download for {stream.default_filename} has started")
start_time = time()
stream.download()
end_time = time()
log.info(f"Download for {stream.default_filename} has finished in {end_time - start_time} seconds")
return stream.default_filename |
def download(self, url):
"""
Downloads a MP3 file that is associated with the track at the URL passed.
:param str url: URL of the track to be downloaded
"""
try:
track = self.client.get('/resolve', url=url)
except HTTPError:
log.error(f"{url} is not a Soundcloud URL.")
return
r = requests.get(self.client.get(track.stream_url, allow_redirects=False).location, stream=True)
total_size = int(r.headers['content-length'])
chunk_size = 1000000
file_name = track.title + '.mp3'
with open(file_name, 'wb') as f:
for data in tqdm(r.iter_content(chunk_size), desc=track.title, total=total_size / chunk_size, unit='MB', file=sys.stdout):
f.write(data)
return file_name |
def connect(self):
"""Creates connection to the Google Drive API, sets the connection attribute to make requests, and creates the Music folder if it doesn't exist."""
SCOPES = 'https://www.googleapis.com/auth/drive'
store = file.Storage('drive_credentials.json')
creds = store.get()
if not creds or creds.invalid:
try:
flow = client.flow_from_clientsecrets('client_secret.json', SCOPES)
except InvalidClientSecretsError:
log.error('ERROR: Could not find client_secret.json in current directory, please obtain it from the API console.')
return
creds = tools.run_flow(flow, store)
self.connection = build('drive', 'v3', http=creds.authorize(Http()))
response = self.connection.files().list(q="name='Music' and mimeType='application/vnd.google-apps.folder' and trashed=false").execute()
try:
folder_id = response.get('files', [])[0]['id']
except IndexError:
log.warning('Music folder is missing. Creating it.')
folder_metadata = {'name': 'Music', 'mimeType': 'application/vnd.google-apps.folder'}
folder = self.connection.files().create(body=folder_metadata, fields='id').execute() |
def upload(self, file_name):
"""
Uploads the file associated with the file_name passed to Google Drive in the Music folder.
:param str file_name: Filename of the file to be uploaded
:return str: Original filename passed as an argument (in order for the worker to send it to the delete queue)
"""
response = self.connection.files().list(q="name='Music' and mimeType='application/vnd.google-apps.folder' and trashed=false").execute()
folder_id = response.get('files', [])[0]['id']
file_metadata = {'name': file_name, 'parents': [folder_id]}
media = MediaFileUpload(file_name, mimetype='audio/mpeg')
log.info(f"Upload for {file_name} has started")
start_time = time()
self.connection.files().create(body=file_metadata, media_body=media, fields='id').execute()
end_time = time()
log.info(f"Upload for {file_name} has finished in {end_time - start_time} seconds")
return file_name |
def connect(self):
"""Initializes the connection attribute with the path to the user home folder's Music folder, and creates it if it doesn't exist."""
if self.music_folder is None:
music_folder = os.path.join(os.path.expanduser('~'), 'Music')
if not os.path.exists(music_folder):
os.makedirs(music_folder)
self.music_folder = music_folder |
def upload(self, file_name):
"""
Moves the file associated with the file_name passed to the Music folder in the local storage.
:param str file_name: Filename of the file to be uploaded
"""
log.info(f"Upload for {file_name} has started")
start_time = time()
os.rename(file_name, os.path.join(self.music_folder, file_name))
end_time = time()
log.info(f"Upload for {file_name} has finished in {end_time - start_time} seconds") |
def write_run_parameters_to_file(self):
"""All of the class properties are written to a text file
Each property is on a new line with the key and value separated with an equals sign '='
This is the main planarrad properties file used by slabtool
"""
self.update_filenames()
lg.info('Writing Inputs to file : ' + self.project_file)
# First update the file names in case we changed the file values. the file name includes the file values
# self.updateFileNames()
f = open(self.project_file, 'w')
f.write('name = ' + self.project_file + '\n')
f.write('band_count = ' + str(len(self.wavelengths)) + '\n')
f.write('bs_name = ' + str(len(self.wavelengths)) + ' Bands (' + str(self.wavelengths[0]) + '-' + str(
self.wavelengths[len(self.wavelengths) - 1]) + ' nm) \n')
f.write('bs_code = ' + str(len(self.wavelengths)) + '\n')
f.write('band_centres_data = ')
f.write(",".join([str(wave) for wave in self.wavelengths]) + '\n')
# f.write('band_widths_data = ')
# for i in range(0, len(self.wavelengths) - 1): # Find better way to do this!
# width = self.wavelengths[i + 1] - self.wavelengths[i]
# f.write(str(width))
# if i < len(self.wavelengths) - 2:
# f.write(',')
f.write('\n')
f.write('ds_name = ' + self.ds_name + '\n')
f.write('ds_code = ' + self.ds_code + '\n')
f.write('partition = ' + self.partition + '\n')
f.write('vn = ' + str(self.vn) + '\n')
f.write('hn = ' + str(self.hn) + '\n')
f.write('theta_points=')
f.write(",".join([str(theta) for theta in self.theta_points]) + '\n')
f.write('depth = ' + str(self.depth) + '\n')
f.write('sample_point_distance = ' + str(self.sample_point_distance) + '\n')
f.write('sample_point_delta_distance = ' + str(self.sample_point_delta_distance) + '\n')
f.write('\n')
f.write('sky_fp = ' + self.sky_file + '\n') # need to create these files from sky tool
f.write('water_surface_fp =' + self.water_surface_file)
f.write('\n')
f.write('atten_fp = ' + self.attenuation_file + '\n')
f.write('scat_fp = ' + self.scattering_file + '\n')
f.write('pf_fp = ' + self.phase_function_file + '\n')
f.write('bottom_reflec_diffuse_fp = ' + self.bottom_reflectance_file + '\n')
f.write('sky_type = ' + self.sky_type + '\n')
f.write('sky_azimuth = ' + str(self.sky_azimuth) + '\n')
f.write('sky_zenith = ' + str(self.sky_zenith) + '\n')
f.write('sky_C = ' + str(self.sky_c) + '\n')
f.write('sky_rdif = ' + str(self.sky_r_dif) + '\n')
f.write('iface_type = ' + self.iface_type + '\n')
f.write('iface_refrac_index_0 = ' + str(self.iface_0_ri) + '\n')
f.write('iface_refrac_index_1 = ' + str(self.iface_1_ri) + '\n')
# f.write('iop_atten_data = 1\n')
# f.write('iop_absorp_data = 0\n')
f.write('iop_type = ' + self.iop_type + '\n')
f.write('iop_backscatter_proportion_list = ' + str(self.iop_backscatter_proportion_list) + '\n')
f.write('bound_bottom_reflec_diffuse_data = ' + str(self.bound_bottom_reflec_diffuse_data) + '\n')
f.write('sky_sub_quad_count = ' + self.sky_sub_quad_count + '\n')
f.write('iface_sub_quad_count = ' + self.iface_sub_quad_count + '\n')
f.write('pf_sub_quad_count = ' + self.pf_sub_quad_count + '\n')
f.write('integrator = ' + self.integrator + '\n')
f.write('euler_steps_per_optical_depth = ' + str(self.euler_steps_per_optical_depth) + '\n')
f.write('midpoint_steps_per_optical_depth = ' + str(self.midpoint_steps_per_optical_depth) + '\n')
f.write('runga4_steps_per_optical_depth = ' + str(self.runga4_steps_per_optical_depth) + '\n')
f.write('runga4adap_min_steps_per_optical_depth = ' + str(self.runga4adap_min_steps_per_optical_depth) + '\n')
f.write('runga4adap_max_steps_per_optical_depth = ' + str(self.runga4adap_max_steps_per_optical_depth) + '\n')
f.write('runga4adap_min_error = ' + str(self.runga4adap_min_error) + '\n')
f.write('runga4adap_max_error = ' + str(self.runga4adap_max_error) + '\n')
f.write('\n')
f.write('Ld_b_image_save_fp = ' + os.path.join(self.output_path,
'image_Ld_b.ppm') + '\n') #todo update this in the constructor not here
f.write('Ld_b_image_sens_k = ' + str(self.ld_b_image_sens_k) + '\n')
f.write('\n')
f.write('Ld_b_save_fp = ' + os.path.join(self.output_path,
'Ld_b_data') + '\n')
f.write('\n')
f.write('report_save_fp = ' + self.report_file)
f.write('\n')
f.write('verbose = ' + str(self.verbose) + '\n')
f.close() |
def write_sky_params_to_file(self):
"""Writes the params to file that skytool_Free needs to generate the sky radiance distribution."""
inp_file = self.sky_file + '_params.txt'
lg.info('Writing Inputs to file : ' + inp_file)
f = open(inp_file, 'w')
f.write('verbose= ' + str(self.verbose) + '\n')
f.write('band_count= ' + str(self.num_bands) + '\n')
f.write('band_centres_data= ')
f.write(",".join([str(wave) for wave in self.wavelengths]) + '\n')
f.write('partition= ' + self.partition + '\n')
f.write('vn= ' + str(self.vn) + '\n')
f.write('hn= ' + str(self.hn) + '\n')
f.write('rdif= ' + str(self.sky_r_dif) + '\n')
f.write('theta_points= ')
f.write(",".join([str(theta) for theta in self.theta_points]) + '\n')
f.write('type= ' + self.sky_type + '\n')
f.write('azimuth= ' + str(self.sky_azimuth) + '\n')
f.write('zenith= ' + str(self.sky_zenith) + '\n')
f.write('sky_save_fp= ' + self.sky_file + '\n')
f.write('sky_image_save_fp= ' + self.sky_file + '.ppm' + '\n')
f.write('sky_image_size= 256' + '\n')
if self.sky_type == 'hlideal':
f.write('C= ' + str(self.sky_c) + '\n')
f.write('rdif= ' + str(self.sky_r_dif) + '\n')
f.flush()
f.close() |
def write_surf_params_to_file(self):
"""Write the params to file that surftool_Free needs to generate the surface facets"""
inp_file = self.water_surface_file + '_params.txt'
lg.info('Writing Inputs to file : ' + inp_file)
if self.surf_state == 'flat': # this is the only one that currently works.
lg.info('Surface Type is :: flat')
f = open(inp_file, 'w')
f.write('verbose= ' + str(self.verbose) + '\n')
f.write('band_count= ' + str(self.num_bands) + '\n')
f.write('band_centres_data= ')
f.write(",".join([str(wave) for wave in self.wavelengths]) + '\n')
f.write('partition= ' + self.partition + '\n')
f.write('vn= ' + str(self.vn) + '\n')
f.write('hn= ' + str(self.hn) + '\n')
f.write('theta_points= ')
f.write(",".join([str(theta) for theta in self.theta_points]) + '\n')
f.write('type= ' + self.iface_type + '\n')
f.write('refrac_index_0= ' + str(self.iface_0_ri) + '\n')
f.write('refrac_index_1= ' + str(self.iface_1_ri) + '\n')
f.write('wind_speed= ' + str(self.wind_speed) + '\n')
f.write('wind_direc= ' + str(self.wind_direc) + '\n')
f.write('crosswind_vertices= ' + str(self.crosswind_vertices) + '\n')
f.write('upwind_vertices= ' + str(self.upwind_vertices) + '\n')
f.write('surface_size= ' + str(self.surface_size) + '\n')
f.write('surface_radius=' + str(self.surface_radius) + '\n')
f.write('target_size= ' + str(self.target_size) + '\n')
f.write('rays_per_quad= ' + str(self.rays_per_quad) + '\n')
f.write('surface_count= ' + str(self.surface_count) + '\n')
f.write('azimuthally_average= ' + str(self.azimuthally_average) + '\n')
f.write('surface_save_fp= ' + self.water_surface_file + '\n')
f.flush()
f.close() |
def write_phase_params_to_file(self):
"""Write the params to file that surftool_Free needs to generate the surface facets"""
inp_file = os.path.join(os.path.join(self.input_path, 'phase_files'), self.phase_function_file) + '_params.txt'
lg.info('Writing Inputs to file : ' + inp_file)
if self.iop_type in ('isotropic', 'isotropic_integ', 'petzold', 'pure_water'):
lg.info('Iop type is :: ' + self.iop_type)
f = open(inp_file, 'w')
f.write('verbose = ' + str(self.verbose) + '\n')
f.write('band_count = ' + str(self.num_bands) + '\n')
f.write('band_centres_data = ')
f.write(",".join([str(wave) for wave in self.wavelengths]) + '\n')
f.write('partition = ' + self.partition + '\n')
f.write('vn = ' + str(self.vn) + '\n')
f.write('hn = ' + str(self.hn) + '\n')
f.write('theta_points = ')
f.write(",".join([str(theta) for theta in self.theta_points]) + '\n')
f.write('type = ' + self.iop_type + '\n')
f.write('phase_func_save_fp = ' + inp_file[:-len('_params.txt')] + '\n')
f.flush()
f.close() |
def update_filenames(self):
"""Does nothing currently. May not need this method"""
self.sky_file = os.path.abspath(os.path.join(os.path.join(self.input_path, 'sky_files'),
'sky_' + self.sky_state + '_z' + str(
self.sky_zenith) + '_a' + str(
self.sky_azimuth) + '_' + str(
self.num_bands) + '_' + self.ds_code)) |
def build_bbp(self, x, y, wave_const=550):
"""
Builds the particle backscattering function :math:`X(\\frac{550}{\\lambda})^Y`
:param x: function coefficient
:param y: order of the power function
:param wave_const: wave constant default 550 (nm)
:returns null:
"""
lg.info('Building b_bp spectra')
self.b_bp = x * (wave_const / self.wavelengths) ** y |
def build_a_cdom(self, g, s, wave_const=400):
"""
Builds the CDOM absorption function :: :math:`G \exp (-S(\lambda - 400))`
:param g: function coefficient
:param s: slope factor
:param wave_const: wave constant default = 400 (nm)
:returns null:
"""
lg.info('building CDOM absorption')
self.a_cdom = g * scipy.exp(-s * (self.wavelengths - wave_const)) |
def read_aphi_from_file(self, file_name):
"""Read the phytoplankton absorption file from a csv formatted file
:param file_name: filename and path of the csv file
"""
lg.info('Reading aphi absorption')
try:
self.a_phi = self._read_iop_from_file(file_name)
except:
lg.exception('Problem reading file :: ' + file_name)
self.a_phi = -1 |
def scale_aphi(self, scale_parameter):
"""Scale the spectra by multiplying by linear scaling factor
:param scale_parameter: Linear scaling factor
"""
lg.info('Scaling a_phi by :: ' + str(scale_parameter))
try:
self.a_phi = self.a_phi * scale_parameter
except:
lg.exception("Can't scale a_phi, check that it has been defined ") |
def read_pure_water_absorption_from_file(self, file_name):
"""Read the pure water absorption from a csv formatted file
:param file_name: filename and path of the csv file
"""
lg.info('Reading water absorption from file')
try:
self.a_water = self._read_iop_from_file(file_name)
except:
lg.exception('Problem reading file :: ' + file_name) |
def read_pure_water_scattering_from_file(self, file_name):
"""Read the pure water scattering from a csv formatted file
:param file_name: filename and path of the csv file
"""
lg.info('Reading water scattering from file')
try:
self.b_water = self._read_iop_from_file(file_name)
except:
lg.exception('Problem reading file :: ' + file_name) |
def _read_iop_from_file(self, file_name):
"""
Generic IOP reader that interpolates the iop to the common wavelengths defined in the constructor
:param file_name: filename and path of the csv file
:returns interpolated iop
"""
lg.info('Reading :: ' + file_name + ' :: and interpolating to ' + str(self.wavelengths))
if os.path.isfile(file_name):
iop_reader = csv.reader(open(file_name), delimiter=',', quotechar='"')
wave = iop_reader.next()
iop = iop_reader.next()
else:
lg.exception('Problem reading file :: ' + file_name)
raise IOError
try:
wave = map(float, wave)
iop = map(float, iop)
return scipy.interp(self.wavelengths, wave, iop)
except IOError:
lg.exception('Error interpolating IOP to common wavelength')
return -1 |
def _write_iop_to_file(self, iop, file_name):
"""Generic iop file writer
:param iop: numpy array to write to file
:param file_name: the file and path to write the IOP to
"""
lg.info('Writing :: ' + file_name)
f = open(file_name, 'w')
for i in scipy.nditer(iop):
f.write(str(i) + '\n') |
def build_b(self, scattering_fraction=0.01833):
"""Calculates the total scattering from back-scattering
:param scattering_fraction: the fraction of back-scattering to total scattering default = 0.01833
b = ( b_b[particulate] + b_water / 2 ) / scattering_fraction
"""
lg.info('Building b with scattering fraction of :: ' + str(scattering_fraction))
self.b = (self.b_b + self.b_water / 2.0) / scattering_fraction |
def build_a(self):
"""Calculates the total absorption from water, phytoplankton and CDOM
a = awater + acdom + aphi
"""
lg.info('Building total absorption')
self.a = self.a_water + self.a_cdom + self.a_phi |
def build_c(self):
"""Calculates the total attenuation from the total absorption and total scattering
c = a + b
"""
lg.info('Building total attenuation C')
self.c = self.a + self.b |
def build_all_iop(self):
"""Meta method that calls all of the build methods in the correct order
self.build_a()
self.build_bb()
self.build_b()
self.build_c()
"""
lg.info('Building all b and c from IOPs')
self.build_a()
self.build_bb()
self.build_b()
self.build_c() |
def run(self):
"""Distributes the work across the CPUs. It actually uses _run()"""
done = False
dir_list = []
tic = time.clock()
lg.info('Starting batch run at :: ' + str(tic))
if self.run_params.num_cpus == -1: # user hasn't set a throttle
self.run_params.num_cpus = os.sysconf("SC_NPROCESSORS_ONLN")
lg.info('Found ' + str(self.run_params.num_cpus) + ' CPUs')
# --------------------------------------------------#
# COUNT THE NUMBER OF DIRECTORIES TO ITERATE THROUGH
# --------------------------------------------------#
tmp_dir_list = os.listdir(self.batch_output)
for direc in tmp_dir_list:
dir_list.append(os.path.join(self.batch_output, direc))
num_dirs = len(dir_list)
lg.info('Found ' + str(num_dirs) + ' directories to process in ' + self.batch_output)
sub = scipy.floor(num_dirs / self.run_params.num_cpus)
remainder = num_dirs - (sub * self.run_params.num_cpus)
if remainder > 0:
lg.warning('Number of variations not evenly divisible by number of CPUs')
lg.warning('This is not a problem, last block will not use all available CPUs')
lg.warning('The remainder is :: ' + str(remainder))
while not done:
for l in range(0, int(sub)):
lg.info('Starting processing block of :: ' + str(self.run_params.num_cpus) + ' processes')
for m in range(0, self.run_params.num_cpus):
#row = (m * sub) + l
_dir = dir_list.pop()
#--------------------------------------------------#
# CHECK TO SEE IF REPORT HAS BEEN GENERATED AND DON'T
# BOTHER RUNNING AGAIN IF THEY DO EXIST
#--------------------------------------------------#
report_dir, report_file_name = os.path.split(self.run_params.report_file)
lg.debug(report_file_name)
lg.debug(os.path.join(_dir, report_file_name))
try:
rep_size = os.path.getsize(os.path.join(_dir, report_file_name.strip('\n')))
lg.debug('report size is :: ' + str(rep_size))
except OSError:
rep_size = 0
if rep_size < 1.0: # TODO this is a spoof!
lg.info('No report file found, running process')
p = Process(target=self._run, args=(_dir,))
else:
lg.warning('Report file found :: ' + os.path.join(_dir, report_file_name.strip(
'\n')) + ' not redoing run ')
p = Process(target=self._dummy, args=(_dir,))
# !! for testing
#p = Process(target=self._dummy, args=(_dir,))
p.start()
lg.info('Starting Process :: Process ID :: ' + str(p.pid))
p.join()
self.run_params.num_cpus = remainder
remainder = 0
lg.info('Processing remainder')
sub = 1
if remainder == 0:
done = True
toc = time.time()
lg.info('Ending batch run at :: ' + str(toc))
time_taken = toc - tic
lg.info('Time taken :: ' + str(time_taken)) |
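# Minimal standalone sketch (names are placeholders, not part of planarradpy) of the
# block-wise multiprocessing pattern that run() relies on: work is popped off a
# directory list in blocks of num_cpus, one Process per directory. Note that starting
# a Process and joining it immediately runs a block sequentially; to get parallelism,
# start every Process in the block first and join them afterwards, as done here.
import os
from multiprocessing import Process


def process_directory(directory):
    print('processing ' + directory)


if __name__ == '__main__':
    dir_list = ['run_a', 'run_b', 'run_c', 'run_d', 'run_e']
    num_cpus = os.sysconf("SC_NPROCESSORS_ONLN")
    while dir_list:
        block_size = min(num_cpus, len(dir_list))
        block = [dir_list.pop() for _ in range(block_size)]
        procs = [Process(target=process_directory, args=(d,)) for d in block]
        for p in procs:
            p.start()
        for p in procs:
            p.join()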
def _run(self, run_dir):
"""Distributed process"""
# Check to see if the required run_params files exist, if they dont use the tools to generate them
# --------------------------------------------------#
# HERE WE RECREATE OUR RUN_PARAMS OBJECT FROM
# THE RUN FILE WE WROTE TO DISK EARLIER
# --------------------------------------------------#
file_tools = FileTools()
run_dict = file_tools.read_param_file_to_dict(os.path.join(run_dir, 'batch.txt'))
#print(run_dict['band_centres_data'])
#self.run_params.wavelengths = run_dict['wavelengths']
#run_params = RunParameters()
#run_params = file_tools.dict_to_object(run_params, run_dict)
#------------------------------------------------#
# Sky inputs
#------------------------------------------------#
#lg.debug(run_dict.keys())
#self.run_params.update_filenames()
#lg.debug('!!!!!!!!!' + run_dict['sky_fp'])
if os.path.isfile(run_dict['sky_fp']):
sky_file_exists = True
lg.info('Found sky_tool generated file :: ' + run_dict['sky_fp'])
else:
lg.info('No sky_tool generated file, generating one')
#try:
inp_file = run_dict['sky_fp'] + '_params.txt'
#self.run_params.sky_file = inp_file
self.run_params.write_sky_params_to_file()
#if not os.path.isfile(inp_file):
# lg.error(inp_file + ' : is not a valid parameter file')
lg.debug('Running skytool :: ' + os.path.join(self.run_params.exec_path, 'skytool_free'))
lg.debug(os.path.join(self.run_params.exec_path, 'skytool_free') + ' params=' + inp_file)
os.system(os.path.join(self.run_params.exec_path, 'skytool_free') + ' params=' + inp_file)
#except OSError:
# lg.exception('Cannot execute PlannarRad, cannot find executable file to skytool_free')
#------------------------------------------------#
# Water surface inputs
#------------------------------------------------#
if os.path.isfile(run_dict['water_surface_fp']):
surface_file_exists = True
lg.info('Found surf_tool generated file :: ' + run_dict['water_surface_fp'])
else:
lg.info('No surf_tool generated file, generating one')
try:
inp_file = run_dict['water_surface_fp'] + '_params.txt'
self.run_params.write_surf_params_to_file()
if not os.path.isfile(inp_file):
lg.error(inp_file + ' : is not a valid parameter file')
os.system(os.path.join(self.run_params.exec_path, 'surftool_free') + ' params=' + inp_file)
except OSError:
lg.exception('Cannot execute PlanarRad, cannot find executable file for surftool_free')
#------------------------------------------------#
# Phase functions inputs
#------------------------------------------------#
if os.path.isfile(run_dict['pf_fp']):
phase_file_exists = True
lg.info('Found phase_tool generated file :: ' + run_dict['pf_fp'])
else:
lg.info('No phase_tool generated file, generating one')
try:
inp_file = run_dict['pf_fp'] + '_params.txt'
self.run_params.write_phase_params_to_file()
if not os.path.isfile(inp_file):
lg.error(inp_file + ' : is not a valid parameter file')
os.system(os.path.join(self.run_params.exec_path, 'phasetool_free') + ' params=' + inp_file)
except OSError:
lg.exception('Cannot execute PlanarRad, cannot find executable file for phasetool_free')
#------------------------------------------------#
# slabtool inputs [Run planarrad]
#------------------------------------------------#
inp_file = run_dict['name']
if not os.path.isfile(inp_file):
lg.error(inp_file + ' : is not a valid batch file')
try:
os.system(os.path.join(self.run_params.exec_path, 'slabtool_free') + ' params=' + inp_file)
except OSError:
lg.exception('Cannot execute PlanarRad, cannot find executable file for slabtool_free') |
def generate_directories(self, overwrite=False):
"""For all possible combinations of 'batchable' parameters. create a unique directory to story outputs
Each directory name is unique and contains the run parameters in the directory name
:param overwrite: If set to True will over write all files default = False
"""
if not os.path.exists(self.batch_output):
try:
lg.info('Creating batch project directory')
if self.batch_output == self.run_params.output_path + 'batch':
lg.warning('Using default project name. Consider renaming!')
os.makedirs(self.batch_output)
except OSError:
lg.exception('Could not create project directory')
elif os.path.exists(self.batch_output) and overwrite:
try:
lg.info('Creating batch project directory')
lg.warning('Overwriting existing directories')
if self.batch_output == self.run_params.output_path + 'batch':
lg.warning('Using default project name. Consider renaming!')
os.makedirs(self.batch_output)
except OSError:
lg.exception('Could not create project directory')
# --------------------------------------------------#
# GENERATE ALL THE IOPS FROM BIOP
# --------------------------------------------------#
#--------------------------------------------------#
# WRITE EACH BIOP TO CSV FILE INTO THE INPUT
# DIRECTORY IF IT DOESNT EXIST
#--------------------------------------------------#
#--------------------------------------------------#
# GENERATE A LIST OF ALL COMBINATIONS OF BIOPS
#--------------------------------------------------#
#--------------------------------------------------#
# WRITE THE DIRECTORIES FOR EACH BIOP AND NAME APPROPRIATELY
# DON'T OVERWRITE IF THEY EXIST ALREADY
#--------------------------------------------------#
self.bio_params.read_pure_water_absorption_from_file(
self.run_params.pure_water_absorption_file)
self.bio_params.read_pure_water_scattering_from_file(
self.run_params.pure_water_scattering_file)
self.bio_params.read_aphi_from_file(self.run_params.phytoplankton_absorption_file)
for saa in self.saa_list:
# update the saa in the run file & the todo filename!
self.run_params.sky_azimuth = saa
self.run_params.sky_file = os.path.abspath(
os.path.join(os.path.join(self.run_params.input_path, 'sky_files'),
'sky_' + self.run_params.sky_state + '_z' + str(self.run_params.sky_zenith) + '_a' + str(
self.run_params.sky_azimuth) + '_' + str(
self.run_params.num_bands) + '_' + self.run_params.ds_code))
for sza in self.sza_list:
# update the sza in the run file
self.run_params.sky_zenith = sza
self.run_params.sky_file = os.path.abspath(
os.path.join(os.path.join(self.run_params.input_path, 'sky_files'),
'sky_' + self.run_params.sky_state + '_z' + str(
self.run_params.sky_zenith) + '_a' + str(self.run_params.sky_azimuth) + '_' + str(
self.run_params.num_bands) + '_' + self.run_params.ds_code))
for p in self.p_list:
for x in self.x_list:
for y in self.y_list:
for g in self.g_list:
for s in self.s_list:
for z in self.z_list:
file_name = 'SAA' + str(saa) + '_SZA' + str(sza) + '_P' + str(p) + '_X' + str(
x) + '_Y' + str(y) + '_G' + str(g) + '_S' + str(s) + '_Z' + str(z)
dir_name = os.path.join(self.batch_output, file_name)
self.run_params.output_path = dir_name
#--------------------------------------------------#
# UPDATE THE IOP PARAMETERS FOR THE RUN FILE
#--------------------------------------------------#
self.run_params.sky_azimuth = saa
self.run_params.sky_zenith = sza
self.run_params.depth = z
self.bio_params.build_bbp(x, y) # todo add wave const as a kwarg
self.bio_params.build_a_cdom(g, s)
# Need to re-read the file as it was scaled in the previous run!
self.bio_params.read_aphi_from_file(
self.run_params.phytoplankton_absorption_file)
self.bio_params.scale_aphi(p)
self.bio_params.build_all_iop()
self.run_params.scattering_file = os.path.join(
os.path.join(self.run_params.input_path, 'iop_files'), 'b_' + file_name)
self.bio_params.write_b_to_file(self.run_params.scattering_file)
self.run_params.attenuation_file = os.path.join(
os.path.join(self.run_params.input_path, 'iop_files'), 'c_' + file_name)
self.bio_params.write_c_to_file(self.run_params.attenuation_file)
self.run_params.project_file = os.path.join(dir_name, 'batch.txt')
self.run_params.report_file = os.path.join(dir_name, 'report.txt')
self.run_params.write_sky_params_to_file()
self.run_params.write_surf_params_to_file()
self.run_params.write_phase_params_to_file()
if not os.path.exists(dir_name):
try:
lg.info('Creating run directory')
os.makedirs(dir_name)
self.run_params.write_run_parameters_to_file()
except OSError:
lg.exception('Could not create run directory')
elif os.path.exists(dir_name) and overwrite:
try:
lg.info('Creating run directory')
lg.warning('Overwriting existing directories')
os.makedirs(dir_name)
self.run_params.write_run_parameters_to_file()
except OSError:
lg.exception('Could not create run directory') |
def batch_parameters(self, saa, sza, p, x, y, g, s, z):
"""Takes lists for parameters and saves them as class properties
:param saa: <list> Sun Azimuth Angle (deg)
:param sza: <list> Sun Zenith Angle (deg)
:param p: <list> Phytoplankton linear scaling factor
:param x: <list> Scattering scaling factor
:param y: <list> Scattering slope factor
:param g: <list> CDOM absorption scaling factor
:param s: <list> CDOM absorption slope factor
:param z: <list> depth (m)"""
self.saa_list = saa
self.sza_list = sza
self.p_list = p
self.x_list = x
self.y_list = y
self.g_list = g
self.s_list = s
self.z_list = z |
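# Hypothetical usage sketch: how the batch parameter lists expand into individual runs.
# itertools.product enumerates the same combinations that generate_directories loops
# over, and the directory names follow the SAA/SZA/P/X/Y/G/S/Z pattern used above.
# The parameter values are invented for illustration.
import itertools

saa, sza = [0.0], [10.0, 30.0]
p, x, y = [0.01, 0.1], [0.1], [1.0]
g, s, z = [0.2], [0.014], [5.0, 10.0]

for combo in itertools.product(saa, sza, p, x, y, g, s, z):
    name = 'SAA{0}_SZA{1}_P{2}_X{3}_Y{4}_G{5}_S{6}_Z{7}'.format(*combo)
    print(name)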
def read_param_file_to_dict(file_name):
"""Loads a text file to a python dictionary using '=' as the delimiter
:param file_name: the name and path of the text file
"""
data = loadtxt(file_name, delimiter='=', dtype=scipy.string0)
data_dict = dict(data)
for key in data_dict.keys():
data_dict[key] = data_dict[key].strip()
data_dict[key.strip()] = data_dict[key]
del data_dict[key]
return data_dict |
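# Minimal sketch of the '=' delimited parameter file format that read_param_file_to_dict
# expects, and of reading it back without scipy. The file name and contents are invented
# for illustration.
params_text = """batch_name = my_batch
num_cpus = -1
saa_list = [0.0, 90.0]
"""
with open('example_batch.txt', 'w') as fh:
    fh.write(params_text)

param_dict = {}
with open('example_batch.txt') as fh:
    for line in fh:
        if '=' in line:
            key, value = line.split('=', 1)
            param_dict[key.strip()] = value.strip()
print(param_dict['saa_list'])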
def string_to_float_list(string_var):
"""Converts a comma-separated string of values (e.g. read back from a text file) to a list of floats"""
try:
return [float(s) for s in string_var.strip('[').strip(']').split(', ')]
except ValueError:
return [float(s) for s in string_var.strip('[').strip(']').split(',')] |
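# Quick illustration of the conversion that string_to_float_list performs on values
# read back from a parameter file (the input string is invented).
wavelength_string = '[410.0, 440.0, 510.0, 550.0]'
wavelengths = [float(s) for s in wavelength_string.strip('[').strip(']').split(', ')]
print(wavelengths)   # [410.0, 440.0, 510.0, 550.0]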
def read_pr_report(self, filename):
"""Reads in a PlanarRad generated report
Saves the single line reported parameters as a python dictionary
:param filename: The name and path of the PlanarRad generated file
:returns self.data_dictionary: python dictionary with the key and values from the report
"""
f = open(filename)
while True:
line = f.readline()
if not line:
break
if "# Quad solid angle mean point theta table (rows are horizontal, columns are vertical):" in line.strip():
# read in the bunch of lines.
tmp = []
for i_iter in range(0, len(self.data_dictionary['theta_points_deg']) - 2):
tmp.append(f.readline())
self.data_dictionary['Quad_solid_angle_mean_point_theta'] = tmp
elif '#' not in line or not line.strip():
element = line.split(',')
self.data_dictionary[element[0]] = element[1:]
if "# Quad solid angle mean point phi table (rows are horizontal, columns are vertical):" in line.strip():
# read in the bunch of lines.
tmp = []
for i_iter in range(0, len(self.data_dictionary['theta_points_deg']) - 2):
tmp.append(f.readline())
self.data_dictionary['Quad_solid_angle_mean_point_phi'] = tmp
elif '#' not in line or not line.strip():
element = line.split(',')
self.data_dictionary[element[0]] = element[1:]
if "L_w band" in line.strip():
for i_iter in range(0, int(self.data_dictionary['band_count'][1])):
tmp = []
for j_iter in range(0, len(self.data_dictionary['theta_points_deg']) - 2):
tmp.append(f.readline())
self.data_dictionary['L_w_band_' + str(i_iter + 1)] = tmp
f.readline()
f.readline() # skip the next 2 lines
if "L_it band" in line.strip():
for i_iter in range(0, int(self.data_dictionary['band_count'][1])):
tmp = []
for j_iter in range(0, len(self.data_dictionary['theta_points_deg']) - 2):
tmp.append(f.readline())
self.data_dictionary['L_it_band_' + str(i_iter + 1)] = tmp
f.readline()
f.readline() # skip the next 2 lines
return self.data_dictionary |
def calc_directional_aop(self, report, parameter, parameter_dir):
"""
Calculates the directional AOP (only sub-surface rrs for now) if the direction is defined using @
e.g. rrs@32.0:45 where the format is <zenith-theta>:<azimuth-phi>
:param report: The planarrad report dictionary. Should include the quad tables and the directional info
:param parameter: parameter to calculate. Currently only sub-surface reflectance rrs (and Rrs).
:param parameter_dir: the direction string '<zenith>:<azimuth>' in degrees
:return: a comma-separated string of directional reflectance values, prefixed with ' ,'
"""
lg.debug('calculating the directional ' + parameter)
tmp_zenith = []
param_zenith = parameter_dir.split(':')[0]
param_azimuth = parameter_dir.split(':')[1]
# --------------------------------------------------#
# find the mean directions values
# --------------------------------------------------#
for i_iter in range(0, int(report['vn'][1])):
tmp_zenith.append(report['Quad_solid_angle_mean_point_theta'][i_iter][:].split(',')[0]) #that was a pain!
tmp_azimuth = report['Quad_solid_angle_mean_point_phi'][1]
zenith = scipy.asarray(tmp_zenith, dtype=float)
azimuth = scipy.fromstring(tmp_azimuth, dtype=float, sep=',')
# --------------------------------------------------#
# now grab the min and max index of the closest match
# --------------------------------------------------#
#min_zenith_idx = (scipy.abs(zenith - param_zenith)).argmin()
from scipy import interpolate
lw = scipy.zeros(int(report['band_count'][1]))
for j_iter in range(0, int(report['band_count'][1])):
if parameter == 'rrs':
lg.info('Calculating directional rrs')
tmp_lw = report['L_w_band_' + str(j_iter + 1)]
elif parameter == 'Rrs':
lg.info('Calculating directional Rrs')
lg.debug(str(report.keys()))
tmp_lw = report['L_it_band_' + str(j_iter + 1)]
lw_scal = scipy.zeros((int(report['vn'][1]), int(report['hn'][1])))
# for the first and last line we have to replicate the top and bottom circle
for i_iter in range(0, int(report['hn'][1])):
lw_scal[0, i_iter] = tmp_lw[0].split(',')[0]
lw_scal[int(report['vn'][1]) - 1, i_iter] = tmp_lw[-1].split(',')[0]
for i_iter in range(1, int(report['vn'][1]) - 1):
lw_scal[i_iter, :] = scipy.asarray(tmp_lw[i_iter].split(','), dtype=float)
# TODO: make an array of zeros, loop over each list and apply it to each line (brute force)
f1 = interpolate.interp2d(zenith, azimuth, lw_scal)
lw[j_iter] = f1(float(param_zenith), float(param_azimuth))
# ----
# Now we finally have L_w we calculate the rrs
# ----
if parameter == 'rrs':
tmp_rrs = lw / scipy.asarray(report['Ed_w'], dtype=float)[1:] # ignore the first value as that is the depth
elif parameter == 'Rrs':
tmp_rrs = lw / scipy.asarray(report['Ed_a'], dtype=float)[1:] # ignore the first value as that is the depth
# make rrs a string so it can be written to file.
rrs = ",".join(map(str, tmp_rrs))
return " ," + rrs |
def write_batch_report(self, input_directory, parameter):
"""
Collects all of the individual run reports and concatenates the results into a single batch report
(batch_report.txt), one row per run.
:param input_directory: the batch output directory containing one sub-directory per run
:param parameter: the parameter to report on, optionally with a direction, e.g. 'rrs@32.0:45'
"""
# Check to see if there is an @ in the parameter. If there is split
if '@' in parameter:
parameter_dir = parameter.split('@')[1]
parameter = parameter.split('@')[0]
# --------------------------------------------------#
# we put the batch report one directory up in the tree
# --------------------------------------------------#
batch_report_file = 'batch_report.txt'
batch_report_file = os.path.join(input_directory, batch_report_file)
f = open(batch_report_file, 'w')
w = csv.writer(f, delimiter=',')
#--------------------------------------------------#
# Read in the report from planarrad and pull out the parameter that we want
#--------------------------------------------------#
dir_list = os.listdir(input_directory)
#--------------------------------------------------#
# Sometimes the report isn't generated for some reason.
# this checks to see if the first file in the dir list exists and skips if it doesn't
#--------------------------------------------------#
read_first_file = True
i_iter = 0
while read_first_file:
if os.path.exists(os.path.join(input_directory, os.path.join(dir_list[i_iter], 'report.txt'))):
report = self.read_pr_report(
os.path.join(input_directory, os.path.join(dir_list[i_iter], 'report.txt')))
read_first_file = False
else:
lg.warning('Missing report file in' + dir_list[i_iter])
i_iter += 1
try:
wave_val = report['band_centres']
param_val = report[parameter]
except KeyError:
lg.exception('Parameter :: ' + str(parameter) + ' :: Not in report')
wave_str = str(wave_val)
wave_str = wave_str.strip('[').strip(']').replace('\'', '').replace('\\n', '').replace(' ', '').replace(' -,',
'').replace(
',', '\",\"')
f.write(
'\"Sun Azimuth (deg)\",\"Sun Zenith (deg)\",\"Phytoplankton\",\"Scattering X\",\"Scattering Y\",\"CDOM G\",\"CDOM S\",\"Depth (m)\",\"#wave length (nm) ->\",\"' + wave_str + '\"\n')
#--------------------------------------------------#
# Get all of the directories under the batch directories
# The directory names have the IOP parameters in the names
#--------------------------------------------------#
for dir in dir_list:
if os.path.isdir(os.path.abspath(os.path.join(input_directory, dir))):
tmp_str_list = dir.split('_')
#for tmp_str in tmp_str_list:
saa = ''.join(c for c in tmp_str_list[0] if not c.isalpha())
sza = ''.join(c for c in tmp_str_list[1] if not c.isalpha())
p = ''.join(c for c in tmp_str_list[2] if not c.isalpha())
x = ''.join(c for c in tmp_str_list[3] if not c.isalpha())
y = ''.join(c for c in tmp_str_list[4] if not c.isalpha())
g = ''.join(c for c in tmp_str_list[5] if not c.isalpha())
s = ''.join(c for c in tmp_str_list[6] if not c.isalpha())
z = ''.join(c for c in tmp_str_list[7] if not c.isalpha())
#--------------------------------------------------#
# Write the report header and then the values above in the columns
#--------------------------------------------------#
try:
f.write(saa + ',' + sza + ',' + p + ',' + x + ',' + y + ',' + g + ',' + s + ',' + z + ',')
report = self.read_pr_report(os.path.join(input_directory, os.path.join(dir, 'report.txt')))
try:
# check to see if the parameter has the @ parameter. If it does pass to directional calculator
if 'parameter_dir' in locals():
param_val = self.calc_directional_aop(report, parameter, parameter_dir)
else:
param_val = report[parameter]
param_str = str(param_val)
param_str = param_str.strip('[').strip(']').replace('\'', '').replace('\\n', '').replace(' ',
'')
f.write(param_str + '\n')
except KeyError:
lg.exception('Parameter :: ' + str(parameter) + ' :: Not in report')
except IOError:
lg.warning('Cannot find a report in directory :: ' + dir) |
def write_batch_to_file(self, filename='batch_test_default.txt'):
"""
This function creates the batch parameter file if it doesn't exist already, writes the data and
associated comments to it, and then moves it to the 'inputs/batch_files' folder.
Inputs: saa_values : <list> Sun Azimuth Angle (deg)
sza_values : <list> Sun Zenith Angle (deg)
batch_name : Name of the batch file.
p_values : <list> Phytoplankton linear scaling factor
x_value : <list> Scattering scaling factor
y_value : <list> Scattering slope factor
g_value : <list> CDOM absorption scaling factor
s_value : <list> CDOM absorption slope factor
z_value : <list> depth (m)
wavelength_values : <list> Wavelengths used for the run (nm).
verbose_value : Logging verbosity level.
phytoplankton_path : The path to the file containing phytoplankton absorption data.
bottom_path : The path to the file containing bottom reflectance data.
nb_cpu : The number of CPUs allocated to the run (-1 queries the system).
executive_path : The path to the directory containing the PlanarRad executables.
report_parameter : The output parameter to include in the batch report.
"""
#---------------------------------------------------------#
# The following is the file which is passed to planarradpy.
#---------------------------------------------------------#
self.batch_file = open(str(filename), 'w')
self.batch_file.write("""#----------------------------------------#
# Name of the batch run
#----------------------------------------#
batch_name = """)
self.batch_file.write(str(self.batch_name))
self.batch_file.write("""
#----------------------------------------#
# Bio-optical parameters list
#----------------------------------------#
saa_list = """)
self.batch_file.write(str(self.saa_values))
self.batch_file.write("""
sza_list = """)
self.batch_file.write(str(self.sza_values))
self.batch_file.write("""
p_list = """)
self.batch_file.write(str(self.p_values))
self.batch_file.write("""
x_list = """)
self.batch_file.write(str(self.x_value))
self.batch_file.write("""
y_list = """)
self.batch_file.write(str(self.y_value))
self.batch_file.write("""
g_list = """)
self.batch_file.write(str(self.g_value))
self.batch_file.write("""
s_list = """)
self.batch_file.write(str(self.s_value))
self.batch_file.write("""
z_list = """)
self.batch_file.write(str(self.z_value))
self.batch_file.write("""
#----------------------------------------#
# Wavelengths
# All IOPs are interpolated to these
# Wavelengths
#----------------------------------------#
wavelengths = """)
self.batch_file.write(str(self.wavelength_values))
self.batch_file.write("""
#----------------------------------------#
# Number of CPUs
# -1 means query the number of CPUs
#----------------------------------------#
num_cpus = """)
self.batch_file.write(str(self.nb_cpu))
self.batch_file.write("""
#----------------------------------------#
# Path of Planarrad
#----------------------------------------#
exec_path = """)
self.batch_file.write(self.executive_path)
self.batch_file.write("""
#----------------------------------------#
# Logging level
#----------------------------------------#
verbose = """)
self.batch_file.write(str(self.verbose_value))
self.batch_file.write("""
#----------------------------------------#
# File paths
# Using absolute paths
#----------------------------------------#
phytoplankton_absorption_file =""")
self.batch_file.write(self.phytoplankton_path)
self.batch_file.write("""
bottom_reflectance_file = """)
self.batch_file.write(self.bottom_path)
self.batch_file.write("""
#----------------------------------------#
# Set the parameter to report
#----------------------------------------#
report_parameter = """)
self.batch_file.write(str(self.report_parameter_value))
self.batch_file.write("""
""")
self.batch_file.close()
#-------------------------------------------------------------------#
# The following is the action to move the file to the good directory.
#-------------------------------------------------------------------#
src = './' + filename
dst = './inputs/batch_files'
os.system("mv" + " " + src + " " + dst) |
def update_fields(self, x_data, y_data, num_plot):
"""
This function updates the data needed to display curves, passed in from "data_processing" in "gui_mainLayout"
Inputs : x_data : An array with wavelengths.
y_data : An array with the curves' data.
num_plot : The index of the curve to highlight.
"""
self.x_data = x_data
self.y_data = y_data
self.num_plot = num_plot |
def display_graphic(self, flag_curves, ui):
"""
This function plots the results of a file onto the canvas.
Inputs : flag_curves : A boolean indicating whether all curves should be plotted or only the selected one.
ui : The main_Window.
"""
ui.graphic_widget.canvas.picture.clear()
x = scipy.linspace(self.x_data[0], self.x_data[-1], len(self.x_data)) #X-axis
curve_wanted = 0 #Iterator on lines of y_data
for curve in self.y_data:
if flag_curves:
if curve_wanted == self.num_plot: #If the iterator is equal of the slider's value, this curve is different
ui.graphic_widget.canvas.picture.plot(x, curve, '-r',
label='Case : {0}/{1}'.format(str(curve_wanted + 1),
str(len(self.y_data))),
linewidth=4)
else:
ui.graphic_widget.canvas.picture.plot(x, curve, '0.75')
else:
if curve_wanted == self.num_plot:
ui.graphic_widget.canvas.picture.plot(x, curve, '-r',
label='Case : {0}/{1}'.format(str(curve_wanted + 1),
str(len(self.y_data))))
curve_wanted += 1
ui.graphic_widget.canvas.picture.set_title('Rrs.csv')
ui.graphic_widget.canvas.picture.set_xlabel('Wavelength (${nm}$)')
ui.graphic_widget.canvas.picture.set_ylabel('Reflectance ($Sr^{-1}$)')
self.legend = ui.graphic_widget.canvas.picture.legend() #Display the curves' labels in a legend.
ui.graphic_widget.canvas.picture.legend(bbox_to_anchor=(1.1, 1.05))
ui.graphic_widget.canvas.draw() |
def set_handler(self, signals, handler=signal.SIG_DFL):
""" Takes a list of signals and sets a handler for them """
for sig in signals:
self.log.debug("Creating handler for signal: {0}".format(sig))
signal.signal(sig, handler) |
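# Minimal standalone sketch of registering one handler for several signals, the same
# pattern set_handler wraps. The handler and the signal choice are illustrative.
import signal


def announce(signum, frame):
    print('received signal {0}'.format(signum))


for sig in (signal.SIGTERM, signal.SIGUSR1):
    signal.signal(sig, announce)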
def pseudo_handler(self, signum, frame):
""" Pseudo handler placeholder while signal is beind processed """
self.log.warn("Received sigal {0} but system is already busy processing a previous signal, current frame: {1}".format(signum, str(frame))) |
def default_handler(self, signum, frame):
""" Default handler, a generic callback method for signal processing"""
self.log.debug("Signal handler called with signal: {0}".format(signum))
# 1. If signal is HUP restart the python process
# 2. If signal is TERM, INT or QUIT we try to cleanup then exit with -1
# 3. If signal is STOP or TSTP we pause
# 4. If signal is CONT or USR1 we continue
# 5. If signal is INFO we print status
# 6. If signal is USR2 we abort and then exit with -1
if signum in self.restart_signals:
self.set_handler(self.handled_signals, self.pseudo_handler)
self._cleanup()
os.execl('python', 'python', * sys.argv)
elif signum in self.abort_signals:
self.abort(signum)
elif signum in self.pause_signals:
self.pause(signum)
elif signum in self.resume_signals:
self.resume(signum)
elif signum in self.status_signals:
self.status(signum)
elif signum in self.error_signals:
self.log.error('Signal handler received error signal from an external process, aborting')
self.abort(signum)
else:
self.log.error("Unhandled signal received: {0}".format(signum))
raise RuntimeError("Unhandled signal received: {0}".format(signum)) |
def pause(self, signum, seconds=0, callback_function=None):
"""
Pause execution, execution will resume in X seconds or when the
appropriate resume signal is received. Execution will jump to the
callback_function, the default callback function is the handler
method which will run all tasks registered with the reg_on_resume
method.
Returns True if timer expired, otherwise returns False
"""
if callback_function is None:
callback_function = self.default_handler
if seconds > 0:
self.log.info("Signal handler pausing for {0} seconds or until it receives SIGALRM or SIGCONT".format(seconds))
signal.signal(signal.SIGALRM, callback_function)
signal.alarm(seconds)
else:
self.log.info('Signal handler pausing until it receives SIGALRM or SIGCONT')
signal.signal(signal.SIGCONT, callback_function)
signal.pause()
self.log.info('Signal handler resuming from pause')
if signum == signal.SIGALRM:
return True
else:
return False |
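# Standalone sketch of the timed-pause mechanism used above: SIGALRM is scheduled with
# signal.alarm and signal.pause blocks until any signal arrives. The 2-second delay and
# the handler are illustrative.
import signal


def on_alarm(signum, frame):
    print('alarm fired, resuming')


signal.signal(signal.SIGALRM, on_alarm)
signal.alarm(2)     # schedule SIGALRM in 2 seconds
signal.pause()      # block until a signal (here the alarm) is delivered
print('continuing')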
def abort(self, signum):
""" Run all abort tasks, then all exit tasks, then exit with error
return status"""
self.log.info('Signal handler received abort request')
self._abort(signum)
self._exit(signum)
os._exit(1) |
def status(self, signum):
""" Run all status tasks, then run all tasks in the resume queue"""
self.log.debug('Signal handler got status signal')
new_status_callbacks = []
for status_call in self.status_callbacks:
# Run the callback; non-persistent callbacks are removed after they have run
try:
self.log.debug("Calling {0}({1},{2})".format(status_call['function'].__name__, status_call['args'], status_call['kwargs']))
except AttributeError:
self.log.debug("Calling unbound function/method {0}".format(str(status_call)))
apply(status_call['function'], status_call['args'], status_call['kwargs'])
if status_call['persistent']:
new_status_callbacks.append(status_call)
self.status_callbacks = new_status_callbacks
self._resume(signum) |
def _unreg_event(self, event_list, event):
""" Tries to remove a registered event without triggering it """
try:
self.log.debug("Removing event {0}({1},{2})".format(event['function'].__name__, event['args'], event['kwargs']))
except AttributeError:
self.log.debug("Removing event {0}".format(str(event)))
try:
event_list.remove(event)
except ValueError:
try:
self.log.warn("Unable to remove event {0}({1},{2}) , not found in list: {3}".format(event['function'].__name__, event['args'], event['kwargs'], event_list))
except AttributeError:
self.log.debug("Unable to remove event {0}".format(str(event)))
raise KeyError('Unable to unregister the specified event from the signals specified') |
def reg_on_exit(self, callable_object, *args, **kwargs):
""" Register a function/method to be called on program exit,
will get executed regardless of success/failure of the program running """
persistent = kwargs.pop('persistent', False)
event = self._create_event(callable_object, 'exit', persistent, *args, **kwargs)
self.exit_callbacks.append(event)
return event |
def reg_on_abort(self, callable_object, *args, **kwargs):
""" Register a function/method to be called when execution is aborted"""
persistent = kwargs.pop('persistent', False)
event = self._create_event(callable_object, 'abort', persistent, *args, **kwargs)
self.abort_callbacks.append(event)
return event |
def reg_on_status(self, callable_object, *args, **kwargs):
""" Register a function/method to be called when a user or another
program asks for an update, when status is done it will start running
any tasks registered with the reg_on_resume method"""
persistent = kwargs.pop('persistent', False)
event = self._create_event(callable_object, 'status', persistent, *args, **kwargs)
self.status_callbacks.append(event)
return event |
def reg_on_resume(self, callable_object, *args, **kwargs):
""" Register a function/method to be called if the system needs to
resume a previously halted or paused execution, including status
requests."""
persistent = kwargs.pop('persistent', False)
event = self._create_event(callable_object, 'resume', persistent, *args, **kwargs)
self.resume_callbacks.append(event)
return event |
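# Hypothetical sketch of the event-registry pattern the reg_on_* methods implement:
# callables are stored with their arguments and a persistence flag, then invoked later
# (for example on exit or on a status request). The names here are illustrative and
# not part of the original class.
exit_callbacks = []


def reg_on_exit(callable_object, *args, **kwargs):
    persistent = kwargs.pop('persistent', False)
    event = {'function': callable_object, 'args': args, 'kwargs': kwargs,
             'persistent': persistent}
    exit_callbacks.append(event)
    return event


def run_exit_callbacks():
    remaining = []
    for event in exit_callbacks:
        event['function'](*event['args'], **event['kwargs'])
        if event['persistent']:
            remaining.append(event)
    exit_callbacks[:] = remaining


def say(msg):
    print(msg)


reg_on_exit(say, 'cleaning up temporary files')
run_exit_callbacks()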