code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
# -----------------------------------------------------------------------------
#
# This file is the copyrighted property of Tableau Software and is protected
# by registered patents and other applicable U.S. and international laws and
# regulations.
#
# Unlicensed use of the contents of this file is prohibited. Please refer to
# the NOTICES.txt file for further details.
#
# -----------------------------------------------------------------------------
from ctypes import *
from . import Libs
class TableauException(Exception):
    """Exception raised for errors reported by the Tableau SDK.

    Carries the SDK error code alongside the human-readable message.
    """

    def __init__(self, errorCode, message):
        super(TableauException, self).__init__(message)
        self.errorCode = errorCode
        self.message = message

    def __str__(self):
        return 'TableauException (%s): %s' % (self.errorCode, self.message)
def GetLastErrorMessage():
    """Return the message text for the most recent Tableau SDK error.

    Loads the SDK's Common library and calls ``TabGetLastErrorMessage``,
    whose wide-character result ctypes converts to a Python string via
    the ``c_wchar_p`` restype.
    """
    common_lib = Libs.LoadLibs().load_lib('Common')
    common_lib.TabGetLastErrorMessage.argtypes = []
    common_lib.TabGetLastErrorMessage.restype = c_wchar_p
    # With restype set to c_wchar_p the call already yields a Python string
    # (or None); wrapping it in wstring_at() as before raises TypeError,
    # because wstring_at() expects an address, not a converted string.
    return common_lib.TabGetLastErrorMessage()
"""Layout provider for an unsupported directory layout."""
from __future__ import annotations
import typing as t
from . import (
ContentLayout,
LayoutProvider,
)
class UnsupportedLayout(LayoutProvider):
    """Layout provider for an unsupported directory layout."""

    # Zero keeps this provider out of the automatic detection ordering.
    sequence = 0

    @staticmethod
    def is_content_root(path):  # type: (str) -> bool
        """Return True if the given path is a content root for this provider."""
        # An unsupported layout is never auto-detected as a content root.
        return False

    def create(self, root, paths):  # type: (str, t.List[str]) -> ContentLayout
        """Create a Layout using the given root and paths."""
        # Map each plugin type onto itself; no real plugin paths exist here.
        plugin_paths = {plugin_type: plugin_type for plugin_type in self.PLUGIN_TYPES}

        return ContentLayout(
            root,
            paths,
            plugin_paths=plugin_paths,
            collection=None,
            test_path='',
            results_path='',
            sanity_path='',
            sanity_messages=None,
            integration_path='',
            integration_targets_path='',
            integration_vars_path='',
            integration_messages=None,
            unit_path='',
            unit_module_path='',
            unit_module_utils_path='',
            unit_messages=None,
            unsupported=True,
        )
| mattclay/ansible | test/lib/ansible_test/_internal/provider/layout/unsupported.py | Python | gpl-3.0 | 1,561 |
"""Add datetime column to vote
Revision ID: 9b53dd8832b6
Revises: 5debb667e48b
Create Date: 2018-02-05 11:27:10.742269
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '9b53dd8832b6'
down_revision = '5debb667e48b'
branch_labels = None
depends_on = None
def upgrade():
    # Add a creation timestamp to every vote. server_default=now() lets the
    # column be added NOT NULL even though pre-existing rows have no value.
    op.add_column('votes',
                  sa.Column('time', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False))
def downgrade():
    # Revert the migration by dropping the timestamp column again.
    op.drop_column('votes', 'time')
| SCUEvals/scuevals-api | db/alembic/versions/20180205112710_add_datetime_column_to_vote.py | Python | agpl-3.0 | 524 |
# Copyright 2016, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Nodes that build containers.
"""
from .NodeBases import (
ExpressionChildrenHavingBase,
SideEffectsFromChildrenMixin
)
from .NodeMakingHelpers import (
getComputationResult,
makeStatementOnlyNodesFromExpressions,
wrapExpressionWithSideEffects
)
from .shapes.BuiltinTypeShapes import (
ShapeTypeList,
ShapeTypeSet,
ShapeTypeTuple
)
class ExpressionMakeSequenceBase(SideEffectsFromChildrenMixin,
                                 ExpressionChildrenHavingBase):
    # Base node for tuple/list/set creation expressions. Subclasses supply
    # the concrete type shape, the builtin used for compile-time folding
    # (getSimulator) and their iteration-length semantics.

    named_children = (
        "elements",
    )

    def __init__(self, sequence_kind, elements, source_ref):
        assert sequence_kind in ("TUPLE", "LIST", "SET"), sequence_kind

        for element in elements:
            assert element.isExpression(), element

        # Lower-cased kind is used e.g. for code generation naming.
        self.sequence_kind = sequence_kind.lower()

        ExpressionChildrenHavingBase.__init__(
            self,
            values     = {
                "elements" : tuple(elements),
            },
            source_ref = source_ref
        )

    def isExpressionMakeSequence(self):
        return True

    def getSequenceKind(self):
        return self.sequence_kind

    getElements = ExpressionChildrenHavingBase.childGetter("elements")

    def getSimulator(self):
        # Abstract method, pylint: disable=R0201
        # Subclasses return the builtin (tuple/list/set) used to simulate
        # the sequence creation at compile time.
        return None

    def computeExpression(self, trace_collection):
        elements = self.getElements()

        # If any element raises, the creation raises at that point; earlier
        # elements are kept only for their side effects.
        for count, element in enumerate(elements):
            if element.willRaiseException(BaseException):
                result = wrapExpressionWithSideEffects(
                    side_effects = elements[:count],
                    new_node     = element,
                    old_node     = self
                )

                return result, "new_raise", "Sequence creation raises exception"

        # TODO: CompileTimeConstant should be good enough.
        for element in elements:
            if not element.isExpressionConstantRef():
                return self, None, None

        simulator = self.getSimulator()
        assert simulator is not None

        # All elements are constants, so fold the creation at compile time.
        # The simulator is in fact callable if not None, pylint: disable=E1102
        return getComputationResult(
            node        = self,
            computation = lambda : simulator(
                element.getConstant()
                for element in
                self.getElements()
            ),
            description = "%s with constant arguments." % simulator.__name__.title()
        )

    def mayHaveSideEffectsBool(self):
        # Converting a fresh sequence to bool cannot raise or have effects.
        return False

    def isKnownToBeIterable(self, count):
        return count is None or count == len(self.getElements())

    def isKnownToBeIterableAtMin(self, count):
        return count <= len(self.getElements())

    def getIterationValue(self, count):
        return self.getElements()[count]

    def getIterationValueRange(self, start, stop):
        return self.getElements()[start:stop]

    @staticmethod
    def canPredictIterationValues():
        return True

    def getIterationValues(self):
        return self.getElements()

    def getTruthValue(self):
        # NOTE(review): relies on subclasses' getIterationLength(); for sets
        # with 2+ elements that returns None — confirm callers guard this.
        return self.getIterationLength() > 0

    def mayRaiseException(self, exception_type):
        # The creation raises only if building one of the elements raises.
        for element in self.getElements():
            if element.mayRaiseException(exception_type):
                return True

        return False

    def mayBeNone(self):
        return False

    def computeExpressionDrop(self, statement, trace_collection):
        # An unused sequence creation can be reduced to evaluating the
        # elements for their side effects only.
        result = makeStatementOnlyNodesFromExpressions(
            expressions = self.getElements()
        )

        return result, "new_statements", """\
Removed sequence creation for unused sequence."""
class ExpressionMakeTuple(ExpressionMakeSequenceBase):
    """Tuple creation node, ``(a, b, c)``."""

    kind = "EXPRESSION_MAKE_TUPLE"

    def __init__(self, elements, source_ref):
        ExpressionMakeSequenceBase.__init__(
            self,
            sequence_kind = "TUPLE",
            elements      = elements,
            source_ref    = source_ref
        )

    def getSimulator(self):
        # Compile-time folding builds a real tuple.
        return tuple

    def getTypeShape(self):
        return ShapeTypeTuple

    def getIterationLength(self):
        # Tuple length is exactly the number of element expressions.
        return len(self.getElements())
class ExpressionMakeList(ExpressionMakeSequenceBase):
    """List creation node, ``[a, b, c]``."""

    kind = "EXPRESSION_MAKE_LIST"

    def __init__(self, elements, source_ref):
        ExpressionMakeSequenceBase.__init__(
            self,
            sequence_kind = "LIST",
            elements      = elements,
            source_ref    = source_ref
        )

    def getSimulator(self):
        # Compile-time folding builds a real list.
        return list

    def getTypeShape(self):
        return ShapeTypeList

    def getIterationLength(self):
        return len(self.getElements())

    def computeExpressionIter1(self, iter_node, trace_collection):
        # Iterating a freshly made list is indistinguishable from iterating
        # a freshly made tuple, which is cheaper to create.
        replacement = ExpressionMakeTuple(
            elements   = self.getElements(),
            source_ref = self.source_ref
        )

        self.replaceWith(replacement)

        return iter_node, "new_expression", """\
Iteration over list reduced to tuple."""
class ExpressionMakeSet(ExpressionMakeSequenceBase):
    """Set creation node, ``{a, b, c}``."""

    kind = "EXPRESSION_MAKE_SET"

    def __init__(self, elements, source_ref):
        ExpressionMakeSequenceBase.__init__(
            self,
            sequence_kind = "SET",
            elements      = elements,
            source_ref    = source_ref
        )

    def getSimulator(self):
        # Compile-time folding builds a real set.
        return set

    def getTypeShape(self):
        return ShapeTypeSet

    def getIterationLength(self):
        # Hashing may collapse duplicate elements, so only sets built from
        # fewer than two expressions have a statically known length.
        element_count = len(self.getElements())

        return element_count if element_count < 2 else None

    def getIterationMinLength(self):
        # At least one element survives hashing if any were given.
        return min(len(self.getElements()), 1)

    def getIterationMaxLength(self):
        return len(self.getElements())

    def mayRaiseException(self, exception_type):
        # Building the set hashes each element, so an unhashable element
        # raises in addition to whatever the element expression may raise.
        return any(
            not element.isKnownToBeHashable() or
            element.mayRaiseException(exception_type)
            for element in self.getElements()
        )

    def computeExpressionIter1(self, iter_node, trace_collection):
        replacement = ExpressionMakeTuple(
            elements   = self.getElements(),
            source_ref = self.source_ref
        )

        self.replaceWith(replacement)

        return iter_node, "new_expression", """\
Iteration over set reduced to tuple."""
| fluxer/spm | nuitka/nuitka/nodes/ContainerMakingNodes.py | Python | gpl-2.0 | 7,298 |
# -*- coding: utf-8 -*-
# Copyright (C) 2010, 2011, 2012, 2013 Sebastian Wiesner <lunaryorn@gmail.com>
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 2.1 of the License, or (at your
# option) any later version.
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
pyudev.monitor
==============
Monitor implementation.
.. moduleauthor:: Sebastian Wiesner <lunaryorn@gmail.com>
"""
from __future__ import (print_function, division, unicode_literals,
absolute_import)
import os
import errno
from threading import Thread
from functools import partial
from pyudev._util import ensure_byte_string
from pyudev.core import Device
from pyudev.os import Pipe, Poll, set_fd_status_flag
__all__ = ['Monitor', 'MonitorObserver']
class Monitor(object):
    """
    A synchronous device event monitor.

    A :class:`Monitor` objects connects to the udev daemon and listens for
    changes to the device list.  A monitor is created by connecting to the
    kernel daemon through netlink (see :meth:`from_netlink`):

    >>> from pyudev import Context, Monitor
    >>> context = Context()
    >>> monitor = Monitor.from_netlink(context)

    Once the monitor is created, you can add a filter using :meth:`filter_by()`
    or :meth:`filter_by_tag()` to drop incoming events in subsystems, which are
    not of interest to the application:

    >>> monitor.filter_by('input')

    When the monitor is eventually set up, you can either poll for events
    synchronously:

    >>> device = monitor.poll(timeout=3)
    >>> if device:
    ...     print('{0.action}: {0}'.format(device))
    ...

    Or you can monitor events asynchronously with :class:`MonitorObserver`.

    To integrate into various event processing frameworks, the monitor provides
    a :func:`selectable <select.select>` file description by :meth:`fileno()`.
    However, do *not* read or write directly on this file descriptor.

    Instances of this class can directly be given as ``udev_monitor *`` to
    functions wrapped through :mod:`ctypes`.

    .. versionchanged:: 0.16
       Remove :meth:`from_socket()` which is deprecated, and even removed in
       recent udev versions.
    """

    def __init__(self, context, monitor_p):
        self.context = context
        # _as_parameter_ lets ctypes pass this object directly where a
        # ``udev_monitor *`` is expected.
        self._as_parameter_ = monitor_p
        self._libudev = context._libudev
        self._started = False

    def __del__(self):
        # Drop our reference on the underlying libudev monitor.
        self._libudev.udev_monitor_unref(self)

    @classmethod
    def from_netlink(cls, context, source='udev'):
        """
        Create a monitor by connecting to the kernel daemon through netlink.

        ``context`` is the :class:`Context` to use.  ``source`` is a string,
        describing the event source.  Two sources are available:

        ``'udev'`` (the default)
          Events emitted after udev as registered and configured the device.
          This is the absolutely recommended source for applications.

        ``'kernel'``
          Events emitted directly after the kernel has seen the device.  The
          device has not yet been configured by udev and might not be usable
          at all.  **Never** use this, unless you know what you are doing.

        Return a new :class:`Monitor` object, which is connected to the
        given source.  Raise :exc:`~exceptions.ValueError`, if an invalid
        source has been specified.  Raise
        :exc:`~exceptions.EnvironmentError`, if the creation of the monitor
        failed.
        """
        if source not in ('kernel', 'udev'):
            raise ValueError('Invalid source: {0!r}. Must be one of "udev" '
                             'or "kernel"'.format(source))
        monitor = context._libudev.udev_monitor_new_from_netlink(
            context, ensure_byte_string(source))
        if not monitor:
            raise EnvironmentError('Could not create udev monitor')
        return cls(context, monitor)

    @property
    def started(self):
        """
        ``True``, if this monitor was started, ``False`` otherwise. Readonly.

        .. seealso:: :meth:`start()`
        .. versionadded:: 0.16
        """
        return self._started

    def fileno(self):
        # pylint: disable=anomalous-backslash-in-string
        """
        Return the file description associated with this monitor as integer.

        This is really a real file descriptor ;), which can be watched and
        :func:`select.select`\ ed.
        """
        return self._libudev.udev_monitor_get_fd(self)

    def filter_by(self, subsystem, device_type=None):
        """
        Filter incoming events.

        ``subsystem`` is a byte or unicode string with the name of a
        subsystem (e.g. ``'input'``).  Only events originating from the
        given subsystem pass the filter and are handed to the caller.

        If given, ``device_type`` is a byte or unicode string specifying the
        device type.  Only devices with the given device type are propagated
        to the caller.  If ``device_type`` is not given, no additional
        filter for a specific device type is installed.

        These filters are executed inside the kernel, and client processes
        will usually not be woken up for device, that do not match these
        filters.

        .. versionchanged:: 0.15
           This method can also be after :meth:`start()` now.
        """
        subsystem = ensure_byte_string(subsystem)
        if device_type:
            device_type = ensure_byte_string(device_type)
        self._libudev.udev_monitor_filter_add_match_subsystem_devtype(
            self, subsystem, device_type)
        # Re-apply filters immediately, so this also works after start().
        self._libudev.udev_monitor_filter_update(self)

    def filter_by_tag(self, tag):
        """
        Filter incoming events by the given ``tag``.

        ``tag`` is a byte or unicode string with the name of a tag.  Only
        events for devices which have this tag attached pass the filter and are
        handed to the caller.

        Like with :meth:`filter_by` this filter is also executed inside the
        kernel, so that client processes are usually not woken up for devices
        without the given ``tag``.

        .. udevversion:: 154

        .. versionadded:: 0.9

        .. versionchanged:: 0.15
           This method can also be after :meth:`start()` now.
        """
        self._libudev.udev_monitor_filter_add_match_tag(
            self, ensure_byte_string(tag))
        self._libudev.udev_monitor_filter_update(self)

    def remove_filter(self):
        """
        Remove any filters installed with :meth:`filter_by()` or
        :meth:`filter_by_tag()` from this monitor.

        .. warning::

           Up to udev 181 (and possibly even later versions) the underlying
           ``udev_monitor_filter_remove()`` seems to be broken.  If used with
           affected versions this method always raises
           :exc:`~exceptions.ValueError`.

        Raise :exc:`~exceptions.EnvironmentError` if removal of installed
        filters failed.

        .. versionadded:: 0.15
        """
        self._libudev.udev_monitor_filter_remove(self)
        self._libudev.udev_monitor_filter_update(self)

    def enable_receiving(self):
        """
        Switch the monitor into listing mode.

        Connect to the event source and receive incoming events.  Only after
        calling this method, the monitor listens for incoming events.

        .. note::

           This method is implicitly called by :meth:`__iter__`.  You don't
           need to call it explicitly, if you are iterating over the
           monitor.

        .. deprecated:: 0.16
           Will be removed in 1.0. Use :meth:`start()` instead.
        """
        import warnings
        warnings.warn('Will be removed in 1.0. Use Monitor.start() instead.',
                      DeprecationWarning)
        self.start()

    def start(self):
        """
        Start this monitor.

        The monitor will not receive events until this method is called. This
        method does nothing if called on an already started :class:`Monitor`.

        .. note::

           Typically you don't need to call this method. It is implicitly
           called by :meth:`poll()` and :meth:`__iter__()`.

        .. seealso:: :attr:`started`

        .. versionchanged:: 0.16
           This method does nothing if the :class:`Monitor` was already
           started.
        """
        if not self._started:
            self._libudev.udev_monitor_enable_receiving(self)
            # Force monitor FD into non-blocking mode
            set_fd_status_flag(self, os.O_NONBLOCK)
            self._started = True

    def set_receive_buffer_size(self, size):
        """
        Set the receive buffer ``size``.

        ``size`` is the requested buffer size in bytes, as integer.

        .. note::

           The CAP_NET_ADMIN capability must be contained in the effective
           capability set of the caller for this method to succeed.  Otherwise
           :exc:`~exceptions.EnvironmentError` will be raised, with ``errno``
           set to :data:`~errno.EPERM`.  Unprivileged processes typically lack
           this capability.  You can check the capabilities of the current
           process with the python-prctl_ module:

           >>> import prctl
           >>> prctl.cap_effective.net_admin

        Raise :exc:`~exceptions.EnvironmentError`, if the buffer size could not
        bet set.

        .. versionadded:: 0.13

        .. _python-prctl: http://packages.python.org/python-prctl
        """
        self._libudev.udev_monitor_set_receive_buffer_size(self, size)

    def _receive_device(self):
        """Receive a single device from the monitor.

        Return the received :class:`Device`, or ``None`` if no device could be
        received.
        """
        while True:
            try:
                device_p = self._libudev.udev_monitor_receive_device(self)
                return Device(self.context, device_p) if device_p else None
            except EnvironmentError as error:
                if error.errno in (errno.EAGAIN, errno.EWOULDBLOCK):
                    # No data available
                    return None
                elif error.errno == errno.EINTR:
                    # Try again if our system call was interrupted
                    continue
                else:
                    raise

    def poll(self, timeout=None):
        """
        Poll for a device event.

        You can use this method together with :func:`iter()` to synchronously
        monitor events in the current thread::

           for device in iter(monitor.poll, None):
               print('{0.action} on {0.device_path}'.format(device))

        Since this method will never return ``None`` if no ``timeout`` is
        specified, this is effectively an endless loop. With
        :func:`functools.partial()` you can also create a loop that only waits
        for a specified time::

           for device in iter(partial(monitor.poll, 3), None):
               print('{0.action} on {0.device_path}'.format(device))

        This loop will only wait three seconds for a new device event. If no
        device event occurred after three seconds, the loop will exit.

        ``timeout`` is a floating point number that specifies a time-out in
        seconds. If omitted or ``None``, this method blocks until a device
        event is available. If ``0``, this method just polls and will never
        block.

        .. note::

           This method implicitly calls :meth:`start()`.

        Return the received :class:`Device`, or ``None`` if a timeout
        occurred. Raise :exc:`~exceptions.EnvironmentError` if event retrieval
        failed.

        .. seealso::

           :attr:`Device.action`
              The action that created this event.

           :attr:`Device.sequence_number`
              The sequence number of this event.

        .. versionadded:: 0.16
        """
        if timeout is not None and timeout > 0:
            # .poll() takes timeout in milliseconds
            timeout = int(timeout * 1000)
        self.start()
        if Poll.for_events((self, 'r')).poll(timeout):
            return self._receive_device()
        else:
            return None

    def receive_device(self):
        """
        Receive a single device from the monitor.

        .. warning::

           You *must* call :meth:`start()` before calling this method.

        The caller must make sure, that there are events available in the
        event queue.  The call blocks, until a device is available.

        If a device was available, return ``(action, device)``.  ``device``
        is the :class:`Device` object describing the device.  ``action`` is
        a string describing the action.  Usual actions are:

        ``'add'``
          A device has been added (e.g. a USB device was plugged in)
        ``'remove'``
          A device has been removed (e.g. a USB device was unplugged)
        ``'change'``
          Something about the device changed (e.g. a device property)
        ``'online'``
          The device is online now
        ``'offline'``
          The device is offline now

        Raise :exc:`~exceptions.EnvironmentError`, if no device could be
        read.

        .. deprecated:: 0.16
           Will be removed in 1.0. Use :meth:`Monitor.poll()` instead.
        """
        import warnings
        warnings.warn('Will be removed in 1.0. Use Monitor.poll() instead.',
                      DeprecationWarning)
        device = self.poll()
        return device.action, device

    def __iter__(self):
        """
        Wait for incoming events and receive them upon arrival.

        This methods implicitly calls :meth:`start()`, and starts polling the
        :meth:`fileno` of this monitor.  If a event comes in, it receives the
        corresponding device and yields it to the caller.

        The returned iterator is endless, and continues receiving devices
        without ever stopping.

        Yields ``(action, device)`` (see :meth:`receive_device` for a
        description).

        .. deprecated:: 0.16
           Will be removed in 1.0. Use an explicit loop over :meth:`poll()`
           instead, or monitor asynchronously with :class:`MonitorObserver`.
        """
        import warnings
        warnings.warn('Will be removed in 1.0. Use an explicit loop over '
                      '"poll()" instead, or monitor asynchronously with '
                      '"MonitorObserver".', DeprecationWarning)
        self.start()
        while True:
            device = self.poll()
            if device:
                yield device.action, device
class MonitorObserver(Thread):
    """
    An asynchronous observer for device events.

    This class subclasses :class:`~threading.Thread` class to asynchronously
    observe a :class:`Monitor` in a background thread:

    >>> from pyudev import Context, Monitor, MonitorObserver
    >>> context = Context()
    >>> monitor = Monitor.from_netlink(context)
    >>> monitor.filter_by(subsystem='input')
    >>> def print_device_event(device):
    ...     print('background event {0.action}: {0.device_path}'.format(device))
    >>> observer = MonitorObserver(monitor, callback=print_device_event, name='monitor-observer')
    >>> observer.daemon
    True
    >>> observer.start()

    In the above example, input device events will be printed in background,
    until :meth:`stop()` is called on ``observer``.

    .. note::

       Instances of this class are always created as daemon thread.  If you do
       not want to use daemon threads for monitoring, you need explicitly set
       :attr:`~threading.Thread.daemon` to ``False`` before invoking
       :meth:`~threading.Thread.start()`.

    .. seealso::

       :attr:`Device.action`
          The action that created this event.

       :attr:`Device.sequence_number`
          The sequence number of this event.

    .. versionadded:: 0.14

    .. versionchanged:: 0.15
       :meth:`Monitor.start()` is implicitly called when the thread is started.
    """

    def __init__(self, monitor, event_handler=None, callback=None, *args,
                 **kwargs):
        """
        Create a new observer for the given ``monitor``.

        ``monitor`` is the :class:`Monitor` to observe. ``callback`` is the
        callable to invoke on events, with the signature ``callback(device)``
        where ``device`` is the :class:`Device` that caused the event.

        .. warning::

           ``callback`` is invoked in the observer thread, hence the observer
           is blocked while callback executes.

        ``args`` and ``kwargs`` are passed unchanged to the constructor of
        :class:`~threading.Thread`.

        .. deprecated:: 0.16
           The ``event_handler`` argument will be removed in 1.0. Use
           the ``callback`` argument instead.

        .. versionchanged:: 0.16
           Add ``callback`` argument.
        """
        # Exactly one of callback/event_handler must be given.
        if callback is None and event_handler is None:
            raise ValueError('callback missing')
        elif callback is not None and event_handler is not None:
            raise ValueError('Use either callback or event handler')
        Thread.__init__(self, *args, **kwargs)
        self.monitor = monitor
        # observer threads should not keep the interpreter alive
        self.daemon = True
        # Pipe used to signal the background thread to exit; created lazily
        # in start() so an unstarted observer holds no file descriptors.
        self._stop_event = None
        if event_handler is not None:
            import warnings
            warnings.warn('"event_handler" argument will be removed in 1.0. '
                          'Use Monitor.poll() instead.', DeprecationWarning)
            # Adapt the old (action, device) handler to the new signature.
            callback = lambda d: event_handler(d.action, d)
        self._callback = callback

    def start(self):
        """Start the observer thread."""
        if not self.is_alive():
            self._stop_event = Pipe.open()
        Thread.start(self)

    def run(self):
        self.monitor.start()
        # Watch both the monitor FD and the stop pipe, so send_stop() can
        # wake this loop even when no device events arrive.
        notifier = Poll.for_events(
            (self.monitor, 'r'), (self._stop_event.source, 'r'))
        while True:
            for fd, event in notifier.poll():
                if fd == self._stop_event.source.fileno():
                    # in case of a stop event, close our pipe side, and
                    # return from the thread
                    self._stop_event.source.close()
                    return
                elif fd == self.monitor.fileno() and event == 'r':
                    # Drain all queued events without blocking.
                    read_device = partial(self.monitor.poll, timeout=0)
                    for device in iter(read_device, None):
                        self._callback(device)
                else:
                    raise EnvironmentError('Observed monitor hung up')

    def send_stop(self):
        """
        Send a stop signal to the background thread.

        The background thread will eventually exit, but it may still be running
        when this method returns.  This method is essentially the asynchronous
        equivalent to :meth:`stop()`.

        .. note::

           The underlying :attr:`monitor` is *not* stopped.
        """
        if self._stop_event is None:
            return
        with self._stop_event.sink:
            # emit a stop event to the thread
            self._stop_event.sink.write(b'\x01')
            self._stop_event.sink.flush()

    def stop(self):
        """
        Synchronously stop the background thread.

        .. note::

           This method can safely be called from the observer thread. In this
           case it is equivalent to :meth:`send_stop()`.

        Send a stop signal to the backgroud (see :meth:`send_stop`), and waits
        for the background thread to exit (see :meth:`~threading.Thread.join`)
        if the current thread is *not* the observer thread.

        After this method returns in a thread *that is not the observer
        thread*, the ``callback`` is guaranteed to not be invoked again
        anymore.

        .. note::

           The underlying :attr:`monitor` is *not* stopped.

        .. versionchanged:: 0.16
           This method can be called from the observer thread.
        """
        self.send_stop()
        try:
            self.join()
        except RuntimeError:
            # join() raises when called from the observer thread itself;
            # in that case stopping is equivalent to send_stop().
            pass
| mulkieran/pyudev | pyudev/monitor.py | Python | lgpl-2.1 | 20,793 |
from contextlib import redirect_stdout
import io
from unittest import TestCase
from todone.backend import SavedList
from todone.backend.db import Todo
from todone.commands.done import done_todo, parse_args
from todone.tests.base import DB_Backend
from todone.parser import exceptions as pe
class TestDoneTodo(DB_Backend):
    """Tests for done_todo() against a database-backed todo list."""

    def setUp(self):
        # Seed a known set of todos and record them as the most recent
        # search, since done_todo() indexes into that saved list.
        fixtures = [
            ('Todo 1', 'inbox'),
            ('Todo 2', 'inbox'),
            ('Todo 3', 'next'),
            ('Todo 4', 'today'),
            ('project', 'today'),
            ('other', 'next'),
        ]
        self.todos = [
            Todo.create(action=action, folder=folder)
            for action, folder in fixtures
        ]
        SavedList.save_most_recent_search(self.todos)

    def test_todo_done_moves_todo_to_done_folder(self):
        done_todo(['1'])
        moved_todo = Todo.get(Todo.action == 'Todo 1')
        self.assertEqual(moved_todo.folder.name, 'done')

    def test_done_prints_action_taken(self):
        captured = io.StringIO()
        with redirect_stdout(captured):
            done_todo(['3'])
        output = captured.getvalue()
        self.assertIn('Moved: Todo 3 -> {}'.format('done'), output)
class TestDoneArgParse(TestCase):
    """Tests for the done command's argument parser."""

    def test_parses_integer(self):
        self.assertEqual(parse_args(['5'])['index'], 5)

    def test_negative_integer_does_not_match(self):
        with self.assertRaises(pe.ArgumentError):
            parse_args(['-5'])

    def test_rejects_noninteger_index(self):
        with self.assertRaises(pe.ArgumentError):
            parse_args(['test'])

    def test_rejects_two_args(self):
        # Any second token is rejected, whether or not it is numeric.
        for argv in (['1', '2'], ['1', 'test']):
            with self.assertRaises(pe.ArgumentError):
                parse_args(argv)
| safnuk/todone | todone/commands/tests/test_done.py | Python | apache-2.0 | 1,893 |
from performanceplatform.collector.ga.plugins.rank \
import ComputeRank
def test_rank():
    """ComputeRank should annotate documents with their 1-based position."""
    plugin = ComputeRank("rank")
    first, second = plugin([{}, {}])
    assert first["rank"] == 1
    assert second["rank"] == 2
| alphagov/performanceplatform-collector | tests/performanceplatform/collector/ga/plugins/test_rank.py | Python | mit | 260 |
#!/usr/bin/env python
import os
import random
import requests
import subprocess
import argparse
import datetime
import time
import sys
"""Based off https://github.com/fogleman/primitive/blob/master/bot/main.py
"""
with open(os.path.expanduser('~/.flickr_api_key'), 'r') as key_file:
FLICKR_API_KEY = key_file.readline().rstrip()
class AttrDict(dict):
    """Dictionary whose items are also readable and writable as attributes."""

    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        # Aliasing __dict__ to the dict itself makes attribute access and
        # item access share the same storage.
        self.__dict__ = self
class Config(AttrDict):
    """Parameters for a primitive run, accessible as attributes."""

    def randomize(self):
        """Pick a random mode and shape count; fix the remaining knobs."""
        self.m = random.choice([1, 5, 7])
        self.n = random.randint(15, 50) * 10
        self.rep = 0
        self.a = 128
        self.r = 256

    def parse(self, text):
        """Override mode/count from free-form text (mode name, integer)."""
        text = (text or '').lower()
        # A mode name appearing anywhere in the text selects that mode.
        for index, name in enumerate(MODE_NAMES):
            if name in text:
                self.m = index
        # Any token that parses as an integer sets the shape count.
        for token in text.split():
            try:
                self.n = int(token)
            except Exception:
                pass

    def validate(self):
        """Clamp the mode into range and special-case bezier mode's count."""
        self.m = clamp(self.m, 0, 8)
        if self.m == 6:
            self.n = random.randint(1400, 2000)

    @property
    def description(self):
        total = self.n + self.n * self.rep
        return '%d %s' % (total, MODE_NAMES[self.m])
def clamp(x, lo, hi):
    """Constrain *x* to the inclusive range [lo, hi]."""
    return max(lo, min(x, hi))
def random_date(max_days_ago=1000):
    """Return a 'YYYY-MM-DD' date between 1 and max_days_ago days ago."""
    offset = datetime.timedelta(days=random.randint(1, max_days_ago))
    return (datetime.date.today() - offset).strftime('%Y-%m-%d')
def interesting(date=None):
    """Fetch flickr's interestingness list, optionally for a given date.

    Returns the list of photo dicts from the JSON response.
    """
    params = {
        'api_key': FLICKR_API_KEY,
        'format': 'json',
        'nojsoncallback': 1,
        'method': 'flickr.interestingness.getList',
    }
    if date:
        params['date'] = date
    response = requests.get('https://api.flickr.com/services/rest/', params=params)
    return response.json()['photos']['photo']
def get_aspect_ratio(p):
    """Return width/height of photo *p*'s thumbnail as a float.

    Looks the sizes up via flickr.photos.getSizes. Raises StopIteration if
    the response contains no 'Thumbnail' entry.
    """
    url = 'https://api.flickr.com/services/rest/'
    params = dict(
        api_key=FLICKR_API_KEY,
        format='json',
        nojsoncallback=1,
        method='flickr.photos.getSizes',
        photo_id=p['id']
    )
    r = requests.get(url, params=params)
    sizes = r.json()['sizes']['size']
    # filter() returns a lazy iterator on Python 3, so the original
    # thumbnail[0] indexing failed there; next() works on both 2 and 3.
    thumbnail = next(s for s in sizes if s['label'] == 'Thumbnail')
    return float(thumbnail['width']) / float(thumbnail['height'])
def photo_url(p, size=None):
    """Build the static flickr URL for photo dict *p*.

    If *size* is given it is appended as flickr's size suffix.
    See: https://www.flickr.com/services/api/misc.urls.html
    """
    suffix = '_{0}.jpg'.format(size) if size else '.jpg'
    return 'https://farm{0}.staticflickr.com/{1}/{2}_{3}{4}'.format(
        p['farm'], p['server'], p['id'], p['secret'], suffix)
def download_photo(url, path):
    """Fetch *url* and write the response body to *path*."""
    response = requests.get(url)
    with open(path, 'wb') as out:
        out.write(response.content)
def primitive(primitive_path, **kwargs):
    """Invoke the primitive binary, turning kwargs into '-key value' flags.

    Entries whose value is None are skipped. The command runs through the
    shell, so callers are expected to pre-quote values containing spaces.
    """
    flags = []
    for key, value in kwargs.items():
        if value is not None:
            flags.append('-%s' % key)
            flags.append(str(value))
    cmd = '%s %s' % (primitive_path, ' '.join(flags))
    subprocess.call(cmd, shell=True)
def create_wallpaper(args):
    """Pick a random interesting flickr photo and render a primitive
    wallpaper from it into args.output/<orientation>/<photo id>.png."""
    download_path = None
    try:
        print 'Finding interesting photo...'
        photos = interesting(date=random_date())
        photo = random.choice(photos)
        aspect_ratio = get_aspect_ratio(photo)
        print 'Downloading photo...'
        # 'z' requests the 640px-longest-side rendition of the photo.
        url = photo_url(photo, 'z')
        download_path = os.path.join('/tmp', photo['id'] + '.png')
        download_photo(url, download_path)
        output_path = os.path.expanduser(args.output)
        # Sort results into landscape/portrait subfolders by aspect ratio.
        output_path = os.path.join(output_path, 'landscape' if aspect_ratio > 1 else 'portrait')
        if not os.path.exists(output_path):
            os.makedirs(output_path)
        config = Config()
        config.randomize()
        config.validate()
        print 'Generating wallpaper with parameters {0}'.format(config)
        primitive(args.primitive_path,
                  i=download_path,
                  s=args.size,
                  o='\'{0}\''.format(os.path.join(output_path, photo['id'] + '.png')),
                  **config)
        print 'Done!'
    except Exception as e:
        # NOTE(review): broad catch keeps a batch run going after a single
        # failure, but it also hides programming errors; consider logging.
        print e
    finally:
        # Always remove the temporary download, even on failure.
        if download_path is not None and os.path.exists(download_path):
            os.remove(download_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--output', help="path to output directory", required=True)
parser.add_argument('-s', '--size', type=int, help="width of output image", required=True)
parser.add_argument('--primitive_path', help="path to primitive executable", default='/usr/local/bin/primitive')
parser.add_argument('-n', '--num', type=int, help="number of wallpapers to generate", default=1)
args = parser.parse_args()
# check network status
max_retries = 10
attempt = 0
response = None
while attempt < max_retries:
attempt += 1
try:
print 'Checking network...'
response = interesting()
break
except:
print 'No network, retrying...'
time.sleep(5)
if response is None:
print 'No network connection'
sys.exit(1)
for n in xrange(args.num):
create_wallpaper(args)
| yukunlin/homebrew-primitive-wallpaper | primitive-wallpaper.py | Python | mit | 5,416 |
# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import bt_allowed_mate_type_filter1511_all_of
except ImportError:
bt_allowed_mate_type_filter1511_all_of = sys.modules[
"onshape_client.oas.models.bt_allowed_mate_type_filter1511_all_of"
]
try:
from onshape_client.oas.models import bt_mate_filter162
except ImportError:
bt_mate_filter162 = sys.modules["onshape_client.oas.models.bt_mate_filter162"]
class BTAllowedMateTypeFilter1511(ModelComposed):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # Legal enum values for entries of the ``allowed_mate_types`` list.
    allowed_values = {
        ("allowed_mate_types",): {
            "SLIDER": "SLIDER",
            "CYLINDRICAL": "CYLINDRICAL",
            "REVOLUTE": "REVOLUTE",
            "PIN_SLOT": "PIN_SLOT",
            "PLANAR": "PLANAR",
            "BALL": "BALL",
            "FASTENED": "FASTENED",
            "PARALLEL": "PARALLEL",
            "UNKNOWN": "UNKNOWN",
        },
    }

    # No extra validation constraints are generated for this model.
    validations = {}

    # ``None`` disables deserialization of unknown/additional properties.
    additional_properties_type = None

    @staticmethod
    def openapi_types():
        """
        This must be a class method so a model may have properties that are
        of type self, this ensures that we don't create a cyclic import
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            "allowed_mate_types": ([str],),  # noqa: E501
            "bt_type": (str,),  # noqa: E501
        }

    @staticmethod
    def discriminator():
        # This model has no polymorphic discriminator field.
        return None

    # Maps Python attribute names to the JSON keys used on the wire.
    attribute_map = {
        "allowed_mate_types": "allowedMateTypes",  # noqa: E501
        "bt_type": "btType",  # noqa: E501
    }

    # Internal bookkeeping attributes that must never be treated as
    # model properties when setting values.
    required_properties = set(
        [
            "_data_store",
            "_check_type",
            "_from_server",
            "_path_to_item",
            "_configuration",
            "_composed_instances",
            "_var_name_to_model_instances",
            "_additional_properties_model_instances",
        ]
    )

    def __init__(
        self,
        _check_type=True,
        _from_server=False,
        _path_to_item=(),
        _configuration=None,
        **kwargs
    ):  # noqa: E501
        """bt_allowed_mate_type_filter1511.BTAllowedMateTypeFilter1511 - a model defined in OpenAPI
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _from_server (bool): True if the data is from the server
                False if the data is from the client (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            allowed_mate_types ([str]): [optional] # noqa: E501
            bt_type (str): [optional] # noqa: E501
        """
        self._data_store = {}
        self._check_type = _check_type
        self._from_server = _from_server
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        constant_args = {
            "_check_type": _check_type,
            "_path_to_item": _path_to_item,
            "_from_server": _from_server,
            "_configuration": _configuration,
        }
        required_args = {}
        # remove args whose value is Null because they are unset
        required_arg_names = list(required_args.keys())
        for required_arg_name in required_arg_names:
            if required_args[required_arg_name] is nulltype.Null:
                del required_args[required_arg_name]
        model_args = {}
        model_args.update(required_args)
        model_args.update(kwargs)
        # Distribute the incoming values over the composed (allOf) schemas.
        composed_info = validate_get_composed_info(constant_args, model_args, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        unused_args = composed_info[3]
        for var_name, var_value in required_args.items():
            setattr(self, var_name, var_value)
        for var_name, var_value in six.iteritems(kwargs):
            if (
                var_name in unused_args
                and self._configuration is not None
                and self._configuration.discard_unknown_keys
                and not self._additional_properties_model_instances
            ):
                # discard variable.
                continue
            setattr(self, var_name, var_value)

    @staticmethod
    def _composed_schemas():
        # we need this here to make our import statements work
        # we must store _composed_schemas in here so the code is only run
        # when we invoke this method. If we kept this at the class
        # level we would get an error beause the class level
        # code would be run when this module is imported, and these composed
        # classes don't exist yet because their module has not finished
        # loading
        return {
            "anyOf": [],
            "allOf": [
                bt_allowed_mate_type_filter1511_all_of.BTAllowedMateTypeFilter1511AllOf,
                bt_mate_filter162.BTMateFilter162,
            ],
            "oneOf": [],
        }
| onshape-public/onshape-clients | python/onshape_client/oas/models/bt_allowed_mate_type_filter1511.py | Python | mit | 7,411 |
from django.contrib.auth import get_user_model
from django.contrib.auth.backends import ModelBackend
from mama_cas.services.backends import SettingsBackend
class ExceptionBackend(ModelBackend):
    """Test-only backend whose authentication always blows up.

    Lets the test suite exercise error handling around the
    authentication machinery.
    """

    def authenticate(self, request, username=None, password=None):
        # Fail unconditionally with a generic exception.
        raise Exception
class CaseInsensitiveBackend(ModelBackend):
    """Backend matching the username without regard to letter case."""

    def authenticate(self, request, username=None, password=None):
        UserModel = get_user_model()
        try:
            candidate = UserModel.objects.get(username__iexact=username)
        except UserModel.DoesNotExist:
            # Unknown username: authentication fails.
            return None
        if candidate.check_password(password):
            return candidate
        # Wrong password: fall through and return None implicitly.
class CustomTestServiceBackend(SettingsBackend):
    """Service backend that always allows any service containing 'test.com'"""

    def service_allowed(self, service):
        # Whitelist any service URL mentioning test.com before deferring
        # to the settings-based check.
        whitelisted = bool(service) and "test.com" in service
        if whitelisted:
            return True
        return super(CustomTestServiceBackend, self).service_allowed(service)
class CustomTestInvalidServiceBackend(object):
    """Backend deliberately lacking the service-backend interface."""
| orbitvu/django-mama-cas | mama_cas/tests/backends.py | Python | bsd-3-clause | 1,158 |
import pickle
import sys
from kombu.tests.utils import unittest
if sys.version_info >= (3, 0):
from io import StringIO, BytesIO
else:
from StringIO import StringIO, StringIO as BytesIO # noqa
from kombu import utils
from kombu.utils.functional import wraps
from kombu.tests.utils import redirect_stdouts, mask_modules, skip_if_module
partition = utils._compat_partition
rpartition = utils._compat_rpartition
class OldString(object):
    """Minimal string-like wrapper used to exercise the compat helpers.

    Only the operations the partition helpers rely on are provided:
    ``__str__``, ``split`` and ``rsplit``, all delegating to the wrapped
    value.
    """

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return self.value

    def split(self, *args, **kwargs):
        # Delegate straight to the underlying string.
        return self.value.split(*args, **kwargs)

    def rsplit(self, *args, **kwargs):
        # Delegate straight to the underlying string.
        return self.value.rsplit(*args, **kwargs)
class test_utils(unittest.TestCase):
    """Tests for the small helpers in ``kombu.utils``."""
    def test_maybe_list(self):
        # None -> [], scalar -> [scalar], list passes through unchanged.
        self.assertEqual(utils.maybe_list(None), [])
        self.assertEqual(utils.maybe_list(1), [1])
        self.assertEqual(utils.maybe_list([1, 2, 3]), [1, 2, 3])
    def assert_partition(self, p, t=str):
        # Shared assertions for any partition implementation ``p``;
        # ``t`` wraps the input (str by default, OldString in compat tests).
        self.assertEqual(p(t("foo.bar.baz"), "."),
                         ("foo", ".", "bar.baz"))
        self.assertEqual(p(t("foo"), "."),
                         ("foo", "", ""))
        self.assertEqual(p(t("foo."), "."),
                         ("foo", ".", ""))
        self.assertEqual(p(t(".bar"), "."),
                         ("", ".", "bar"))
        self.assertEqual(p(t("."), "."),
                         ('', ".", ''))
    def assert_rpartition(self, p, t=str):
        # Same contract as assert_partition, but splitting from the right.
        self.assertEqual(p(t("foo.bar.baz"), "."),
                         ("foo.bar", ".", "baz"))
        self.assertEqual(p(t("foo"), "."),
                         ("", "", "foo"))
        self.assertEqual(p(t("foo."), "."),
                         ("foo", ".", ""))
        self.assertEqual(p(t(".bar"), "."),
                         ("", ".", "bar"))
        self.assertEqual(p(t("."), "."),
                         ('', ".", ''))
    def test_compat_partition(self):
        self.assert_partition(partition)
    def test_compat_rpartition(self):
        self.assert_rpartition(rpartition)
    def test_partition(self):
        self.assert_partition(utils.partition)
    def test_rpartition(self):
        self.assert_rpartition(utils.rpartition)
    def test_partition_oldstr(self):
        # The helpers must also accept objects that merely quack like str.
        self.assert_partition(utils.partition, OldString)
    def test_rpartition_oldstr(self):
        self.assert_rpartition(utils.rpartition, OldString)
class test_UUID(unittest.TestCase):
    """Tests for the uuid helpers in ``kombu.utils``."""
    def test_uuid4(self):
        # Two consecutive uuid4 calls must never collide.
        self.assertNotEqual(utils.uuid4(),
                            utils.uuid4())
    def test_uuid(self):
        # uuid() returns unique string identifiers.
        i1 = utils.uuid()
        i2 = utils.uuid()
        self.assertIsInstance(i1, str)
        self.assertNotEqual(i1, i2)
    @skip_if_module('__pypy__')
    def test_uuid_without_ctypes(self):
        # uuid() must still work when the ctypes fast-path is unavailable.
        old_utils = sys.modules.pop("kombu.utils")
        @mask_modules("ctypes")
        def with_ctypes_masked():
            from kombu.utils import ctypes, uuid
            self.assertIsNone(ctypes)
            tid = uuid()
            self.assertTrue(tid)
            self.assertIsInstance(tid, basestring)
        try:
            with_ctypes_masked()
        finally:
            # BUGFIX: restore under the key that was popped ("kombu.utils");
            # the original restored under "celery.utils", leaving
            # kombu.utils permanently removed from sys.modules.
            sys.modules["kombu.utils"] = old_utils
class test_Misc(unittest.TestCase):
    """Miscellaneous helper tests."""

    def test_kwdict(self):
        # kwdict must coerce unicode keys so the dict is usable as **kwargs.
        def capture(**kwargs):
            return kwargs
        mapping = {u"foo": "foo", u"bar": "bar"}
        self.assertTrue(capture(**utils.kwdict(mapping)))
class MyStringIO(StringIO):
    """StringIO whose close() is a no-op so the buffer stays readable."""

    def close(self):
        # Deliberately keep the buffer alive after "closing".
        pass
class MyBytesIO(BytesIO):
    """BytesIO whose close() is a no-op so the buffer stays readable."""

    def close(self):
        # Deliberately keep the buffer alive after "closing".
        pass
class test_emergency_dump_state(unittest.TestCase):
    """Tests for ``utils.emergency_dump_state``."""
    @redirect_stdouts
    def test_dump(self, stdout, stderr):
        # Default strategy pickles the state into the supplied file object.
        fh = MyBytesIO()
        utils.emergency_dump_state({"foo": "bar"}, open_file=lambda n, m: fh)
        self.assertDictEqual(pickle.loads(fh.getvalue()), {"foo": "bar"})
        # A notice is printed on stderr, nothing on stdout.
        self.assertTrue(stderr.getvalue())
        self.assertFalse(stdout.getvalue())
    @redirect_stdouts
    def test_dump_second_strategy(self, stdout, stderr):
        # When the dump callable fails, the repr fallback is written instead.
        fh = MyStringIO()
        def raise_something(*args, **kwargs):
            raise KeyError("foo")
        utils.emergency_dump_state({"foo": "bar"}, open_file=lambda n, m: fh,
                                   dump=raise_something)
        self.assertIn("'foo': 'bar'", fh.getvalue())
        self.assertTrue(stderr.getvalue())
        self.assertFalse(stdout.getvalue())
# Records the last interval passed to the patched sleep (None = not called).
_tried_to_sleep = [None]
def insomnia(fun):
    """Decorator: replace ``utils.sleep`` with a recorder while *fun* runs.

    The recorded interval ends up in ``_tried_to_sleep[0]`` so tests can
    assert on the sleep intervals without actually sleeping.
    """
    @wraps(fun)
    def _inner(*args, **kwargs):
        _tried_to_sleep[0] = None
        def mysleep(i):
            _tried_to_sleep[0] = i
        prev_sleep = utils.sleep
        utils.sleep = mysleep
        try:
            return fun(*args, **kwargs)
        finally:
            # Always restore the real sleep, even if the test fails.
            utils.sleep = prev_sleep
    return _inner
class test_retry_over_time(unittest.TestCase):
    """Tests for ``utils.retry_over_time`` (uses the insomnia sleep patch)."""
    @insomnia
    def test_simple(self):
        index = [0]
        class Predicate(Exception):
            pass
        def myfun():
            # Expected sleep interval before each retry attempt; capped by
            # interval_max=14 after ramping up in steps of 2.
            sleepvals = {0: None,
                         1: 2.0,
                         2: 4.0,
                         3: 6.0,
                         4: 8.0,
                         5: 10.0,
                         6: 12.0,
                         7: 14.0,
                         8: 16.0,
                         9: 16.0}
            self.assertEqual(_tried_to_sleep[0], sleepvals[index[0]])
            # Keep failing until the 10th call, then succeed.
            if index[0] < 9:
                raise Predicate()
            return 42
        def errback(exc, interval):
            index[0] += 1
        x = utils.retry_over_time(myfun, Predicate, errback=errback,
                                  interval_max=14)
        self.assertEqual(x, 42)
        _tried_to_sleep[0] = None
        index[0] = 0
        # With max_retries=1 the predicate error must propagate.
        self.assertRaises(Predicate,
                          utils.retry_over_time, myfun, Predicate,
                          max_retries=1, errback=errback, interval_max=14)
| mzdaniel/oh-mainline | vendor/packages/kombu/kombu/tests/test_utils.py | Python | agpl-3.0 | 5,919 |
#
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2014 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <info@star2billing.com>
#
from django.conf.urls import patterns
urlpatterns = patterns('dialer_audio.views',
    # Audio urls. NOTE: order matters — the literal 'add/' and 'del/' routes
    # must precede the generic '(.+)/' route, otherwise the catch-all
    # audio_change pattern would shadow them.
    (r'^module/audio/$', 'audio_list'),
    (r'^module/audio/add/$', 'audio_add'),
    (r'^module/audio/del/(.+)/$', 'audio_del'),
    # Catch-all: any other trailing segment is treated as an audio id.
    (r'^module/audio/(.+)/$', 'audio_change'),
)
| nishad89/newfies-dialer | newfies/dialer_audio/urls.py | Python | mpl-2.0 | 681 |
class ObjectMappingNotFound(Exception):
    """Exception carrying a message about a missing object mapping."""

    def __init__(self, message):
        # Forward the message to the Exception base so str(exc) works.
        super(ObjectMappingNotFound, self).__init__(message)
class InvalidObjectMapping(Exception):
    # Exception carrying a message about a malformed object mapping;
    # mirrors ObjectMappingNotFound above.
    def __init__(self, message):
        Exception.__init__(self, message) | Ketouem/python-frontynium | frontynium/exceptions.py | Python | mit | 231 |
# -*- coding: utf-8 -*-
"""
Tests that the request came from a crawler or not.
"""
from __future__ import absolute_import
import ddt
from django.test import TestCase
from django.http import HttpRequest
from ..models import CrawlersConfig
@ddt.ddt
class CrawlersConfigTest(TestCase):
    """Exercise ``CrawlersConfig.is_crawler`` against various user agents."""
    def setUp(self):
        super(CrawlersConfigTest, self).setUp()
        # Register two known crawler user agents for the duration of the test.
        CrawlersConfig(known_user_agents='edX-downloader,crawler_foo', enabled=True).save()
    @ddt.data(
        "Mozilla/5.0 (Linux; Android 5.1; Nexus 5 Build/LMY47I; wv) AppleWebKit/537.36 (KHTML, like Gecko) "
        "Version/4.0 Chrome/47.0.2526.100 Mobile Safari/537.36 edX/org.edx.mobile/2.0.0",
        "Le Héros des Deux Mondes",
    )
    def test_req_user_agent_is_not_crawler(self, req_user_agent):
        """
        verify that the request did not come from a crawler.
        """
        fake_request = HttpRequest()
        fake_request.META['HTTP_USER_AGENT'] = req_user_agent
        self.assertFalse(CrawlersConfig.is_crawler(fake_request))
    @ddt.data(
        # Both unicode and utf-8-encoded byte agents must be recognised.
        u"edX-downloader",
        "crawler_foo".encode("utf-8")
    )
    def test_req_user_agent_is_crawler(self, req_user_agent):
        """
        verify that the request came from a crawler.
        """
        fake_request = HttpRequest()
        fake_request.META['HTTP_USER_AGENT'] = req_user_agent
        self.assertTrue(CrawlersConfig.is_crawler(fake_request))
| ESOedX/edx-platform | openedx/core/djangoapps/crawlers/tests/test_models.py | Python | agpl-3.0 | 1,412 |
import cv2
import numpy as np
from glob import glob
import itertools as it
import math as m
from operator import add, truediv
from numpy.linalg import norm
import xml.etree.ElementTree as ET
dimCell = 8  # size (pixels) of one HOG cell, dimCell x dimCell
dimBlock = 2  # size of a normalisation block, (2x2) cells
nbins = 9  # number of orientation bins per cell histogram
Err = 0.0000000001  # epsilon added to avoid division by zero when normalising
labels = []  # training labels (1. positive / 0. negative)
samples = []  # training feature vectors
samples_test = []  # test feature vectors
labels_test = []  # test labels
# Parameters for the OpenCV SVM (linear kernel, C-SVC).
svm_params = dict( kernel_type = cv2.SVM_LINEAR,
                    svm_type = cv2.SVM_C_SVC,
                    C=2.67, gamma=5.383 )
def split2d(img, cell_size, flatten=True):
    """Cut *img* into equal tiles of ``cell_size`` = (sx, sy) pixels.

    Returns an array of tiles; when *flatten* is true the row/column grid
    is collapsed into a single leading axis of shape (n_tiles, sy, sx).
    """
    h, w = img.shape[:2]
    sx, sy = cell_size
    rows = np.vsplit(img, h // sy)
    tiles = np.array([np.hsplit(row, w // sx) for row in rows])
    if flatten:
        return tiles.reshape(-1, sy, sx)
    return tiles
def load_image(fn):
    """Read image file *fn* from disk as an 8-bit grayscale array.

    Returns None when OpenCV cannot load the file (cv2.imread contract).
    """
    # Flag 0 -> load as single-channel grayscale.
    return cv2.imread(fn, 0)
def getCells(img):
    """Split *img* into flattened dimCell x dimCell tiles via split2d."""
    return split2d(img, (dimCell, dimCell))
def getGradients(img):
    """Compute per-pixel gradients of a 2-D grayscale image.

    Uses central differences in the interior and sets the derivative to 0
    on border rows/columns where a neighbour is missing.  Returns a flat,
    row-major list of ``[magnitude, angle_in_degrees]`` pairs, one per pixel.
    """
    height, width = img.shape
    gradients = []
    for x in range(height):
        for y in range(width):
            # BUGFIX: cast to float before subtracting.  With uint8 images
            # (cv2.imread output) the original unsigned arithmetic wrapped
            # around (e.g. 10 - 20 == 246), corrupting both the sign and
            # the magnitude of the gradient.
            if 0 < y < width - 1:
                px = float(img[x, y + 1]) - float(img[x, y - 1])
            else:
                px = 0.
            if 0 < x < height - 1:
                py = float(img[x + 1, y]) - float(img[x - 1, y])
            else:
                py = 0.
            # Orientation, mirroring getAngle(px, py) (inlined so the
            # float-cast operands divide correctly).
            if py == 0:
                rad = 0.
            elif px == 0:
                rad = m.pi / 2
            else:
                rad = np.arctan(py / px)
            gradients.append([m.sqrt(px ** 2 + py ** 2), m.degrees(rad)])
    return gradients
def getAngle(x, y):
    """Return the gradient orientation (radians) for derivatives x, y."""
    # No vertical component (or no gradient at all): orientation 0.
    if y == 0:
        return 0.
    # Purely vertical gradient: +90 degrees.
    if x == 0:
        return m.pi / 2
    return np.arctan(y / x)
def calculateHistogramCells(img, height, width):
    """Build a 9-bin orientation histogram per dimCell x dimCell cell.

    ``img`` is the flat row-major list of [magnitude, angle] pairs returned
    by getGradients.  Each gradient's magnitude is soft-assigned (linear
    interpolation) to the two orientation bins bracketing its angle; bins
    are centred every 20 degrees from +90 down to -70, with -90 folded into
    the +90 bin.  Returns (list_of_cell_histograms, n_cells_y, n_cells_x).
    (Python 2: integer division for the cell counts.)
    """
    nHorizCell = width / dimCell
    nVerticalCell = height / dimCell
    histogramCell = np.zeros(9)
    histogramVector = []
    for j in range(nVerticalCell):
        for i in range(nHorizCell):
            # Walk the dimCell x dimCell pixels of cell (j, i); x is the
            # flat index into the row-major gradient list.
            for y in range(j*dimCell, j*dimCell+dimCell):
                for x in range(i*dimCell + y*width, i*dimCell + y*width + dimCell):
                    magnitud, angle = img[x]
                    # percentage1 weights the upper bin of the bracket,
                    # percentage2 (= 1 - percentage1) the lower one.
                    if (90 >= angle > 70) or angle == -90:
                        percentage1 = ( angle - 70 ) / 20.
                        percentage2 = 1 - percentage1
                        histogramCell[0] += magnitud * percentage1
                        histogramCell[1] += magnitud * percentage2
                    if 70 >= angle > 50:
                        percentage1 = ( angle - 50 ) / 20.
                        percentage2 = 1 - percentage1
                        histogramCell[1] += magnitud * percentage1
                        histogramCell[2] += magnitud * percentage2
                    if 50 >= angle > 30:
                        percentage1 = ( angle - 30 ) / 20.
                        percentage2 = 1 - percentage1
                        histogramCell[2] += magnitud * percentage1
                        histogramCell[3] += magnitud * percentage2
                    if 30 >= angle > 10:
                        percentage1 = ( angle - 10 ) / 20.
                        percentage2 = 1 - percentage1
                        histogramCell[3] += magnitud * percentage1
                        histogramCell[4] += magnitud * percentage2
                    if 10 >= angle > -10:
                        percentage1 = ( angle + 10 ) / 20.
                        percentage2 = 1 - percentage1
                        histogramCell[4] += magnitud * percentage1
                        histogramCell[5] += magnitud * percentage2
                    if -10 >= angle > -30:
                        percentage1 = ( angle +30 ) / 20.
                        percentage2 = 1 - percentage1
                        histogramCell[5] += magnitud * percentage1
                        histogramCell[6] += magnitud * percentage2
                    if -30 >= angle > -50:
                        percentage1 = ( angle + 50 ) / 20.
                        percentage2 = 1 - percentage1
                        histogramCell[6] += magnitud * percentage1
                        histogramCell[7] += magnitud * percentage2
                    if -50 >= angle > -70:
                        percentage1 = ( angle + 70 ) / 20.
                        percentage2 = 1 - percentage1
                        histogramCell[7] += magnitud * percentage1
                        histogramCell[8] += magnitud * percentage2
                    if -70 >= angle > -90:
                        # Wraps around: the upper neighbour of the -90 end
                        # is the +90 bin (bin 0).
                        percentage1 = ( angle + 90 ) / 20.
                        percentage2 = 1 - percentage1
                        histogramCell[8] += magnitud * percentage1
                        histogramCell[0] += magnitud * percentage2
            # Cell finished: store its histogram and start a fresh one.
            histogramVector.append(histogramCell)
            histogramCell = np.zeros(9)
    return histogramVector, nVerticalCell, nHorizCell
def normalizeHistogramsBlocks(vector_hist,width,height):
    """Normalise cell histograms over overlapping dimBlock x dimBlock blocks.

    ``vector_hist`` is the flat list of per-cell histograms from
    calculateHistogramCells; ``width``/``height`` are the cell-grid
    dimensions.  Blocks overlap by one cell in each direction (hence the
    ``resto_i``/``resto_j`` offsets), and each histogram value is divided
    bin-wise by the block's bin sum, with ``Err`` guarding against /0.
    Returns the concatenated flat list of normalised values.
    (Python 2: ``map`` returns a list here.)
    """
    dimVert = height - 1
    dimHoriz = width - 1
    resto_j = 0
    resto_i = 0
    blockHistogramVector = []
    sumHistogram = [0.,0.,0.,0.,0.,0.,0.,0.,0.]
    histogramsNormalized = []
    for j in range(dimVert):
        resto_i = 0
        if j != 0:
            resto_j += 1
        for i in range(dimHoriz):
            if i != 0:
                resto_i += 1
            # Gather the dimBlock x dimBlock cell histograms of this block
            # and accumulate the per-bin sums used for normalisation.
            for y in range(j*dimBlock, j*dimBlock + dimBlock):
                control_y = y
                control_y -= resto_j
                for x in range(i*dimBlock + control_y*width, i*dimBlock + control_y*width + dimBlock ):
                    index = x
                    index -= resto_i
                    blockHistogramVector.append(vector_hist[index])
                    sumHistogram = map(add, sumHistogram, vector_hist[index])
            # Emit each histogram value divided by its block bin-sum.
            for z in range(len(blockHistogramVector)):
                for w in range(nbins):
                    histogramsNormalized.append((blockHistogramVector[z][w] + Err) / (sumHistogram[w]+Err))
            # histogramsNormalized += blockHistogramVector
            # histogramsNormalized = np.append(histogramsNormalized, blockHistogramVector)
            blockHistogramVector = []
            sumHistogram = [0.,0.,0.,0.,0.,0.,0.,0.,0.]
    # histogramsNormalized = np.float32(histogramsNormalized)
    # histogramsNormalized = np.hstack(histogramsNormalized)
    return histogramsNormalized
# Script-level setup: OpenCV objects and dataset file lists.
hog = cv2.HOGDescriptor()
svm = cv2.SVM()
counter = 0  # progress counter printed while extracting features
train_pos = glob('train/pos/*')
train_neg = glob('train/neg/*')
test_pos = glob('test/pos/*')
test_neg = glob('test/neg/*')
train_mit = glob('MIT/Train/*')
test_mit = glob('MIT/Test/*')
# train_prova_pos = glob('prova/pos/*')
# train_prova_neg = glob('prova/neg/*')
iteracion = 0
# Load the SVM previously trained on INRIA (training code is commented below).
svm.load('svm_INRIA.dat')
# for fn in it.chain(train_pos):
# # iteracion += 1
# # if iteracion == 10:
# # break
# try:
# # Retornem imatge en escala de grisos normalitzada
# img = load_image(fn)
# height, width = img.shape
# if width > height:
# img = cv2.transpose(img)
# img = cv2.resize(img, (64, 128))
# height, width = img.shape
#
# if img is None:
# print 'Failed to load image file:', fn
# continue
# else:
# labels.append([1.])
# except:
# print 'loading error'
# continue
# # Calculem el gradient de la imatge
# gradient_img = getGradients(img) # Mirar la funcio cartToPolar
# # Calculem histograma per cells de Ndimensions
# histogram_vector, nVertCell, nHorizCell = calculateHistogramCells(gradient_img, height, width)
# # Normalitzem per blocks
# histogram_vector_normalized = normalizeHistogramsBlocks(histogram_vector, nHorizCell, nVertCell)
# # histogram_vector_normalized = np.float32(histogram_vector_normalized)
# samples.append(histogram_vector_normalized)
# counter += 1
# print counter
# for fn in it.chain(train_neg):
# # iteracion += 1
# # if iteracion == 10:
# # break
# try:
# # Retornem imatge en escala de grisos normalitzada
# img = load_image(fn)
# height, width = img.shape
# if width > height:
# img = cv2.transpose(img)
# img = cv2.resize(img, (64, 128))
# height, width = img.shape
#
# if img is None:
# print 'Failed to load image file:', fn
# continue
# else:
# labels.append([0.])
# except:
# print 'loading error'
# continue
# # Calculem el gradient de la imatge
# gradient_img = getGradients(img) # Mirar la funcio cartToPolar
# # Calculem histograma per cells de Ndimensions
# histogram_vector, nVertCell, nHorizCell = calculateHistogramCells(gradient_img, height, width)
# # Normalitzem per blocks
# histogram_vector_normalized = normalizeHistogramsBlocks(histogram_vector, nHorizCell, nVertCell)
# # histogram_vector_normalized = np.float32(histogram_vector_normalized)
# samples.append(histogram_vector_normalized)
# counter += 1
# print counter
# samples = np.float32(samples)
# labels = np.float32(labels)
# svm.train(samples, labels, params=svm_params)
# svm.save('svm_INRIA.dat')
# iteracion = 0
# Extract HOG features for the positive test images (label 1.).
for fn in it.chain(test_pos):
    # iteracion += 1
    # if iteracion == 10:
    #     break
    try:
        # Load the image as grayscale.
        img = load_image(fn)
        height, width = img.shape
        # Force portrait orientation, then resize to the 64x128 HOG window.
        if width > height:
            img = cv2.transpose(img)
        img = cv2.resize(img, (64, 128))
        height, width = img.shape
        # NOTE(review): this None check is dead — if imread failed, the
        # img.shape access above already raised and was caught below.
        if img is None:
            print 'Failed to load image file:', fn
            continue
        else:
            labels_test.append([1.])
    except:
        print 'loading error'
        continue
    # Compute the per-pixel gradients of the image.
    gradient_img = getGradients(img) # see also cv2.cartToPolar
    # Per-cell orientation histograms.
    histogram_vector, nVertCell, nHorizCell = calculateHistogramCells(gradient_img, height, width)
    # Block-normalise the histograms into the final descriptor.
    histogram_vector_normalized = normalizeHistogramsBlocks(histogram_vector, nHorizCell, nVertCell)
    # histogram_vector_normalized = np.float32(histogram_vector_normalized)
    samples_test.append(histogram_vector_normalized)
    counter += 1
    print counter
# Same pipeline for the negative test images (label 0.).
for fn in it.chain(test_neg):
    # iteracion += 1
    # if iteracion == 10:
    #     break
    try:
        # Load the image as grayscale.
        img = load_image(fn)
        height, width = img.shape
        if width > height:
            img = cv2.transpose(img)
        img = cv2.resize(img, (64, 128))
        height, width = img.shape
        if img is None:
            print 'Failed to load image file:', fn
            continue
        else:
            labels_test.append([0.])
    except:
        print 'loading error'
        continue
    # Compute the per-pixel gradients of the image.
    gradient_img = getGradients(img) # see also cv2.cartToPolar
    # Per-cell orientation histograms.
    histogram_vector, nVertCell, nHorizCell = calculateHistogramCells(gradient_img, height, width)
    # Block-normalise the histograms into the final descriptor.
    histogram_vector_normalized = normalizeHistogramsBlocks(histogram_vector, nHorizCell, nVertCell)
    # histogram_vector_normalized = np.float32(histogram_vector_normalized)
    samples_test.append(histogram_vector_normalized)
    counter += 1
    print counter
# Classify the whole test set and report accuracy.
samples_test = np.float32(samples_test)
labels_test = np.float32(labels_test)
resp = svm.predict_all(samples_test)
mask = resp == labels_test
correct = np.count_nonzero(mask)
print correct*100.0/resp.size,'%' | HDLynx/sharingan | sharinganINRIA.py | Python | gpl-2.0 | 12,570 |
# encoding: UTF-8
'''
本文件基于vnpy.event.eventType,并添加更多字段
'''
from vnpy.event.eventType import *
# 系统相关
EVENT_TIMER = 'eTimer' # 计时器事件,每隔1秒发送一次
EVENT_LOG = 'eLog' # 日志事件,全局通用
# Gateway相关
EVENT_TICK = 'eTick.' # TICK行情事件,可后接具体的vtSymbol
EVENT_TRADE = 'eTrade.' # 成交回报事件
EVENT_ORDER = 'eOrder.' # 报单回报事件
EVENT_POSITION = 'ePosition.' # 持仓回报事件
EVENT_ACCOUNT = 'eAccount.' # 账户回报事件
EVENT_CONTRACT = 'eContract.' # 合约基础信息回报事件
EVENT_ERROR = 'eError.' # 错误回报事件 | rrrrrr8/vnpy | vnpy/trader/vtEvent.py | Python | mit | 774 |
"""
This module contains helper functions to communicate with wellfare
using JSON dictionnaries. The main function is json_process, which
will call one of the subsequent functions depending on the TASK
to be performed.
"""
import numpy as np
from .curves import Curve
from .ILM import (infer_growth_rate,
infer_synthesis_rate_onestep,
infer_synthesis_rate_multistep,
infer_prot_conc_onestep,
infer_prot_conc_multistep)
from .preprocessing import filter_outliers, filter_outliersnew, calibration_curve
from .parsing import parse_tecan, merge_wells_dicts
# Fallback values used by get_var_with_default when a key is absent from
# the incoming JSON dictionary.
DEFAULTS = {
    'n_control_points': 100,
    'dRNA': 1.0,
    'eps_L': 0.000001,
    'alphalow': -10,
    'alphahigh': 10,
    'nalphastep': 1000,
}


def get_var_with_default(data, var):
    """Return ``data[var]``, falling back to ``DEFAULTS[var]``.

    Parameters
    ----------
    data : dict
        The JSON dictionary received from the caller.
    var : str
        Name of the variable to look up.

    Raises
    ------
    ValueError
        If *var* is neither provided in *data* nor known in DEFAULTS.
    """
    # Guard clauses with a single lookup each (the original tested
    # membership and then fetched the value a second time).
    if var in data:
        return data[var]
    if var in DEFAULTS:
        return DEFAULTS[var]
    raise ValueError("Variable %s was not provided and no default value" % var
                     + " is known for this variable (check spelling ?)")
def check_noNaN(array, name, fun, additional_message=''):
    """Raise AssertionError if *array* contains any NaN value."""
    # A single NaN makes the whole sum NaN, so one reduction checks the
    # entire array at once.
    total = np.sum(array)
    if np.isnan(total):
        raise AssertionError("Error: Array '%s' in function %s has NaNs ! %s" % (
            name, fun, additional_message))
# THE MAIN FUNCTION, CALLED BY THE PYTHON/JS PROCESS:
def json_process(command, input_data):
    """Dispatch *input_data* to the handler selected by *command*.

    Hub function of the module: each supported command maps to one of the
    ``wellfare_*`` functions below, whose docstrings describe the expected
    input and output dictionaries.
    """
    handlers = {
        'growth': wellfare_growth,
        'activity': wellfare_activity,
        'concentration': wellfare_concentration,
        'outliers': wellfare_outliers,
        'outliersnew': wellfare_outliersnew,
        'synchronize': wellfare_synchronize,
        'subtract': wellfare_subtract,
        'calibrationcurve': wellfare_calibrationcurve,
        'parsetecan': wellfare_parsetecan,
    }
    return handlers[command](input_data)
# THE SPECIFIC FUNCTIONS, ONE FOR EACH TASK:
# === INFERENCE ============================================
def wellfare_growth(data):
    """Infer the growth rate from volume data.

    Command: 'growth'
    Input dict keys: 'times_volume', 'values_volume', and optionally
    'n_control_points' (default 100), 'positive' (bool),
    'alphalow', 'alphahigh', 'nalphastep', 'eps_L'.
    Output: {'times_growth_rate': [...], 'values_growth_rate': [...]}
    """
    volume = Curve(data['times_volume'], data['values_volume'])
    check_noNaN(volume.y, "curve_v.y", "wellfare_growth")
    positive = data.get('positive', False)
    n_points = get_var_with_default(data, 'n_control_points')
    # Regular grid of control points; the 3 trailing knots are dropped.
    ttu = np.linspace(volume.x.min(), volume.x.max(), n_points + 3)[:-3]
    # Logarithmic grid of regularisation parameters.
    alphas = np.logspace(get_var_with_default(data, 'alphalow'),
                         get_var_with_default(data, 'alphahigh'),
                         get_var_with_default(data, 'nalphastep'))
    growth, _, _, _, _ = infer_growth_rate(
        volume, ttu,
        alphas=alphas,
        eps_L=get_var_with_default(data, 'eps_L'),
        positive=positive)
    check_noNaN(growth.y, "growth.y", "wellfare_growth")
    return {'times_growth_rate': list(growth.x.astype(float)),
            'values_growth_rate': list(growth.y.astype(float))}
def wellfare_activity(data):
    """ Computes protein synthesis rate, or promoter activity,
    using a simple one-step model for the GFP synthesis.
    Command : 'activity'
    Input:
        { 'times_volume': [...] ,
          'values_volume': [...],
          'times_fluo': [...],
          'values_fluo': [...],
          'dR': float, // degradation constant of the reporter
          'kR': float // (optional) folding constant of the reporter.
          'dRNA': float // (optional) degradation constant of the RNA.
          'n_control_points':100, // 100 is the default
          'alphalow' : -10,
          'alphahigh' : 10,
          'nalphastep' : 1000,
          'eps_L' : 0.000001
        }
    Output:
       { 'times_activity': [...],
         'values_activity': [...]
       }
    """
    curve_v = Curve(data['times_volume'],
                    data['values_volume'])
    curve_f = Curve(data['times_fluo'],
                    data['values_fluo'])
    dR = data['dR']
    n_control_points = get_var_with_default(data, 'n_control_points')
    # Regular control-point grid over the volume time span; the 3 trailing
    # knots are dropped.
    ttu = np.linspace(curve_v.x.min(), curve_v.x.max(),
                      n_control_points + 3)[:-3]
    eps_L = get_var_with_default(data, 'eps_L')
    alphalow = get_var_with_default(data, 'alphalow')
    alphahigh = get_var_with_default(data, 'alphahigh')
    nalphastep = get_var_with_default(data, 'nalphastep')
    # Logarithmic grid of regularisation parameters.
    alphas = np.logspace(alphalow, alphahigh, nalphastep)
    # The presence of a folding constant 'kR' selects the two-step model.
    if 'kR' in data:
        # use a two-step model of reporter expression
        # if no dRNA provided it is supposed to be very short-lived so that
        # the transcription step won't impact the dynamics of gene expression
        dRNA = get_var_with_default(data, 'dRNA')
        kR = data['kR']
        synth_rate, _, _, _, _ = infer_synthesis_rate_multistep(
            curve_v=curve_v,
            curve_f=curve_f,
            ttu=ttu,
            drna=dRNA,
            kr=kR,
            dR=dR,
            alphas=alphas,
            eps_L=eps_L)
    else:
        # use a one-step model of reporter expression
        synth_rate, _, _, _, _ = infer_synthesis_rate_onestep(
            curve_v=curve_v,
            curve_f=curve_f,
            ttu=ttu,
            degr=dR,
            alphas=alphas,
            eps_L=eps_L)
    return {'times_activity': list(synth_rate.x.astype(float)),
            'values_activity': list(synth_rate.y.astype(float))}
def wellfare_concentration(data):
    """ Computes the concentration of a protein from
    a fluorescence curve and an absorbance curve.
    Command: 'concentration'
    Input:
      { 'times_volume': [...] ,
        'values_volume': [...],
        'times_fluo: [...],
        'values_fluo: [...],
        'dR': float,
        'dP': float,
        'n_control_points': 100, // optional, 100 is default
        'alphalow' : -10,
        'alphahigh' : 10,
        'nalphastep' : 1000,
        'eps_L' : 0.000001
      }
    Output:
      { 'times_concentration': [...],
        'values_concentration': [...]
      }
    """
    curve_v = Curve(data['times_volume'],
                    data['values_volume'])
    curve_f = Curve(data['times_fluo'],
                    data['values_fluo'])
    dR = data['dR']
    dP = data['dP']
    n_control_points = get_var_with_default(data, 'n_control_points')
    # Regular control-point grid over the volume time span; the 3 trailing
    # knots are dropped.
    ttu = np.linspace(curve_v.x.min(), curve_v.x.max(),
                      n_control_points + 3)[:-3]
    eps_L = get_var_with_default(data, 'eps_L')
    alphalow = get_var_with_default(data, 'alphalow')
    alphahigh = get_var_with_default(data, 'alphahigh')
    nalphastep = get_var_with_default(data, 'nalphastep')
    # Logarithmic grid of regularisation parameters.
    alphas = np.logspace(alphalow, alphahigh, nalphastep)
    # The presence of a folding constant 'kR' selects the two-step model.
    if 'kR' in data:
        # use a two-step model of reporter expression
        # if no dRNA provided it is supposed to be very short-lived so that
        # the transcription step won't impact the dynamics of gene expression
        dRNA = get_var_with_default(data, 'dRNA')
        kR = data['kR']
        concentration, _, _, _, _ = infer_prot_conc_multistep(
            curve_v=curve_v,
            curve_f=curve_f,
            ttu=ttu,
            drna=dRNA,
            kr=kR,
            dR=dR,
            dP=dP,
            alphas=alphas,
            eps_L=eps_L)
    else:
        concentration, _, _, _, _ = infer_prot_conc_onestep(
            curve_v=curve_v,
            curve_f=curve_f,
            ttu=ttu,
            dR=dR, dP=dP,
            alphas=alphas,
            eps_L=eps_L)
    return {'times_concentration': list(concentration.x.astype(float)),
            'values_concentration': list(concentration.y.astype(float))}
# === PREPROCESSING ==============================================
def wellfare_outliers(data):
    """Remove outliers from a curve.

    Command: 'outliers'
    Input dict keys: 'times_curve', 'values_curve', plus the filter
    parameters forwarded verbatim to ``filter_outliers``:
      percentile_above (1-100): proportion of up-lying points to keep
      percentile_below (1-100): proportion of down-lying points to keep
      niter_above / niter_below (int): repetitions of the up/down filter
      goal_above / goal_below (float): stop when (-)second derivatives
          fall below these thresholds
      smoothing_win (int): smoothing window size (larger = smoother)
      nstd (float): keep points closer than nstd*std to the smooth
      above_first (bool): filter upliers first (True for absorbance,
          False for fluorescence)
    Indicative values: OD -> percentiles 50/50, niters 4/3, goals
    0.001/0.001, smoothing_win 4, nstd 1, above_first True;
    Fluo -> percentiles 90/90, niters 2/2, goals 1/5, smoothing_win 4,
    nstd 1.5, above_first False.
    Output: {'times_cleaned_curve': [...], 'values_cleaned_curve': [...]}
    """
    xs = data.pop('times_curve')
    ys = data.pop('values_curve')
    # Whatever remains in ``data`` are the filter parameters.
    filtered = filter_outliers(Curve(xs, ys), **data)
    return {'times_cleaned_curve': list(filtered.x.astype(float)),
            'values_cleaned_curve': list(filtered.y.astype(float))}
def wellfare_outliersnew(data):
    """ Removes outliers from a curve using a smoothing spline.

    Command: 'outliersnew'

    Input::

      { 'times_curve': [...],
        'values_curve': [...],
        'smoothing_win': int,   # window size (the more, the smoother)
        'nstd': int/float,      # keep points closer than nstd*std to the smooth
        'iterations': int }     # how often to repeat smoothing + cut-off

    Output::

      { 'times_cleaned_curve': [...],
        'values_cleaned_curve': [...] }
    """
    times = data.pop('times_curve')
    values = data.pop('values_curve')
    # remaining entries of ``data`` are filter_outliersnew() parameters
    cleaned = filter_outliersnew(Curve(times, values), **data)
    return {'times_cleaned_curve': list(cleaned.x.astype(float)),
            'values_cleaned_curve': list(cleaned.y.astype(float))}
def wellfare_synchronize(data):
    """ Returns the lag between two curves.

    Command: 'synchronize'

    If [x1],[y1] and [x2],[y2] are two curves, returns the time shift d
    such that [x1],[y1] ~~ [x2 + d],[y2].  Only shifts smaller than the
    provided 'max_shift' will be found.

    Input::

      { 'times_curve_1': [...],
        'values_curve_1': [...],
        'times_curve_2': [...],
        'values_curve_2': [...],
        'max_shift': float }

    Output::

      { 'time_shift': float }
    """
    reference = Curve(data['times_curve_1'], data['values_curve_1'])
    other = Curve(data['times_curve_2'], data['values_curve_2'])
    max_shift = data['max_shift']

    # candidate shifts used as starting points for the gradient search
    initial_shifts = np.arange(-max_shift, max_shift, 10)
    # restrict the comparison to times where both shifted curves exist
    t_min = max(reference.x[0], other.x[0]) + max_shift + 1
    t_max = min(reference.x[-1], other.x[-1]) - max_shift - 1
    sample_times = np.linspace(t_min, t_max, 50)

    shift = reference.find_shift_gradient([other], sample_times,
                                          shifts0=initial_shifts)[0]
    return {'time_shift': shift}
def wellfare_subtract(data):
    """ Returns the difference between two curves.

    Returns the curve ``(curve1 - curve2)``.  The times of the returned
    curve are the times of ``curve1``; values are computed using linear
    interpolation when necessary.

    Command: subtract

    Input::

      { 'times_curve_1': [...],
        'values_curve_1': [...],
        'times_curve_2': [...],
        'values_curve_2': [...] }

    Output::

      { 'times_subtraction': [...],
        'values_subtraction': [...] }
    """
    minuend = Curve(data['times_curve_1'], data['values_curve_1'])
    subtrahend = Curve(data['times_curve_2'], data['values_curve_2'])
    difference = minuend - subtrahend
    # keep only the curve-1 times that survived the subtraction
    kept_times = np.array([t for t in minuend.x if t in difference.x])
    return {'times_subtraction': list(kept_times),
            'values_subtraction': list(difference(kept_times))}
def wellfare_calibrationcurve(data):
    """ Returns the calibration curve (i.e. Fluo = f(Abs)) using polynomial fit.

    The returned curve gives the autofluorescence of the well as a
    function of its absorbance.

    Command: calibrationcurve

    Input::

      { 'abs_time': [...],
        'abs_value': [...],
        'fluo_time': [...],
        'fluo_value': [...],
        'smoothing': ...,
        'extrapoldistance': ...,
        'validinterval': ... }

    Output::

      { 'calcurve_time': [...],
        'calcurve_value': [...],
        'calcurvesmoothed_time': [...],
        'calcurvesmoothed_value': [...] }
    """
    absorbance = Curve(data['abs_time'], data['abs_value'])
    fluorescence = Curve(data['fluo_time'], data['fluo_value'])
    raw, smoothed = calibration_curve(
        absorbance, fluorescence, data['smoothing'],
        data['extrapoldistance'], data['validinterval'])
    return {'calcurve_time': list(raw.x.astype(float)),
            'calcurve_value': list(raw.y.astype(float)),
            'calcurvesmoothed_time': list(smoothed.x.astype(float)),
            'calcurvesmoothed_value': list(smoothed.y.astype(float))}
def wellfare_parsetecan(data):
    """ Returns a dict containing parsed data.

    Command: parsetecan

    Input:
      { 'inputfilename': str }
    Output:
      { 'parsedfile' : dict }

    The returned dict mirrors the plate-reader file: global info
    ('initialTime', 'programs', 'actions'), a name -> type map in
    'measureTypes' (0 = absorbance, 1 = fluorescence, -1 = unknown) and
    per-well time/value arrays under 'wells'.
    """
    filename = data['inputfilename']
    parsed_sheets, infodict = parse_tecan(filename, info=True)
    wells = merge_wells_dicts(parsed_sheets)

    jsonfile = {}
    if 'Start Time' in infodict:
        jsonfile['initialTime'] = infodict['Start Time']

    measureindex = 0
    jsonfile['measureTypes'] = {}
    jsonfile['programs'] = []
    # numeric keys of infodict are the measurement programs, in order
    while measureindex in infodict:
        programinfo = {}
        # Bug fix: reset per program so a missing 'Mode'/'Name' entry no
        # longer reuses the previous program's value (or raises NameError
        # on the first program).
        mode = None
        name = None
        for info in infodict[measureindex]:
            if info[0] == 'Mode':
                mode = info[1]
            elif info[0] == 'Name':
                name = info[1]
            programinfo[info[0]] = info[1]
        measuretype = -1
        if mode and 'Abs' in mode:
            measuretype = 0
        elif mode and 'Fluo' in mode:
            measuretype = 1
        # without a name the program cannot be keyed into measureTypes
        if name is not None:
            jsonfile['measureTypes'][name] = measuretype
        jsonfile['programs'].append(programinfo)
        measureindex += 1

    jsonfile['actions'] = infodict["actions"]

    jsonfile['wells'] = {}
    for wellname in wells:
        well = {}
        well['measures'] = {}
        for measurename in jsonfile['measureTypes']:
            if measurename in wells[wellname]:
                measure = {}
                measure["time"] = list(
                    wells[wellname][measurename].x.astype(float))
                measure["originalSignal"] = list(
                    wells[wellname][measurename].y.astype(float))
                well['measures'][measurename] = measure
        jsonfile['wells'][wellname] = well

    return {'parsedfile': jsonfile}
| ibis-inria/wellFARE | wellfare/json_api.py | Python | lgpl-3.0 | 16,896 |
import requests, zipfile
import StringIO
import pandas as pd
import os
import pdb
import numpy as np
from datetime import date
def get_data(name, url):
""" Download and cache Citibike data
"""
out_path = 'tripdata/'
csv_path = out_path + name[:-3] + 'csv'
if os.path.exists(csv_path):
print "\t{} already downloaded".format(csv_path)
else:
# request zipfile and extract
r = requests.get(url, timeout=5)
z = zipfile.ZipFile(StringIO.StringIO(r.content))
orig_name = z.namelist()[0]
z.extract(orig_name, out_path)
z.close()
#rename extracted file
os.rename(out_path + orig_name, csv_path)
print '\tzip file removed'
print '\t{} saved'.format(csv_path)
# extract trip data from tripdata/ folder, specifying month and day
def trip_data(year, month):
    '''Load trip data for a given year/month from the tripdata/ cache.'''
    csv_file = 'tripdata/' + '{}{:02}-citibike-tripdata.csv'.format(year, month)
    df = pd.read_csv(csv_file)
    # drop rows with geographic outliers (latitudes south of NYC)
    df = df[(df.stop_lat > 40.6) & (df.start_lat > 40.6)]
    # parse times into datetime objects
    df.start_time = pd.to_datetime(df.start_time)
    df.stop_time = pd.to_datetime(df.stop_time)
    return df
# extract rebalancing data from trip data
def shift_cond(bike_df):
    """
    Helper that shifts the stop columns of a single bike's trips down by
    one row, so each trip is paired with where the bike last stopped.
    Rows where the start station differs from that previous stop are the
    rebalancing events.
    """
    stop_cols = ['stop_id', 'stop_time', 'stop_long', 'stop_lat', 'stop_name']
    bike_df[stop_cols] = bike_df[stop_cols].shift(1)
    moved = bike_df.start_id != bike_df.stop_id
    return bike_df[moved]
def rebal_from_trips(trips):
    """
    Helper that returns the rebalancing events hidden in raw trip data:
    for each bike (in time order), trips that start somewhere other than
    where the bike last ended.
    """
    ordered = trips.sort_values(['bike_id', 'start_time'])
    rebal_df = ordered.groupby('bike_id').apply(shift_cond).dropna()
    # groupby.apply adds a bike_id index level; flatten it back out
    rebal_df.index = rebal_df.index.droplevel('bike_id')
    return rebal_df
def rebal_data(year, month):
    '''Load rebalancing data for a given year/month from the rebals/ cache.'''
    csv_file = 'rebals/' + '{}{:02}-rebal-data.csv'.format(year, month)
    df = pd.read_csv(csv_file)
    # drop geographic outliers; only stop_lat is filtered here, unlike
    # trip_data() -- presumably intentional, TODO confirm
    df = df[df.stop_lat > 40.6]
    # parse times into datetime objects
    df.start_time = pd.to_datetime(df.start_time)
    df.stop_time = pd.to_datetime(df.stop_time)
    return df
# grab station names and locations from trip file
def stations_from_trips(df):
    '''
    Build a table of unique stations from the trips dataframe and cache
    it to stations.csv.  For each station id the most frequent
    name/lat/long value is kept.
    '''
    most_common = lambda col: col.value_counts().index[0]
    stations = df.groupby('start_id')['start_name', 'start_lat',
                                      'start_long'].aggregate(most_common)
    stations.columns = ['name', 'lat', 'long']
    stations.index.name = 'id'
    stations.sort_index(inplace=True)
    stations.to_csv('stations.csv')
    return stations
# grab station data from stationdata/ and format to merge with features data
def station_data(year, month):
    '''load station data for a given year/month

    Returns hourly averages of available bikes/docks per station,
    indexed by (id, date, hour) and reset to columns.
    '''
    basepath = 'stationdata/'
    csvPath = '{}{:02}-bikeshare_nyc_raw.csv'.format(year,month)
    cols = ['dock_id', 'avail_bikes', 'avail_docks', '_lat', '_long',
            'dock_name', 'date', 'hour', 'minute', 'pm', 'tot_docks']
    df = pd.read_csv(basepath + csvPath,
                     delimiter = '\t', error_bad_lines = False)
    # convert columns to numeric datatypes
    df[cols[:5]] = df[cols[:5]].apply(pd.to_numeric, errors='coerce')
    # get rid of strange outliers
    df = df[df._lat > 40.6]
    df = df[df.tot_docks > 0]
    df = df.rename(columns = {'dock_id': 'id'})
    # parse and modify time columns
    formatting = '%y-%m-%d'
    df['date'] = pd.to_datetime(df.date,format = formatting)
    # 12-hour -> 24-hour conversion: both 12am and 12pm are first mapped
    # to 0; the pm branch below then adds 12 back, giving 12am -> 0 and
    # 12pm -> 12 as intended.
    df.loc[(df.hour == 12) & (df.pm == 0), 'hour'] = 0
    df.loc[(df.hour == 12) & (df.pm == 1), 'hour'] = 0
    df.hour = np.where(df.pm == 0, df.hour, df.hour+12)
    # aggregate by hour
    aggregator = {'avail_bikes':'mean',
                  'avail_docks':'mean', 'tot_docks':'max'}
    df = df.groupby(['id', 'date', 'hour']).agg(aggregator)
    df = df.applymap(round).reset_index()
    return df
| djevans071/Rebalancing-Citibike | workflow/data.py | Python | mit | 4,328 |
# Copyright 2004-2005 Joe Wreschnig, Michael Urman, Iñigo Serna
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import os
import errno
from urllib.parse import urlsplit
from gi.repository import Gtk, GObject, Gdk, Gio, Pango
from senf import uri2fsn, fsnative, fsn2text, bytes2fsn
from quodlibet import formats, print_d
from quodlibet import qltk
from quodlibet import _
from quodlibet.util import windows
from quodlibet.qltk.getstring import GetStringDialog
from quodlibet.qltk.views import AllTreeView, RCMHintedTreeView, \
MultiDragTreeView
from quodlibet.qltk.views import TreeViewColumn
from quodlibet.qltk.x import ScrolledWindow, Paned
from quodlibet.qltk.models import ObjectStore, ObjectTreeStore
from quodlibet.qltk import Icons
from quodlibet.util.path import listdir, \
glib2fsn, xdg_get_user_dirs, get_home_dir, xdg_get_config_home
from quodlibet.util import connect_obj
def search_func(model, column, key, iter_, handledirs):
    """Interactive-search callback: return False when *key* matches the
    row's path, True otherwise (GTK convention)."""
    value = model.get_value(iter_, 0)
    if value is None:
        return True
    if not handledirs or os.sep not in key:
        # match against the basename only; root dirs collapse to os.sep
        value = os.path.basename(value) or os.sep
    return key not in value.lower() and key not in value
def is_image(filename):
    """Return True if *filename* has a known image extension."""
    return filename.lower().endswith((".jpg", ".png", ".jpeg"))
def filesel_filter(filename):
    """Accept song files known to the formats registry, plus images."""
    return True if formats.filter(filename) else is_image(filename)
def _get_win_favorites():
    """Returns a list of paths for commonly used directories.

    e.g. My Music, Desktop etc.
    """
    assert os.name == "nt"

    folders = []
    for get_dir in (windows.get_desktop_dir, windows.get_personal_dir,
                    windows.get_music_dir):
        folder = get_dir()
        if folder is not None:
            folders.append(folder)

    # try to extract the favorites listed in explorer and add them
    # if not already present
    links = windows.get_links_dir()
    link_entries = []
    if links is not None:
        try:
            link_entries = os.listdir(links)
        except OSError:
            link_entries = []

    for entry in link_entries:
        if not entry.endswith(".lnk"):
            continue
        try:
            target = windows.get_link_target(os.path.join(links, entry))
        except WindowsError:
            continue
        # RecentPlaces.lnk resolves to an empty string for example
        if target:
            folders.append(target)

    # remove duplicated entries, preserving order
    return list(dict.fromkeys(folders))
def get_favorites():
    """A list of paths of commonly used folders (Desktop,..)

    Paths don't have to exist.
    """
    if os.name == "nt":
        return _get_win_favorites()

    paths = [get_home_dir()]
    user_dirs = xdg_get_user_dirs()
    for key in ("XDG_DESKTOP_DIR", "XDG_DOWNLOAD_DIR", "XDG_MUSIC_DIR"):
        if key in user_dirs:
            path = user_dirs[key]
            if path not in paths:
                paths.append(path)
    return paths
def get_drives():
    """A list of accessible drives"""
    mount_roots = (mount.get_root().get_path()
                   for mount in Gio.VolumeMonitor.get().get_mounts())
    paths = [glib2fsn(root) for root in mount_roots if root is not None]
    if os.name != "nt":
        # the filesystem root is always reachable on POSIX
        paths.append("/")
    return sorted(paths)
def parse_gtk_bookmarks(data):
    """
    Args:
        data (bytes)
    Returns:
        List[fsnative]
    Raises:
        ValueError
    """
    assert isinstance(data, bytes)

    paths = []
    for line in data.splitlines():
        fields = line.split()
        if fields:
            # first field is the folder URL; take its path component
            url_path = urlsplit(fields[0])[2]
            paths.append(bytes2fsn(url_path, "utf-8"))
    return paths
def get_gtk_bookmarks():
    """A list of paths from the GTK+ bookmarks.

    The paths don't have to exist.

    Returns:
        List[fsnative]
    """
    if os.name == "nt":
        return []

    bookmarks_file = os.path.join(xdg_get_config_home(), "gtk-3.0", "bookmarks")
    try:
        with open(bookmarks_file, "rb") as f:
            return parse_gtk_bookmarks(f.read())
    except (EnvironmentError, ValueError):
        return []
class DirectoryTree(RCMHintedTreeView, MultiDragTreeView):
    """A tree view showing multiple folder hierarchies"""

    def __init__(self, initial=None, folders=None):
        """
        initial -- the path to select/scroll to
        folders -- a list of paths to show in the tree view, None
                   will result in a separator.
        """
        model = ObjectTreeStore()
        super().__init__(model=model)

        if initial is not None:
            assert isinstance(initial, fsnative)

        column = TreeViewColumn(title=_("Folders"))
        column.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)

        # folder icon cell
        render = Gtk.CellRendererPixbuf()
        render.set_property('icon-name', Icons.FOLDER)
        render.props.xpad = 3
        column.pack_start(render, False)

        # folder name cell
        render = Gtk.CellRendererText()
        if self.supports_hints():
            render.set_property('ellipsize', Pango.EllipsizeMode.END)
        column.pack_start(render, True)

        def cell_data(column, cell, model, iter_, userdata):
            # show the basename; root paths ("/") have no basename
            value = model.get_value(iter_)
            if value is not None:
                text = fsn2text(os.path.basename(value) or value)
                cell.set_property('text', text)

        column.set_cell_data_func(render, cell_data)
        self.append_column(column)
        self.set_search_equal_func(search_func, True)
        self.set_search_column(0)

        if folders is None:
            folders = []

        for path in folders:
            niter = model.append(None, [path])
            if path is not None:
                assert isinstance(path, fsnative)
                # dummy child so the row gets an expander; replaced with
                # the real sub-folders lazily in __expanded()
                model.append(niter, [fsnative(u"dummy")])

        self.get_selection().set_mode(Gtk.SelectionMode.MULTIPLE)
        self.connect(
            'test-expand-row', DirectoryTree.__expanded, model)

        # None rows act as separators between folder groups
        self.set_row_separator_func(
            lambda model, iter_, data: model.get_value(iter_) is None, None)

        if initial:
            self.go_to(initial)

        menu = self._create_menu()
        connect_obj(self, 'popup-menu', self._popup_menu, menu)

        # Allow to drag and drop files from outside
        targets = [
            ("text/uri-list", 0, 42)
        ]
        targets = [Gtk.TargetEntry.new(*t) for t in targets]
        self.drag_dest_set(Gtk.DestDefaults.ALL, targets, Gdk.DragAction.COPY)
        self.connect('drag-data-received', self.__drag_data_received)

    def _create_menu(self):
        # context menu: new folder / delete / refresh / select sub-folders;
        # item order matters, _popup_menu() indexes into it
        menu = Gtk.Menu()
        m = qltk.MenuItem(_(u"_New Folder…"), Icons.DOCUMENT_NEW)
        m.connect('activate', self.__mkdir)
        menu.append(m)
        m = qltk.MenuItem(_("_Delete"), Icons.EDIT_DELETE)
        m.connect('activate', self.__rmdir)
        menu.append(m)
        m = qltk.MenuItem(_("_Refresh"), Icons.VIEW_REFRESH)
        m.connect('activate', self.__refresh)
        menu.append(m)
        m = qltk.MenuItem(_("_Select all Sub-Folders"), Icons.FOLDER)
        m.connect('activate', self.__expand)
        menu.append(m)
        menu.show_all()
        return menu

    def get_selected_paths(self):
        """A list of fs paths"""
        selection = self.get_selection()
        model, paths = selection.get_selected_rows()
        return [model[p][0] for p in paths]

    def go_to(self, path_to_go):
        """Expand, select and scroll to *path_to_go* (or its deepest
        existing ancestor) if it is reachable from a top-level folder."""
        assert isinstance(path_to_go, fsnative)

        # The path should be normalized in normal situations.
        # On some systems and special environments (pipenv) there might be
        # a non-normalized path at least during tests, though.
        path_to_go = os.path.normpath(path_to_go)

        model = self.get_model()

        # Find the top level row which has the largest common
        # path with the path we want to go to
        roots = dict((p, i) for (i, p) in model.iterrows(None))
        head, tail = path_to_go, fsnative(u"")
        to_find = []
        while head and head not in roots:
            new_head, tail = os.path.split(head)
            # this can happen for invalid paths on Windows
            if head == new_head:
                break
            head = new_head
            to_find.append(tail)
        if head not in roots:
            return
        start_iter = roots[head]

        # expand until we find the right directory or the last valid one
        # and select/scroll to it
        def search(view, model, iter_, to_find):
            tree_path = model.get_path(iter_)

            # we are where we want, select and scroll
            if not to_find:
                view.set_cursor(tree_path)
                view.scroll_to_cell(tree_path)
                return

            # expand the row
            view.expand_row(tree_path, False)

            next_ = to_find.pop(-1)
            for sub_iter, path in model.iterrows(iter_):
                if os.path.basename(path) == next_:
                    search(view, model, sub_iter, to_find)
                    break
            else:
                # we haven't found the right sub folder, select the parent
                # and stop
                search(view, model, iter_, [])

        search(self, model, start_iter, to_find)

    def __drag_data_received(self, widget, drag_ctx, x, y, data, info, time):
        # external file drop (target id 42): navigate to the first URI
        if info == 42:
            uris = data.get_uris()
            if uris:
                try:
                    filename = uri2fsn(uris[0])
                except ValueError:
                    pass
                else:
                    self.go_to(filename)
                    Gtk.drag_finish(drag_ctx, True, False, time)
                    return
        Gtk.drag_finish(drag_ctx, False, False, time)

    def _popup_menu(self, menu):
        # update item sensitivity for the current selection before showing
        model, paths = self.get_selection().get_selected_rows()
        directories = [model[path][0] for path in paths]
        menu_items = menu.get_children()
        delete = menu_items[1]
        try:
            # deleting is only offered for empty directories
            is_empty = not any(len(os.listdir(d)) for d in directories)
            delete.set_sensitive(is_empty)
        except OSError as err:
            if err.errno == errno.ENOENT:
                # the directory vanished underneath us; drop its row
                model.remove(model.get_iter(paths[0]))
                return False

        new_folder = menu_items[0]
        new_folder.set_sensitive(len(paths) == 1)

        selection = self.get_selection()
        selection.unselect_all()
        for path in paths:
            selection.select_path(path)

        return self.popup_menu(menu, 0, Gtk.get_current_event_time())

    def __mkdir(self, button):
        # create a new sub-folder under the single selected directory
        model, paths = self.get_selection().get_selected_rows()
        if len(paths) != 1:
            return

        path = paths[0]
        directory = model[path][0]

        dir_ = GetStringDialog(
            None, _("New Folder"), _("Enter a name for the new folder:")).run()

        if not dir_:
            return

        dir_ = glib2fsn(dir_)
        fullpath = os.path.realpath(os.path.join(directory, dir_))

        try:
            os.makedirs(fullpath)
        except EnvironmentError as err:
            error = "<b>%s</b>: %s" % (err.filename, err.strerror)
            qltk.ErrorMessage(
                None, _("Unable to create folder"), error).run()
            return

        # re-populate the parent row so the new folder shows up
        self.emit('test-expand-row', model.get_iter(path), path)
        self.expand_row(path, False)

    def __rmdir(self, button):
        # delete all selected (empty) directories
        model, paths = self.get_selection().get_selected_rows()
        directories = [model[path][0] for path in paths]
        print_d("Deleting %d empty directories" % len(directories))
        for directory in directories:
            try:
                os.rmdir(directory)
            except EnvironmentError as err:
                error = "<b>%s</b>: %s" % (err.filename, err.strerror)
                qltk.ErrorMessage(
                    None, _("Unable to delete folder"), error).run()
                return

        # refresh the parent row so the removed children disappear
        ppath = Gtk.TreePath(paths[0][:-1])
        expanded = self.row_expanded(ppath)
        self.emit('test-expand-row', model.get_iter(ppath), ppath)
        if expanded:
            self.expand_row(ppath, False)

    def __expand(self, button):
        # recursively expand and select all sub-folders of the selection
        selection = self.get_selection()
        model, paths = selection.get_selected_rows()
        for path in paths:
            iter_ = model.get_iter(path)
            self.expand_row(path, False)
            last = self.__select_children(iter_, model, selection)
            selection.select_range(path, last)

    def __select_children(self, iter_, model, selection):
        # depth-first expansion; returns the tree path of the last row
        # visited so the caller can select the whole range
        nchildren = model.iter_n_children(iter_)
        last = model.get_path(iter_)

        for i in range(nchildren):
            child = model.iter_nth_child(iter_, i)
            self.expand_row(model.get_path(child), False)
            last = self.__select_children(child, model, selection)
        return last

    def __refresh(self, button):
        # re-read the selected rows from disk, restoring which descendant
        # rows were expanded before the refresh
        model, rows = self.get_selection().get_selected_rows()
        expanded = set()
        self.map_expanded_rows(
            lambda s, iter, data: expanded.add(model[iter][0]), None)
        needs_expanding = []
        for row in rows:
            if self.row_expanded(row):
                self.emit('test-expand-row', model.get_iter(row), row)
                self.expand_row(row, False)
                needs_expanding.append(row)
        while len(needs_expanding) > 0:
            child = model.iter_children(model.get_iter(needs_expanding.pop()))
            while child is not None:
                if model[child][0] in expanded:
                    path = model.get_path(child)
                    self.emit('test-expand-row', child, path)
                    self.expand_row(path, False)
                    needs_expanding.append(path)
                child = model.iter_next(child)

    def __expanded(self, iter, path, model):
        # 'test-expand-row' handler: lazily replace the dummy child with
        # the row's actual sub-directories; shows a busy cursor meanwhile
        window = self.get_window()
        if window:
            window.set_cursor(Gdk.Cursor.new(Gdk.CursorType.WATCH))
            Gtk.main_iteration_do(False)
        try:
            try:
                if model is None:
                    return
                while model.iter_has_child(iter):
                    model.remove(model.iter_children(iter))
                folder = model[iter][0]
                for path in listdir(folder):
                    try:
                        if not os.path.isdir(path):
                            continue
                        # only add an expander (dummy row) if the folder
                        # itself contains at least one sub-folder
                        for filename in listdir(path):
                            if os.path.isdir(filename):
                                niter = model.append(iter, [path])
                                model.append(niter, ["dummy"])
                                break
                        else:
                            model.append(iter, [path])
                    except OSError:
                        pass
                if not model.iter_has_child(iter):
                    # returning True stops the expansion of an empty row
                    return True
            except OSError:
                pass
        finally:
            if window:
                window.set_cursor(None)
class FileSelector(Paned):
    """A file selector widget consisting of a folder tree
    and a file list below.
    """

    __gsignals__ = {
        # re-emitted whenever the file list selection changes
        'changed': (GObject.SignalFlags.RUN_LAST, None,
                    (Gtk.TreeSelection,))
    }

    def __init__(self, initial=None, filter=filesel_filter, folders=None):
        """
        initial -- a path to a file which should be shown initially
        filter -- a function which filters paths shown in the file list
        folders -- list of shown folders in the directory tree
        """
        super().__init__(
            orientation=Gtk.Orientation.VERTICAL)
        self.__filter = filter

        if initial is not None:
            assert isinstance(initial, fsnative)

        # show the containing folder when a file path is passed
        if initial and os.path.isfile(initial):
            initial = os.path.dirname(initial)

        dirlist = DirectoryTree(initial, folders=folders)

        model = ObjectStore()
        filelist = AllTreeView(model=model)
        filelist.connect("draw", self.__restore_scroll_pos_on_draw)

        column = TreeViewColumn(title=_("Songs"))
        column.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
        render = Gtk.CellRendererPixbuf()
        render.props.xpad = 3

        def cell_icon(column, cell, model, iter_, userdata):
            # image files get an image icon, everything else an audio one
            value = model.get_value(iter_)
            if is_image(value):
                cell.set_property('icon-name', Icons.IMAGE_X_GENERIC)
            else:
                cell.set_property('icon-name', Icons.AUDIO_X_GENERIC)

        column.set_cell_data_func(render, cell_icon)
        column.pack_start(render, False)

        render = Gtk.CellRendererText()
        if filelist.supports_hints():
            render.set_property('ellipsize', Pango.EllipsizeMode.END)
        column.pack_start(render, True)

        def cell_data(column, cell, model, iter_, userdata):
            value = model.get_value(iter_)
            cell.set_property('text', fsn2text(os.path.basename(value)))

        column.set_cell_data_func(render, cell_data)
        filelist.append_column(column)
        filelist.set_rules_hint(True)
        filelist.get_selection().set_mode(Gtk.SelectionMode.MULTIPLE)
        filelist.set_search_equal_func(search_func, False)
        filelist.set_search_column(0)

        self.__sig = filelist.get_selection().connect(
            'changed', self.__changed)

        dirlist.get_selection().connect(
            'changed', self.__dir_selection_changed, filelist)
        # populate the file list for the initial directory selection
        dirlist.get_selection().emit('changed')

        def select_all_files(view, path, col, fileselection):
            view.expand_row(path, False)
            fileselection.select_all()

        dirlist.connect('row-activated', select_all_files,
                        filelist.get_selection())

        sw = ScrolledWindow()
        sw.add(dirlist)
        sw.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        sw.set_shadow_type(Gtk.ShadowType.IN)
        self.pack1(sw, resize=True)

        sw = ScrolledWindow()
        sw.add(filelist)
        sw.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        sw.set_shadow_type(Gtk.ShadowType.IN)
        self.pack2(sw, resize=True)

    def go_to(self, *args, **kwargs):
        """Forward to DirectoryTree.go_to() on the embedded folder tree."""
        dirlist = self.get_child1().get_child()
        dirlist.go_to(*args, **kwargs)

    def get_selected_paths(self):
        """A list of fs paths"""
        filelist = self.get_child2().get_child()
        selection = filelist.get_selection()
        model, paths = selection.get_selected_rows()
        return [model[p][0] for p in paths]

    def rescan(self):
        """Refill the file list for the current directory selection"""
        dirlist = self.get_child1().get_child()
        filelist = self.get_child2().get_child()
        dir_selection = dirlist.get_selection()
        self.__dir_selection_changed(dir_selection, filelist)

    def __changed(self, selection):
        # forward file list selection changed signals
        self.emit('changed', selection)

    def __dir_selection_changed(self, selection, filelist):
        # dir selection changed, refill the file list
        fselect = filelist.get_selection()
        # block our own handler while rebuilding, one 'changed' at the end
        fselect.handler_block(self.__sig)
        fmodel, frows = fselect.get_selected_rows()
        selected = [fmodel[row][0] for row in frows]
        fmodel = filelist.get_model()
        fmodel.clear()
        dmodel, rows = selection.get_selected_rows()
        dirs = [dmodel[row][0] for row in rows]
        for dir_ in dirs:
            try:
                files = filter(self.__filter, listdir(dir_))
                for file_ in sorted(files):
                    filename = os.path.join(dir_, file_)
                    if (os.access(filename, os.R_OK) and
                            not os.path.isdir(filename)):
                        fmodel.append([filename])
            except OSError:
                pass
        # restore the previous selection where the same files still exist
        for iter_, filename in fmodel.iterrows():
            if filename in selected:
                fselect.select_iter(iter_)
        fselect.handler_unblock(self.__sig)
        fselect.emit('changed')
        # clearing the model reset the scroll position; remember it and
        # restore it on the next draw
        self._saved_scroll_pos = filelist.get_vadjustment().get_value()

    def __restore_scroll_pos_on_draw(self, treeview, context):
        # NOTE(review): relies on __dir_selection_changed having run once
        # (triggered from __init__) before the first draw -- confirm
        if self._saved_scroll_pos:
            vadj = treeview.get_vadjustment()
            vadj.set_value(self._saved_scroll_pos)
            self._saved_scroll_pos = None
def _get_main_folders():
    """Existing favorites, drives and GTK bookmarks, with a None
    separator between each non-empty group."""

    def existing(paths):
        return [p for p in paths if os.path.isdir(p)]

    sections = [existing(get_favorites()),
                existing(get_drives()),
                existing(get_gtk_bookmarks())]

    folders = []
    for section in sections:
        if folders and section:
            folders.append(None)
        folders.extend(section)
    return folders
class MainFileSelector(FileSelector):
    """The main file selector used in EF.

    Shows a useful list of directories in the directory tree.
    """

    def __init__(self, initial=None):
        # favorites / drives / bookmarks, with separators in between
        folders = _get_main_folders()
        super().__init__(
            initial, filesel_filter, folders=folders)
class MainDirectoryTree(DirectoryTree):
    """The main directory tree used in QL.

    Shows a useful list of directories.
    """

    def __init__(self, initial=None, folders=None):
        if folders is None:
            folders = []
        # append the standard folders (favorites/drives/bookmarks) after
        # any caller-provided ones, separated by a None separator row
        main = _get_main_folders()
        if folders and main:
            folders += [None]
        if main:
            folders += main
        super().__init__(
            initial=initial, folders=folders)
| Mellthas/quodlibet | quodlibet/qltk/filesel.py | Python | gpl-2.0 | 22,372 |
# Grades for three subjects; the passing grade checked below is 7.
materia1 = 6
materia2 = 10
materia3 = 10
print(materia1 >= 7 and materia2 >= 7 and materia3 >= 7) | laenderoliveira/exerclivropy | cap03/ex-03-06.py | Python | mit | 97 |
from corehq.apps.receiverwrapper.util import submit_form_locally
from casexml.apps.case.models import CommCareCase
from lxml import etree
import os
from datetime import datetime, timedelta
import uuid
from django.core.files.uploadedfile import UploadedFile
from custom.uth.const import UTH_DOMAIN
import re
def scan_case(scanner_serial, scan_id):
    """
    Find the appropriate case for a serial/exam id combo.

    Throws an exception if there are more than one (this is
    an error that we do not expect to be able to make corrections
    for).
    """
    # the exam id is shown on the device and stored on the case without
    # leading zeroes, but the file name carries them
    normalized_id = scan_id.lstrip('0')
    key = [UTH_DOMAIN, scanner_serial, normalized_id]
    return CommCareCase.get_db().view(
        'uth/uth_lookup',
        startkey=key,
        endkey=key + [{}],
    ).one()
def match_case(scanner_serial, scan_id, date=None):
    """Return the matched CommCareCase for a serial/exam id, or None."""
    result = scan_case(scanner_serial, scan_id)
    return CommCareCase.get(result['value']) if result else None
def get_case_id(patient_xml):
    """
    This is the case_id if it's extracted, assumed to be in the PatientID
    element.  There's a nonzero chance of the operator either forgetting
    to scan it or putting it in the wrong field (e.g. PatientsName), in
    which case the device writes the '(_No_ID_)' placeholder.
    """
    root = etree.fromstring(patient_xml)
    case_id = root.find("PatientID").text
    return None if case_id == '(_No_ID_)' else case_id
def get_study_id(patient_xml):
    """
    The GUID the sonosite generates for the particular exam.
    """
    root = etree.fromstring(patient_xml)
    return root.find("SonoStudyInstanceUID").text
def load_template(filename):
    """Read an xml template from this package's data/ directory."""
    template_path = os.path.join(os.path.dirname(__file__), 'data', filename)
    with open(template_path, 'r') as fin:
        return fin.read()
def case_attach_block(key, filename):
    """Render one case-attachment xml element for *filename* under *key*."""
    basename = os.path.split(filename)[-1]
    return '<n0:%s src="%s" from="local"/>' % (key, basename)
def render_sonosite_xform(files, exam_uuid, patient_case_id=None):
    """
    Render the xml needed to create a new case for a given
    screening. This case will be a subcase to the `exam_uuid` case,
    which belongs to the patient.
    """
    template = load_template('upload_form.xml.template')
    attachments = ''.join(case_attach_block(identifier(f), f) for f in files)

    # sample the clock once so all timestamps in the form agree
    exam_time = datetime.utcnow()
    return template % {
        'time_start': (exam_time - timedelta(seconds=5)).strftime('%Y-%m-%dT%H:%M:%SZ'),
        'time_end': exam_time.strftime('%Y-%m-%dT%H:%M:%SZ'),
        'modified_date': exam_time.strftime('%Y-%m-%dT%H:%M:%SZ'),
        'user_id': 'uth-uploader',
        'doc_id': uuid.uuid4().hex,
        'case_id': uuid.uuid4().hex,
        'patient_case_id': patient_case_id,
        'case_attachments': attachments,
        'exam_id': exam_uuid,
        'case_name': 'Sonosite Exam - ' + exam_time.strftime('%Y-%m-%d'),
    }
def render_vscan_error(case_id):
    """
    Render the xml for a case update flagging that the vscan upload
    could not be processed.

    case_id -- the patient case to update
    """
    xform_template = load_template('vscan_error.xml.template')

    # Consistency fix: sample the clock once (as render_sonosite_xform
    # does) so time_end and modified_date cannot straddle a second
    # boundary and disagree within one form.
    now = datetime.utcnow()
    format_dict = {
        'time_start': (now - timedelta(seconds=5)).strftime('%Y-%m-%dT%H:%M:%SZ'),
        'time_end': now.strftime('%Y-%m-%dT%H:%M:%SZ'),
        'modified_date': now.strftime('%Y-%m-%dT%H:%M:%SZ'),
        'user_id': 'uth-uploader',
        'doc_id': uuid.uuid4().hex,
        'case_id': case_id,
    }

    final_xml = xform_template % format_dict
    return final_xml
def render_vscan_xform(case_id, files):
    """
    Render the xml needed to add attachments to the patient's case.

    case_id -- the patient case to update
    files -- mapping of file path -> file content/object
    """
    xform_template = load_template('vscan_form.xml.template')

    case_attachments = [
        case_attach_block(os.path.split(f)[-1], f) for f in files
    ]

    # Consistency fix: sample the clock once (as render_sonosite_xform
    # does) so the three timestamps in one form cannot disagree.
    now = datetime.utcnow()
    format_dict = {
        'time_start': (now - timedelta(seconds=5)).strftime('%Y-%m-%dT%H:%M:%SZ'),
        'time_end': now.strftime('%Y-%m-%dT%H:%M:%SZ'),
        'modified_date': now.strftime('%Y-%m-%dT%H:%M:%SZ'),
        'user_id': 'uth-uploader',
        'doc_id': uuid.uuid4().hex,
        'case_id': case_id,
        'case_attachments': ''.join(case_attachments),
    }

    final_xml = xform_template % format_dict
    return final_xml
def identifier(filename):
    """
    File names look like ``09.44.32 hrs __[0000312].jpeg``; extract the
    bracketed digits to build the attachment identifier.  Falls back to
    the raw filename when no bracketed number is present.
    """
    match = re.search(r'\[(\d+)\]', filename)
    return 'attachment' + match.group(1) if match else filename
def create_case(case_id, files, patient_case_id=None):
    """
    Handle case submission for the sonosite endpoint
    """
    # already parsed what we need from this file, so it can be dropped
    # without worrying we will need it later
    files.pop('PT_PPS.XML', '')

    xform = render_sonosite_xform(files, case_id, patient_case_id)

    attachments = dict(
        (name, UploadedFile(files[name], name)) for name in files
    )

    _, _, cases = submit_form_locally(
        instance=xform,
        attachments=attachments,
        domain=UTH_DOMAIN,
    )
    unique_ids = {case.case_id for case in cases}
    return [CommCareCase.get(cid) for cid in unique_ids]
def attach_images_to_case(case_id, files):
    """
    Handle case submission for the vscan endpoint

    case_id -- the patient case to attach images to
    files -- mapping of file path -> file content/object
    """
    xform = render_vscan_xform(case_id, files)

    file_dict = {}
    for f in files:
        # Bug fix: this local used to be named ``identifier``, shadowing
        # the module-level identifier() helper used by the sonosite path.
        attachment_name = os.path.split(f)[-1]
        file_dict[attachment_name] = UploadedFile(files[f], attachment_name)

    submit_form_locally(xform, attachments=file_dict, domain=UTH_DOMAIN)
def submit_error_case(case_id):
    """
    Used if something went wrong creating the real vscan
    case update.
    """
    submit_form_locally(
        instance=render_vscan_error(case_id),
        domain=UTH_DOMAIN,
    )
def put_request_files_in_doc(request, doc):
    """Copy every uploaded file on *request* onto *doc* as an attachment."""
    for attachment_name, uploaded in request.FILES.iteritems():
        doc.put_attachment(
            uploaded,
            attachment_name,
        )
| qedsoftware/commcare-hq | custom/uth/utils.py | Python | bsd-3-clause | 6,401 |
from pycoin.encoding.bytes32 import from_bytes_32
from pycoin.encoding.hash import double_sha256
from pycoin.satoshi.flags import SIGHASH_FORKID
from ..bitcoin.SolutionChecker import BitcoinSolutionChecker
class BgoldSolutionChecker(BitcoinSolutionChecker):
    """Solution checker for Bitcoin Gold (BTG), which reuses the segwit-style
    sighash but mixes a fork id into the hash type for replay protection."""

    FORKID_BTG = 79  # atomic number for Au (gold)

    def _signature_hash(self, tx_out_script, unsigned_txs_out_idx, hash_type):
        """
        Return the canonical hash for a transaction. We need to
        remove references to the signature, since it's a signature
        of the hash before the signature is applied.

        tx_out_script: the script the coins for unsigned_txs_out_idx are coming from
        unsigned_txs_out_idx: where to put the tx_out_script
        hash_type: one of SIGHASH_NONE, SIGHASH_SINGLE, SIGHASH_ALL,
        optionally bitwise or'ed with SIGHASH_ANYONECANPAY

        Raises ScriptError when SIGHASH_FORKID is not set (required on BTG).
        """
        # BTG signatures must carry the SIGHASH_FORKID bit; reject any that don't.
        if hash_type & SIGHASH_FORKID != SIGHASH_FORKID:
            raise self.ScriptError()

        return self._signature_for_hash_type_segwit(tx_out_script, unsigned_txs_out_idx, hash_type)

    def _signature_for_hash_type_segwit(self, script, tx_in_idx, hash_type):
        # Fold the fork id into bits 8+ of the hash type so BTG signatures
        # cannot be replayed on the Bitcoin chain, then double-SHA256 the
        # segwit-style preimage.
        hash_type |= self.FORKID_BTG << 8
        return from_bytes_32(double_sha256(self._segwit_signature_preimage(script, tx_in_idx, hash_type)))
| richardkiss/pycoin | pycoin/coins/bgold/SolutionChecker.py | Python | mit | 1,299 |
#
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2016, Ilya Etingof <ilya@glas.net>
# License: http://pysnmp.sf.net/license.html
#
import sys
from pyasn1.compat.octets import null
from pysnmp.proto import rfc3411, error
from pysnmp.proto.api import v1, v2c # backend is always SMIv2 compliant
from pysnmp.proto.proxy import rfc2576
from pysnmp import debug
# 3.4
class NotificationReceiver:
    """SNMP notification receiver application (RFC 3413, section 3.4).

    Registers for TRAP / INFORM PDUs on the engine's dispatcher and hands the
    received var-binds to a user callback.  INFORMs are acknowledged with a
    Response PDU before the callback runs.
    """
    # PDU types this application handles: v1 traps, v2c traps and informs
    pduTypes = (v1.TrapPDU.tagSet, v2c.SNMPv2TrapPDU.tagSet,
                v2c.InformRequestPDU.tagSet)

    def __init__(self, snmpEngine, cbFun, cbCtx=None):
        # Register for all context engine IDs (null == wildcard)
        snmpEngine.msgAndPduDsp.registerContextEngineId(
            null, self.pduTypes, self.processPdu  # '' is a wildcard
        )
        # 0 = callback signature not yet probed; 1 = new-style signature
        # that takes stateReference (see the compatibility stub below)
        self.__cbFunVer = 0
        self.__cbFun = cbFun
        self.__cbCtx = cbCtx

    def close(self, snmpEngine):
        """Unregister from the dispatcher and drop callback references."""
        snmpEngine.msgAndPduDsp.unregisterContextEngineId(
            null, self.pduTypes
        )
        self.__cbFun = self.__cbCtx = None

    def processPdu(self, snmpEngine, messageProcessingModel,
                   securityModel, securityName, securityLevel,
                   contextEngineId, contextName, pduVersion, PDU,
                   maxSizeResponseScopedPDU, stateReference):
        """Dispatcher entry point for an incoming notification PDU."""

        # Agent-side API complies with SMIv2: translate SNMPv1 PDUs up to v2
        if messageProcessingModel == 0:
            origPdu = PDU
            PDU = rfc2576.v1ToV2(PDU)
        else:
            origPdu = None

        errorStatus = 'noError'
        errorIndex = 0
        varBinds = v2c.apiPDU.getVarBinds(PDU)

        debug.logger & debug.flagApp and debug.logger('processPdu: stateReference %s, varBinds %s' % (stateReference, varBinds))

        # 3.4
        if PDU.tagSet in rfc3411.confirmedClassPDUs:
            # Confirmed (INFORM) notifications must be acknowledged
            # 3.4.1 --> no-op

            rspPDU = v2c.apiPDU.getResponse(PDU)

            # 3.4.2
            v2c.apiPDU.setErrorStatus(rspPDU, errorStatus)
            v2c.apiPDU.setErrorIndex(rspPDU, errorIndex)
            v2c.apiPDU.setVarBinds(rspPDU, varBinds)

            debug.logger & debug.flagApp and debug.logger('processPdu: stateReference %s, confirm PDU %s' % (stateReference, rspPDU.prettyPrint()))

            # Agent-side API complies with SMIv2: translate the response back
            # down to v1 when the request arrived as v1
            if messageProcessingModel == 0:
                rspPDU = rfc2576.v2ToV1(rspPDU, origPdu)

            statusInformation = {}

            # 3.4.3: send the acknowledgement; on failure bump snmpSilentDrops
            try:
                snmpEngine.msgAndPduDsp.returnResponsePdu(
                    snmpEngine, messageProcessingModel, securityModel,
                    securityName, securityLevel, contextEngineId,
                    contextName, pduVersion, rspPDU, maxSizeResponseScopedPDU,
                    stateReference, statusInformation)

            except error.StatusInformation:
                debug.logger & debug.flagApp and debug.logger('processPdu: stateReference %s, statusInformation %s' % (stateReference, sys.exc_info()[1]))
                snmpSilentDrops, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMPv2-MIB', 'snmpSilentDrops')
                snmpSilentDrops.syntax += 1

        elif PDU.tagSet in rfc3411.unconfirmedClassPDUs:
            # Unconfirmed (TRAP) notifications need no response
            pass
        else:
            raise error.ProtocolError('Unexpected PDU class %s' % PDU.tagSet)

        debug.logger & debug.flagApp and debug.logger('processPdu: stateReference %s, user cbFun %s, cbCtx %s, varBinds %s' % (stateReference, self.__cbFun, self.__cbCtx, varBinds))

        if self.__cbFunVer:
            # New-style callback: receives stateReference
            self.__cbFun(snmpEngine, stateReference, contextEngineId,
                         contextName, varBinds, self.__cbCtx)
        else:
            # Compatibility stub (handle legacy cbFun interface): probe the
            # old signature first, and on TypeError switch permanently to
            # the new-style signature.
            try:
                self.__cbFun(snmpEngine, contextEngineId, contextName,
                             varBinds, self.__cbCtx)
            except TypeError:
                self.__cbFunVer = 1
                self.__cbFun(snmpEngine, stateReference, contextEngineId,
                             contextName, varBinds, self.__cbCtx)
| filippog/pysnmp | pysnmp/entity/rfc3413/ntfrcv.py | Python | bsd-3-clause | 4,010 |
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import fnmatch
import os
import re
import datetime
from functools import partial
import sickbeard
from sickbeard import common
from sickbeard.helpers import sanitizeSceneName
from sickbeard.scene_exceptions import get_scene_exceptions
from sickbeard import logger
from sickbeard import db
from sickbeard import encodingKludge as ek
from name_parser.parser import NameParser, InvalidNameException, InvalidShowException
from lib.unidecode import unidecode
from sickbeard.blackandwhitelist import BlackAndWhiteList
# Regex fragments for release names that should always be rejected
# (subtitle packs/fixes, samples, extras, dubs).  Combined with the
# user-configured ignore words inside filterBadReleases().
resultFilters = ["sub(bed|ed|pack|s)", "(dk|fin|heb|kor|nor|nordic|pl|swe)sub(bed|ed|s)?",
                 "(dir|sample|sub|nfo)fix", "sample", "(dvd)?extras",
                 "dub(bed)?"]
def containsAtLeastOneWord(name, words):
    """
    Filters out results based on filter_words

    name: name to check
    words : string of words separated by a ',' or list of words

    Returns: False if the name doesn't contain any word of words list, or the found word from the list.
    """
    # NOTE: ``basestring`` keeps this Python-2-only (the codebase predates py3)
    if isinstance(words, basestring):
        words = words.split(',')
    # Whole-word, case-insensitive match; [\W_] makes punctuation and
    # underscores count as word boundaries.
    items = [(re.compile('(^|[\W_])%s($|[\W_])' % re.escape(word.strip()), re.I), word.strip()) for word in words]
    for regexp, word in items:
        if regexp.search(name):
            return word
    return False
def filterBadReleases(name, parse=True):
    """
    Filters out non-english and just all-around stupid releases by comparing them
    to the resultFilters contents.

    name: the release name to check
    parse: when True, first reject names that don't parse as a valid episode

    Returns: True if the release name is OK, False if it's bad.
    """

    try:
        if parse:
            NameParser().parse(name)
    except InvalidNameException:
        logger.log(u"Unable to parse the filename " + name + " into a valid episode", logger.DEBUG)
        return False
    except InvalidShowException:
        # an unknown show is tolerated here; only unparseable names are rejected
        pass
    #    logger.log(u"Unable to parse the filename " + name + " into a valid show", logger.DEBUG)
    #    return False

    # if any of the bad strings are in the name then say no
    ignore_words = list(resultFilters)
    if sickbeard.IGNORE_WORDS:
        ignore_words.extend(sickbeard.IGNORE_WORDS.split(','))

    word = containsAtLeastOneWord(name, ignore_words)
    if word:
        logger.log(u"Invalid scene release: " + name + " contains " + word + ", ignoring it", logger.DEBUG)
        return False

    # if any of the good strings aren't in the name then say no
    if sickbeard.REQUIRE_WORDS:
        require_words = sickbeard.REQUIRE_WORDS
        if not containsAtLeastOneWord(name, require_words):
            logger.log(u"Invalid scene release: " + name + " doesn't contain any of " + sickbeard.REQUIRE_WORDS +
                       ", ignoring it", logger.DEBUG)
            return False

    return True
def sceneToNormalShowNames(name):
    """
    Takes a show name from a scene dirname and converts it to a more "human-readable" format.

    name: The show name to convert

    Returns: a list of all the possible "normal" names
    """

    if not name:
        return []

    name_list = [name]

    # use both "and" and "&".  BUG FIX: the previous call passed re.I as
    # re.sub()'s positional ``count`` argument, silently capping the number
    # of replacements at 2; the inline (?i) flag already provides
    # case-insensitivity, so the extra argument is simply dropped.
    new_name = re.sub(r'(?i)([\. ])and([\. ])', r'\1&\2', name)
    if new_name not in name_list:
        name_list.append(new_name)

    results = []

    for cur_name in name_list:
        # add brackets around the year
        results.append(re.sub(r'(\D)(\d{4})$', r'\1(\2)', cur_name))

        # add brackets around the country
        country_match_str = '|'.join(common.countryList.values())
        results.append(re.sub('(?i)([. _-])(' + country_match_str + ')$', r'\1(\2)', cur_name))

    results += name_list
    # de-duplicate while keeping the original entries
    return list(set(results))
def makeSceneShowSearchStrings(show, season=-1, anime=False):
    """Return scene-sanitized variants of every possible name for ``show``.

    With ``anime=True`` the anime-specific sanitization rules are applied.
    NOTE: returns what py2's map() returns (a list in this codebase).
    """
    showNames = allPossibleShowNames(show, season=season)

    # scenify the names
    if anime:
        sanitizeSceneNameAnime = partial(sanitizeSceneName, anime=True)
        return map(sanitizeSceneNameAnime, showNames)
    else:
        return map(sanitizeSceneName, showNames)
def makeSceneSeasonSearchString(show, ep_obj, extraSearchType=None):
    """Build season-level search strings for ``show``/``ep_obj``.

    Air-by-date/sports shows search by year, anime by needed absolute episode
    numbers, everything else by "SXX".  Show-name variants are combined with
    each season token (and any whitelisted release group).
    """

    if show.air_by_date or show.sports:
        numseasons = 0

        # the search string for air by date shows is just the year
        seasonStrings = [str(ep_obj.airdate).split('-')[0]]
    elif show.is_anime:
        numseasons = 0
        seasonEps = show.getAllEpisodes(ep_obj.season)

        # get show qualities
        anyQualities, bestQualities = common.Quality.splitQuality(show.quality)

        # compile a list of all the episode numbers we need in this 'season'
        seasonStrings = []
        for episode in seasonEps:

            # get quality of the episode
            curCompositeStatus = episode.status
            curStatus, curQuality = common.Quality.splitCompositeStatus(curCompositeStatus)

            if bestQualities:
                highestBestQuality = max(bestQualities)
            else:
                highestBestQuality = 0

            # if we need a better one then add it to the list of episodes to fetch
            if (curStatus in (
                    common.DOWNLOADED,
                    common.SNATCHED) and curQuality < highestBestQuality) or curStatus == common.WANTED:
                ab_number = episode.scene_absolute_number
                if ab_number > 0:
                    seasonStrings.append("%02d" % ab_number)

    else:
        myDB = db.DBConnection()
        numseasonsSQlResult = myDB.select(
            "SELECT COUNT(DISTINCT season) as numseasons FROM tv_episodes WHERE showid = ? and season != 0",
            [show.indexerid])

        numseasons = int(numseasonsSQlResult[0][0])
        seasonStrings = ["S%02d" % int(ep_obj.scene_season)]

    showNames = set(makeSceneShowSearchStrings(show, ep_obj.scene_season))

    toReturn = []

    # search each show name
    for curShow in showNames:
        # most providers all work the same way
        if not extraSearchType:
            # if there's only one season then we can just use the show name straight up
            if numseasons == 1:
                toReturn.append(curShow)
            # for providers that don't allow multiple searches in one request we only search for Sxx style stuff
            else:
                for cur_season in seasonStrings:
                    # prefix whitelisted release groups when configured
                    if len(show.release_groups.whitelist) > 0:
                        for keyword in show.release_groups.whitelist:
                            toReturn.append(keyword + '.' + curShow + "." + cur_season)
                    else:
                        toReturn.append(curShow + "." + cur_season)

    return toReturn
def makeSceneSearchString(show, ep_obj):
    """Build episode-level search strings for ``ep_obj`` of ``show``.

    Uses airdate for air-by-date/sports shows, absolute numbers for anime and
    SxxExx / "SxEE" tokens otherwise; single-season non-anime shows search by
    show name alone to stay under provider result limits.
    """
    myDB = db.DBConnection()
    numseasonsSQlResult = myDB.select(
        "SELECT COUNT(DISTINCT season) as numseasons FROM tv_episodes WHERE showid = ? and season != 0",
        [show.indexerid])
    numseasons = int(numseasonsSQlResult[0][0])

    # see if we should use dates instead of episodes
    if (show.air_by_date or show.sports) and ep_obj.airdate != datetime.date.fromordinal(1):
        epStrings = [str(ep_obj.airdate)]
    elif show.is_anime:
        epStrings = ["%02i" % int(ep_obj.scene_absolute_number if ep_obj.scene_absolute_number > 0 else ep_obj.scene_episode)]
    else:
        epStrings = ["S%02iE%02i" % (int(ep_obj.scene_season), int(ep_obj.scene_episode)),
                     "%ix%02i" % (int(ep_obj.scene_season), int(ep_obj.scene_episode))]

    # for single-season shows just search for the show name -- if total ep count (exclude s0) is less than 11
    # due to the amount of qualities and releases, it is easy to go over the 50 result limit on rss feeds otherwise
    if numseasons == 1 and not ep_obj.show.is_anime:
        epStrings = ['']

    showNames = set(makeSceneShowSearchStrings(show, ep_obj.scene_season))

    toReturn = []

    for curShow in showNames:
        for curEpString in epStrings:
            # prefix whitelisted release groups when configured
            if len(ep_obj.show.release_groups.whitelist) > 0:
                for keyword in ep_obj.show.release_groups.whitelist:
                    toReturn.append(keyword + '.' + curShow + '.' + curEpString)
            else:
                toReturn.append(curShow + '.' + curEpString)

    return toReturn
def isGoodResult(name, show, log=True, season=-1):
    """
    Use an automatically-created regex to make sure the result actually is the show it claims to be

    name: release name returned by a provider
    show: TVShow object the release is supposed to belong to
    log: when True, log each regex check and the final verdict
    season: season whose scene exceptions should be considered

    Returns True when any name variant of ``show`` matches ``name``.
    """
    all_show_names = allPossibleShowNames(show, season=season)
    # try sanitized, raw and encoding-normalized variants of every name
    showNames = map(sanitizeSceneName, all_show_names) + all_show_names
    showNames += map(ek.ss, all_show_names)

    for curName in set(showNames):
        if not show.is_anime:
            # turn literal whitespace/dots/dashes into a \W+ separator class
            escaped_name = re.sub('\\\\[\\s.-]', '\W+', re.escape(curName))
            if show.startyear:
                escaped_name += "(?:\W+" + str(show.startyear) + ")?"
            curRegex = '^' + escaped_name + '\W+(?:(?:S\d[\dE._ -])|(?:\d\d?x)|(?:\d{4}\W\d\d\W\d\d)|(?:(?:part|pt)[\._ -]?(\d|[ivx]))|Season\W+\d+\W+|E\d+\W+|(?:\d{1,3}.+\d{1,}[a-zA-Z]{2}\W+[a-zA-Z]{3,}\W+\d{4}.+))'
        else:
            escaped_name = re.sub('\\\\[\\s.-]', '[\W_]+', re.escape(curName))
            # FIXME: find a "automatically-created" regex for anime releases # test at http://regexr.com?2uon3
            curRegex = '^((\[.*?\])|(\d+[\.-]))*[ _\.]*' + escaped_name + '(([ ._-]+\d+)|([ ._-]+s\d{2})).*'

        if log:
            logger.log(u"Checking if show " + name + " matches " + curRegex, logger.DEBUG)

        match = re.search(curRegex, name, re.I)

        if match:
            logger.log(u"Matched " + curRegex + " to " + name, logger.DEBUG)
            return True

    if log:
        logger.log(
            u"Provider gave result " + name + " but that doesn't seem like a valid result for " + show.name + " so I'm ignoring it")
    return False
def allPossibleShowNames(show, season=-1):
    """
    Figures out every possible variation of the name for a particular show. Includes TVDB name, TVRage name,
    country codes on the end, eg. "Show Name (AU)", and any scene exception names.

    show: a TVShow object that we should get the names of
    season: season whose scene exceptions should be used (-1 for generic)

    Returns: a list of all the possible show names
    """

    showNames = get_scene_exceptions(show.indexerid, season=season)[:]
    if not showNames:  # if we dont have any season specific exceptions fallback to generic exceptions
        season = -1
        showNames = get_scene_exceptions(show.indexerid, season=season)[:]

    if season in [-1, 1]:
        showNames.append(show.name)

    if not show.is_anime:
        newShowNames = []
        # BUG FIX: build the bidirectional country lookup on a *copy*.  The
        # previous code called update() directly on common.countryList,
        # permanently polluting the shared module-level mapping with
        # reversed entries the first time this function ran.
        country_list = dict(common.countryList)
        country_list.update(dict(zip(common.countryList.values(), common.countryList.keys())))
        for curName in set(showNames):
            if not curName:
                continue

            # if we have "Show Name Australia" or "Show Name (Australia)" this will add "Show Name (AU)" for
            # any countries defined in common.countryList
            # (and vice versa)
            for curCountry in country_list:
                if curName.endswith(' ' + curCountry):
                    newShowNames.append(curName.replace(' ' + curCountry, ' (' + country_list[curCountry] + ')'))
                elif curName.endswith(' (' + curCountry + ')'):
                    newShowNames.append(curName.replace(' (' + curCountry + ')', ' (' + country_list[curCountry] + ')'))

            # if we have "Show Name (2013)" this will strip the (2013) show year from the show name
            #newShowNames.append(re.sub('\(\d{4}\)','',curName))

        showNames += newShowNames

    return showNames
def determineReleaseName(dir_name=None, nzb_name=None):
    """Determine a release name from an nzb and/or folder name

    Preference order: the nzb name (minus extension), then a lone .nzb/.nfo
    file found inside ``dir_name``, then the folder name itself.  Returns
    None when nothing usable is found.
    """

    if nzb_name is not None:
        logger.log(u"Using nzb_name for release name.")
        return nzb_name.rpartition('.')[0]

    if dir_name is None:
        return None

    # try to get the release name from nzb/nfo
    file_types = ["*.nzb", "*.nfo"]

    for search in file_types:

        reg_expr = re.compile(fnmatch.translate(search), re.IGNORECASE)
        files = [file_name for file_name in ek.ek(os.listdir, dir_name) if
                 ek.ek(os.path.isfile, ek.ek(os.path.join, dir_name, file_name))]
        results = filter(reg_expr.search, files)

        # only trust the result when it is unambiguous (exactly one match)
        if len(results) == 1:
            found_file = ek.ek(os.path.basename, results[0])
            found_file = found_file.rpartition('.')[0]
            if filterBadReleases(found_file):
                logger.log(u"Release name (" + found_file + ") found from file (" + results[0] + ")")
                return found_file.rpartition('.')[0]

    # If that fails, we try the folder
    folder = ek.ek(os.path.basename, dir_name)
    if filterBadReleases(folder):
        # NOTE: Multiple failed downloads will change the folder name.
        # (e.g., appending #s)
        # Should we handle that?
        logger.log(u"Folder name (" + folder + ") appears to be a valid release name. Using it.")
        return folder

    return None
| ahmetabdi/SickRage | sickbeard/show_name_helpers.py | Python | gpl-3.0 | 13,767 |
#!/usr/bin/env python3
"""
Universal file/directory methods are defined here.
"""
import stat, hashlib
from source.libs.define import *
#import source.libs.define as lib
import source.libs.log as log
from source.libs.db import *
def get_fullpath(system, path):
    """Join ``path`` onto the ``system`` root and return the full path.

    ``system`` is either a local/sub root ('/...') or an 'ssh://' URI; any
    other scheme is unsupported and yields IO_ERROR.  Relative paths
    ('./', '../') are returned unchanged for local systems.
    """
    if system.startswith('/'): # local or sub
        if path.startswith('./') or path.startswith('../') or path.startswith('.\\') or path.startswith('..\\'): # enable relative path also
            return path
        if path.startswith('~/'): #TODO test
            print('IO returning ~ address...')
            return os.path.join(lib.global_parameters['HOME'], path[2:])
        # strip leading slashes so os.path.join doesn't discard the root
        while path.startswith('/'): # remove leading slashes
            path = path[1:]
        return os.path.join(system, path)
    elif system.startswith('ssh://'):
        return system+('/' if not system.endswith('/') else '')+path
    else:
        # TODO NOT IMPLEMENTED
        return IO_ERROR
def get_fd(system, path, mode):
    """Open ``path`` on ``system`` with ``mode`` and return a file-like object.

    Returns None for unsupported system types.  For ssh systems the SFTP
    connector is appended to the connection's connector list so it gets
    closed together with the connection.
    """
    fullpath = get_fullpath(system, path)
    if system.startswith('/'): # local or sub
        return open(fullpath, mode)
    elif system.startswith('ssh://'):
        c = get_ssh_connection(system)
        sftp = c.connectors[0].open_sftp() # will be closed on connection kill or program termination
        c.connectors.append(sftp)
        return sftp.open(path, mode)
    else:
        # TODO NOT IMPLEMENTED
        return None
def read_file(system, path, f=None, usedb=False, forcebinary=False, chunk=0, verbose=False):
    """Read a file on ``system`` and return its contents (or IO_ERROR).

    f:           already-open file object; when None, this function opens
                 and closes the file itself
    usedb:       store file info (and content, unless DBFILE_NOCONTENT) in
                 the analysis database (local systems only)
    forcebinary: skip the UTF-8 text attempt and read raw bytes
    chunk:       read at most this many bytes (0 = whole file)
    verbose:     log the reason when the file cannot be read
    """
    fullpath = get_fullpath(system, path)
    if fullpath == IO_ERROR:
        return IO_ERROR
    if not can_read(system, path):
        if verbose:
            if is_link(system, path):
                log.err('\'%s\' (on \'%s\') is a symlink to \'%s\' but it cannot be read.' % (path, system, get_link(system, path)))
            else:
                log.err('Cannot read \'%s\'.' % (fullpath))
        return IO_ERROR
    # remember whether we own the file object and must close it afterwards
    open_and_close = (f is None)
    if system.startswith('/'): # local or sub
        if open_and_close:
            try:
                if forcebinary:
                    raise TypeError # will be opened as binary
                f = open(fullpath, 'r', encoding='utf-8')
            except: # a binary file?
                f = open(fullpath, 'rb')
        result = f.read() if chunk == 0 else f.read(chunk)
        if open_and_close:
            f.close()
        if usedb == True or usedb == DBFILE_NOCONTENT:
            fileinfo = get_file_info(system, path)
            if fileinfo == IO_ERROR:
                return IO_ERROR # cannot access file info - something is weird
            # content is only stored when usedb is literally True
            add = db['analysis'].add_file(system, path, fileinfo['type'], fileinfo['permissions'], fileinfo['UID'], fileinfo['GID'], result if usedb else None, fileinfo['ATIME'], fileinfo['MTIME'], fileinfo['CTIME'])
            if not add:
                log.err('Database query failed.')
                return IO_ERROR
        return result
    elif system.startswith('ssh://'):
        c = get_ssh_connection(system)
        if c is not None:
            if open_and_close:
                try:
                    sftp = c.connectors[0].open_sftp()
                except:
                    log.err('Cannot create SFTP connection.')
                    return IO_ERROR
                try:
                    if forcebinary:
                        raise TypeError # will be treated as binary
                    f = sftp.open(path, 'r')
                except:
                    f = sftp.open(path, 'rb')
            result = f.read() if chunk == 0 else f.read(size=chunk)
            if open_and_close:
                sftp.close()
            # SFTP reads return bytes; decode unless raw bytes were requested
            if forcebinary:
                return result
            else:
                return result.decode('utf-8')
        else:
            log.err('Cannot read file on \'%s\' - no such connection' % (system))
            return IO_ERROR
        # TODO usedb, chunk etc.
    else: # FTP/TFTP/HTTP
        # TODO NOT IMPLEMENTED
        return IO_ERROR
def write_file(system, path, content, lf=True, utf8=False):
    """Write ``content`` to ``path`` on ``system`` (local systems only).

    lf:    write '\n' line endings untranslated (newline='')
    utf8:  force UTF-8 encoding up front

    Returns True on success, IO_ERROR on failure or unsupported system.
    """
    fullpath = get_fullpath(system, path)
    if fullpath == IO_ERROR:
        return IO_ERROR
    if not can_write(system, path) and not can_create(system, path):
        if is_link(system, path):
            log.err('\'%s\' (on %s) is a symlink to \'%s\' but it cannot be written.' % (path, system, get_link(system, path)))
        else:
            log.err('Cannot write \'%s\'.' % (fullpath))
        return IO_ERROR
    if system.startswith('/'): # local or sub
        # LF? UTF-8?
        args = {}
        if lf:
            args['newline'] = ''
        if utf8:
            args['encoding'] = 'utf-8'
        try:
            # write file
            with open(fullpath, 'w', **args) as f:
                f.write(content)
        except UnicodeEncodeError:
            # weird char? try utf8
            args['encoding'] = 'utf-8'
            with open(fullpath, 'w', **args) as f:
                f.write(content)
        except TypeError:
            # str, not bytes? write as binary
            with open(fullpath, 'wb') as f:
                f.write(content)
        return True
    else: # SSH/FTP/TFTP/HTTP
        # TODO NOT IMPLEMENTED
        return IO_ERROR
def mkdir(system, path):
    """Create directory ``path`` on ``system`` (local/sub systems only).

    A no-op when the directory already exists; logs an error and bails out
    when the path exists but is not a directory.
    """
    fullpath = get_fullpath(system, path)
    if can_read(system, path):
        typ = get_file_info(system, path)['type']
        if typ != 'd':
            log.err('This file already exists (and is not a directory).')
        # BUG FIX: nothing to create either way.  Previously an existing
        # directory fell through to os.mkdir(), which raised FileExistsError.
        return
    if system.startswith('/'): # local or sub
        os.mkdir(fullpath)
    else:
        #TODO NOT IMPLEMENTED (remote systems)
        pass
def delete(system, path):
    """Delete a file or directory tree at ``path`` on ``system`` (local only)."""
    typ = get_file_info(system, path)['type']
    fullpath = get_fullpath(system, path)
    if system.startswith('/'): # local or sub
        if typ == 'f':
            os.remove(fullpath)
        elif typ == 'd':
            try:
                # empty directory?
                os.rmdir(fullpath)
            except:
                # recursively?
                import shutil
                shutil.rmtree(fullpath)
def get_file_info(system, path, verbose=False):
    """Return a dict of stat info for ``path`` on ``system`` (local only).

    Keys: 'type' (one-letter code from get_file_type_char), 'permissions'
    (octal string), 'UID', 'GID', 'ATIME', 'MTIME', 'CTIME'.  All values are
    None when the file is inaccessible; IO_ERROR for unsupported systems.
    """
    fullpath = get_fullpath(system, path)
    if fullpath == IO_ERROR:
        return IO_ERROR
    result = {}
    if system.startswith('/'): # local or sub
        if can_read(system, path):
            # TODO platform-specific
            stats = os.stat(fullpath)
            stm = stats.st_mode
            result['type'] = get_file_type_char(stat.S_IFMT(stm))
            result['permissions'] = '%o' % (stat.S_IMODE(stm))
            result['UID'] = stats[stat.ST_UID]
            result['GID'] = stats[stat.ST_GID]
            result['ATIME'] = stats[stat.ST_ATIME]
            result['MTIME'] = stats[stat.ST_MTIME]
            result['CTIME'] = stats[stat.ST_CTIME] # actually mtime on UNIX, TODO
        else:
            if verbose:
                log.info('File \'%s\' is not accessible.' % (fullpath))
            result['type'] = None
            result['permissions'] = None
            result['UID'] = None
            result['GID'] = None
            result['ATIME'] = None
            result['MTIME'] = None
            result['CTIME'] = None
        return result
    else: # SSH/FTP/TFTP/HTTP
        # TODO NOT IMPLEMENTED
        return IO_ERROR
def can_read(system, path):
    """Return True when ``path`` on ``system`` is readable.

    Local systems use os.access(); ssh systems probe by actually listing the
    directory or reading one byte of the file.  Unknown systems yield False.
    """
    fullpath = get_fullpath(system, path)
    if fullpath == IO_ERROR:
        return False
    if system.startswith('/'):
        if os.access(fullpath, os.R_OK):
            return True
        else:
            return False
    elif system.startswith('ssh://'):
        c = get_ssh_connection(system)
        if c is not None:
            try:
                sftp = c.connectors[0].open_sftp()
                fs = sftp.listdir(path)
                result = len(fs)>0
            except: # not a directory
                try:
                    f = sftp.open(path)
                    #result = f.readable()
                    # no readable() on Debian??? # TODO monitor this situation, meanwhile:
                    # probe readability by reading a single byte
                    tmpbuf = f.read(size=1)
                    result = True if len(tmpbuf)>0 else False
                except (PermissionError, FileNotFoundError):
                    return False
                except Exception as e:
                    return False
            sftp.close()
            return result
        else:
            return False # no connection
    else: # unknown system
        return False
def can_write(system, path):
    """Return True when ``path`` on ``system`` is writable.

    Only local/sub systems are supported.  CONSISTENCY FIX: unsupported
    system types now return False explicitly (the function previously fell
    off the end and returned None for them, unlike can_read/can_execute).
    """
    fullpath = get_fullpath(system, path)
    if fullpath == IO_ERROR:
        return False
    if system.startswith('/'):
        if os.access(fullpath, os.W_OK):
            return True
        else:
            return False
    # TODO NOT IMPLEMENTED for remote systems
    return False
def can_execute(system, path):
    """Return True when ``path`` on ``system`` carries execute permission."""
    fullpath = get_fullpath(system, path)
    if fullpath == IO_ERROR:
        return False
    if not system.startswith('/'):
        # TODO NOT IMPLEMENTED for remote systems
        return False
    return os.access(fullpath, os.X_OK)
def can_create(system, path):
    """Return True when a not-yet-writable ``path`` could be created.

    NOTE: this is a side-effecting probe — it actually creates the file,
    writes one byte and removes it again.  Paths that are already writable
    return False (they exist; nothing to "create").
    """
    fullpath = get_fullpath(system, path)
    if fullpath == IO_ERROR:
        return False
    # file does not exist but can be created
    if not can_write(system, path):
        try:
            with open(fullpath, 'w') as f:
                f.write('.')
            os.remove(fullpath)
            return True
        except:
            return False
    else:
        return False
def is_link(system, path):
    """Return True when ``path`` on ``system`` is a symbolic link (local only)."""
    if not system.startswith('/'):
        # TODO NOT IMPLEMENTED for remote systems
        return False
    return os.path.islink(get_fullpath(system, path))
def get_link(system, path):
    """Resolve a symlink target; non-links are returned unchanged.

    NOTE: the returned target is relative to ``system`` (system prefix not
    included).
    """
    if not is_link(system, path):
        return path
    if system.startswith('/'): # local or sub
        return os.readlink(get_fullpath(system, path))
    return path # TODO not implemented for remote systems
def list_dir(system, path, sortby=IOSORT_NAME):
    """List directory entries of ``path`` on ``system``.

    sortby: IOSORT_NAME (alphabetical), IOSORT_MTIME (by modification time),
    anything else leaves the listing order untouched.  Errors and
    unsupported systems yield an empty list.
    """
    result = []
    if system.startswith('/'): # local or sub
        try:
            result = os.listdir(get_fullpath(system, path))
        except:
            result = []
    elif system.startswith('ssh://'):
        c = get_ssh_connection(system)
        try:
            sftp = c.connectors[0].open_sftp()
            result = sftp.listdir(path)
        except:
            result = []
    else:
        # TODO NOT IMPLEMENTED
        return []
    # sort if necessary
    if sortby == IOSORT_NAME:
        return sorted(result)
    else: # by some parameter
        # stat every entry once, then sort on the requested attribute
        fileinfos = {k:get_file_info(system, os.path.join(path, k)) for k in result} # TODO not really platform-independent
        if sortby == IOSORT_MTIME:
            return [k for k,v in sorted(fileinfos.items(), key=lambda x: x[1]['MTIME'])]
        else:
            return result
        # TODO more possibilities would be great
def find(system, start, filename, location=False):
    """Recursively search for ``filename`` below ``start`` on ``system``.

    Returns the full paths of every match; with ``location=True`` the
    containing directories are returned instead.  Remote systems are not
    implemented and yield an empty list.
    """
    if not system.startswith('/'):
        #TODO NOT IMPLEMENTED for remote systems
        return []
    matches = []
    for root, dirs, files in os.walk(get_fullpath(system, start)):
        if filename not in files and filename not in dirs:
            continue
        hit = root if location else os.path.join(root, filename)
        matches.append(get_fullpath(system, hit))
    return matches
def hash(system, path, function, hexdigest=True):
    """Hash a file's contents with ``function`` (e.g. hashlib.md5) in 64K chunks.

    Returns the hex digest (or raw digest when ``hexdigest`` is False), or
    the string 'UNKNOWN' when the file cannot be opened.
    NOTE: this function shadows the builtin hash() within this module.
    """
    # TODO universal?
    hasher = function()
    f = get_fd(system, path, 'rb')
    if f == IO_ERROR:
        result = 'UNKNOWN'
    else:
        # stream the file through the hasher chunk by chunk
        while True:
            buf = read_file(system, path, f=f, chunk=65535)
            if len(buf) <= 0:
                break
            hasher.update(buf)
        result = hasher.hexdigest() if hexdigest else hasher.digest()
        f.close()
    return result
def md5(system, path, hexdigest=True):
    # Convenience wrapper: MD5 of a file via hash()
    return hash(system, path, hashlib.md5, hexdigest)

def sha1(system, path, hexdigest=True):
    # Convenience wrapper: SHA-1 of a file via hash()
    return hash(system, path, hashlib.sha1, hexdigest)

def sha256(system, path, hexdigest=True):
    # Convenience wrapper: SHA-256 of a file via hash()
    return hash(system, path, hashlib.sha256, hexdigest)
def get_file_type_char(mask):
    """Map a stat S_IFMT mask to a one-letter file-type code.

    Returns 'b' (block device), 'd' (directory), 'f' (regular file) or
    'c' (character device); anything unrecognised yields '?'.
    """
    known = {
        0x6000: 'b',
        0x4000: 'd',
        0x8000: 'f',
        0x2000: 'c',
    }
    # dict.get() replaces the previous membership test + lookup (and the
    # dead commented-out bit-scanning loop has been removed)
    return known.get(mask, '?')
def get_system_type_from_active_root(activeroot, verbose=False, dontprint=''):
    """Guess the OS type of the system rooted at ``activeroot``.

    '/' means the host itself (sys.platform).  Other roots are probed for
    well-known Linux directories; a hit rate above 30% classifies the system
    as 'linux', otherwise 'unknown'.

    dontprint: prefix to strip from the root in verbose log output (e.g. to
    hide a mountpoint path)
    """
    if activeroot == '/':
        return sys.platform
    #
    # check db if the system is known
    #
    # TODO
    #
    # new system - detect it and write into db
    #
    # chroot or similar?
    if activeroot.startswith(('/', 'ssh://')): # sub or ssh
        # linux should have some folders in / ...
        success = 0
        linux_folders = ['/bin', '/boot', '/dev', '/etc', '/home', '/lib', '/media', '/opt', '/proc', '/root', '/sbin', '/srv', '/sys', '/tmp', '/usr']
        for folder in linux_folders:
            if can_read(activeroot, folder):
                success += 1
        linux_score = success/len(linux_folders)
        if verbose:
            if type(dontprint) != str or dontprint=='':
                log.info('Linux score for \'%s\': %f' % (activeroot, linux_score))
            else:
                log.info('Linux score for \'%s\': %f' % (activeroot.partition(dontprint)[2], linux_score))
        if linux_score > 0.3: # this should be linux
            #TODO write into DB
            return 'linux'
    #TODO NOT IMPLEMENTED
    return 'unknown'
def get_ssh_connection(system):
    """Return the first known connection whose description matches ``system``, or None."""
    for connection in lib.connections:
        if connection.description == system:
            return connection
    return None
| lightfaith/locasploit | source/libs/io.py | Python | gpl-2.0 | 14,049 |
from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import (standardise, discretize, fdiff, power_and_fdiff,
RandomSegments, RandomSegmentsInMemory,
SameLocation)
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import (MixtureDensityLayer, DeConv1DLayer,
SharedWeightsDenseLayer)
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter
from neuralnilm.updates import clipped_nesterov_momentum
from neuralnilm.disaggregate import disaggregate
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal, Identity
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.layers.batch_norm import BatchNormLayer
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
"""
447: first attempt at disaggregation
"""
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 1000
N_SEQ_PER_BATCH = 64
# Base configuration for the SameLocation data source; target_appliance,
# seq_length and a logger are filled in per-experiment by exp_a().
source_dict = dict(
    filename='/data/dk3810/ukdale.h5',
    window=("2013-03-18", None),
    train_buildings=[1, 2, 3, 4, 5],
    validation_buildings=[1, 2, 3, 4, 5],
    n_seq_per_batch=N_SEQ_PER_BATCH,
    standardise_input=True,
    standardise_targets=True,
    independently_center_inputs=True,
    subsample_target=8,
    ignore_incomplete=False,
    skip_probability=0.5,
    offset_probability=1
#    ignore_offset_activations=True
)
# Base network configuration; experiment_name and source are filled in by
# exp_a().  The commented-out alternatives are earlier loss/update choices
# kept for reference.
net_dict = dict(
    save_plot_interval=SAVE_PLOT_INTERVAL,
#    loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
#    loss_function=lambda x, t: mdn_nll(x, t).mean(),
#    loss_function=lambda x, t: (mse(x, t) * MASK).mean(),
    loss_function=lambda x, t: mse(x, t).mean(),
#    loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
#    loss_function=partial(scaled_cost, loss_func=mse),
#    loss_function=ignore_inactive,
#    loss_function=partial(scaled_cost3, ignore_inactive=False),
#    updates_func=momentum,
    updates_func=clipped_nesterov_momentum,
    updates_kwargs={'clip_range': (0, 10)},
    learning_rate=1e-2,
    # anneal the learning rate as training progresses
    learning_rate_changes_by_iteration={
        1000: 1e-3,
        5000: 1e-4
    },
    do_save_activations=True,
    auto_reshape=False,
#    plotter=CentralOutputPlotter
    plotter=Plotter(n_seq_to_plot=32)
)
def exp_a(name, target_appliance, seq_length):
    """Build the conv -> dense -> deconv disaggregation net for one appliance.

    name: experiment name (used for logging and output files)
    target_appliance: appliance name(s) to disaggregate
    seq_length: input sequence length in samples

    Also (re)binds the module-level ``source`` used by run_experiment().
    """
    global source
    source_dict_copy = deepcopy(source_dict)
    source_dict_copy.update(dict(
        target_appliance=target_appliance,
        logger=logging.getLogger(name),
        seq_length=seq_length
    ))
    source = SameLocation(**source_dict_copy)
    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(dict(
        experiment_name=name,
        source=source
    ))
    NUM_FILTERS = 4
    # target is subsampled relative to the input
    target_seq_length = seq_length // source.subsample_target
    net_dict_copy['layers_config'] = [
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)  # (batch, features, time)
        },
        {
            'label': 'conv0',
            'type': Conv1DLayer,  # convolve over the time axis
            'num_filters': NUM_FILTERS,
            'filter_length': 4,
            'stride': 1,
            'nonlinearity': None,
            'border_mode': 'valid'
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)  # back to (batch, time, features)
        },
        {
            'label': 'dense0',
            'type': DenseLayer,
            # 'valid' conv with filter_length 4 shortens the sequence by 3
            'num_units': (seq_length - 3) * NUM_FILTERS,
            'nonlinearity': rectify
        },
        {
            'label': 'dense2',
            'type': DenseLayer,
            'num_units': 128,
            'nonlinearity': rectify
        },
        {
            'type': DenseLayer,
            'num_units': (target_seq_length - 3) * NUM_FILTERS,
            'nonlinearity': rectify
        },
        {
            'type': ReshapeLayer,
            'shape': (N_SEQ_PER_BATCH, target_seq_length - 3, NUM_FILTERS)
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)  # (batch, features, time)
        },
        {
            # 'full' deconvolution restores the 3 samples lost by conv0
            'type': DeConv1DLayer,
            'num_output_channels': 1,
            'filter_length': 4,
            'stride': 1,
            'nonlinearity': None,
            'border_mode': 'full'
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)  # back to (batch, time, features)
        }
    ]
    net = Net(**net_dict_copy)
    return net
def main():
    """Run the experiment for each configured appliance (currently only the last).

    Each entry is (experiment letter, appliance name(s), sequence length).
    """
    APPLIANCES = [
        ('a', ['fridge freezer', 'fridge'], 512),
        ('b', "'coffee maker'", 512),
        ('c', "'dish washer'", 2000),
        ('d', "'hair dryer'", 256),
        ('e', "'kettle'", 256),
        ('f', "'oven'", 2000),
        ('g', "'toaster'", 256),
        ('h', "'light'", 2000),
        ('i', ['washer dryer', 'washing machine'], 1504)
    ]
    # NOTE: [-1:] deliberately runs only the last appliance ('i')
    for experiment, appliance, seq_length in APPLIANCES[-1:]:
        full_exp_name = NAME + experiment
        # init_experiment returns a source-code string like "exp_a(...)";
        # 'a' is hard-coded because exp_a is the only experiment function.
        func_call = init_experiment(PATH, 'a', full_exp_name)
        # splice the appliance and seq_length into the generated call string
        func_call = func_call[:-1] + ", {}, {})".format(appliance, seq_length)
        logger = logging.getLogger(full_exp_name)
        try:
            # NOTE(review): eval() on a string built by init_experiment —
            # trusted input here, but brittle; confirm before extending.
            net = eval(func_call)
            run_experiment(net, epochs=None)
        except KeyboardInterrupt:
            logger.info("KeyboardInterrupt")
            break
        except Exception as exception:
            logger.exception("Exception")
            # raise
        else:
            # free the network and its data source between experiments
            del net.source
            del net
            gc.collect()
        finally:
            logging.shutdown()
if __name__ == "__main__":
main()
"""
Emacs variables
Local Variables:
compile-command: "cp /home/jack/workspace/python/neuralnilm/scripts/e493.py /mnt/sshfs/imperial/workspace/python/neuralnilm/scripts/"
End:
"""
| JackKelly/neuralnilm_prototype | scripts/e493.py | Python | mit | 6,836 |
import pytest
import glob
import logging.config
import os
import minst.logger
import minst.utils
logging.config.dictConfig(minst.logger.get_config('INFO'))
def collect_files(exts, data, depth=8):
    """Glob under ``data`` (up to ``depth`` directory levels deep) for files
    whose suffix starts with one of ``exts``; return the matching paths."""
    matches = []
    for level in range(depth):
        # "" for level 0, "*" for level 1, "*/*" for level 2, ...
        subdir_glob = "/".join("*" for _ in range(level))
        template = os.path.join(data, subdir_glob, "*.{}*")
        for ext in exts:
            matches.extend(glob.glob(template.format(ext)))
    return matches
def __test(value, expected):
    """Shared equality assertion used by the yield-style tests below.

    Includes both values in the failure message so a failing generated
    test case identifies itself (a bare ``assert`` reports nothing).
    """
    assert value == expected, "expected {!r}, got {!r}".format(expected, value)
def test_generate_id():
    """Yield hash-id checks for several (prefix, filename, length) cases."""
    def _check(prefix, result, hlen, exp):
        # The id starts with the prefix, is followed by exactly hlen hash
        # characters, and matches the expected value verbatim.
        assert result.startswith(prefix)
        assert len(result[len(prefix):]) == hlen
        assert result == exp
    cases = [("A", "foobar.mp3", 3, 'A6fb'),
             ("BC", "testwhat.foo", 8, 'BC87188425'),
             ("TR", "i'matestfile.aiff", 12, 'TR35a75e8d3dcb')]
    for prefix, fname, hlen, exp in cases:
        result = minst.utils.generate_id(prefix, fname, hlen)
        yield _check, prefix, result, hlen, exp
def test_get_note_distance():
    """Yield semitone-distance checks for pairs of note names."""
    cases = [(('Bb1', 'B1'), 1),
             (('C4', 'B4'), 11),
             (('Bb1', 'B2'), 13),
             (('C3', 'C4'), 12),
             (('F5', 'F5'), 0),
             (('C#3', 'C4'), 11)]
    for pair, expected in cases:
        yield __test, minst.utils.note_distance(pair), expected
def test_check_audio_file(data_root):
    """check_audio_file accepts real audio files and rejects everything else."""
    for audio_path in collect_files(['mp3', 'aif', 'aiff'], data_root):
        __test(minst.utils.check_audio_file(audio_path), (True, None))
    for non_audio_path in collect_files(['zip'], data_root):
        __test(minst.utils.check_audio_file(non_audio_path)[0], False)
    # A bogus path must fail the check as well.
    __test(minst.utils.check_audio_file('heavy_metal.wav')[0], False)
def test_check_many_audio_files(data_root):
    """check_many_audio_files mirrors the per-file checks over an iterable."""
    audio = collect_files(['mp3', 'aif', 'aiff'], data_root)
    for status in minst.utils.check_many_audio_files(audio):
        __test(status, (True, None))
    non_audio = collect_files(['zip'], data_root)
    for status in minst.utils.check_many_audio_files(non_audio):
        __test(status[0], False)
def test_trim(workspace, data_root):
    """trim() yields an output path for audio input and None for non-audio."""
    audio = collect_files(['mp3', 'aif', 'aiff'], data_root)
    assert minst.utils.trim(audio[0], workspace, 0.5)
    non_audio = collect_files(['zip'], data_root)
    assert minst.utils.trim(non_audio[0], workspace, 0.5) is None
| ejhumphrey/minst-dataset | minst/tests/test_utils.py | Python | isc | 2,380 |
#!/usr/bin/env python
"""
This file is part of open-ihm.
open-ihm is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
open-ihm is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with open-ihm. If not, see <http://www.gnu.org/licenses/>.
"""
from database import Database
from incomesource import IncomeSource
import common
class IncomeSourceManager:
    """Manage the income sources attached to a single project.

    NOTE(review): this class defines no __init__ and reads ``self.pid``;
    it appears to be used as a mixin whose host class provides ``pid`` --
    confirm against the classes that inherit it.

    NOTE(review): queries are built by string interpolation.  User-supplied
    names pass through ``common.getDbString()`` for escaping, but ``pid``
    is interpolated directly; parameterized queries (PEP 249 style) would
    be safer if the ``Database`` wrapper supports them.
    """

    def addIncomeSource(self, incomesourcename, incometype):
        ''' Adds (saves) a new incomesource to the database '''
        # IncomeSource persists itself on construction.
        return IncomeSource(self.pid, incomesourcename, incometype)

    def editIncomeSource(self, incomesourcename, incometype, newincomesourcename=""):
        ''' Edits an existing incomesource '''
        # Keep the old name unless a replacement was supplied.
        if newincomesourcename == "":
            newincomesourcename = incomesourcename
        # get incomesource
        incomesource = IncomeSource(self.pid, incomesourcename)
        # modify the data
        incomesource.editIncomeSource(newincomesourcename, incometype)
        return incomesource

    def deleteIncomeSources(self, incomesources):
        ''' Deletes incomesources matching names in the array incomesources from database '''
        db = Database()
        db.open()
        for incomesourcename in incomesources:
            # Escape the name before interpolating it into the statement.
            incomesourcename = common.getDbString(incomesourcename)
            query = "DELETE FROM projectincomesources WHERE pid=%s AND incomesource='%s'" % (self.pid, incomesourcename)
            db.execUpdateQuery(query)
        db.close()

    def getIncomeSource(self, incomesourcename):
        ''' Retrieve a incomesource identified by incomesourcename '''
        return IncomeSource(self.pid, incomesourcename)

    def getIncomeSources(self, incometype=""):
        ''' Retrieves all incomesources from the database and returns an array of incomesource objects '''
        # create filtering condition (empty incometype means "no filter")
        incometype = common.getDbString(incometype)
        condition = "AND incometype LIKE '%"+incometype+"%' " if incometype != "" else ""
        query = "SELECT incomesource FROM projectincomesources WHERE pid=%s %s ORDER BY incomesource" % (self.pid, condition)
        names = self._fetchFirstColumn(query)
        return [IncomeSource(self.pid, name) for name in names]

    def getCropIncomes(self):
        ''' Retrieves crop incomes '''
        pass

    def getLivestockIncomes(self):
        ''' Retrieves livestock incomes '''
        pass

    def getWildfoodIncomes(self):
        ''' Retrieves wildfood incomes '''
        pass

    def getFoodIncomes(self, incometype=None):
        ''' Retrieve food (crop, livestock or wildfood incomes) '''
        # select query to get matchng food incomes
        query = '''SELECT name FROM setup_foods_crops WHERE category='%s' ''' % ( incometype )
        return self._fetchFirstColumn(query)

    def getEmploymentIncomes(self):
        ''' Retrieve employment incomes '''
        return self._fetchFirstColumn('''SELECT incomesource FROM setup_employment''')

    def getTransferIncomes(self):
        ''' Retrieve transfer incomes '''
        return self._fetchFirstColumn('''SELECT assistancetype FROM setup_transfers''')

    def _fetchFirstColumn(self, query):
        ''' Run a SELECT and return the first column of every row as a list.

        Shared helper for the income-listing methods above, which all
        previously duplicated the open/query/collect/close boilerplate. '''
        db = Database()
        db.open()
        records = db.execSelectQuery(query)
        values = [rec[0] for rec in records]
        db.close()
        return values
| tectronics/open-ihm | src/openihm/data/incomesourcemanager.py | Python | lgpl-3.0 | 5,156 |
# -*- coding: latin1 -*-
# Copyright (C) 2006-2010 João Luís Silva <jsilva@fc.up.pt>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#-----------------------------------------------------------------------
# Evolution of the pulse through the material
#-----------------------------------------------------------------------
from pynlo.interactions.FourWaveMixing import SSFM
from . import SSFM | ycasg/PyNLO | src/pynlo/interactions/FourWaveMixing/__init__.py | Python | gpl-3.0 | 1,055 |
# -*- coding: utf-8 -*-
#
# python-apt documentation build configuration file, created by
# sphinx-quickstart on Wed Jan 7 17:04:36 2009.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed
# automatically).
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import glob
import sys
# Find the path to the built apt_pkg and apt_inst extensions
# If python-apt was built in-tree, make the freshly built apt_pkg extension
# importable by probing each build/lib* directory for the running Python.
# NOTE: 'version' is reassigned below when the release string is parsed.
if os.path.exists("../../build"):
    # e.g. "2.7" -- selects the matching build/lib*2.7 directory
    version = '.'.join(str(x) for x in sys.version_info[:2])
    for apt_pkg_path in glob.glob('../../build/lib*%s/*.so' % version):
        sys.path.insert(0, os.path.abspath(os.path.dirname(apt_pkg_path)))
        try:
            import apt_pkg
        except ImportError as exc:
            # Not the correct version
            sys.stderr.write('W: Ignoring error %s\n' % exc)
            # undo the speculative path insertion and try the next candidate
            sys.path.pop(0)
        else:
            sys.stdout.write('I: Found apt_pkg.so in %s\n' % sys.path[0])
            break
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest',
'sphinx.ext.intersphinx', 'sphinx.ext.todo']
intersphinx_mapping = {'http://docs.python.org/': None}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
#master_doc = 'contents'
# General information about the project.
project = u'python-apt'
copyright = u'2009-2010, Julian Andres Klode <jak@debian.org>'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# Derive the documentation's release/version strings: prefer the DEBVER
# environment variable, otherwise parse debian/changelog.
try:
    release=os.environ['DEBVER']
except KeyError:
    from subprocess import Popen, PIPE
    # Shell equivalent: dpkg-parsechangelog | sed -n 's/^Version: //p'
    p1 = Popen(["dpkg-parsechangelog", "-l../../debian/changelog"],
               stdout=PIPE)
    p2 = Popen(["sed", "-n", 's/^Version: //p'], stdin=p1.stdout, stdout=PIPE)
    # NOTE(review): communicate() returns bytes on Python 3; the str-only
    # handling below (and the ur'' literals later in this file) indicate
    # this config targets Python 2 -- confirm before porting.
    release = p2.communicate()[0]
# Handle the alpha release scheme
# Collect the leading digits of the third version component (e.g. "90"
# from "0.7.90~exp1"); a non-digit character terminates the scan.
release_raw = "0"
for c in release.split("~")[0].split(".")[2]:
    if not c.isdigit():
        break
    release_raw += c
if int(release_raw) >= 90:
    version_s = release.split("~")[0].split(".")[:3]
    # Set the version to 0.X.100 if the release is 0.X.9Y (0.7.90 => 0.7.100)
    # Use
    #   version_s[1] = str(int(version_s[1]) + 1)
    #   version_s[2] = "0"
    # if the version of a 0.X.9Y release should be 0.X+1.0 (0.7.90=>0.8)
    version_s[2] = "100"
    version = '.'.join(version_s)
    del version_s
else:
    version = '.'.join(release.split("~")[0].split('.')[:3])
del release_raw
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {"index": "indexcontent.html"}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'python-aptdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source index, target name, title, author, document class [howto/manual]).
latex_documents = [
('contents', 'python-apt.tex', ur'python-apt Documentation',
ur'Julian Andres Klode <jak@debian.org>', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
todo_include_todos = True
| suokko/python-apt | doc/source/conf.py | Python | gpl-2.0 | 7,520 |
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all proposal line items.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
    """Page through every proposal line item and print its id and name."""
    # Initialize appropriate service.
    service = client.GetService('ProposalLineItemService', version='v202202')
    # Statement selecting all proposal line items, fetched page by page.
    stmt = ad_manager.StatementBuilder(version='v202202')
    while True:
        response = service.getProposalLineItemsByStatement(stmt.ToStatement())
        results = response['results'] if 'results' in response else None
        if not results:
            break
        for item in results:
            # Print out some information for each proposal line item.
            print('Proposal line item with ID "%d" and name "%s" was found.\n' %
                  (item['id'], item['name']))
        stmt.offset += stmt.limit
    print('\nNumber of results found: %s' % response['totalResultSetSize'])
# When run as a script, build an AdManagerClient from stored credentials
# and list every proposal line item.
if __name__ == '__main__':
    # Initialize client object.
    ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
    main(ad_manager_client)
| googleads/googleads-python-lib | examples/ad_manager/v202202/proposal_line_item_service/get_all_proposal_line_items.py | Python | apache-2.0 | 1,904 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RouteFilterRulesOperations:
"""RouteFilterRulesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
    """Keep references to the shared pipeline client, its configuration,
    and the serializer/deserializer pair used by every operation."""
    self._config = config
    self._client = client
    self._deserialize = deserializer
    self._serialize = serializer
async def _delete_initial(
    self,
    resource_group_name: str,
    route_filter_name: str,
    rule_name: str,
    **kwargs: Any
) -> None:
    """Send the initial DELETE request for a route filter rule.

    This only starts the long-running operation; polling to completion
    is driven by :meth:`begin_delete`.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Map auth/not-found/conflict status codes to specific exception types.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-07-01"
    # Construct URL
    url = self._delete_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
        'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    # Any status outside the accepted set is surfaced as a typed error.
    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    # DELETE returns no body; only invoke the custom callback if given.
    if cls:
        return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'}  # type: ignore
async def begin_delete(
    self,
    resource_group_name: str,
    route_filter_name: str,
    rule_name: str,
    **kwargs: Any
) -> AsyncLROPoller[None]:
    """Deletes the specified rule from a route filter.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param route_filter_name: The name of the route filter.
    :type route_filter_name: str
    :param rule_name: The name of the rule.
    :type rule_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # First call: fire the initial DELETE, keeping the raw pipeline
        # response (identity cls) so the poller can inspect its headers.
        raw_result = await self._delete_initial(
            resource_group_name=resource_group_name,
            route_filter_name=route_filter_name,
            rule_name=rule_name,
            cls=lambda x,y,z: x,
            **kwargs
        )
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # DELETE produces no body; only invoke the custom callback if given.
        if cls:
            return cls(pipeline_response, None, {})
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
        'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # Final LRO state for this operation is reached via the Location header.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'}  # type: ignore
async def get(
    self,
    resource_group_name: str,
    route_filter_name: str,
    rule_name: str,
    **kwargs: Any
) -> "_models.RouteFilterRule":
    """Gets the specified rule from a route filter.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param route_filter_name: The name of the route filter.
    :type route_filter_name: str
    :param rule_name: The name of the rule.
    :type rule_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: RouteFilterRule, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2019_07_01.models.RouteFilterRule
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.RouteFilterRule"]
    # Map auth/not-found/conflict status codes to specific exception types.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-07-01"
    accept = "application/json"
    # Construct URL
    url = self.get.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
        'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    # GET has a single success code; anything else becomes a typed error.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    deserialized = self._deserialize('RouteFilterRule', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'}  # type: ignore
async def _create_or_update_initial(
    self,
    resource_group_name: str,
    route_filter_name: str,
    rule_name: str,
    route_filter_rule_parameters: "_models.RouteFilterRule",
    **kwargs: Any
) -> "_models.RouteFilterRule":
    """Send the initial PUT request creating or updating a route filter rule.

    This only starts the long-running operation; polling to completion
    is driven by :meth:`begin_create_or_update`.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.RouteFilterRule"]
    # Map auth/not-found/conflict status codes to specific exception types.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-07-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"
    # Construct URL
    url = self._create_or_update_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
        'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(route_filter_rule_parameters, 'RouteFilterRule')
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    # 200 (updated) and 201 (created) both carry a RouteFilterRule body.
    if response.status_code == 200:
        deserialized = self._deserialize('RouteFilterRule', pipeline_response)
    if response.status_code == 201:
        deserialized = self._deserialize('RouteFilterRule', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'}  # type: ignore
async def begin_create_or_update(
    self,
    resource_group_name: str,
    route_filter_name: str,
    rule_name: str,
    route_filter_rule_parameters: "_models.RouteFilterRule",
    **kwargs: Any
) -> AsyncLROPoller["_models.RouteFilterRule"]:
    """Creates or updates a route in the specified route filter.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param route_filter_name: The name of the route filter.
    :type route_filter_name: str
    :param rule_name: The name of the route filter rule.
    :type rule_name: str
    :param route_filter_rule_parameters: Parameters supplied to the create or update route filter
     rule operation.
    :type route_filter_rule_parameters: ~azure.mgmt.network.v2019_07_01.models.RouteFilterRule
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either RouteFilterRule or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_07_01.models.RouteFilterRule]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.RouteFilterRule"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # First call: fire the initial PUT, keeping the raw pipeline
        # response (identity cls) so the poller can inspect its headers.
        raw_result = await self._create_or_update_initial(
            resource_group_name=resource_group_name,
            route_filter_name=route_filter_name,
            rule_name=rule_name,
            route_filter_rule_parameters=route_filter_rule_parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # The final response body is the created/updated RouteFilterRule.
        deserialized = self._deserialize('RouteFilterRule', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
        'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # Final LRO state is reached via the Azure-AsyncOperation header.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'}  # type: ignore
    async def _update_initial(
        self,
        resource_group_name: str,
        route_filter_name: str,
        rule_name: str,
        route_filter_rule_parameters: "_models.PatchRouteFilterRule",
        **kwargs: Any
    ) -> "_models.RouteFilterRule":
        """Issue the initial PATCH request of the update long-running operation.

        ``begin_update`` drives an LRO poller around this single
        request/response cycle; this method only performs the first call and
        deserializes its answer.

        :param resource_group_name: The name of the resource group.
        :param route_filter_name: The name of the route filter.
        :param rule_name: The name of the route filter rule.
        :param route_filter_rule_parameters: Parameters supplied to the update
         route filter rule operation.
        :return: The deserialized RouteFilterRule, or the result of the
         optional ``cls`` callback.
        :raises ~azure.core.exceptions.HttpResponseError: on any non-200 status.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RouteFilterRule"]
        # Map well-known statuses to specific exception types; callers may
        # extend or override the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL from the operation's metadata template.
        url = self._update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
            'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct query parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the PATCH body and run the request through the pipeline.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(route_filter_rule_parameters, 'PatchRouteFilterRule')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('RouteFilterRule', pipeline_response)
        if cls:
            # A custom callback gets full access to the raw pipeline response.
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'}  # type: ignore
    async def begin_update(
        self,
        resource_group_name: str,
        route_filter_name: str,
        rule_name: str,
        route_filter_rule_parameters: "_models.PatchRouteFilterRule",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.RouteFilterRule"]:
        """Updates a route in the specified route filter.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_filter_name: The name of the route filter.
        :type route_filter_name: str
        :param rule_name: The name of the route filter rule.
        :type rule_name: str
        :param route_filter_rule_parameters: Parameters supplied to the update route filter rule
         operation.
        :type route_filter_rule_parameters: ~azure.mgmt.network.v2019_07_01.models.PatchRouteFilterRule
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either RouteFilterRule or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_07_01.models.RouteFilterRule]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RouteFilterRule"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved poller state: issue the initial PATCH now. 'cls' is
            # overridden so the raw pipeline response is kept for polling.
            raw_result = await self._update_initial(
                resource_group_name=resource_group_name,
                route_filter_name=route_filter_name,
                rule_name=rule_name,
                route_filter_rule_parameters=route_filter_rule_parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs are only meaningful for the initial request and must
        # not be forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final polling response into a RouteFilterRule
            # (or hand it to the caller-supplied 'cls' callback).
            deserialized = self._deserialize('RouteFilterRule', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # Used by AsyncARMPolling when formatting templated polling URLs.
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
            'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously-saved poller instead of starting a new one.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'}  # type: ignore
    def list_by_route_filter(
        self,
        resource_group_name: str,
        route_filter_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.RouteFilterRuleListResult"]:
        """Gets all RouteFilterRules in a route filter.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_filter_name: The name of the route filter.
        :type route_filter_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either RouteFilterRuleListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_07_01.models.RouteFilterRuleListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RouteFilterRuleListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build a GET request: the first page uses the templated URL with
            # explicit query parameters; later pages use the service-provided
            # next_link verbatim (it already embeds the query string).
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_by_route_filter.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page and return (link to next page, items).
            deserialized = self._deserialize('RouteFilterRuleListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch a single page, raising HttpResponseError on non-200.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_route_filter.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules'}  # type: ignore
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/aio/operations/_route_filter_rules_operations.py | Python | mit | 28,638 |
# Copyright (c) 2011-2022 Berkeley Model United Nations. All rights reserved.
# Use of this source code is governed by a BSD License (see LICENSE).
import json, logging, sys, traceback
from django.core.mail.backends import dummy, smtp
from django.db import transaction
from smtplib import SMTPException
class DevLoggingEmailBackend(dummy.EmailBackend):
    """Development email backend: logs messages without sending them."""

    def send_messages(self, email_messages):
        """Log each message and report the number handled.

        Returns the count so the method honors Django's send_messages
        contract (the previous implementation implicitly returned None).
        """
        for email in email_messages:
            log_email(email)
        return len(email_messages)
class LoggingEmailBackend(smtp.EmailBackend):
    """SMTP email backend that logs every message and any SMTP failure.

    Messages are sent one at a time so a single SMTPException only skips
    that message; the error is logged and delivery continues with the rest.
    """

    def send_messages(self, email_messages):
        """Log and send each message; return the number successfully sent.

        Bug fix: the previous implementation returned from inside the loop,
        so every message after the first was silently dropped.
        """
        sent_count = 0
        with transaction.atomic():
            for email in email_messages:
                log_email(email)
                try:
                    # super() returns the number sent (may be None/0).
                    sent_count += super(LoggingEmailBackend, self).send_messages([email]) or 0
                except SMTPException:
                    logger = logging.getLogger('huxley.api')
                    exc_type, exc_value, exc_traceback = sys.exc_info()
                    exc_traceback = ''.join(traceback.format_exception(exc_type, exc_value, exc_traceback))
                    log = json.dumps({
                        'message': exc_traceback,
                        'uri': ', '.join(email.to),
                        'status_code': 500,
                        'username': ''})
                    logger.exception(log)
        return sent_count
def log_email(email):
    """Record an outbound email (recipients only) in the huxley.api log."""
    payload = {
        'message': "Sending email",
        'uri': ', '.join(email.to),
        'status_code': 0,
        'username': ''}
    logging.getLogger('huxley.api').info(json.dumps(payload))
| bmun/huxley | huxley/logging/mail.py | Python | bsd-3-clause | 1,598 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2019 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list ofconditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materialsprovided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import time
def fib(n):
    """Return the n-th Fibonacci number via naive double recursion.

    Deliberately unmemoized: this file is a benchmark and the exponential
    recursion is the workload being measured.
    """
    return n if n < 2 else fib(n - 2) + fib(n - 1)
# time.clock() was deprecated in Python 3.3 and removed in 3.8; use
# perf_counter() when available, falling back for legacy interpreters.
# (The right operand of 'or' is only evaluated on those old interpreters,
# so this line loads cleanly on modern Pythons too.)
_clock = getattr(time, 'perf_counter', None) or time.clock
start = _clock()
for i in range(5):
    print(fib(20))
print("use: " + str(_clock() - start))
| ASMlover/study | cplusplus/wren_cc/test/bench_fib.py | Python | bsd-2-clause | 1,588 |
# Enter your code here. Read input from STDIN. Print output to STDOUT
n = int(raw_input())
l = []
for i in range(n):
commands = raw_input().split()
count = 0
method = ""
method = commands[0]
args = map(int, commands[1:])
if method == "print":
print l
elif not args and method != "print":
getattr(l, method)()
elif len(args) == 1:
getattr(l, method)(args[0])
else:
getattr(l, method)(args[0], args[1])
| spradeepv/dive-into-python | hackerrank/domain/python/data_types/lists.py | Python | mit | 470 |
# Copyright 2015 AvanzOSC - Ainara Galdona
# Copyright 2015-2020 Tecnativa - Pedro M. Baeza
# Copyright 2016 Tecnativa - Antonio Espinosa
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
# Odoo addon manifest: evaluated as a plain dict literal by Odoo at module
# discovery time. No executable logic belongs in this file.
{
    "name": "AEAT modelo 115",
    "version": "14.0.1.0.0",
    "development_status": "Mature",
    "category": "Localisation/Accounting",
    "author": "AvanzOSC," "Tecnativa," "Odoo Community Association (OCA)",
    "website": "https://github.com/OCA/l10n-spain",
    "license": "AGPL-3",
    # Builds on the base Spanish localization and the shared AEAT framework.
    "depends": ["l10n_es", "l10n_es_aeat"],
    # Loaded in order: access rules first, then data, then views.
    "data": [
        "security/ir.model.access.csv",
        "security/l10n_es_aeat_mod115_security.xml",
        "data/aeat_export_mod115_data.xml",
        "data/tax_code_map_mod115_data.xml",
        "views/mod115_view.xml",
    ],
    "installable": True,
    "maintainers": ["pedrobaeza"],
}
| cubells/l10n-spain | l10n_es_aeat_mod115/__manifest__.py | Python | agpl-3.0 | 838 |
# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs all benchmarks in PerfKitBenchmarker.
All benchmarks in PerfKitBenchmarker export the following interface:
GetConfig: this returns, the name of the benchmark, the number of machines
required to run one instance of the benchmark, a detailed description
of the benchmark, and if the benchmark requires a scratch disk.
Prepare: this function takes a list of VMs as an input parameter. The benchmark
will then get all binaries required to run the benchmark and, if
required, create data files.
Run: this function takes a list of VMs as an input parameter. The benchmark will
then run the benchmark upon the machines specified. The function will
return a dictonary containing the results of the benchmark.
Cleanup: this function takes a list of VMs as an input parameter. The benchmark
will then return the machine to the state it was at before Prepare
was called.
PerfKitBenchmarker has the following run stages: provision, prepare,
run, cleanup, teardown, and all.
provision: Read command-line flags, decide what benchmarks to run, and
create the necessary resources for each benchmark, including
networks, VMs, disks, and keys, and generate a run_uri, which can
be used to resume execution at later stages.
prepare: Execute the Prepare function of each benchmark to install
necessary software, upload datafiles, etc.
run: Execute the Run function of each benchmark and collect the
generated samples. The publisher may publish these samples
according to PKB's settings. The Run stage can be called multiple
times with the run_uri generated by the provision stage.
cleanup: Execute the Cleanup function of each benchmark to uninstall
software and delete data files.
teardown: Delete VMs, key files, networks, and disks created in the
'provision' stage.
all: PerfKitBenchmarker will run all of the above stages (provision,
prepare, run, cleanup, teardown). Any resources generated in the
provision stage will be automatically deleted in the teardown
stage, even if there is an error in an earlier stage. When PKB is
running in this mode, the run cannot be repeated or resumed using
the run_uri.
"""
import collections
import getpass
import itertools
import logging
import multiprocessing
import sys
import time
import uuid
from perfkitbenchmarker import archive
from perfkitbenchmarker import background_tasks
from perfkitbenchmarker import benchmark_sets
from perfkitbenchmarker import benchmark_spec
from perfkitbenchmarker import benchmark_status
from perfkitbenchmarker import configs
from perfkitbenchmarker import context
from perfkitbenchmarker import disk
from perfkitbenchmarker import errors
from perfkitbenchmarker import events
from perfkitbenchmarker import flags
from perfkitbenchmarker import flag_util
from perfkitbenchmarker import linux_benchmarks
from perfkitbenchmarker import log_util
from perfkitbenchmarker import os_types
from perfkitbenchmarker import requirements
from perfkitbenchmarker import spark_service
from perfkitbenchmarker import stages
from perfkitbenchmarker import static_virtual_machine
from perfkitbenchmarker import timing_util
from perfkitbenchmarker import traces
from perfkitbenchmarker import version
from perfkitbenchmarker import vm_util
from perfkitbenchmarker import windows_benchmarks
from perfkitbenchmarker.configs import benchmark_config_spec
from perfkitbenchmarker.linux_benchmarks import cluster_boot_benchmark
from perfkitbenchmarker.publisher import SampleCollector
# --- Module constants -------------------------------------------------------
# Name of the log file written into the run's temp directory (see SetUpPKB).
LOG_FILE_NAME = 'pkb.log'
REQUIRED_INFO = ['scratch_disk', 'num_machines']
# Names of external executables PKB invokes.
REQUIRED_EXECUTABLES = frozenset(['ssh', 'ssh-keygen', 'scp', 'openssl'])
FLAGS = flags.FLAGS
# --- Command-line flag definitions ------------------------------------------
flags.DEFINE_list('ssh_options', [], 'Additional options to pass to ssh.')
flags.DEFINE_list('benchmarks', [benchmark_sets.STANDARD_SET],
                  'Benchmarks and/or benchmark sets that should be run. The '
                  'default is the standard set. For more information about '
                  'benchmarks and benchmark sets, see the README and '
                  'benchmark_sets.py.')
flags.DEFINE_string('archive_bucket', None,
                    'Archive results to the given S3/GCS bucket.')
flags.DEFINE_string('project', None, 'GCP project ID under which '
                    'to create the virtual machines')
flags.DEFINE_list(
    'zones', [],
    'A list of zones within which to run PerfKitBenchmarker. '
    'This is specific to the cloud provider you are running on. '
    'If multiple zones are given, PerfKitBenchmarker will create 1 VM in '
    'zone, until enough VMs are created as specified in each '
    'benchmark. The order in which this flag is applied to VMs is '
    'undefined.')
flags.DEFINE_list(
    'extra_zones', [],
    'Zones that will be appended to the "zones" list. This is functionally '
    'the same, but allows flag matrices to have two zone axes.')
# TODO(user): note that this is currently very GCE specific. Need to create a
# module which can traslate from some generic types to provider specific
# nomenclature.
flags.DEFINE_string('machine_type', None, 'Machine '
                    'types that will be created for benchmarks that don\'t '
                    'require a particular type.')
flags.DEFINE_integer('num_vms', 1, 'For benchmarks which can make use of a '
                     'variable number of machines, the number of VMs to use.')
flags.DEFINE_string('image', None, 'Default image that will be '
                    'linked to the VM')
flags.DEFINE_string('run_uri', None, 'Name of the Run. If provided, this '
                    'should be alphanumeric and less than or equal to 10 '
                    'characters in length.')
flags.DEFINE_string('owner', getpass.getuser(), 'Owner name. '
                    'Used to tag created resources and performance records.')
flags.DEFINE_enum(
    'log_level', log_util.INFO,
    [log_util.DEBUG, log_util.INFO],
    'The log level to run at.')
flags.DEFINE_enum(
    'file_log_level', log_util.DEBUG, [log_util.DEBUG, log_util.INFO],
    'Anything logged at this level or higher will be written to the log file.')
flags.DEFINE_integer('duration_in_seconds', None,
                     'duration of benchmarks. '
                     '(only valid for mesh_benchmark)')
flags.DEFINE_string('static_vm_file', None,
                    'The file path for the Static Machine file. See '
                    'static_virtual_machine.py for a description of this file.')
flags.DEFINE_boolean('version', False, 'Display the version and exit.')
# --- Disk configuration flags ------------------------------------------------
flags.DEFINE_enum(
    'scratch_disk_type', None,
    [disk.STANDARD, disk.REMOTE_SSD, disk.PIOPS, disk.LOCAL],
    'Type for all scratch disks. The default is standard')
flags.DEFINE_string(
    'data_disk_type', None,
    'Type for all data disks. If a provider keeps the operating system and '
    'user data on separate disks, this only affects the user data disk(s).'
    'If the provider has OS and user data on the same disk, this flag affects'
    'that disk.')
flags.DEFINE_integer('scratch_disk_size', None, 'Size, in gb, for all scratch '
                     'disks.')
flags.DEFINE_integer('data_disk_size', None, 'Size, in gb, for all data disks.')
flags.DEFINE_integer('scratch_disk_iops', None,
                     'IOPS for Provisioned IOPS (SSD) volumes in AWS.')
flags.DEFINE_integer('num_striped_disks', None,
                     'The number of data disks to stripe together to form one '
                     '"logical" data disk. This defaults to 1 '
                     '(except with local disks), which means no striping. '
                     'When using local disks, they default to striping '
                     'all disks together. The striped disks will appear as '
                     'one disk (data_disk_0) in the metadata.',
                     lower_bound=1)
# --- Run-control flags -------------------------------------------------------
flags.DEFINE_bool('install_packages', None,
                  'Override for determining whether packages should be '
                  'installed. If this is false, no packages will be installed '
                  'on any VMs. This option should probably only ever be used '
                  'if you have already created an image with all relevant '
                  'packages installed.')
flags.DEFINE_bool(
    'stop_after_benchmark_failure', False,
    'Determines response when running multiple benchmarks serially and a '
    'benchmark run fails. When True, no further benchmarks are scheduled, and '
    'execution ends. When False, benchmarks continue to be scheduled. Does not '
    'apply to keyboard interrupts, which will always prevent further '
    'benchmarks from being scheduled.')
flags.DEFINE_boolean(
    'ignore_package_requirements', False,
    'Disables Python package requirement runtime checks.')
flags.DEFINE_enum('spark_service_type', None,
                  [spark_service.PKB_MANAGED, spark_service.PROVIDER_MANAGED],
                  'Type of spark service to use')
flags.DEFINE_boolean(
    'publish_after_run', False,
    'If true, PKB will publish all samples available immediately after running '
    'each benchmark. This may be useful in scenarios where the PKB run time '
    'for all benchmarks is much greater than a single benchmark.')
flags.DEFINE_integer(
    'run_stage_time', 0,
    'PKB will run/re-run the run stage of each benchmark until it has spent '
    'at least this many seconds. It defaults to 0, so benchmarks will only '
    'be run once unless some other value is specified.')
flags.DEFINE_integer(
    'run_stage_retries', 0,
    'The number of allowable consecutive failures during the run stage. After '
    'this number of failures any exceptions will cause benchmark termination. '
    'If run_stage_time is exceeded, the run stage will not be retried even if '
    'the number of failures is less than the value of this flag.')
flags.DEFINE_boolean(
    'boot_samples', False,
    'Whether to publish boot time samples for all tests.')
flags.DEFINE_integer(
    'run_processes', 1,
    'The number of parallel processes to use to run benchmarks.',
    lower_bound=1)
# Support for using a proxy in the cloud environment.
flags.DEFINE_string('http_proxy', '',
                    'Specify a proxy for HTTP in the form '
                    '[user:passwd@]proxy.server:port.')
flags.DEFINE_string('https_proxy', '',
                    'Specify a proxy for HTTPS in the form '
                    '[user:passwd@]proxy.server:port.')
flags.DEFINE_string('ftp_proxy', '',
                    'Specify a proxy for FTP in the form '
                    '[user:passwd@]proxy.server:port.')
# Maximum accepted length for --run_uri (validated in _InitializeRunUri).
MAX_RUN_URI_LENGTH = 8
# Set by RunBenchmarkTask on KeyboardInterrupt or (with
# --stop_after_benchmark_failure) on a failed benchmark; later tasks become
# no-ops once it is set.
_TEARDOWN_EVENT = multiprocessing.Event()
events.initialization_complete.connect(traces.RegisterAll)
def _InjectBenchmarkInfoIntoDocumentation():
  """Appends each benchmark's information to the main module's docstring."""
  # TODO: Verify if there is other way of appending additional help
  # message.
  # Build one "name: message" line per registered benchmark set.
  set_lines = []
  for set_name in benchmark_sets.BENCHMARK_SETS:
    set_lines.append(
        '%s: %s' % (set_name,
                    benchmark_sets.BENCHMARK_SETS[set_name]['message']))
  main_module = sys.modules['__main__']
  # Rebuild --help text: version banner, module docstring, then the
  # per-benchmark and per-set descriptions.
  main_module.__doc__ = (
      'PerfKitBenchmarker version: {version}\n\n{doc}\n'
      'Benchmarks (default requirements):\n'
      '\t{benchmark_doc}').format(
          version=version.VERSION,
          doc=__doc__,
          benchmark_doc=_GenerateBenchmarkDocumentation())
  main_module.__doc__ += ('\n\nBenchmark Sets:\n\t%s'
                          % '\n\t'.join(set_lines))
def _ParseFlags(argv=sys.argv):
  """Parses the command-line flags, exiting with usage info on bad input."""
  try:
    FLAGS(argv)
  except flags.FlagsError as err:
    logging.error('%s\nUsage: %s ARGS\n%s', err, sys.argv[0], FLAGS)
    sys.exit(1)
def CheckVersionFlag():
"""If the --version flag was specified, prints the version and exits."""
if FLAGS.version:
print version.VERSION
sys.exit(0)
def _InitializeRunUri():
  """Determines the PKB run URI and sets FLAGS.run_uri.

  A fresh URI is generated when the provision stage is being run; otherwise
  the URI of the most recently modified prior run is reused so later stages
  can find the state pickled during 'provision'.

  Raises:
    errors.Setup.NoRunURIError: If no --run_uri was given and no prior run
      directory could be located.
    errors.Setup.BadRunURIError: If the supplied --run_uri is not
      alphanumeric or exceeds MAX_RUN_URI_LENGTH characters.
  """
  if FLAGS.run_uri is None:
    if stages.PROVISION in FLAGS.run_stage:
      # Fresh run: derive a short URI from the tail of a random UUID.
      FLAGS.run_uri = str(uuid.uuid4())[-8:]
    else:
      # Attempt to get the last modified run directory.
      run_uri = vm_util.GetLastRunUri()
      if run_uri:
        FLAGS.run_uri = run_uri
        logging.warning(
            'No run_uri specified. Attempting to run the following stages with '
            '--run_uri=%s: %s', FLAGS.run_uri, ', '.join(FLAGS.run_stage))
      else:
        raise errors.Setup.NoRunURIError(
            'No run_uri specified. Could not run the following stages: %s' %
            ', '.join(FLAGS.run_stage))
  elif not FLAGS.run_uri.isalnum() or len(FLAGS.run_uri) > MAX_RUN_URI_LENGTH:
    raise errors.Setup.BadRunURIError('run_uri must be alphanumeric and less '
                                      'than or equal to 8 characters in '
                                      'length.')
def _CreateBenchmarkSpecs():
  """Create a list of BenchmarkSpecs for each benchmark run to be scheduled.

  Returns:
    A list of BenchmarkSpecs.
  """
  specs = []
  benchmark_tuple_list = benchmark_sets.GetBenchmarksFromFlags()
  # Per-benchmark-name counters, used to build unique run ids below.
  benchmark_counts = collections.defaultdict(itertools.count)
  for benchmark_module, user_config in benchmark_tuple_list:
    # Construct benchmark config object.
    name = benchmark_module.BENCHMARK_NAME
    expected_os_types = (
        os_types.WINDOWS_OS_TYPES if FLAGS.os_type in os_types.WINDOWS_OS_TYPES
        else os_types.LINUX_OS_TYPES)
    # Evaluate GetConfig with the benchmark's user-supplied flag overrides
    # temporarily substituted into FLAGS.
    merged_flags = benchmark_config_spec.FlagsDecoder().Decode(
        user_config.get('flags'), 'flags', FLAGS)
    with flag_util.FlagDictSubstitution(FLAGS, lambda: merged_flags):
      config_dict = benchmark_module.GetConfig(user_config)
    config_spec_class = getattr(
        benchmark_module, 'BENCHMARK_CONFIG_SPEC_CLASS',
        benchmark_config_spec.BenchmarkConfigSpec)
    config = config_spec_class(name, expected_os_types=expected_os_types,
                               flag_values=FLAGS, **config_dict)
    # Assign a unique ID to each benchmark run. This differs even between two
    # runs of the same benchmark within a single PKB run.
    uid = name + str(benchmark_counts[name].next())
    # Optional step to check flag values and verify files exist.
    check_prereqs = getattr(benchmark_module, 'CheckPrerequisites', None)
    if check_prereqs:
      try:
        with config.RedirectFlags(FLAGS):
          check_prereqs()
      except:
        # Bare except is acceptable here: the exception is logged and
        # immediately re-raised.
        logging.exception('Prerequisite check failed for %s', name)
        raise
    specs.append(benchmark_spec.BenchmarkSpec.GetBenchmarkSpec(
        benchmark_module, config, uid))
  return specs
def DoProvisionPhase(spec, timer):
  """Performs the Provision phase of benchmark execution.

  Args:
    spec: The BenchmarkSpec created for the benchmark.
    timer: An IntervalTimer that measures the start and stop times of resource
      provisioning.
  """
  logging.info('Provisioning resources for benchmark %s', spec.name)
  # spark service needs to go first, because it adds some vms.
  spec.ConstructSparkService()
  spec.ConstructVirtualMachines()
  # Pickle the spec before we try to create anything so we can clean
  # everything up on a second run if something goes wrong.
  spec.Pickle()
  events.benchmark_start.send(benchmark_spec=spec)
  try:
    with timer.Measure('Resource Provisioning'):
      spec.Provision()
  finally:
    # Also pickle the spec after the resources are created so that
    # we have a record of things like AWS ids. Otherwise we won't
    # be able to clean them up on a subsequent run.
    spec.Pickle()
def DoPreparePhase(spec, timer):
  """Performs the Prepare phase of benchmark execution.

  Args:
    spec: The BenchmarkSpec created for the benchmark.
    timer: An IntervalTimer that measures the start and stop times of the
      benchmark module's Prepare function.
  """
  logging.info('Preparing benchmark %s', spec.name)
  # Spec-level preparation runs before the benchmark module's Prepare hook;
  # each is timed separately.
  with timer.Measure('BenchmarkSpec Prepare'):
    spec.Prepare()
  with timer.Measure('Benchmark Prepare'):
    spec.BenchmarkPrepare(spec)
  # Background workloads keep running through the Run phase; they are
  # stopped in DoCleanupPhase.
  spec.StartBackgroundWorkload()
def DoRunPhase(spec, collector, timer):
  """Performs the Run phase of benchmark execution.

  The run repeats until at least --run_stage_time seconds have elapsed
  (default 0, i.e. a single iteration). Up to --run_stage_retries
  consecutive failures are tolerated before the exception propagates.

  Args:
    spec: The BenchmarkSpec created for the benchmark.
    collector: The SampleCollector object to add samples to.
    timer: An IntervalTimer that measures the start and stop times of the
      benchmark module's Run function.
  """
  deadline = time.time() + FLAGS.run_stage_time
  run_number = 0
  consecutive_failures = 0
  while True:
    samples = []
    logging.info('Running benchmark %s', spec.name)
    events.before_phase.send(events.RUN_PHASE, benchmark_spec=spec)
    try:
      with timer.Measure('Benchmark Run'):
        samples = spec.BenchmarkRun(spec)
      # Boot-time samples are appended when requested, or always for the
      # cluster_boot benchmark itself.
      if (FLAGS.boot_samples or
          spec.name == cluster_boot_benchmark.BENCHMARK_NAME):
        samples.extend(cluster_boot_benchmark.GetTimeToBoot(spec.vms))
    except Exception:
      consecutive_failures += 1
      if consecutive_failures > FLAGS.run_stage_retries:
        raise
      logging.exception('Run failed (consecutive_failures=%s); retrying.',
                        consecutive_failures)
    else:
      consecutive_failures = 0
    finally:
      # after_phase fires even on failure, pairing the before_phase above.
      events.after_phase.send(events.RUN_PHASE, benchmark_spec=spec)
    events.samples_created.send(
        events.RUN_PHASE, benchmark_spec=spec, samples=samples)
    if FLAGS.run_stage_time:
      # Only tag run numbers when multiple iterations are possible.
      for sample in samples:
        sample.metadata['run_number'] = run_number
    collector.AddSamples(samples, spec.name, spec)
    if FLAGS.publish_after_run:
      collector.PublishSamples()
    run_number += 1
    if time.time() > deadline:
      break
def DoCleanupPhase(spec, timer):
  """Performs the Cleanup phase of benchmark execution.

  Cleanup work only happens when the benchmark requests it
  (always_call_cleanup) or when static VMs are in use — static machines
  outlive the run, so benchmark state left on them must be removed.

  Args:
    spec: The BenchmarkSpec created for the benchmark.
    timer: An IntervalTimer that measures the start and stop times of the
      benchmark module's Cleanup function.
  """
  logging.info('Cleaning up benchmark %s', spec.name)
  # Generator expression instead of materializing a throwaway list.
  if spec.always_call_cleanup or any(vm.is_static for vm in spec.vms):
    spec.StopBackgroundWorkload()
    with timer.Measure('Benchmark Cleanup'):
      spec.BenchmarkCleanup(spec)
def DoTeardownPhase(spec, timer):
  """Performs the Teardown phase of benchmark execution.

  Args:
    spec: The BenchmarkSpec created for the benchmark.
    timer: An IntervalTimer that measures the start and stop times of
      resource teardown.
  """
  logging.info('Tearing down resources for benchmark %s', spec.name)
  with timer.Measure('Resource Teardown'):
    spec.Delete()
def RunBenchmark(spec, collector):
  """Runs a single benchmark and adds the results to the collector.

  Args:
    spec: The BenchmarkSpec object with run information.
    collector: The SampleCollector object to add samples to.
  """
  # Assume failure until the very last line; any exception leaves FAILED set.
  spec.status = benchmark_status.FAILED
  # Modify the logger prompt for messages logged within this function.
  label_extension = '{}({}/{})'.format(
      spec.name, spec.sequence_number, spec.total_benchmarks)
  context.SetThreadBenchmarkSpec(spec)
  log_context = log_util.GetThreadLogContext()
  with log_context.ExtendLabel(label_extension):
    with spec.RedirectGlobalFlags():
      end_to_end_timer = timing_util.IntervalTimer()
      detailed_timer = timing_util.IntervalTimer()
      try:
        with end_to_end_timer.Measure('End to End'):
          # Each stage only runs if selected via --run_stage.
          if stages.PROVISION in FLAGS.run_stage:
            DoProvisionPhase(spec, detailed_timer)
          if stages.PREPARE in FLAGS.run_stage:
            DoPreparePhase(spec, detailed_timer)
          if stages.RUN in FLAGS.run_stage:
            DoRunPhase(spec, collector, detailed_timer)
          if stages.CLEANUP in FLAGS.run_stage:
            DoCleanupPhase(spec, detailed_timer)
          if stages.TEARDOWN in FLAGS.run_stage:
            DoTeardownPhase(spec, detailed_timer)
        # Add timing samples. End-to-end timing is only meaningful when every
        # stage was selected.
        if (FLAGS.run_stage == stages.STAGES and
            timing_util.EndToEndRuntimeMeasurementEnabled()):
          collector.AddSamples(
              end_to_end_timer.GenerateSamples(), spec.name, spec)
        if timing_util.RuntimeMeasurementsEnabled():
          collector.AddSamples(
              detailed_timer.GenerateSamples(), spec.name, spec)
      except:
        # Resource cleanup (below) can take a long time. Log the error to give
        # immediate feedback, then re-throw.
        logging.exception('Error during benchmark %s', spec.name)
        # If the particular benchmark requests us to always call cleanup, do it
        # here.
        if stages.CLEANUP in FLAGS.run_stage and spec.always_call_cleanup:
          DoCleanupPhase(spec, detailed_timer)
        raise
      finally:
        if stages.TEARDOWN in FLAGS.run_stage:
          spec.Delete()
        events.benchmark_end.send(benchmark_spec=spec)
        # Pickle spec to save final resource state.
        spec.Pickle()
  # Reached only when no exception escaped the block above.
  spec.status = benchmark_status.SUCCEEDED
def RunBenchmarkTask(spec):
  """Task that executes RunBenchmark.

  This is designed to be used with RunParallelProcesses.

  Arguments:
    spec: BenchmarkSpec. The spec to call RunBenchmark with.

  Returns:
    A tuple of BenchmarkSpec, list of samples.
  """
  if _TEARDOWN_EVENT.is_set():
    # An earlier failure requested that execution stop; do nothing.
    return spec, []

  # Many providers name resources using run_uris. When running multiple
  # benchmarks in parallel, this causes name collisions on resources.
  # By modifying the run_uri, we avoid the collisions.
  if FLAGS.run_processes > 1:
    spec.config.flags['run_uri'] = FLAGS.run_uri + str(spec.sequence_number)

  collector = SampleCollector()
  try:
    RunBenchmark(spec, collector)
  except BaseException as e:
    msg = 'Benchmark {0}/{1} {2} (UID: {3}) failed.'.format(
        spec.sequence_number, spec.total_benchmarks, spec.name, spec.uid)
    if isinstance(e, KeyboardInterrupt) or FLAGS.stop_after_benchmark_failure:
      logging.error('%s Execution will not continue.', msg)
      _TEARDOWN_EVENT.set()
    else:
      logging.error('%s Execution will continue.', msg)
  # We need to return both the spec and samples so that we know the status of
  # the test and can publish any samples that haven't yet been published.
  # NOTE: this return previously lived in a `finally` block; a `return` in
  # `finally` silently swallows any in-flight exception, so it is done after
  # the handler instead. Behavior is unchanged because the handler above
  # already catches BaseException without re-raising.
  return spec, collector.samples
def _LogCommandLineFlags():
  """Logs the serialized form of every flag the user explicitly provided."""
  serialized = [
      flag.Serialize() for flag in FLAGS.FlagDict().values() if flag.present
  ]
  logging.info('Flag values:\n%s', '\n'.join(serialized))
def SetUpPKB():
  """Set globals and environment variables for PKB.

  After SetUpPKB() returns, it should be possible to call PKB
  functions, like benchmark_spec.Prepare() or benchmark_spec.Run().

  SetUpPKB() also modifies the local file system by creating a temp
  directory and storing new SSH keys.

  Exits the process (status 1) when the run URI cannot be initialized or
  when a Windows benchmark is attempted from a non-Windows host.

  Raises:
    errors.Setup.MissingExecutableError: if a required executable is not
        found on PATH.
  """
  try:
    _InitializeRunUri()
  except errors.Error as e:
    logging.error(e)
    sys.exit(1)

  # Initialize logging.
  vm_util.GenTempDir()
  log_util.ConfigureLogging(
      stderr_log_level=log_util.LOG_LEVELS[FLAGS.log_level],
      log_path=vm_util.PrependTempDir(LOG_FILE_NAME),
      run_uri=FLAGS.run_uri,
      file_log_level=log_util.LOG_LEVELS[FLAGS.file_log_level])
  logging.info('PerfKitBenchmarker version: %s', version.VERSION)

  # Translate deprecated flags and log all provided flag values.
  disk.WarnAndTranslateDiskFlags()
  _LogCommandLineFlags()

  # Check environment.
  if not FLAGS.ignore_package_requirements:
    requirements.CheckBasicRequirements()

  if FLAGS.os_type == os_types.WINDOWS and not vm_util.RunningOnWindows():
    logging.error('In order to run benchmarks on Windows VMs, you must be '
                  'running on Windows.')
    sys.exit(1)

  for executable in REQUIRED_EXECUTABLES:
    if not vm_util.ExecutableOnPath(executable):
      raise errors.Setup.MissingExecutableError(
          'Could not find required executable "%s"', executable)

  vm_util.SSHKeyGen()

  # Optionally pre-register statically provisioned (non-cloud) VMs.
  if FLAGS.static_vm_file:
    with open(FLAGS.static_vm_file) as fp:
      static_virtual_machine.StaticVirtualMachine.ReadStaticVirtualMachineFile(
          fp)

  events.initialization_complete.send(parsed_flags=FLAGS)
def RunBenchmarks():
  """Runs all benchmarks in PerfKitBenchmarker.

  Benchmarks are fanned out as parallel processes; their samples are merged
  into one collector and published even when a task raises.

  Returns:
    Exit status for the process: 0 if every benchmark SUCCEEDED, else 1.
  """
  benchmark_specs = _CreateBenchmarkSpecs()
  collector = SampleCollector()
  try:
    tasks = [(RunBenchmarkTask, (spec,), {})
             for spec in benchmark_specs]
    spec_sample_tuples = background_tasks.RunParallelProcesses(
        tasks, FLAGS.run_processes)
    # zip(*...) splits the (spec, samples) tuples back into parallel lists.
    benchmark_specs, sample_lists = zip(*spec_sample_tuples)
    for sample_list in sample_lists:
      collector.samples.extend(sample_list)
  finally:
    # Publish and summarize whatever was gathered, even on failure.
    if collector.samples:
      collector.PublishSamples()
    if benchmark_specs:
      logging.info(benchmark_status.CreateSummary(benchmark_specs))
    logging.info('Complete logs can be found at: %s',
                 vm_util.PrependTempDir(LOG_FILE_NAME))

  if stages.TEARDOWN not in FLAGS.run_stage:
    # Resources are still alive; tell the user how to reuse them.
    logging.info(
        'To run again with this setup, please use --run_uri=%s', FLAGS.run_uri)

  if FLAGS.archive_bucket:
    archive.ArchiveRun(vm_util.GetTempDir(), FLAGS.archive_bucket,
                       gsutil_path=FLAGS.gsutil_path,
                       prefix=FLAGS.run_uri + '_')
  all_benchmarks_succeeded = all(spec.status == benchmark_status.SUCCEEDED
                                 for spec in benchmark_specs)
  return 0 if all_benchmarks_succeeded else 1
def _GenerateBenchmarkDocumentation():
  """Generates benchmark documentation to show in --help.

  Returns:
    A '\n\t'-joined string with one entry per benchmark of the form
    "name: description (N VMs[ with scratch volume(s)])".
  """
  benchmark_docs = []
  for benchmark_module in (linux_benchmarks.BENCHMARKS +
                           windows_benchmarks.BENCHMARKS):
    benchmark_config = configs.LoadMinimalConfig(
        benchmark_module.BENCHMARK_CONFIG, benchmark_module.BENCHMARK_NAME)
    vm_groups = benchmark_config.get('vm_groups', {})
    total_vm_count = 0
    vm_str = ''
    scratch_disk_str = ''
    # `values()` instead of the Python-2-only `itervalues()`; behavior is
    # identical and the code now also runs on Python 3.
    for group in vm_groups.values():
      group_vm_count = group.get('vm_count', 1)
      if group_vm_count is None:
        # Any group with an indeterminate count makes the total 'variable'.
        vm_str = 'variable'
      else:
        total_vm_count += group_vm_count
      if group.get('disk_spec'):
        scratch_disk_str = ' with scratch volume(s)'
    name = benchmark_module.BENCHMARK_NAME
    if benchmark_module in windows_benchmarks.BENCHMARKS:
      name += ' (Windows)'
    benchmark_docs.append('%s: %s (%s VMs%s)' %
                          (name,
                           benchmark_config['description'],
                           vm_str or total_vm_count,
                           scratch_disk_str))
  return '\n\t'.join(benchmark_docs)
def Main():
  """PKB entry point: configure logging and flags, then run benchmarks.

  Returns:
    Process exit status from RunBenchmarks() (0 on success, 1 otherwise).
  """
  log_util.ConfigureBasicLogging()
  # Inject the generated benchmark list into --help output before parsing.
  _InjectBenchmarkInfoIntoDocumentation()
  _ParseFlags()
  CheckVersionFlag()
  SetUpPKB()
  return RunBenchmarks()
| meteorfox/PerfKitBenchmarker | perfkitbenchmarker/pkb.py | Python | apache-2.0 | 27,717 |
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import xml.etree.ElementTree
from xml.etree.cElementTree import ElementTree, Element, SubElement
from xml.etree.cElementTree import fromstring, tostring
import fs_uae_launcher.fsui as fsui
from ..Config import Config
from ..Settings import Settings
from ..I18N import _, ngettext
class XMLControl(fsui.TextArea):
    """Text area widget for viewing and editing a game's XML configuration.

    The widget keeps track of the file it was loaded from (``self.path``)
    and can parse its current text into an ElementTree and back.
    """

    def __init__(self, parent):
        fsui.TextArea.__init__(self, parent, horizontal_scroll=True)
        # Path of the XML file currently loaded, or "" when none.
        self.path = ""

    def connect_game(self, info):
        """Write the game's uuid and name from *info* into the XML tree.

        Does nothing if the current text is not valid XML or the root
        element is not <config>.
        """
        tree = self.get_tree()
        if tree is None:
            # Robustness fix: get_tree() returns None on a parse error;
            # previously this crashed with AttributeError.
            return
        root = tree.getroot()
        if not root.tag == "config":
            return
        game_node = self.find_or_create_node(root, "game")
        game_node.set("uuid", info["uuid"])
        game_name_node = self.find_or_create_node(game_node, "name")
        game_name_node.text = info["name"]
        self.set_tree(tree)

    def find_or_create_node(self, element, name):
        """Return the first child *name* of *element*, creating it if absent."""
        node = element.find(name)
        if node is None:
            node = SubElement(element, name)
        return node

    def set_path(self, path):
        """Point the widget at *path*, loading it (or clearing on no file)."""
        if not os.path.exists(path):
            path = ""
        self.path = path
        if path:
            self.load_xml(path)
        else:
            self.set_text("")

    def get_tree(self):
        """Parse the widget text into an ElementTree, or None on bad XML."""
        text = self.get_text().strip()
        try:
            root = fromstring(text.encode("UTF-8"))
        except Exception:
            # FIXME: show message
            import traceback
            traceback.print_exc()
            return None
        tree = ElementTree(root)
        # Re-indent in place so the round-tripped text stays readable.
        indent_tree(root)
        return tree

    def set_tree(self, tree):
        """Serialize *tree* back into the text area (without XML declaration)."""
        data = tostring(tree.getroot(), encoding="UTF-8").decode("UTF-8")
        std_decl = "<?xml version='1.0' encoding='UTF-8'?>"
        if data.startswith(std_decl):
            data = data[len(std_decl):].strip()
        self.set_text(data)

    def load_xml(self, path):
        """Load the raw contents of *path* into the text area."""
        with open(path, "rb") as f:
            data = f.read()
        self.set_text(data)

    def save(self):
        """Save the current text to the path it was loaded from, if any."""
        if not self.path:
            print("no path to save XML to")
            return
        self.save_xml(self.path)

    def save_xml(self, path):
        """Write the current tree to *path*.

        BUG FIX: this previously wrote to ``self.path``, silently ignoring
        the *path* argument. It now honors the argument (the only in-file
        caller passes ``self.path``, so existing behavior is preserved).
        """
        tree = self.get_tree()
        if tree is None:
            # Invalid XML in the widget; nothing sensible to write.
            return
        tree.write(path)
def indent_tree(elem, level=0):
    """Insert whitespace into *elem* (in place) so its XML serialization
    puts each node on its own line, indented one space per nesting level.
    """
    pad = "\n" + level * " "
    children = list(elem)
    if children:
        # Open the element on its own line; children sit one level deeper.
        if not elem.text or not elem.text.strip():
            elem.text = pad + " "
        if not elem.tail or not elem.tail.strip():
            elem.tail = pad
        for child in children:
            indent_tree(child, level + 1)
        # Pull the closing tag back out to this element's indent level.
        last = children[-1]
        if not last.tail or not last.tail.strip():
            last.tail = pad
    elif level and (not elem.tail or not elem.tail.strip()):
        elem.tail = pad
| cnvogelg/fs-uae-gles | launcher/fs_uae_launcher/editor/XMLControl.py | Python | gpl-2.0 | 2,828 |
import _plotly_utils.basevalidators
class FormatValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the ``format`` data-array property of ``table.cells``."""

    def __init__(self, plotly_name="format", parent_name="table.cells", **kwargs):
        # Pull overridable metadata out of kwargs before delegating.
        edit_type = kwargs.pop("edit_type", "calc")
        role = kwargs.pop("role", "data")
        super(FormatValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
| plotly/python-api | packages/python/plotly/plotly/validators/table/cells/_format.py | Python | mit | 445 |
# coding: utf-8
from datetime import timedelta as td
import json
from django.core import mail
from django.utils.timezone import now
from hc.api.models import Channel, Check, Notification, Ping
from hc.test import BaseTestCase
class NotifyEmailTestCase(BaseTestCase):
    """Tests for notifications delivered over the "email" channel kind."""

    def setUp(self):
        super().setUp()

        # A check that is currently down, with descriptive metadata that
        # should surface in the notification email.
        self.check = Check(project=self.project)
        self.check.name = "Daily Backup"
        self.check.desc = "Line 1\nLine2"
        self.check.tags = "foo bar"
        self.check.status = "down"
        self.check.last_ping = now() - td(minutes=61)
        self.check.n_pings = 112233
        self.check.save()

        self.ping = Ping(owner=self.check)
        self.ping.remote_addr = "1.2.3.4"
        self.ping.body = "Body Line 1\nBody Line 2"
        self.ping.save()

        # A verified email channel subscribed to the check.
        self.channel = Channel(project=self.project)
        self.channel.kind = "email"
        self.channel.value = "alice@example.org"
        self.channel.email_verified = True
        self.channel.save()
        self.channel.checks.add(self.check)

    def test_email(self):
        """Happy path: one email with headers, check metadata and ping body."""
        self.channel.notify(self.check)

        n = Notification.objects.get()
        self.assertEqual(n.error, "")

        # And email should have been sent
        self.assertEqual(len(mail.outbox), 1)
        email = mail.outbox[0]
        self.assertEqual(email.to[0], "alice@example.org")
        self.assertEqual(email.extra_headers["X-Status-Url"], n.status_url())
        self.assertTrue("List-Unsubscribe" in email.extra_headers)
        self.assertTrue("List-Unsubscribe-Post" in email.extra_headers)

        html = email.alternatives[0][0]
        self.assertIn("Daily Backup", html)
        self.assertIn("Line 1<br>Line2", html)
        self.assertIn("Alices Project", html)
        self.assertIn("foo</code>", html)
        self.assertIn("bar</code>", html)
        self.assertIn("1 day", html)
        self.assertIn("from 1.2.3.4", html)
        self.assertIn("112233", html)
        self.assertIn("Body Line 1<br>Body Line 2", html)

        # Check's code must not be in the html
        self.assertNotIn(str(self.check.code), html)

        # Check's code must not be in the plain text body
        self.assertNotIn(str(self.check.code), email.body)

    def test_it_shows_cron_schedule(self):
        """Cron-style checks should display their schedule expression."""
        self.check.kind = "cron"
        self.check.schedule = "0 18-23,0-8 * * *"
        self.check.save()

        self.channel.notify(self.check)

        email = mail.outbox[0]
        html = email.alternatives[0][0]
        self.assertIn("<code>0 18-23,0-8 * * *</code>", html)

    def test_it_truncates_long_body(self):
        """Very long ping bodies are cut off with a "[truncated]" marker."""
        self.ping.body = "X" * 10000 + ", and the rest gets cut off"
        self.ping.save()

        self.channel.notify(self.check)

        email = mail.outbox[0]
        html = email.alternatives[0][0]
        self.assertIn("[truncated]", html)
        self.assertNotIn("the rest gets cut off", html)

    def test_it_handles_missing_ping_object(self):
        """Notification still renders when the check has no pings."""
        self.ping.delete()

        self.channel.notify(self.check)

        email = mail.outbox[0]
        html = email.alternatives[0][0]
        self.assertIn("Daily Backup", html)

    def test_it_handles_missing_profile(self):
        """An address with no matching user profile still receives email."""
        self.channel.value = "alice+notifications@example.org"
        self.channel.save()

        self.channel.notify(self.check)

        email = mail.outbox[0]
        self.assertEqual(email.to[0], "alice+notifications@example.org")

        html = email.alternatives[0][0]
        self.assertIn("Daily Backup", html)
        self.assertNotIn("Projects Overview", html)

    def test_email_transport_handles_json_value(self):
        """The channel value may be a JSON payload instead of a bare address."""
        payload = {"value": "alice@example.org", "up": True, "down": True}
        self.channel.value = json.dumps(payload)
        self.channel.save()

        self.channel.notify(self.check)

        # And email should have been sent
        self.assertEqual(len(mail.outbox), 1)

        email = mail.outbox[0]
        self.assertEqual(email.to[0], "alice@example.org")

    def test_it_reports_unverified_email(self):
        self.channel.email_verified = False
        self.channel.save()

        self.channel.notify(self.check)

        # If an email is not verified, it should say so in the notification:
        n = Notification.objects.get()
        self.assertEqual(n.error, "Email not verified")

    def test_email_checks_up_down_flags(self):
        """A channel with down=False must not notify about a down event."""
        payload = {"value": "alice@example.org", "up": True, "down": False}
        self.channel.value = json.dumps(payload)
        self.channel.save()

        self.channel.notify(self.check)

        # This channel should not notify on "down" events:
        self.assertEqual(Notification.objects.count(), 0)
        self.assertEqual(len(mail.outbox), 0)

    def test_email_handles_amperstand(self):
        """Subject lines must keep a literal '&' (no HTML escaping)."""
        self.check.name = "Foo & Bar"
        self.check.save()

        self.channel.notify(self.check)

        email = mail.outbox[0]
        self.assertEqual(email.subject, "DOWN | Foo & Bar")
| iphoting/healthchecks | hc/api/tests/test_notify_email.py | Python | bsd-3-clause | 5,005 |
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import sys
import os
from functools import partial
from powerline.segments import shell, tmux, common
from powerline.lib.vcs import get_fallback_create_watcher
import tests.vim as vim_module
from tests.lib import Args, urllib_read, replace_attr, new_module, replace_module_module, replace_env, Pl
from tests import TestCase, SkipTest
vim = None
def get_dummy_guess(**kwargs):
    """Build a stand-in for the VCS ``guess`` function used in tests.

    The returned callable yields an Args object whose ``branch`` is the
    basename of the given path; unless the caller already supplied a
    ``directory`` kwarg, the path itself is used as the directory.
    """
    supplies_directory = 'directory' in kwargs

    def guess(path, create_watcher):
        attrs = dict(kwargs)
        if not supplies_directory:
            attrs['directory'] = path
        return Args(branch=lambda: os.path.basename(path), **attrs)

    return guess
class TestShell(TestCase):
    """Unit tests for the shell-specific powerline segments."""

    def test_last_status(self):
        """Exit code renders as exit_fail when non-zero, nothing otherwise."""
        pl = Pl()
        segment_info = {'args': Args(last_exit_code=10)}
        self.assertEqual(shell.last_status(pl=pl, segment_info=segment_info), [
            {'contents': '10', 'highlight_group': 'exit_fail'}
        ])
        segment_info['args'].last_exit_code = 0
        self.assertEqual(shell.last_status(pl=pl, segment_info=segment_info), None)
        segment_info['args'].last_exit_code = None
        self.assertEqual(shell.last_status(pl=pl, segment_info=segment_info), None)

    def test_last_pipe_status(self):
        """Pipe statuses only render when at least one command failed."""
        pl = Pl()
        segment_info = {'args': Args(last_pipe_status=[])}
        self.assertEqual(shell.last_pipe_status(pl=pl, segment_info=segment_info), None)
        segment_info['args'].last_pipe_status = [0, 0, 0]
        self.assertEqual(shell.last_pipe_status(pl=pl, segment_info=segment_info), None)
        segment_info['args'].last_pipe_status = [0, 2, 0]
        self.assertEqual(shell.last_pipe_status(pl=pl, segment_info=segment_info), [
            {'contents': '0', 'highlight_group': 'exit_success', 'draw_inner_divider': True},
            {'contents': '2', 'highlight_group': 'exit_fail', 'draw_inner_divider': True},
            {'contents': '0', 'highlight_group': 'exit_success', 'draw_inner_divider': True}
        ])

    def test_jobnum(self):
        """Zero jobs are hidden unless show_zero=True."""
        pl = Pl()
        segment_info = {'args': Args(jobnum=0)}
        self.assertEqual(shell.jobnum(pl=pl, segment_info=segment_info), None)
        self.assertEqual(shell.jobnum(pl=pl, segment_info=segment_info, show_zero=False), None)
        self.assertEqual(shell.jobnum(pl=pl, segment_info=segment_info, show_zero=True), '0')
        segment_info = {'args': Args(jobnum=1)}
        self.assertEqual(shell.jobnum(pl=pl, segment_info=segment_info), '1')
        self.assertEqual(shell.jobnum(pl=pl, segment_info=segment_info, show_zero=False), '1')
        self.assertEqual(shell.jobnum(pl=pl, segment_info=segment_info, show_zero=True), '1')

    def test_continuation(self):
        """Continuation segment mirrors the shell's parser state stack."""
        pl = Pl()
        # No parser state: an empty, auto-width current segment.
        self.assertEqual(shell.continuation(pl=pl, segment_info={}), [{
            'contents': '',
            'width': 'auto',
            'highlight_group': ['continuation:current', 'continuation'],
        }])
        segment_info = {'parser_state': 'if cmdsubst'}
        self.assertEqual(shell.continuation(pl=pl, segment_info=segment_info), [
            {
                'contents': 'if',
                'draw_inner_divider': True,
                'highlight_group': ['continuation:current', 'continuation'],
                'width': 'auto',
                'align': 'l',
            },
        ])
        self.assertEqual(shell.continuation(pl=pl, segment_info=segment_info, right_align=True), [
            {
                'contents': 'if',
                'draw_inner_divider': True,
                'highlight_group': ['continuation:current', 'continuation'],
                'width': 'auto',
                'align': 'r',
            },
        ])
        # omit_cmdsubst=False keeps the trailing cmdsubst state visible.
        self.assertEqual(shell.continuation(pl=pl, segment_info=segment_info, omit_cmdsubst=False), [
            {
                'contents': 'if',
                'draw_inner_divider': True,
                'highlight_group': ['continuation'],
            },
            {
                'contents': 'cmdsubst',
                'draw_inner_divider': True,
                'highlight_group': ['continuation:current', 'continuation'],
                'width': 'auto',
                'align': 'l',
            },
        ])
        self.assertEqual(shell.continuation(pl=pl, segment_info=segment_info, omit_cmdsubst=False, right_align=True), [
            {
                'contents': 'if',
                'draw_inner_divider': True,
                'highlight_group': ['continuation'],
                'width': 'auto',
                'align': 'r',
            },
            {
                'contents': 'cmdsubst',
                'draw_inner_divider': True,
                'highlight_group': ['continuation:current', 'continuation'],
            },
        ])
        self.assertEqual(shell.continuation(pl=pl, segment_info=segment_info, omit_cmdsubst=True, right_align=True), [
            {
                'contents': 'if',
                'draw_inner_divider': True,
                'highlight_group': ['continuation:current', 'continuation'],
                'width': 'auto',
                'align': 'r',
            },
        ])
        # renames maps states to replacement labels; None hides the state.
        self.assertEqual(shell.continuation(pl=pl, segment_info=segment_info, omit_cmdsubst=True, right_align=True, renames={'if': 'IF'}), [
            {
                'contents': 'IF',
                'draw_inner_divider': True,
                'highlight_group': ['continuation:current', 'continuation'],
                'width': 'auto',
                'align': 'r',
            },
        ])
        self.assertEqual(shell.continuation(pl=pl, segment_info=segment_info, omit_cmdsubst=True, right_align=True, renames={'if': None}), [
            {
                'contents': '',
                'highlight_group': ['continuation:current', 'continuation'],
                'width': 'auto',
                'align': 'r',
            },
        ])
        segment_info = {'parser_state': 'then then then cmdsubst'}
        self.assertEqual(shell.continuation(pl=pl, segment_info=segment_info), [
            {
                'contents': 'then',
                'draw_inner_divider': True,
                'highlight_group': ['continuation'],
            },
            {
                'contents': 'then',
                'draw_inner_divider': True,
                'highlight_group': ['continuation'],
            },
            {
                'contents': 'then',
                'draw_inner_divider': True,
                'highlight_group': ['continuation:current', 'continuation'],
                'width': 'auto',
                'align': 'l',
            },
        ])

    def test_cwd(self):
        """cwd segment: home shortening, depth limits, separators, errors."""
        new_os = new_module('os', path=os.path, sep='/')
        pl = Pl()
        cwd = [None]

        def getcwd():
            # Stand-in for os.getcwd that can also raise a stored exception.
            wd = cwd[0]
            if isinstance(wd, Exception):
                raise wd
            else:
                return wd

        segment_info = {'getcwd': getcwd, 'home': None}
        with replace_attr(shell, 'os', new_os):
            cwd[0] = '/abc/def/ghi/foo/bar'
            self.assertEqual(common.cwd(pl=pl, segment_info=segment_info), [
                {'contents': '/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'abc', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'def', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'ghi', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'foo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_group': ['cwd:current_folder', 'cwd']},
            ])
            segment_info['home'] = '/abc/def/ghi'
            self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info), [
                {'contents': '~', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'foo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_group': ['cwd:current_folder', 'cwd']},
            ])
            # A shell-provided shortened_path takes precedence by default.
            segment_info.update(shortened_path='~foo/ghi')
            self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info), [
                {'contents': '~foo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'ghi', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_group': ['cwd:current_folder', 'cwd']},
            ])
            self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, use_shortened_path=False), [
                {'contents': '~', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'foo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_group': ['cwd:current_folder', 'cwd']},
            ])
            segment_info.pop('shortened_path')
            self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=3), [
                {'contents': '~', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'foo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_group': ['cwd:current_folder', 'cwd']}
            ])
            self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=3, shorten_home=False), [
                {'contents': '...', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'ghi', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'foo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_group': ['cwd:current_folder', 'cwd']}
            ])
            self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=1), [
                {'contents': '...', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_group': ['cwd:current_folder', 'cwd']}
            ])
            self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=1, ellipsis='---'), [
                {'contents': '---', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_group': ['cwd:current_folder', 'cwd']}
            ])
            self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=1, ellipsis=None), [
                {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_group': ['cwd:current_folder', 'cwd']}
            ])
            # use_path_separator folds the divider into the contents.
            self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=1, use_path_separator=True), [
                {'contents': '.../', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False},
                {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False, 'highlight_group': ['cwd:current_folder', 'cwd']}
            ])
            self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=1, use_path_separator=True, ellipsis='---'), [
                {'contents': '---/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False},
                {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False, 'highlight_group': ['cwd:current_folder', 'cwd']}
            ])
            self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=1, use_path_separator=True, ellipsis=None), [
                {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False, 'highlight_group': ['cwd:current_folder', 'cwd']}
            ])
            self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=2, dir_shorten_len=2), [
                {'contents': '~', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'fo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_group': ['cwd:current_folder', 'cwd']}
            ])
            self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=2, dir_shorten_len=2, use_path_separator=True), [
                {'contents': '~/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False},
                {'contents': 'fo/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False},
                {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False, 'highlight_group': ['cwd:current_folder', 'cwd']}
            ])
            cwd[0] = '/etc'
            self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, use_path_separator=False), [
                {'contents': '/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'etc', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_group': ['cwd:current_folder', 'cwd']},
            ])
            self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, use_path_separator=True), [
                {'contents': '/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False},
                {'contents': 'etc', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False, 'highlight_group': ['cwd:current_folder', 'cwd']},
            ])
            cwd[0] = '/'
            self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, use_path_separator=False), [
                {'contents': '/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_group': ['cwd:current_folder', 'cwd']},
            ])
            self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, use_path_separator=True), [
                {'contents': '/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False, 'highlight_group': ['cwd:current_folder', 'cwd']},
            ])
            # ENOENT from getcwd is rendered as "[not found]"; any other
            # exception propagates.
            ose = OSError()
            ose.errno = 2
            cwd[0] = ose
            self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=2, dir_shorten_len=2), [
                {'contents': '[not found]', 'divider_highlight_group': 'cwd:divider', 'highlight_group': ['cwd:current_folder', 'cwd'], 'draw_inner_divider': True}
            ])
            cwd[0] = OSError()
            self.assertRaises(OSError, shell.cwd, pl=pl, segment_info=segment_info, dir_limit_depth=2, dir_shorten_len=2)
            cwd[0] = ValueError()
            self.assertRaises(ValueError, shell.cwd, pl=pl, segment_info=segment_info, dir_limit_depth=2, dir_shorten_len=2)

    def test_date(self):
        """date segment passes the format string through strftime."""
        pl = Pl()
        with replace_attr(common, 'datetime', Args(now=lambda: Args(strftime=lambda fmt: fmt))):
            self.assertEqual(common.date(pl=pl), [{'contents': '%Y-%m-%d', 'highlight_group': ['date'], 'divider_highlight_group': None}])
            self.assertEqual(common.date(pl=pl, format='%H:%M', istime=True), [{'contents': '%H:%M', 'highlight_group': ['time', 'date'], 'divider_highlight_group': 'time:divider'}])
class TestTmux(TestCase):
    """Unit tests for the tmux-specific powerline segments."""

    def test_attached_clients(self):
        """Counts list-clients lines; hidden when below the minimum."""
        def get_tmux_output(cmd, *args):
            # Stub for tmux queries; other commands fall through to None.
            if cmd == 'list-panes':
                return 'session_name\n'
            elif cmd == 'list-clients':
                return '/dev/pts/2: 0 [191x51 xterm-256color] (utf8)\n/dev/pts/3: 0 [191x51 xterm-256color] (utf8)'

        pl = Pl()
        with replace_attr(tmux, 'get_tmux_output', get_tmux_output):
            self.assertEqual(tmux.attached_clients(pl=pl), '2')
            self.assertEqual(tmux.attached_clients(pl=pl, minimum=3), None)
class TestCommon(TestCase):
def test_hostname(self):
pl = Pl()
with replace_env('SSH_CLIENT', '192.168.0.12 40921 22') as segment_info:
with replace_module_module(common, 'socket', gethostname=lambda: 'abc'):
self.assertEqual(common.hostname(pl=pl, segment_info=segment_info), 'abc')
self.assertEqual(common.hostname(pl=pl, segment_info=segment_info, only_if_ssh=True), 'abc')
with replace_module_module(common, 'socket', gethostname=lambda: 'abc.mydomain'):
self.assertEqual(common.hostname(pl=pl, segment_info=segment_info), 'abc.mydomain')
self.assertEqual(common.hostname(pl=pl, segment_info=segment_info, exclude_domain=True), 'abc')
self.assertEqual(common.hostname(pl=pl, segment_info=segment_info, only_if_ssh=True), 'abc.mydomain')
self.assertEqual(common.hostname(pl=pl, segment_info=segment_info, only_if_ssh=True, exclude_domain=True), 'abc')
segment_info['environ'].pop('SSH_CLIENT')
with replace_module_module(common, 'socket', gethostname=lambda: 'abc'):
self.assertEqual(common.hostname(pl=pl, segment_info=segment_info), 'abc')
self.assertEqual(common.hostname(pl=pl, segment_info=segment_info, only_if_ssh=True), None)
with replace_module_module(common, 'socket', gethostname=lambda: 'abc.mydomain'):
self.assertEqual(common.hostname(pl=pl, segment_info=segment_info), 'abc.mydomain')
self.assertEqual(common.hostname(pl=pl, segment_info=segment_info, exclude_domain=True), 'abc')
self.assertEqual(common.hostname(pl=pl, segment_info=segment_info, only_if_ssh=True, exclude_domain=True), None)
def test_user(self):
new_os = new_module('os', getpid=lambda: 1)
class Process(object):
def __init__(self, pid):
pass
def username(self):
return 'def'
if hasattr(common, 'psutil') and not callable(common.psutil.Process.username):
username = property(username)
new_psutil = new_module('psutil', Process=Process)
pl = Pl()
with replace_env('USER', 'def') as segment_info:
common.username = False
with replace_attr(common, 'os', new_os):
with replace_attr(common, 'psutil', new_psutil):
with replace_attr(common, '_geteuid', lambda: 5):
self.assertEqual(common.user(pl=pl, segment_info=segment_info), [
{'contents': 'def', 'highlight_group': 'user'}
])
self.assertEqual(common.user(pl=pl, segment_info=segment_info, hide_user='abc'), [
{'contents': 'def', 'highlight_group': 'user'}
])
self.assertEqual(common.user(pl=pl, segment_info=segment_info, hide_user='def'), None)
with replace_attr(common, '_geteuid', lambda: 0):
self.assertEqual(common.user(pl=pl, segment_info=segment_info), [
{'contents': 'def', 'highlight_group': ['superuser', 'user']}
])
def test_branch(self):
pl = Pl()
create_watcher = get_fallback_create_watcher()
segment_info = {'getcwd': os.getcwd}
branch = partial(common.branch, pl=pl, create_watcher=create_watcher)
with replace_attr(common, 'guess', get_dummy_guess(status=lambda: None, directory='/tmp/tests')):
with replace_attr(common, 'tree_status', lambda repo, pl: None):
self.assertEqual(branch(segment_info=segment_info, status_colors=False), [
{'highlight_group': ['branch'], 'contents': 'tests'}
])
self.assertEqual(branch(segment_info=segment_info, status_colors=True), [
{'contents': 'tests', 'highlight_group': ['branch_clean', 'branch']}
])
with replace_attr(common, 'guess', get_dummy_guess(status=lambda: 'D ', directory='/tmp/tests')):
with replace_attr(common, 'tree_status', lambda repo, pl: 'D '):
self.assertEqual(branch(segment_info=segment_info, status_colors=False), [
{'highlight_group': ['branch'], 'contents': 'tests'}
])
self.assertEqual(branch(segment_info=segment_info, status_colors=True), [
{'contents': 'tests', 'highlight_group': ['branch_dirty', 'branch']}
])
self.assertEqual(branch(segment_info=segment_info, status_colors=False), [
{'highlight_group': ['branch'], 'contents': 'tests'}
])
with replace_attr(common, 'guess', lambda path, create_watcher: None):
self.assertEqual(branch(segment_info=segment_info, status_colors=False), None)
	def test_cwd(self):
		"""common.cwd renders the working directory as one segment per path
		component, supporting home collapsing, depth limiting, per-component
		shortening, custom ellipsis and trailing path separators."""
		new_os = new_module('os', path=os.path, sep='/')
		pl = Pl()
		cwd = [None]
		def getcwd():
			# Fake getcwd: returns cwd[0], or raises it when cwd[0] is an
			# exception instance (used to exercise error handling below).
			wd = cwd[0]
			if isinstance(wd, Exception):
				raise wd
			else:
				return wd
		segment_info = {'getcwd': getcwd, 'home': None}
		with replace_attr(common, 'os', new_os):
			cwd[0] = '/abc/def/ghi/foo/bar'
			self.assertEqual(common.cwd(pl=pl, segment_info=segment_info), [
				{'contents': '/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'abc', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'def', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'ghi', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'foo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_group': ['cwd:current_folder', 'cwd']},
			])
			# With home set, the matching prefix collapses to '~'.
			segment_info['home'] = '/abc/def/ghi'
			self.assertEqual(common.cwd(pl=pl, segment_info=segment_info), [
				{'contents': '~', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'foo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_group': ['cwd:current_folder', 'cwd']},
			])
			self.assertEqual(common.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=3), [
				{'contents': '~', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'foo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_group': ['cwd:current_folder', 'cwd']}
			])
			# shorten_home=False keeps real components and elides with '...'.
			self.assertEqual(common.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=3, shorten_home=False), [
				{'contents': '...', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'ghi', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'foo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_group': ['cwd:current_folder', 'cwd']}
			])
			# dir_limit_depth truncates; ellipsis customizes/removes the marker.
			self.assertEqual(common.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=1), [
				{'contents': '...', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_group': ['cwd:current_folder', 'cwd']}
			])
			self.assertEqual(common.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=1, ellipsis='---'), [
				{'contents': '---', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_group': ['cwd:current_folder', 'cwd']}
			])
			self.assertEqual(common.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=1, ellipsis=None), [
				{'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_group': ['cwd:current_folder', 'cwd']}
			])
			# use_path_separator appends '/' and disables inner dividers.
			self.assertEqual(common.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=1, use_path_separator=True), [
				{'contents': '.../', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False},
				{'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False, 'highlight_group': ['cwd:current_folder', 'cwd']}
			])
			self.assertEqual(common.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=1, use_path_separator=True, ellipsis='---'), [
				{'contents': '---/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False},
				{'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False, 'highlight_group': ['cwd:current_folder', 'cwd']}
			])
			self.assertEqual(common.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=1, use_path_separator=True, ellipsis=None), [
				{'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False, 'highlight_group': ['cwd:current_folder', 'cwd']}
			])
			# dir_shorten_len truncates intermediate components ('foo' -> 'fo').
			self.assertEqual(common.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=2, dir_shorten_len=2), [
				{'contents': '~', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'fo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_group': ['cwd:current_folder', 'cwd']}
			])
			self.assertEqual(common.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=2, dir_shorten_len=2, use_path_separator=True), [
				{'contents': '~/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False},
				{'contents': 'fo/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False},
				{'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False, 'highlight_group': ['cwd:current_folder', 'cwd']}
			])
			# Short absolute paths and the filesystem root.
			cwd[0] = '/etc'
			self.assertEqual(common.cwd(pl=pl, segment_info=segment_info, use_path_separator=False), [
				{'contents': '/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'etc', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_group': ['cwd:current_folder', 'cwd']},
			])
			self.assertEqual(common.cwd(pl=pl, segment_info=segment_info, use_path_separator=True), [
				{'contents': '/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False},
				{'contents': 'etc', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False, 'highlight_group': ['cwd:current_folder', 'cwd']},
			])
			cwd[0] = '/'
			self.assertEqual(common.cwd(pl=pl, segment_info=segment_info, use_path_separator=False), [
				{'contents': '/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_group': ['cwd:current_folder', 'cwd']},
			])
			self.assertEqual(common.cwd(pl=pl, segment_info=segment_info, use_path_separator=True), [
				{'contents': '/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False, 'highlight_group': ['cwd:current_folder', 'cwd']},
			])
			# ENOENT from getcwd (e.g. deleted cwd) renders a placeholder.
			ose = OSError()
			ose.errno = 2
			cwd[0] = ose
			self.assertEqual(common.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=2, dir_shorten_len=2), [
				{'contents': '[not found]', 'divider_highlight_group': 'cwd:divider', 'highlight_group': ['cwd:current_folder', 'cwd'], 'draw_inner_divider': True}
			])
			# Any other failure propagates to the caller.
			cwd[0] = OSError()
			self.assertRaises(OSError, common.cwd, pl=pl, segment_info=segment_info, dir_limit_depth=2, dir_shorten_len=2)
			cwd[0] = ValueError()
			self.assertRaises(ValueError, common.cwd, pl=pl, segment_info=segment_info, dir_limit_depth=2, dir_shorten_len=2)
def test_date(self):
pl = Pl()
with replace_attr(common, 'datetime', Args(now=lambda: Args(strftime=lambda fmt: fmt))):
self.assertEqual(common.date(pl=pl), [{'contents': '%Y-%m-%d', 'highlight_group': ['date'], 'divider_highlight_group': None}])
self.assertEqual(common.date(pl=pl, format='%H:%M', istime=True), [{'contents': '%H:%M', 'highlight_group': ['time', 'date'], 'divider_highlight_group': 'time:divider'}])
def test_fuzzy_time(self):
time = Args(hour=0, minute=45)
pl = Pl()
with replace_attr(common, 'datetime', Args(now=lambda: time)):
self.assertEqual(common.fuzzy_time(pl=pl), 'quarter to one')
time.hour = 23
time.minute = 59
self.assertEqual(common.fuzzy_time(pl=pl), 'round about midnight')
time.minute = 33
self.assertEqual(common.fuzzy_time(pl=pl), 'twenty-five to twelve')
time.minute = 60
self.assertEqual(common.fuzzy_time(pl=pl), 'twelve o\'clock')
time.minute = 33
self.assertEqual(common.fuzzy_time(pl=pl, unicode_text=False), 'twenty-five to twelve')
time.minute = 60
self.assertEqual(common.fuzzy_time(pl=pl, unicode_text=False), 'twelve o\'clock')
time.minute = 33
self.assertEqual(common.fuzzy_time(pl=pl, unicode_text=True), 'twenty‐five to twelve')
time.minute = 60
self.assertEqual(common.fuzzy_time(pl=pl, unicode_text=True), 'twelve o’clock')
def test_external_ip(self):
pl = Pl()
with replace_attr(common, 'urllib_read', urllib_read):
self.assertEqual(common.external_ip(pl=pl), [{'contents': '127.0.0.1', 'divider_highlight_group': 'background:divider'}])
def test_uptime(self):
pl = Pl()
with replace_attr(common, '_get_uptime', lambda: 259200):
self.assertEqual(common.uptime(pl=pl), [{'contents': '3d', 'divider_highlight_group': 'background:divider'}])
with replace_attr(common, '_get_uptime', lambda: 93784):
self.assertEqual(common.uptime(pl=pl), [{'contents': '1d 2h 3m', 'divider_highlight_group': 'background:divider'}])
self.assertEqual(common.uptime(pl=pl, shorten_len=4), [{'contents': '1d 2h 3m 4s', 'divider_highlight_group': 'background:divider'}])
with replace_attr(common, '_get_uptime', lambda: 65536):
self.assertEqual(common.uptime(pl=pl), [{'contents': '18h 12m 16s', 'divider_highlight_group': 'background:divider'}])
self.assertEqual(common.uptime(pl=pl, shorten_len=2), [{'contents': '18h 12m', 'divider_highlight_group': 'background:divider'}])
self.assertEqual(common.uptime(pl=pl, shorten_len=1), [{'contents': '18h', 'divider_highlight_group': 'background:divider'}])
def _get_uptime():
raise NotImplementedError
with replace_attr(common, '_get_uptime', _get_uptime):
self.assertEqual(common.uptime(pl=pl), None)
	def test_weather(self):
		"""common.weather emits a condition segment and a temperature segment
		(with a coldest..hottest gradient) from the canned HTTP responses
		provided by the urllib_read mock."""
		pl = Pl()
		with replace_attr(common, 'urllib_read', urllib_read):
			# Defaults: Celsius, default gradient bounds -> level 30.0 at -9°C.
			self.assertEqual(common.weather(pl=pl), [
				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_condition_partly_cloudy_day', 'weather_condition_cloudy', 'weather_conditions', 'weather'], 'contents': 'CLOUDS '},
				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '-9°C', 'gradient_level': 30.0}
			])
			# Temperatures outside [temp_coldest, temp_hottest] clamp to 0/100.
			self.assertEqual(common.weather(pl=pl, temp_coldest=0, temp_hottest=100), [
				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_condition_partly_cloudy_day', 'weather_condition_cloudy', 'weather_conditions', 'weather'], 'contents': 'CLOUDS '},
				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '-9°C', 'gradient_level': 0}
			])
			self.assertEqual(common.weather(pl=pl, temp_coldest=-100, temp_hottest=-50), [
				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_condition_partly_cloudy_day', 'weather_condition_cloudy', 'weather_conditions', 'weather'], 'contents': 'CLOUDS '},
				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '-9°C', 'gradient_level': 100}
			])
			# Icons may be overridden either by the generic condition name
			# ('cloudy') or the more specific one ('partly_cloudy_day').
			self.assertEqual(common.weather(pl=pl, icons={'cloudy': 'o'}), [
				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_condition_partly_cloudy_day', 'weather_condition_cloudy', 'weather_conditions', 'weather'], 'contents': 'o '},
				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '-9°C', 'gradient_level': 30.0}
			])
			self.assertEqual(common.weather(pl=pl, icons={'partly_cloudy_day': 'x'}), [
				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_condition_partly_cloudy_day', 'weather_condition_cloudy', 'weather_conditions', 'weather'], 'contents': 'x '},
				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '-9°C', 'gradient_level': 30.0}
			])
			# Unit conversion (F/K) and a custom temperature format string.
			self.assertEqual(common.weather(pl=pl, unit='F'), [
				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_condition_partly_cloudy_day', 'weather_condition_cloudy', 'weather_conditions', 'weather'], 'contents': 'CLOUDS '},
				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '16°F', 'gradient_level': 30.0}
			])
			self.assertEqual(common.weather(pl=pl, unit='K'), [
				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_condition_partly_cloudy_day', 'weather_condition_cloudy', 'weather_conditions', 'weather'], 'contents': 'CLOUDS '},
				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '264K', 'gradient_level': 30.0}
			])
			self.assertEqual(common.weather(pl=pl, temp_format='{temp:.1e}C'), [
				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_condition_partly_cloudy_day', 'weather_condition_cloudy', 'weather_conditions', 'weather'], 'contents': 'CLOUDS '},
				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '-9.0e+00C', 'gradient_level': 30.0}
			])
def test_system_load(self):
pl = Pl()
with replace_module_module(common, 'os', getloadavg=lambda: (7.5, 3.5, 1.5)):
with replace_attr(common, '_cpu_count', lambda: 2):
self.assertEqual(common.system_load(pl=pl), [
{'contents': '7.5 ', 'highlight_group': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 100},
{'contents': '3.5 ', 'highlight_group': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 75.0},
{'contents': '1.5', 'highlight_group': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 0}
])
self.assertEqual(common.system_load(pl=pl, format='{avg:.0f}', threshold_good=0, threshold_bad=1), [
{'contents': '8 ', 'highlight_group': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 100},
{'contents': '4 ', 'highlight_group': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 100},
{'contents': '2', 'highlight_group': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 75.0}
])
def test_cpu_load_percent(self):
pl = Pl()
with replace_module_module(common, 'psutil', cpu_percent=lambda **kwargs: 52.3):
self.assertEqual(common.cpu_load_percent(pl=pl), [{
'contents': '52%',
'gradient_level': 52.3,
'highlight_group': ['cpu_load_percent_gradient', 'cpu_load_percent'],
}])
self.assertEqual(common.cpu_load_percent(pl=pl, format='{0:.1f}%'), [{
'contents': '52.3%',
'gradient_level': 52.3,
'highlight_group': ['cpu_load_percent_gradient', 'cpu_load_percent'],
}])
	def test_network_load(self):
		"""common.network_load measures send/receive rates via a background
		thread; the test swaps in a fake byte counter and polls the thread's
		shared state until enough samples exist to compute a rate."""
		from time import sleep
		def gb(interface):
			# Initial byte source: interface statistics unavailable.
			return None
		f = [gb]
		def _get_bytes(interface):
			# Dispatch through f[0] so the byte source can be swapped mid-test.
			return f[0](interface)
		pl = Pl()
		with replace_attr(common, '_get_bytes', _get_bytes):
			common.network_load.startup(pl=pl)
			try:
				# No data collected yet -> the segment yields nothing.
				self.assertEqual(common.network_load(pl=pl, interface='eth0'), None)
				sleep(common.network_load.interval)
				self.assertEqual(common.network_load(pl=pl, interface='eth0'), None)
				# Wait until the monitor thread has stored a first sample.
				while 'prev' not in common.network_load.interfaces.get('eth0', {}):
					sleep(0.1)
				self.assertEqual(common.network_load(pl=pl, interface='eth0'), None)
				l = [0, 0]
				def gb2(interface):
					# Counters grow by (1200, 2400) bytes each poll, chosen so
					# the measured rates render as 1 KiB/s down / 2 KiB/s up.
					l[0] += 1200
					l[1] += 2400
					return tuple(l)
				f[0] = gb2
				# Wait until a non-None sample has been recorded.
				while not common.network_load.interfaces.get('eth0', {}).get('prev', (None, None))[1]:
					sleep(0.1)
				self.assertEqual(common.network_load(pl=pl, interface='eth0'), [
					{'divider_highlight_group': 'background:divider', 'contents': 'DL 1 KiB/s', 'highlight_group': ['network_load_recv', 'network_load']},
					{'divider_highlight_group': 'background:divider', 'contents': 'UL 2 KiB/s', 'highlight_group': ['network_load_sent', 'network_load']},
				])
				# Custom recv/sent format strings.
				self.assertEqual(common.network_load(pl=pl, interface='eth0', recv_format='r {value}', sent_format='s {value}'), [
					{'divider_highlight_group': 'background:divider', 'contents': 'r 1 KiB/s', 'highlight_group': ['network_load_recv', 'network_load']},
					{'divider_highlight_group': 'background:divider', 'contents': 's 2 KiB/s', 'highlight_group': ['network_load_sent', 'network_load']},
				])
				# Custom suffix and SI prefixes (kB instead of KiB).
				self.assertEqual(common.network_load(pl=pl, recv_format='r {value}', sent_format='s {value}', suffix='bps', interface='eth0'), [
					{'divider_highlight_group': 'background:divider', 'contents': 'r 1 Kibps', 'highlight_group': ['network_load_recv', 'network_load']},
					{'divider_highlight_group': 'background:divider', 'contents': 's 2 Kibps', 'highlight_group': ['network_load_sent', 'network_load']},
				])
				self.assertEqual(common.network_load(pl=pl, recv_format='r {value}', sent_format='s {value}', si_prefix=True, interface='eth0'), [
					{'divider_highlight_group': 'background:divider', 'contents': 'r 1 kB/s', 'highlight_group': ['network_load_recv', 'network_load']},
					{'divider_highlight_group': 'background:divider', 'contents': 's 2 kB/s', 'highlight_group': ['network_load_sent', 'network_load']},
				])
				# recv_max/sent_max enable gradient highlight groups.
				self.assertEqual(common.network_load(pl=pl, recv_format='r {value}', sent_format='s {value}', recv_max=0, interface='eth0'), [
					{'divider_highlight_group': 'background:divider', 'contents': 'r 1 KiB/s', 'highlight_group': ['network_load_recv_gradient', 'network_load_gradient', 'network_load_recv', 'network_load'], 'gradient_level': 100},
					{'divider_highlight_group': 'background:divider', 'contents': 's 2 KiB/s', 'highlight_group': ['network_load_sent', 'network_load']},
				])
				class ApproxEqual(object):
					# Matches any number within 1 of 50.0; the exact gradient
					# depends on thread timing jitter.
					def __eq__(self, i):
						return abs(i - 50.0) < 1
				self.assertEqual(common.network_load(pl=pl, recv_format='r {value}', sent_format='s {value}', sent_max=4800, interface='eth0'), [
					{'divider_highlight_group': 'background:divider', 'contents': 'r 1 KiB/s', 'highlight_group': ['network_load_recv', 'network_load']},
					{'divider_highlight_group': 'background:divider', 'contents': 's 2 KiB/s', 'highlight_group': ['network_load_sent_gradient', 'network_load_gradient', 'network_load_sent', 'network_load'], 'gradient_level': ApproxEqual()},
				])
			finally:
				# Always stop the monitor thread, even on assertion failure.
				common.network_load.shutdown()
def test_virtualenv(self):
pl = Pl()
with replace_env('VIRTUAL_ENV', '/abc/def/ghi') as segment_info:
self.assertEqual(common.virtualenv(pl=pl, segment_info=segment_info), 'ghi')
segment_info['environ'].pop('VIRTUAL_ENV')
self.assertEqual(common.virtualenv(pl=pl, segment_info=segment_info), None)
def test_environment(self):
pl = Pl()
variable = 'FOO'
value = 'bar'
with replace_env(variable, value) as segment_info:
self.assertEqual(common.environment(pl=pl, segment_info=segment_info, variable=variable), value)
segment_info['environ'].pop(variable)
self.assertEqual(common.environment(pl=pl, segment_info=segment_info, variable=variable), None)
	def test_email_imap_alert(self):
		# TODO: test not yet implemented for the email_imap_alert segment.
		pass
	def test_now_playing(self):
		# TODO: test not yet implemented for the now_playing segment.
		pass
	def test_battery(self):
		"""common.battery renders capacity as a percentage (gradient runs from
		0 at full to 100 at empty) or, with gamify=True, as full/empty heart
		characters."""
		pl = Pl()
		def _get_capacity(pl):
			# Mocked capacity source: battery at 86%.
			return 86
		with replace_attr(common, '_get_capacity', _get_capacity):
			self.assertEqual(common.battery(pl=pl), [{
				'contents': '86%',
				'highlight_group': ['battery_gradient', 'battery'],
				'gradient_level': 14,
			}])
			# Custom format receives the capacity as a fraction (0.86).
			self.assertEqual(common.battery(pl=pl, format='{capacity:.2f}'), [{
				'contents': '0.86',
				'highlight_group': ['battery_gradient', 'battery'],
				'gradient_level': 14,
			}])
			# steps has no effect unless gamify is on.
			self.assertEqual(common.battery(pl=pl, steps=7), [{
				'contents': '86%',
				'highlight_group': ['battery_gradient', 'battery'],
				'gradient_level': 14,
			}])
			# Gamified output: 86% over 5 hearts -> 4 full ('O') + 1 empty.
			self.assertEqual(common.battery(pl=pl, gamify=True), [
				{
					'contents': 'OOOO',
					'draw_inner_divider': False,
					'highlight_group': ['battery_full', 'battery_gradient', 'battery'],
					'gradient_level': 0
				},
				{
					'contents': 'O',
					'draw_inner_divider': False,
					'highlight_group': ['battery_empty', 'battery_gradient', 'battery'],
					'gradient_level': 100
				}
			])
			# Custom heart characters; steps may also be given as a string.
			self.assertEqual(common.battery(pl=pl, gamify=True, full_heart='+', empty_heart='-', steps='10'), [
				{
					'contents': '++++++++',
					'draw_inner_divider': False,
					'highlight_group': ['battery_full', 'battery_gradient', 'battery'],
					'gradient_level': 0
				},
				{
					'contents': '--',
					'draw_inner_divider': False,
					'highlight_group': ['battery_empty', 'battery_gradient', 'battery'],
					'gradient_level': 100
				}
			])
	def test_internal_ip(self):
		"""common.internal_ip picks an address from netifaces data, honouring
		the requested interface name ('detect' = automatic) and IP version."""
		try:
			import netifaces
		except ImportError:
			raise SkipTest()
		pl = Pl()
		# Fake per-interface data: 'lo' (loopback), 'enp2s0' (IPv4 plus a
		# scope-qualified IPv6) and 'teredo' (IPv6 only).
		addr = {
			'enp2s0': {
				netifaces.AF_INET: [{'addr': '192.168.100.200'}],
				netifaces.AF_INET6: [{'addr': 'feff::5446:5eff:fe5a:7777%enp2s0'}]
			},
			'lo': {
				netifaces.AF_INET: [{'addr': '127.0.0.1'}],
				netifaces.AF_INET6: [{'addr': '::1'}]
			},
			'teredo': {
				netifaces.AF_INET6: [{'addr': 'feff::5446:5eff:fe5a:7777'}]
			},
		}
		interfaces = ['lo', 'enp2s0', 'teredo']
		with replace_module_module(
			common, 'netifaces',
			interfaces=(lambda: interfaces),
			ifaddresses=(lambda interface: addr[interface]),
			AF_INET=netifaces.AF_INET,
			AF_INET6=netifaces.AF_INET6,
		):
			# Default and 'detect' prefer enp2s0 over loopback; 'teredo' has
			# no IPv4 address, so requesting it (implicitly ipv=4) yields None.
			self.assertEqual(common.internal_ip(pl=pl), '192.168.100.200')
			self.assertEqual(common.internal_ip(pl=pl, interface='detect'), '192.168.100.200')
			self.assertEqual(common.internal_ip(pl=pl, interface='lo'), '127.0.0.1')
			self.assertEqual(common.internal_ip(pl=pl, interface='teredo'), None)
			self.assertEqual(common.internal_ip(pl=pl, ipv=4), '192.168.100.200')
			self.assertEqual(common.internal_ip(pl=pl, interface='detect', ipv=4), '192.168.100.200')
			self.assertEqual(common.internal_ip(pl=pl, interface='lo', ipv=4), '127.0.0.1')
			self.assertEqual(common.internal_ip(pl=pl, interface='teredo', ipv=4), None)
			self.assertEqual(common.internal_ip(pl=pl, ipv=6), 'feff::5446:5eff:fe5a:7777%enp2s0')
			self.assertEqual(common.internal_ip(pl=pl, interface='detect', ipv=6), 'feff::5446:5eff:fe5a:7777%enp2s0')
			self.assertEqual(common.internal_ip(pl=pl, interface='lo', ipv=6), '::1')
			self.assertEqual(common.internal_ip(pl=pl, interface='teredo', ipv=6), 'feff::5446:5eff:fe5a:7777')
			# Drop 'enp2s0': detection falls back to teredo's IPv6 address.
			interfaces[1:2] = ()
			self.assertEqual(common.internal_ip(pl=pl, ipv=6), 'feff::5446:5eff:fe5a:7777')
			# Drop 'teredo' as well: only loopback remains.
			interfaces[1:2] = ()
			self.assertEqual(common.internal_ip(pl=pl, ipv=6), '::1')
			# No interfaces at all -> no address.
			interfaces[:] = ()
			self.assertEqual(common.internal_ip(pl=pl, ipv=6), None)
class TestVim(TestCase):
def test_mode(self):
pl = Pl()
segment_info = vim_module._get_segment_info()
self.assertEqual(vim.mode(pl=pl, segment_info=segment_info), 'NORMAL')
self.assertEqual(vim.mode(pl=pl, segment_info=segment_info, override={'i': 'INS'}), 'NORMAL')
self.assertEqual(vim.mode(pl=pl, segment_info=segment_info, override={'n': 'NORM'}), 'NORM')
with vim_module._with('mode', 'i') as segment_info:
self.assertEqual(vim.mode(pl=pl, segment_info=segment_info), 'INSERT')
with vim_module._with('mode', chr(ord('V') - 0x40)) as segment_info:
self.assertEqual(vim.mode(pl=pl, segment_info=segment_info), 'V-BLCK')
self.assertEqual(vim.mode(pl=pl, segment_info=segment_info, override={'^V': 'VBLK'}), 'VBLK')
	def test_visual_range(self):
		"""vim.visual_range renders the selection size: 'W x H' for blockwise
		modes, 'L:n' for linewise modes and 'C:n'/'L:n' for charwise modes."""
		pl = Pl()
		vr = partial(vim.visual_range, pl=pl)
		vim_module.current.window.cursor = [0, 0]
		try:
			# Not in a visual mode: empty string.
			with vim_module._with('mode', 'i') as segment_info:
				self.assertEqual(vr(segment_info=segment_info), '')
			# Blockwise visual ('^V') and blockwise select ('^S'): cols x lines.
			with vim_module._with('mode', '^V') as segment_info:
				self.assertEqual(vr(segment_info=segment_info), '1 x 1')
				with vim_module._with('vpos', line=5, col=5, off=0):
					self.assertEqual(vr(segment_info=segment_info), '5 x 5')
				with vim_module._with('vpos', line=5, col=4, off=0):
					self.assertEqual(vr(segment_info=segment_info), '5 x 4')
			with vim_module._with('mode', '^S') as segment_info:
				self.assertEqual(vr(segment_info=segment_info), '1 x 1')
				with vim_module._with('vpos', line=5, col=5, off=0):
					self.assertEqual(vr(segment_info=segment_info), '5 x 5')
				with vim_module._with('vpos', line=5, col=4, off=0):
					self.assertEqual(vr(segment_info=segment_info), '5 x 4')
			# Linewise visual ('V') and select ('S'): line count only.
			with vim_module._with('mode', 'V') as segment_info:
				self.assertEqual(vr(segment_info=segment_info), 'L:1')
				with vim_module._with('vpos', line=5, col=5, off=0):
					self.assertEqual(vr(segment_info=segment_info), 'L:5')
				with vim_module._with('vpos', line=5, col=4, off=0):
					self.assertEqual(vr(segment_info=segment_info), 'L:5')
			with vim_module._with('mode', 'S') as segment_info:
				self.assertEqual(vr(segment_info=segment_info), 'L:1')
				with vim_module._with('vpos', line=5, col=5, off=0):
					self.assertEqual(vr(segment_info=segment_info), 'L:5')
				with vim_module._with('vpos', line=5, col=4, off=0):
					self.assertEqual(vr(segment_info=segment_info), 'L:5')
			# Charwise visual ('v') and select ('s'): characters within one
			# line ('C:n'), lines once the selection spans several ('L:n').
			with vim_module._with('mode', 'v') as segment_info:
				self.assertEqual(vr(segment_info=segment_info), 'C:1')
				with vim_module._with('vpos', line=5, col=5, off=0):
					self.assertEqual(vr(segment_info=segment_info), 'L:5')
				with vim_module._with('vpos', line=5, col=4, off=0):
					self.assertEqual(vr(segment_info=segment_info), 'L:5')
			with vim_module._with('mode', 's') as segment_info:
				self.assertEqual(vr(segment_info=segment_info), 'C:1')
				with vim_module._with('vpos', line=5, col=5, off=0):
					self.assertEqual(vr(segment_info=segment_info), 'L:5')
				with vim_module._with('vpos', line=5, col=4, off=0):
					self.assertEqual(vr(segment_info=segment_info), 'L:5')
		finally:
			# Close the window opened by the mode switches above.
			vim_module._close(1)
def test_modified_indicator(self):
pl = Pl()
segment_info = vim_module._get_segment_info()
self.assertEqual(vim.modified_indicator(pl=pl, segment_info=segment_info), None)
segment_info['buffer'][0] = 'abc'
try:
self.assertEqual(vim.modified_indicator(pl=pl, segment_info=segment_info), '+')
self.assertEqual(vim.modified_indicator(pl=pl, segment_info=segment_info, text='-'), '-')
finally:
vim_module._bw(segment_info['bufnr'])
def test_paste_indicator(self):
pl = Pl()
segment_info = vim_module._get_segment_info()
self.assertEqual(vim.paste_indicator(pl=pl, segment_info=segment_info), None)
with vim_module._with('options', paste=1):
self.assertEqual(vim.paste_indicator(pl=pl, segment_info=segment_info), 'PASTE')
self.assertEqual(vim.paste_indicator(pl=pl, segment_info=segment_info, text='P'), 'P')
def test_readonly_indicator(self):
pl = Pl()
segment_info = vim_module._get_segment_info()
self.assertEqual(vim.readonly_indicator(pl=pl, segment_info=segment_info), None)
with vim_module._with('bufoptions', readonly=1):
self.assertEqual(vim.readonly_indicator(pl=pl, segment_info=segment_info), 'RO')
self.assertEqual(vim.readonly_indicator(pl=pl, segment_info=segment_info, text='L'), 'L')
def test_file_scheme(self):
pl = Pl()
segment_info = vim_module._get_segment_info()
self.assertEqual(vim.file_scheme(pl=pl, segment_info=segment_info), None)
with vim_module._with('buffer', '/tmp/’’/abc') as segment_info:
self.assertEqual(vim.file_scheme(pl=pl, segment_info=segment_info), None)
with vim_module._with('buffer', 'zipfile:/tmp/abc.zip::abc/abc.vim') as segment_info:
self.assertEqual(vim.file_scheme(pl=pl, segment_info=segment_info), 'zipfile')
	def test_file_directory(self):
		"""vim.file_directory shows the buffer's directory with a trailing
		slash, collapsing $HOME to '~' and optionally stripping the scheme."""
		pl = Pl()
		segment_info = vim_module._get_segment_info()
		# Unnamed buffer: no directory to show.
		self.assertEqual(vim.file_directory(pl=pl, segment_info=segment_info), None)
		with replace_env('HOME', '/home/foo', os.environ):
			# Non-ASCII names pass through; undecodable bytes become <xx>.
			with vim_module._with('buffer', '/tmp/’’/abc') as segment_info:
				self.assertEqual(vim.file_directory(pl=pl, segment_info=segment_info), '/tmp/’’/')
			with vim_module._with('buffer', b'/tmp/\xFF\xFF/abc') as segment_info:
				self.assertEqual(vim.file_directory(pl=pl, segment_info=segment_info), '/tmp/<ff><ff>/')
			with vim_module._with('buffer', '/tmp/abc') as segment_info:
				self.assertEqual(vim.file_directory(pl=pl, segment_info=segment_info), '/tmp/')
				# Once HOME matches a prefix it is collapsed to '~'.
				os.environ['HOME'] = '/tmp'
				self.assertEqual(vim.file_directory(pl=pl, segment_info=segment_info), '~/')
			with vim_module._with('buffer', 'zipfile:/tmp/abc.zip::abc/abc.vim') as segment_info:
				# remove_scheme strips the 'zipfile:' prefix and is the default.
				self.assertEqual(vim.file_directory(pl=pl, segment_info=segment_info, remove_scheme=False), 'zipfile:/tmp/abc.zip::abc/')
				self.assertEqual(vim.file_directory(pl=pl, segment_info=segment_info, remove_scheme=True), '/tmp/abc.zip::abc/')
				self.assertEqual(vim.file_directory(pl=pl, segment_info=segment_info), '/tmp/abc.zip::abc/')
				# Scheme'd paths are not HOME-collapsed even when HOME matches.
				os.environ['HOME'] = '/tmp'
				self.assertEqual(vim.file_directory(pl=pl, segment_info=segment_info, remove_scheme=False), 'zipfile:/tmp/abc.zip::abc/')
				self.assertEqual(vim.file_directory(pl=pl, segment_info=segment_info, remove_scheme=True), '/tmp/abc.zip::abc/')
				self.assertEqual(vim.file_directory(pl=pl, segment_info=segment_info), '/tmp/abc.zip::abc/')
	def test_file_name(self):
		"""vim.file_name shows the buffer's basename; display_no_file controls
		what is shown for a buffer without a name."""
		pl = Pl()
		segment_info = vim_module._get_segment_info()
		self.assertEqual(vim.file_name(pl=pl, segment_info=segment_info), None)
		# display_no_file substitutes a placeholder (customizable text).
		self.assertEqual(vim.file_name(pl=pl, segment_info=segment_info, display_no_file=True), [
			{'contents': '[No file]', 'highlight_group': ['file_name_no_file', 'file_name']}
		])
		self.assertEqual(vim.file_name(pl=pl, segment_info=segment_info, display_no_file=True, no_file_text='X'), [
			{'contents': 'X', 'highlight_group': ['file_name_no_file', 'file_name']}
		])
		with vim_module._with('buffer', '/tmp/abc') as segment_info:
			self.assertEqual(vim.file_name(pl=pl, segment_info=segment_info), 'abc')
		# Non-ASCII names pass through; undecodable bytes are escaped as <xx>.
		with vim_module._with('buffer', '/tmp/’’') as segment_info:
			self.assertEqual(vim.file_name(pl=pl, segment_info=segment_info), '’’')
		with vim_module._with('buffer', b'/tmp/\xFF\xFF') as segment_info:
			self.assertEqual(vim.file_name(pl=pl, segment_info=segment_info), '<ff><ff>')
def test_file_size(self):
pl = Pl()
segment_info = vim_module._get_segment_info()
self.assertEqual(vim.file_size(pl=pl, segment_info=segment_info), '0 B')
with vim_module._with('buffer', os.path.join(os.path.dirname(__file__), 'empty')) as segment_info:
self.assertEqual(vim.file_size(pl=pl, segment_info=segment_info), '0 B')
def test_file_opts(self):
pl = Pl()
segment_info = vim_module._get_segment_info()
self.assertEqual(vim.file_format(pl=pl, segment_info=segment_info), [
{'divider_highlight_group': 'background:divider', 'contents': 'unix'}
])
self.assertEqual(vim.file_encoding(pl=pl, segment_info=segment_info), [
{'divider_highlight_group': 'background:divider', 'contents': 'utf-8'}
])
self.assertEqual(vim.file_type(pl=pl, segment_info=segment_info), None)
with vim_module._with('bufoptions', filetype='python'):
self.assertEqual(vim.file_type(pl=pl, segment_info=segment_info), [
{'divider_highlight_group': 'background:divider', 'contents': 'python'}
])
def test_window_title(self):
pl = Pl()
segment_info = vim_module._get_segment_info()
self.assertEqual(vim.window_title(pl=pl, segment_info=segment_info), None)
with vim_module._with('wvars', quickfix_title='Abc'):
self.assertEqual(vim.window_title(pl=pl, segment_info=segment_info), 'Abc')
	def test_line_percent(self):
		"""vim.line_percent shows cursor line / buffer length as a percentage,
		optionally with a gradient."""
		pl = Pl()
		segment_info = vim_module._get_segment_info()
		# 100 inserted lines plus the original one -> 101 lines total.
		segment_info['buffer'][0:-1] = [str(i) for i in range(100)]
		try:
			self.assertEqual(vim.line_percent(pl=pl, segment_info=segment_info), '1')
			vim_module._set_cursor(50, 0)
			self.assertEqual(vim.line_percent(pl=pl, segment_info=segment_info), '50')
			# Gradient level is line/(total lines) scaled to 0..100.
			self.assertEqual(vim.line_percent(pl=pl, segment_info=segment_info, gradient=True), [
				{'contents': '50', 'highlight_group': ['line_percent_gradient', 'line_percent'], 'gradient_level': 50 * 100.0 / 101}
			])
		finally:
			# Wipe the scratch buffer regardless of assertion outcome.
			vim_module._bw(segment_info['bufnr'])
def test_line_count(self):
pl = Pl()
segment_info = vim_module._get_segment_info()
segment_info['buffer'][0:-1] = [str(i) for i in range(99)]
try:
self.assertEqual(vim.line_count(pl=pl, segment_info=segment_info), '100')
vim_module._set_cursor(50, 0)
self.assertEqual(vim.line_count(pl=pl, segment_info=segment_info), '100')
finally:
vim_module._bw(segment_info['bufnr'])
	def test_position(self):
		"""vim.position summarizes the window position: a percentage in the
		middle of the buffer, translatable 'Top'/'Bottom' labels at the edges
		and 'All' when the whole buffer fits in the window."""
		pl = Pl()
		segment_info = vim_module._get_segment_info()
		try:
			segment_info['buffer'][0:-1] = [str(i) for i in range(99)]
			vim_module._set_cursor(49, 0)
			self.assertEqual(vim.position(pl=pl, segment_info=segment_info), '50%')
			self.assertEqual(vim.position(pl=pl, segment_info=segment_info, gradient=True), [
				{'contents': '50%', 'highlight_group': ['position_gradient', 'position'], 'gradient_level': 50.0}
			])
			vim_module._set_cursor(0, 0)
			self.assertEqual(vim.position(pl=pl, segment_info=segment_info), 'Top')
			# position_strings localizes the three special labels.
			vim_module._set_cursor(97, 0)
			self.assertEqual(vim.position(pl=pl, segment_info=segment_info, position_strings={'top': 'Comienzo', 'bottom': 'Final', 'all': 'Todo'}), 'Final')
			# A two-line buffer is fully visible -> 'Todo' / 'All'.
			segment_info['buffer'][0:-1] = [str(i) for i in range(2)]
			vim_module._set_cursor(0, 0)
			self.assertEqual(vim.position(pl=pl, segment_info=segment_info, position_strings={'top': 'Comienzo', 'bottom': 'Final', 'all': 'Todo'}), 'Todo')
			self.assertEqual(vim.position(pl=pl, segment_info=segment_info, gradient=True), [
				{'contents': 'All', 'highlight_group': ['position_gradient', 'position'], 'gradient_level': 0.0}
			])
		finally:
			# Wipe the scratch buffer regardless of assertion outcome.
			vim_module._bw(segment_info['bufnr'])
	def test_cursor_current(self):
		'''line/col/virtcol segments report the 1-based cursor position.'''
		pl = Pl()
		segment_info = vim_module._get_segment_info()
		self.assertEqual(vim.line_current(pl=pl, segment_info=segment_info), '1')
		self.assertEqual(vim.col_current(pl=pl, segment_info=segment_info), '1')
		# By default virtcol_current emits a gradient scaled against an
		# 80-column width (column 1 -> 100/80).
		self.assertEqual(vim.virtcol_current(pl=pl, segment_info=segment_info), [{
			'highlight_group': ['virtcol_current_gradient', 'virtcol_current', 'col_current'], 'contents': '1', 'gradient_level': 100.0 / 80,
		}])
		# With gradient=False the gradient group and level are dropped.
		self.assertEqual(vim.virtcol_current(pl=pl, segment_info=segment_info, gradient=False), [{
			'highlight_group': ['virtcol_current', 'col_current'], 'contents': '1',
		}])
def test_modified_buffers(self):
pl = Pl()
self.assertEqual(vim.modified_buffers(pl=pl), None)
	def test_branch(self):
		'''vim.branch shows the VCS branch; status_colors selects clean/dirty groups.'''
		pl = Pl()
		create_watcher = get_fallback_create_watcher()
		branch = partial(vim.branch, pl=pl, create_watcher=create_watcher)
		with vim_module._with('buffer', '/foo') as segment_info:
			# Clean repository: both status callbacks return None, so the
			# 'branch_clean' group is used when status_colors is on.
			with replace_attr(vim, 'guess', get_dummy_guess(status=lambda: None)):
				with replace_attr(vim, 'tree_status', lambda repo, pl: None):
					self.assertEqual(branch(segment_info=segment_info, status_colors=False), [
						{'divider_highlight_group': 'branch:divider', 'highlight_group': ['branch'], 'contents': 'foo'}
					])
					self.assertEqual(branch(segment_info=segment_info, status_colors=True), [
						{'divider_highlight_group': 'branch:divider', 'highlight_group': ['branch_clean', 'branch'], 'contents': 'foo'}
					])
			# Dirty repository: status callbacks report 'DU', so the
			# 'branch_dirty' group is used when status_colors is on.
			with replace_attr(vim, 'guess', get_dummy_guess(status=lambda: 'DU')):
				with replace_attr(vim, 'tree_status', lambda repo, pl: 'DU'):
					self.assertEqual(branch(segment_info=segment_info, status_colors=False), [
						{'divider_highlight_group': 'branch:divider', 'highlight_group': ['branch'], 'contents': 'foo'}
					])
					self.assertEqual(branch(segment_info=segment_info, status_colors=True), [
						{'divider_highlight_group': 'branch:divider', 'highlight_group': ['branch_dirty', 'branch'], 'contents': 'foo'}
					])
	def test_file_vcs_status(self):
		'''file_vcs_status reflects the file's VCS letter; nofile buffers yield None.'''
		pl = Pl()
		create_watcher = get_fallback_create_watcher()
		file_vcs_status = partial(vim.file_vcs_status, pl=pl, create_watcher=create_watcher)
		with vim_module._with('buffer', '/foo') as segment_info:
			# A modified file gets a per-status highlight group suffix (_M).
			with replace_attr(vim, 'guess', get_dummy_guess(status=lambda file: 'M')):
				self.assertEqual(file_vcs_status(segment_info=segment_info), [
					{'highlight_group': ['file_vcs_status_M', 'file_vcs_status'], 'contents': 'M'}
				])
			# No VCS status for the file: the segment is empty.
			with replace_attr(vim, 'guess', get_dummy_guess(status=lambda file: None)):
				self.assertEqual(file_vcs_status(segment_info=segment_info), None)
		with vim_module._with('buffer', '/bar') as segment_info:
			# Special (nofile) buffers are skipped even when VCS reports a status.
			with vim_module._with('bufoptions', buftype='nofile'):
				with replace_attr(vim, 'guess', get_dummy_guess(status=lambda file: 'M')):
					self.assertEqual(file_vcs_status(segment_info=segment_info), None)
	def test_trailing_whitespace(self):
		'''trailing_whitespace reports the first line with trailing spaces, else None.'''
		pl = Pl()
		with vim_module._with('buffer', 'tws') as segment_info:
			trailing_whitespace = partial(vim.trailing_whitespace, pl=pl, segment_info=segment_info)
			# NOTE(review): each state is checked twice — presumably to exercise
			# the segment's internal caching; confirm against the segment code.
			self.assertEqual(trailing_whitespace(), None)
			self.assertEqual(trailing_whitespace(), None)
			# Line 1 now ends in a space, so '1' is reported with warning colors.
			vim_module.current.buffer[0] = ' '
			self.assertEqual(trailing_whitespace(), [{
				'highlight_group': ['trailing_whitespace', 'warning'],
				'contents': '1',
			}])
			self.assertEqual(trailing_whitespace(), [{
				'highlight_group': ['trailing_whitespace', 'warning'],
				'contents': '1',
			}])
			# Removing the whitespace clears the warning again.
			vim_module.current.buffer[0] = ''
			self.assertEqual(trailing_whitespace(), None)
			self.assertEqual(trailing_whitespace(), None)
def test_tabnr(self):
pl = Pl()
segment_info = vim_module._get_segment_info()
self.assertEqual(vim.tabnr(pl=pl, segment_info=segment_info, show_current=True), '1')
self.assertEqual(vim.tabnr(pl=pl, segment_info=segment_info, show_current=False), None)
def test_bufnr(self):
pl = Pl()
segment_info = vim_module._get_segment_info()
self.assertEqual(vim.bufnr(pl=pl, segment_info=segment_info, show_current=True), str(segment_info['bufnr']))
self.assertEqual(vim.bufnr(pl=pl, segment_info=segment_info, show_current=False), None)
def test_winnr(self):
pl = Pl()
segment_info = vim_module._get_segment_info()
self.assertEqual(vim.winnr(pl=pl, segment_info=segment_info, show_current=True), str(segment_info['winnr']))
self.assertEqual(vim.winnr(pl=pl, segment_info=segment_info, show_current=False), None)
	def test_single_tab(self):
		'''single_tab switches contents/highlight between one-tab and many-tab modes.'''
		pl = Pl()
		single_tab = partial(vim.single_tab, pl=pl)
		# With an extra tabpage open, the 'many_tabs' variant is used and only
		# multiple_text overrides the default 'Tabs' label.
		with vim_module._with('tabpage'):
			self.assertEqual(single_tab(), [{'highlight_group': ['many_tabs'], 'contents': 'Tabs'}])
			self.assertEqual(single_tab(single_text='s', multiple_text='m'), [{'highlight_group': ['many_tabs'], 'contents': 'm'}])
			self.assertEqual(single_tab(multiple_text='m'), [{'highlight_group': ['many_tabs'], 'contents': 'm'}])
			self.assertEqual(single_tab(single_text='s'), [{'highlight_group': ['many_tabs'], 'contents': 'Tabs'}])
		# Back to a single tab: the 'single_tab' variant is used and only
		# single_text overrides the default 'Bufs' label.
		self.assertEqual(single_tab(), [{'highlight_group': ['single_tab'], 'contents': 'Bufs'}])
		self.assertEqual(single_tab(single_text='s', multiple_text='m'), [{'highlight_group': ['single_tab'], 'contents': 's'}])
		self.assertEqual(single_tab(multiple_text='m'), [{'highlight_group': ['single_tab'], 'contents': 'Bufs'}])
		self.assertEqual(single_tab(single_text='s'), [{'highlight_group': ['single_tab'], 'contents': 's'}])
	def test_segment_info(self):
		'''tab_modified_indicator tracks modified state of any buffer in the tab.'''
		pl = Pl()
		with vim_module._with('tabpage'):
			with vim_module._with('buffer', '1') as segment_info:
				# Fresh buffer: nothing modified yet.
				self.assertEqual(vim.tab_modified_indicator(pl=pl, segment_info=segment_info), None)
				# Editing the buffer marks it modified, producing the '+' sign.
				vim_module.current.buffer[0] = ' '
				self.assertEqual(vim.tab_modified_indicator(pl=pl, segment_info=segment_info), [{
					'contents': '+',
					'highlight_group': ['tab_modified_indicator', 'modified_indicator'],
				}])
				# Undo removes the modification and clears the indicator.
				vim_module._undo()
				self.assertEqual(vim.tab_modified_indicator(pl=pl, segment_info=segment_info), None)
				# Switch to a new buffer and modify the old one: the window-local
				# modified_indicator stays None while the tab-wide indicator fires.
				old_buffer = vim_module.current.buffer
				vim_module._new('2')
				segment_info = vim_module._get_segment_info()
				self.assertEqual(vim.tab_modified_indicator(pl=pl, segment_info=segment_info), None)
				old_buffer[0] = ' '
				self.assertEqual(vim.modified_indicator(pl=pl, segment_info=segment_info), None)
				self.assertEqual(vim.tab_modified_indicator(pl=pl, segment_info=segment_info), [{
					'contents': '+',
					'highlight_group': ['tab_modified_indicator', 'modified_indicator'],
				}])
# Previous working directory; saved here and restored by tearDownModule().
old_cwd = None
def setUpModule():
	'''Prepare sys.path and the cwd, then import the real powerline vim segments.'''
	global old_cwd
	global __file__
	# Make the stub 'path' directory importable ahead of everything else.
	sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), 'path')))
	old_cwd = os.getcwd()
	__file__ = os.path.abspath(__file__)
	os.chdir(os.path.dirname(__file__))
	from powerline.segments import vim
	# Expose the module at global scope so the tests can reference it as `vim`.
	globals()['vim'] = vim
def tearDownModule():
	'''Undo setUpModule(): restore the working directory and sys.path.'''
	global old_cwd
	os.chdir(old_cwd)
	sys.path.pop(0)
if __name__ == '__main__':
	# Delegate to the project-wide test entry point.
	from tests import main
	main()
| magus424/powerline | tests/test_segments.py | Python | mit | 59,653 |
import complexism as cx
import epidag as dag
__author__ = 'TimeWz667'
# Create a new blueprint of CTBN
bp = cx.BlueprintCTBN('Test')
# Add microstates
bp.add_microstate('A', ['N', 'Y'])
bp.add_microstate('B', ['N', 'Y'])
# Name combinations of microstates as states
bp.add_state('A', A='Y')
bp.add_state('a', A='N')
bp.add_state('B', B='Y')
bp.add_state('b', B='N')
bp.add_state('ab', A='N', B='N')
bp.add_state('AB', A='Y', B='Y')
# Add transitions
bp.add_transition('TrA', 'A', 'exp(0.1)')
bp.add_transition('TrB', 'B')
# Link transitions to states
bp.link_state_transition('a', 'TrA')
bp.link_state_transition('b', 'TrB')
psc = """
PCore ABC{
beta ~ exp(0.5)
TrA ~ lnorm(beta, 1)
TrB ~ gamma(beta, 100)
}
"""
# Sample root nodes
pc = dag.quick_build_parameter_core(psc)
print('\nUse a parameter model to support samplers')
print(pc.Actors.keys())
# Use pc to generate a dynamic core
dc = bp.generate_model('TestCTBN', **pc.Actors)
print('\nCombining parameter model and dynamic model')
print(dc)
state_ab = dc['ab']
state_a = dc['a']
state_A = dc['A']
print('\nTest inclusions')
# print('ab have a:', state_ab.isa(state_a))
print('ab have a:', state_a in state_ab)
print('ab have A:', state_A in state_ab)
print('\nTransitions follows ab')
print(state_ab.next_transitions())
| TimeWz667/Kamanian | example/OOP/O2.2 SS, CTBN.py | Python | mit | 1,302 |
__author__ = 'modmuss50'
import sys
import os
# Usage: makePack.py (PACKNAME) (PACKMAKER) (PACKDESCRIPTION) (MCVER) (FORGEVER)
# Creates the on-disk skeleton for a new mod pack under packs/<PACKNAME>.
print("Starting to generate your mod pack!")
# Positional command-line arguments describing the pack.
packName = sys.argv[1]
packCreator = sys.argv[2]
packDescription = sys.argv[3]
mcVersion = sys.argv[4]
forgeVersion = sys.argv[5]
packDir = "packs/" + packName
if os.path.isdir(packDir):
    # Refuse to clobber an existing pack directory.
    print("That pack exists! I cannot continue!")
    sys.exit(1)
else:
    os.makedirs(packDir)
# 'with' guarantees the handles are closed even if a write fails
# (the original leaked handles on error and shadowed the builtin `file`).
with open(packDir + "/desc.txt", "w") as out:
    out.write(packDescription)
with open(packDir + "/mcInfo.txt", "w") as out:
    out.write(mcVersion + "-" + forgeVersion)
with open(packDir + "/packInfo.txt", "w") as out:
    out.write(packName + "\n")
    out.write(packCreator)
# mods.txt starts out empty; it is filled in later.
open(packDir + "/mods.txt", "w").close()
print("Mod pack structure created!")
| OpenLauncher/OpenLauncherAPI | PACKCREATOR/makePack.py | Python | gpl-3.0 | 914 |
#!/usr/bin/env python
"""Script replacing flowcell_summary_uppload_LIMS.py
Gets data from the sequencing step and uploads it to statusdb.
Denis Moreno, Science for Life Laboratory, Stockholm, Sweden.
"""
import argparse
import os
import yaml
import logging
import logging.handlers
import LIMS2DB.objectsDB.process_categories as pc_cg
from LIMS2DB.flowcell_sql import create_lims_data_obj, get_sequencing_steps, upload_to_couch
from LIMS2DB.utils import setupServer
from LIMS2DB.classes import Process
from genologics_sql.utils import get_session
from sqlalchemy import text
def main(args):
    """Collect sequencing steps from the LIMS database and upload their
    parsed flowcell data to statusdb.

    :param args: argparse namespace with conf, logfile, hours and flowcell.
    """
    # get the session with the lims db
    db_session = get_session()
    # set up a rotating-file log (200 MB per file, 5 backups)
    mainlog = logging.getLogger('fsullogger')
    mainlog.setLevel(level=logging.INFO)
    mfh = logging.handlers.RotatingFileHandler(args.logfile, maxBytes=209715200, backupCount=5)
    mft = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    mfh.setFormatter(mft)
    mainlog.addHandler(mfh)
    # read the configuration; safe_load avoids constructing arbitrary Python
    # objects from YAML tags (yaml.load without a Loader is deprecated/unsafe)
    with open(args.conf) as conf_file:
        conf = yaml.safe_load(conf_file)
    couch = setupServer(conf)
    interval = "{} hours".format(args.hours)
    # list the right sequencing steps
    if args.flowcell:
        # NOTE(review): the flowcell name is interpolated directly into SQL;
        # it comes from the command line, but bound parameters would be safer.
        query="select distinct pro.* from container ct \
                inner join containerplacement cp on ct.containerid=cp.containerid \
                inner join processiotracker piot on piot.inputartifactid=cp.processartifactid \
                inner join process pro on pro.processid=piot.processid \
                where pro.typeid in ({seq_type_ids}) and ct.name='{ct_name}';".format(seq_type_ids=",".join(pc_cg.SEQUENCING.keys()),ct_name=args.flowcell)
        seq_steps = db_session.query(Process).from_statement(text(query)).all()
    else:
        seq_steps = get_sequencing_steps(db_session, interval)
    for step in seq_steps:
        # Pull the run identifier from the step's UDFs.
        # NOTE(review): if a step has no "Run ID" udf, fcid keeps the previous
        # step's value (or is unbound on the first iteration) — confirm intent.
        for udf in step.udfs:
            if udf.udfname == "Run ID":
                fcid = udf.udfvalue
        mainlog.info("updating {}".format(fcid))
        # generate the lims_data dict for this step
        lims_data = create_lims_data_obj(db_session, step)
        # update the right couch document
        upload_to_couch(couch, fcid, lims_data)
if __name__=="__main__":
    usage = "Usage: python flowcell_sql_upload.py [options]"
    parser = argparse.ArgumentParser(description='Upload flowcells lims data to statusdb.', usage=usage)
    parser.add_argument("-a", "--all_flowcells", dest="all_flowcells", action="store_true", default=False,
                        help = "Tries to upload all the data matching the given update frame (-t) into couchDB." )
    parser.add_argument("-t", "--hours", dest="hours", default=24, type=int,
                        help="Runs older than t hours are not updated. Default is 24 hours.")
    parser.add_argument("-f", "--flowcell", dest="flowcell", default=None,
                        help="Name of the flowcell WITHOUT the position")
    # The help text was previously a two-element TUPLE (a stray comma between
    # the literals), which argparse would render as the tuple repr; adjacent
    # string literals concatenate into the single intended message.
    parser.add_argument("-l", "--logfile", dest="logfile",
                        help=("log file"
                              " that will be used. default is $HOME/lims2db_flowcells.log "),
                        default=os.path.expanduser("~/lims2db_flowcells.log"))
    parser.add_argument("-c", "--conf", dest="conf",
                        default=os.path.join(os.environ['HOME'],'opt/config/post_process.yaml'),
                        help = "Config file. Default: ~/opt/config/post_process.yaml")
    args = parser.parse_args()
    main(args)
| Galithil/LIMS2DB | scripts/flowcell_sql_upload.py | Python | mit | 3,402 |
import sys
if sys.version_info[0] == 2: # Just checking your Python version to import Tkinter properly.
from Tkinter import *
else:
from tkinter import *
#from tkinter.messagebox import showinfo
import matplotlib.pyplot as plt
import RentCrawl as rc
master = Tk()
class Fullscreen_Window:
    """Tk root window that starts fullscreen with a white canvas.

    Key bindings: Escape leaves fullscreen and destroys the window;
    Return pops up a demo matplotlib plot.
    """
    def __init__(self):
        self.tk = Tk()
        self.frame = Frame(self.tk)
        self.frame.pack()
        # Tracks whether fullscreen is currently active.
        self.state = False
        self.tk.bind("<Escape>", self.end_fullscreen)
        self.tk.bind("<Return>", self.toggle_showinfo)
        self.toggle_fullscreen()
        self.tk.configure(background='black')
        # Force geometry to settle so winfo_width/height below are accurate.
        self.tk.update_idletasks()
        self.create_canvas()
    def create_canvas(self):
        """Create a white canvas sized to the current window dimensions."""
        window_width = self.tk.winfo_width()
        window_height = self.tk.winfo_height()
        self.canvas = Canvas(self.tk,
                             width=window_width,
                             height=window_height)
        self.canvas.configure(background="white")
        self.canvas.pack()
    def toggle_fullscreen(self, event=None):
        """Flip the fullscreen attribute; returns "break" to stop event propagation."""
        print('Setting to fullscreen')
        self.state = not self.state # Just toggling the boolean
        self.tk.attributes("-fullscreen", self.state)
        return "break"
    def toggle_showinfo(self, event=None):
        """Show a throwaway demo plot (bound to <Return>)."""
        plt.plot([1,2,3,4])
        plt.ylabel('some nums')
        plt.show()
        return "break"
    def end_fullscreen(self, event=None):
        """Leave fullscreen and tear the window down (bound to <Escape>)."""
        self.state = False
        self.tk.attributes("-fullscreen", False)
        self.tk.destroy()
        return "break"
class ScrollApplication(Frame):
    """Rent-history browser: a scrollable unit list filtered by apartment,
    sortable by unit/price/sqft/date; double-clicking a unit plots its
    price history with matplotlib.
    """
    # Fixed window dimensions enforced via minsize/maxsize below.
    width = 900
    height = 800
    def __init__(self, master=None):
        # Historical rent snapshots; the last entry is the most recent.
        self.rentData = rc.get_hist()
        self.apartments = rc.APARTMENTS
        # for data in self.rentData:
        # print(data['date'])
        master.minsize(width=self.width, height=self.height)
        master.maxsize(width=self.width, height=self.height)
        Frame.__init__(self, master)
        self.grid(sticky=N+S+E+W)
        self.mainframe()
    def mainframe(self):
        """ Creating widgets. """
        """ Create listbox of units. """
        self.data = Listbox(self, bg='white', height=15, font='Courier')
        # Placeholder rows; replaced by UpdateListboxData() at the end.
        for i in range(100):
            self.data.insert(END, str(i))
        self.scrollbar = Scrollbar(self.data, orient=VERTICAL)
        self.data.config(yscrollcommand=self.scrollbar.set)
        self.scrollbar.config(command=self.data.yview)
        self.data.bind("<Double-Button-1>", self.OnListboxDouble)
        self.data.grid(row=0, column=0, columnspan=2, rowspan=1, sticky=NSEW)
        self.data.columnconfigure(0, weight=3)
        self.data.rowconfigure(0, weight=3)
        # self.run = Button(self, text='run', width=20)
        # self.stop = Button(self, text='stop', width=20)
        # self.run.grid(row=1, column=0, sticky=EW)
        # self.stop.grid(row=1, column=1, sticky=EW)
        self.scrollbar.grid(column=2, sticky=N+S)
        self.grid_rowconfigure(0, minsize=600)
        self.grid_columnconfigure(0, minsize=450)
        self.grid_columnconfigure(1, minsize=450)
        """ Create radioboxes of apartments. """
        self.radioboxesApartment = Label(self, text="Choose Apartment")
        self.radioboxesApartment.grid(row=1, column=0, sticky=W)
        self.apartmentChoice = StringVar()
        self.apartmentChoice.set(self.apartments[0])
        for idx, apartment in enumerate(self.apartments):
            l = Radiobutton(self, text=apartment,
                variable = self.apartmentChoice, value = apartment,
                command=self.OnRadiobuttonApartmentChange)
            print(idx+2)
            l.grid(row = (idx + 2), column = 0, sticky=W)
        """ Create radioboxes of sortorder. """
        self.radioboxesSort = Label(self, text='Choose Sort Order')
        self.radioboxesSort.grid(row=1, column=1, sticky=W)
        self.sortOrder = ['unit', 'price', 'sqft', 'date']
        self.sortChoice = StringVar()
        self.sortChoice.set(self.sortOrder[0])
        for idx, order in enumerate(self.sortOrder):
            l = Radiobutton(self, text=order,
                variable=self.sortChoice, value=order,
                command=self.OnRadiobuttonSortChange)
            l.grid(row = (idx + 2), column = 1, sticky=W)
        """ Update listbox data. """
        self.UpdateListboxData()
    def UpdateListboxData(self):
        """Refill the listbox with the latest snapshot's units for the chosen
        apartment, sorted by the selected field."""
        self.data.delete(0, END)
        apartment = self.apartmentChoice.get()
        displayData = []
        # Most recent snapshot; 'date' is a metadata key, not a unit.
        for unit_key, data in self.rentData[-1].items():
            if unit_key != 'date' and data['apartment'] == apartment:
                unit_str = "%s price: %0.1f sqft: %d date: %s" \
                    % (unit_key, data['price'], data['sqft'], data['date'])
                unit_display_key = data[self.sortChoice.get()]
                displayData.append((unit_display_key, unit_str))
                print(unit_str)
        # Sort on the chosen field value (first tuple element).
        displayData.sort()
        for _, unit_str in displayData:
            self.data.insert(END, unit_str)
    def OnRadiobuttonApartmentChange(self):
        # print(self.apartmentChoice.get())
        self.UpdateListboxData()
    def OnRadiobuttonSortChange(self):
        self.UpdateListboxData()
    def OnListboxDouble(self, event):
        """Plot the selected unit's price history across all snapshots."""
        # NOTE(review): y_min/y_max are only used by the commented-out axis
        # clamp below — currently dead values.
        y_min = 1600
        y_max = 2500
        widget = event.widget
        selection = widget.curselection()
        value = widget.get(selection[0])
        print("Selection:", selection, ": '%s'" % value)
        # The unit key is the first whitespace-separated token of the row text.
        valueArr = value.split(' ')
        unit_key = valueArr[0]
        date_arr = []
        price_arr = []
        for data in self.rentData:
            if unit_key in data:
                date_arr.append(data['date'].strftime('%m-%d'))
                price_arr.append(data[unit_key]['price'])
            elif len(date_arr) > 0:
                # Unit present earlier but missing now: treated as sold.
                print('%s is sold.' % unit_key)
        x_arr = list(range(len(date_arr)))
        plt.xticks(x_arr, date_arr)
        plt.plot(x_arr, price_arr)
        plt.title('%s rent' % unit_key)
        #x1,x2,y1,y2 = plt.axis()
        #plt.axis((x1, x2, y_min, y_max))
        plt.show()
if __name__ == '__main__':
    try:
        # Build the rent-browser UI inside the root window and run Tk's loop.
        a = ScrollApplication(master)
        a.mainframe()
        a.mainloop()
    except Exception:
        # A bare `except:` previously also swallowed SystemExit and
        # KeyboardInterrupt; catching Exception keeps those working while the
        # pause below still leaves the console readable after a crash.
        print(sys.exc_info())
        input('pause for error')
| BowenBao/pycrawl | RentCrawl/gui.py | Python | mit | 6,837 |
import datetime
from django.db import models
from django.db.models import signals
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import force_unicode
from tagging.fields import TagField
class ItemManager(models.Manager):
    """Manager that mirrors instances of "followed" models into Item rows,
    keeping them in sync via the post_save signal. (Python 2 codebase.)
    """
    def __init__(self):
        super(ItemManager, self).__init__()
        # Registry of followed model classes, keyed by lowercased class name.
        self.models_by_name = {}
    def create_or_update(self, instance, timestamp=None, url=None, tags="", source="INTERACTIVE", source_id="", **kwargs):
        """
        Create or update an Item from some instace.
        """
        # If the instance hasn't already been saved, save it first. This
        # requires disconnecting the post-save signal that might be sent to
        # this function (otherwise we could get an infinite loop).
        if instance._get_pk_val() is None:
            try:
                signals.post_save.disconnect(self.create_or_update, sender=type(instance))
            except Exception, err:
                reconnect = False
            else:
                reconnect = True
            instance.save()
            if reconnect:
                signals.post_save.connect(self.create_or_update, sender=type(instance))
        # Make sure the item "should" be registered (models can opt out by
        # setting jellyrollable = False).
        if not getattr(instance, "jellyrollable", True):
            return
        # Check to see if the timestamp is being updated, possibly pulling
        # the timestamp from the instance.
        if hasattr(instance, "timestamp"):
            timestamp = instance.timestamp
        if timestamp is None:
            update_timestamp = False
            timestamp = datetime.datetime.now()
        else:
            update_timestamp = True
        # Ditto for tags: fall back to the instance's first TagField.
        if not tags:
            for f in instance._meta.fields:
                if isinstance(f, TagField):
                    tags = getattr(instance, f.attname)
                    break
        if not url:
            if hasattr(instance,'url'):
                url = instance.url
        # Create the Item object keyed on (content type, object pk).
        ctype = ContentType.objects.get_for_model(instance)
        item, created = self.get_or_create(
            content_type = ctype,
            object_id = force_unicode(instance._get_pk_val()),
            defaults = dict(
                timestamp = timestamp,
                source = source,
                source_id = source_id,
                tags = tags,
                url = url,
            )
        )
        # Refresh mutable fields on update as well as on creation.
        item.tags = tags
        item.source = source
        item.source_id = source_id
        if update_timestamp:
            item.timestamp = timestamp
        # Save and return the item.
        item.save()
        return item
    def follow_model(self, model):
        """
        Follow a particular model class, updating associated Items automatically.
        """
        self.models_by_name[model.__name__.lower()] = model
        signals.post_save.connect(self.create_or_update, sender=model)
    def get_for_model(self, model):
        """
        Return a QuerySet of only items of a certain type.
        """
        return self.filter(content_type=ContentType.objects.get_for_model(model))
    def get_last_update_of_model(self, model, **kwargs):
        """
        Return the last time a given model's items were updated. Returns the
        epoch if the items were never updated.
        """
        qs = self.get_for_model(model)
        if kwargs:
            qs = qs.filter(**kwargs)
        try:
            return qs.order_by('-timestamp')[0].timestamp
        except IndexError:
            return datetime.datetime.fromtimestamp(0)
| jacobian-archive/jellyroll | src/jellyroll/managers.py | Python | bsd-3-clause | 3,713 |
from collections import OrderedDict
from glob import glob
import logging
import os
import tempfile
from six import string_types
from metamds import Task
from metamds.io import rsync_to
from metamds.db import add_doc_db
class Simulation(object):
    """A named collection of Task objects plus the directories, input files
    and loggers they share.

    Attributes
    ----------
    name : str
        Simulation name; defaults to 'project'.
    template : callable or iterable of str
        Produces the command script for each parametrized task.
    output_dir : str
        Absolute path under which per-task output directories live.
    input_dir : str
        Absolute path holding the simulation's input files.
    input_files : list of str
        Files in input_dir, excluding scripts/notebooks and output_dir.
    remote_dir : str or None
        Working directory on a remote host, set by create_remote_dir().
    info, debug : logging.Logger
        File-backed loggers writing into output_dir.
    """
    def __init__(self, name=None, template='', output_dir='', input_dir=''):
        if name is None:
            name = 'project'
        self.name = name
        self._tasks = OrderedDict()
        self.template = template
        # An empty input_dir means "use the current working directory".
        # (Previously self.input_dir was set to os.getcwd() and then
        # immediately overwritten — a dead store.)
        if not input_dir:
            input_dir = os.getcwd()
        self.input_dir = os.path.abspath(input_dir)
        if not output_dir:
            self._tmp_dir = tempfile.mkdtemp(prefix='metamds_')
            output_dir = os.path.join(self._tmp_dir, self.name)
            os.mkdir(output_dir)
        else:
            if not os.path.isdir(output_dir):
                os.mkdir(output_dir)
        self.output_dir = os.path.abspath(output_dir)
        # Scripts/notebooks and the output directory itself are not inputs.
        self.input_files = [f for f in glob('{}/*'.format(self.input_dir))
                            if not f.endswith(('.py', '.ipynb')) and
                            f != self.output_dir]
        self.remote_dir = None
        self.info = self._create_logger('info', logging.INFO)
        self.debug = self._create_logger('debug', logging.DEBUG)

    def _create_logger(self, suffix, level):
        """Return a '<name>_<suffix>' logger writing to output_dir."""
        logger = logging.getLogger('{}_{}'.format(self.name, suffix))
        logger.setLevel(level)
        log_file = os.path.join(self.output_dir, '{}_{}.log'.format(self.name, suffix))
        handler = logging.FileHandler(log_file)
        formatter = logging.Formatter('[%(asctime)s] %(levelname)s - %(message)s')
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        return logger

    def create_remote_dir(self, client, hostname, username):
        """Create a copy of all input files and `output_dir` on a remote host.

        Parameters
        ----------
        client : paramiko.SSHClient
        hostname : str
            Used to pick a site-specific scratch location.
        username : str
        """
        if not self.remote_dir:
            #---------------------------------------------------------------- TJ
            # For different resources it is necessary to move to preferred production directories
            # TODO: Add as variable somewhere rather than hardcoding
            if "nersc" in hostname:
                _, stdout, stderr = client.exec_command('cd $SCRATCH; mktemp -d; pwd')
            elif "accre" in hostname:
                _, stdout, stderr = client.exec_command('cd /scratch/{}; mktemp -d; pwd'.format(username))
            if "rahman" in hostname:
                _, stdout, stderr = client.exec_command('mktemp -d; pwd')
            #---------------------------------------------------------------- TJ
            if stderr.readlines():
                raise IOError(stderr.read().decode('utf-8'))
            remote_dir, home = (line.rstrip() for line in stdout.readlines())
            # TODO: tidy up temp dir creation and copying
            # NOTE(review): the [5:] slice assumes mktemp printed a path
            # starting with '/tmp/' — confirm for all target hosts.
            self.remote_dir = os.path.join(home, remote_dir[5:])
            cmd = 'rsync -r {tmp_dir} ~'.format(tmp_dir=remote_dir)
            _, stdout, stderr = client.exec_command(cmd)
            if stderr.readlines():
                raise IOError(stderr.read().decode('utf-8'))
        # Move input files
        rsync_to(flags='-r -h --progress --partial',
                 src=' '.join(self.input_files),
                 dst=self.remote_dir,
                 user=client.username,
                 host=client.hostname,
                 logger=self.debug)
        # Move output directory including relative symlinks to input files
        rsync_to(flags='-r -h --links --progress --partial',
                 src=self.output_dir,
                 dst=self.remote_dir,
                 user=client.username,
                 host=client.hostname,
                 logger=self.debug)

    def tasks(self):
        """Yield all tasks in this simulation. """
        for v in self._tasks.values():
            yield v

    @property
    def n_tasks(self):
        """Return the number of tasks in this simulation. """
        return len(self._tasks)

    def task_names(self):
        """Yield the names of all tasks in this simulation. """
        # _tasks maps name -> task, so iterating the dict yields the names
        # directly. (Previously this called `.name` on the key strings,
        # raising AttributeError as soon as a task existed.)
        for name in self._tasks:
            yield name

    def add_task(self, task):
        """Add a task to this simulation, auto-naming it if necessary. """
        if not task.name:
            task.name = 'task_{:d}'.format(self.n_tasks + 1)
        self._tasks[task.name] = task

    def execute_all(self, hostname=None, username=None):
        """Execute all tasks in this simulation. """
        for task in self.tasks():
            task.execute(hostname=hostname, username=username)

    def sync_all(self):
        """Sync the results of all tasks back from their execution hosts. """
        for task in self.tasks():
            task.sync()

    def parametrize(self, **parameters):
        """Parametrize and add a task to this simulation.

        Returns the newly created Task.
        """
        task = Task(simulation=self)
        parameters['input_dir'] = os.path.relpath(self.input_dir, task.output_dir)
        cwd = os.getcwd()
        os.chdir(task.output_dir)
        if hasattr(self.template, '__call__'):
            script = self.template(**parameters)
        # elif is_url(self.template):
        #     treat as blockly and download from github
        elif _is_iterable_of_strings(self.template):
            script = list()
            for command in self.template:
                # Substitute parameters into each command line. (Previously
                # the formatted result was discarded and the raw template
                # line appended instead.)
                script.append(command.format(**parameters))
        else:
            script = None
        if not _is_iterable_of_strings(script):
            raise ValueError('Unusable template: {}\n Templates should either '
                             'be an iterable of strings or a function that '
                             'returns an iterable of strings.'.format(self.template))
        os.chdir(cwd)
        # Parametrizing a task can and typically will produce input files.
        self.input_files = [f for f in glob('{}/*'.format(self.input_dir))
                            if not f.endswith(('.py', '.ipynb')) and
                            f != self.output_dir]
        task.script = script
        self.add_task(task)
        return task

    def add_to_db(self, host="127.0.0.1", port=27017, database="shearing_simulations",
                  user=None, password=None, collection="tasks", use_full_uri=False,
                  update_duplicates=False, **parameters):
        """Adds simulation parameters and io file locations to db.

        Parameters
        ----------
        host : str, optional
            database connection host (the default is 127.0.0.1, or the local computer being used)
        port : int, optional
            database host port (default is 27017, which is the pymongo default port).
        database : str, optional
            name of the database being used (default is shearing_simulations).
        user : str, optional
            user name (default is None, meaning the database is public).
        password : str, optional
            user password (default is None, meaning there is no password access to database).
        collection : str, optional
            database collection name for doc location (default is tasks).
        use_full_uri : bool, optional
            optional use of full uri path name, necessary for hosted database (default is False,
            meaning the files being used in the database are local).
        update_duplicates : bool, optional
            determines ifduplicates in the database will be updated (default is False, meaning
            the added doc should not replace an existing doc that is equivalent)
        **parameters : dict, optional
            keys and fields added in doc.
        """
        # TODO:: add user//pw functionality when MongoDB is hosted
        # Functions/classes are not BSON-serializable; store their names.
        for key in parameters:
            if type(parameters[key]).__name__ in ['function', 'type']:
                parameters[key] = parameters[key].__name__
        if use_full_uri:
            # NOTE(review): get_uri is not defined or imported in this module —
            # confirm where it is expected to come from.
            output_dir = get_uri("{}/task_{:d}/".format(self.output_dir, self.n_tasks-1))
            input_dir = get_uri(self.input_dir)
            input_files = get_uri(self.input_files)
        else:
            output_dir = "{}/task_{:d}/".format(self.output_dir, self.n_tasks-1)
            input_dir = self.input_dir
            input_files = self.input_files
        parameters['output_dir'] = output_dir
        parameters['input_dir'] = input_dir
        parameters['input_files'] = input_files
        add_doc_db(host=host, port=port, database=database, user=user, password=password,
                   collection=collection, doc=parameters,
                   update_duplicates=update_duplicates)
def _is_iterable_of_strings(script):
    """Return True if `script` is an iterable whose items are all strings.

    Non-iterable inputs (e.g. None) yield False instead of raising.
    """
    try:
        return all(isinstance(line, string_types) for line in script)
    except TypeError:
        # `script` is not iterable. (A bare `except:` previously swallowed
        # every exception, including KeyboardInterrupt.)
        return False
| iModels/metamds | metamds/simulation.py | Python | mit | 9,217 |
import datetime
import sys
from string import Template
from dsl_parsers.parsing_utils import communication_is_ice, get_name_number, IDSLPool
from templates.common.templatedict import TemplateDict
class src_main_py(TemplateDict):
    """Template-variable dictionary used when rendering a component's
    src/main.py file.
    """
    def __init__(self, component):
        super(src_main_py, self).__init__()
        # Keep a reference to the parsed component description.
        self.component = component
        # 'year' presumably feeds a copyright/header line in the template —
        # confirm against the template text.
        self['year'] = str(datetime.date.today().year)
        self['component_name'] = self.component.name
| Kmayankkr/robocomp | tools/robocompdsl/templates/templatePython/plugins/base/functions/src/main_py.py | Python | gpl-3.0 | 454 |
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from tempest.api.compute import base
from tempest import config
from tempest.lib import exceptions as lib_exc
from tempest import test
CONF = config.CONF
class SecurityGroupDefaultRulesTest(base.BaseV2ComputeAdminTest):
    """Admin API tests for nova-network security group *default* rules:
    create/delete, cidr defaulting, list and show.
    """
    @classmethod
    # TODO(GMann): Once Bug# 1311500 is fixed, these test can run
    # for Neutron also.
    @testtools.skipIf(CONF.service_available.neutron,
                      "Skip as this functionality is not yet "
                      "implemented in Neutron. Related Bug#1311500")
    def setup_credentials(cls):
        # A network and a subnet will be created for these tests
        cls.set_network_resources(network=True, subnet=True)
        super(SecurityGroupDefaultRulesTest, cls).setup_credentials()
    @classmethod
    def setup_clients(cls):
        super(SecurityGroupDefaultRulesTest, cls).setup_clients()
        cls.adm_client = cls.os_adm.security_group_default_rules_client
    def _create_security_group_default_rules(self, ip_protocol='tcp',
                                             from_port=22, to_port=22,
                                             cidr='10.10.0.0/24'):
        # Create a Security Group default rule and verify the response echoes
        # every requested field before handing the rule back to the caller.
        rule = self.adm_client.create_security_default_group_rule(
            ip_protocol=ip_protocol,
            from_port=from_port,
            to_port=to_port,
            cidr=cidr)['security_group_default_rule']
        self.assertEqual(ip_protocol, rule['ip_protocol'])
        self.assertEqual(from_port, rule['from_port'])
        self.assertEqual(to_port, rule['to_port'])
        self.assertEqual(cidr, rule['ip_range']['cidr'])
        return rule
    @test.idempotent_id('6d880615-eec3-4d29-97c5-7a074dde239d')
    def test_create_delete_security_group_default_rules(self):
        # Create and delete Security Group default rule for each protocol;
        # showing a deleted rule must raise NotFound.
        ip_protocols = ['tcp', 'udp', 'icmp']
        for ip_protocol in ip_protocols:
            rule = self._create_security_group_default_rules(ip_protocol)
            # Delete Security Group default rule
            self.adm_client.delete_security_group_default_rule(rule['id'])
            self.assertRaises(lib_exc.NotFound,
                              self.adm_client.show_security_group_default_rule,
                              rule['id'])
    @test.idempotent_id('4d752e0a-33a1-4c3a-b498-ff8667ca22e5')
    def test_create_security_group_default_rule_without_cidr(self):
        # Omitting cidr should default the rule to 0.0.0.0/0.
        ip_protocol = 'udp'
        from_port = 80
        to_port = 80
        rule = self.adm_client.create_security_default_group_rule(
            ip_protocol=ip_protocol,
            from_port=from_port,
            to_port=to_port)['security_group_default_rule']
        self.addCleanup(self.adm_client.delete_security_group_default_rule,
                        rule['id'])
        self.assertNotEqual(0, rule['id'])
        self.assertEqual('0.0.0.0/0', rule['ip_range']['cidr'])
    @test.idempotent_id('29f2d218-69b0-4a95-8f3d-6bd0ef732b3a')
    def test_create_security_group_default_rule_with_blank_cidr(self):
        # An explicitly blank cidr should likewise default to 0.0.0.0/0.
        ip_protocol = 'icmp'
        from_port = 10
        to_port = 10
        cidr = ''
        rule = self.adm_client.create_security_default_group_rule(
            ip_protocol=ip_protocol,
            from_port=from_port,
            to_port=to_port,
            cidr=cidr)['security_group_default_rule']
        self.addCleanup(self.adm_client.delete_security_group_default_rule,
                        rule['id'])
        self.assertNotEqual(0, rule['id'])
        self.assertEqual('0.0.0.0/0', rule['ip_range']['cidr'])
    @test.idempotent_id('6e6de55e-9146-4ae0-89f2-3569586e0b9b')
    def test_security_group_default_rules_list(self):
        # A freshly created rule must appear in the list response.
        ip_protocol = 'tcp'
        from_port = 22
        to_port = 22
        cidr = '10.10.0.0/24'
        rule = self._create_security_group_default_rules(ip_protocol,
                                                         from_port,
                                                         to_port,
                                                         cidr)
        self.addCleanup(self.adm_client.delete_security_group_default_rule,
                        rule['id'])
        rules = (self.adm_client.list_security_group_default_rules()
                 ['security_group_default_rules'])
        self.assertNotEqual(0, len(rules))
        self.assertIn(rule, rules)
    @test.idempotent_id('15cbb349-86b4-4f71-a048-04b7ef3f150b')
    def test_default_security_group_default_rule_show(self):
        # Showing a rule by id must return exactly what create returned.
        ip_protocol = 'tcp'
        from_port = 22
        to_port = 22
        cidr = '10.10.0.0/24'
        rule = self._create_security_group_default_rules(ip_protocol,
                                                         from_port,
                                                         to_port,
                                                         cidr)
        self.addCleanup(self.adm_client.delete_security_group_default_rule,
                        rule['id'])
        fetched_rule = self.adm_client.show_security_group_default_rule(
            rule['id'])['security_group_default_rule']
        self.assertEqual(rule, fetched_rule)
| Tesora/tesora-tempest | tempest/api/compute/admin/test_security_group_default_rules.py | Python | apache-2.0 | 5,798 |
"""
**DEPRECATED**
Use :mod:`celery.defaults` instead.
"""
from celery import current_app
from celery.app import defaults
_DEFAULTS = defaults.DEFAULTS
conf = current_app.conf
ALWAYS_EAGER = conf.CELERY_ALWAYS_EAGER
EAGER_PROPAGATES_EXCEPTIONS = conf.CELERY_EAGER_PROPAGATES_EXCEPTIONS
RESULT_BACKEND = conf.CELERY_RESULT_BACKEND
CACHE_BACKEND = conf.CELERY_CACHE_BACKEND
CACHE_BACKEND_OPTIONS = conf.CELERY_CACHE_BACKEND_OPTIONS
TASK_SERIALIZER = conf.CELERY_TASK_SERIALIZER
TASK_RESULT_EXPIRES = conf.CELERY_TASK_RESULT_EXPIRES
IGNORE_RESULT = conf.CELERY_IGNORE_RESULT
TRACK_STARTED = conf.CELERY_TRACK_STARTED
ACKS_LATE = conf.CELERY_ACKS_LATE
REDIRECT_STDOUTS = conf.CELERY_REDIRECT_STDOUTS
REDIRECT_STDOUTS_LEVEL = conf.CELERY_REDIRECT_STDOUTS_LEVEL
RESULT_DBURI = conf.CELERY_RESULT_DBURI
RESULT_ENGINE_OPTIONS = conf.CELERY_RESULT_ENGINE_OPTIONS
MAX_CACHED_RESULTS = conf.CELERY_MAX_CACHED_RESULTS
SEND_EVENTS = conf.CELERY_SEND_EVENTS
DEFAULT_RATE_LIMIT = conf.CELERY_DEFAULT_RATE_LIMIT
DISABLE_RATE_LIMITS = conf.CELERY_DISABLE_RATE_LIMITS
CELERYD_TASK_TIME_LIMIT = conf.CELERYD_TASK_TIME_LIMIT
CELERYD_TASK_SOFT_TIME_LIMIT = conf.CELERYD_TASK_SOFT_TIME_LIMIT
CELERYD_MAX_TASKS_PER_CHILD = conf.CELERYD_MAX_TASKS_PER_CHILD
STORE_ERRORS_EVEN_IF_IGNORED = conf.CELERY_STORE_ERRORS_EVEN_IF_IGNORED
CELERY_SEND_TASK_ERROR_EMAILS = conf.CELERY_SEND_TASK_ERROR_EMAILS
CELERY_TASK_ERROR_WHITELIST = conf.CELERY_TASK_ERROR_WHITELIST
CELERYD_LOG_FORMAT = conf.CELERYD_LOG_FORMAT
CELERYD_TASK_LOG_FORMAT = conf.CELERYD_TASK_LOG_FORMAT
CELERYD_LOG_FILE = conf.CELERYD_LOG_FILE
CELERYD_LOG_COLOR = conf.CELERYD_LOG_COLOR
CELERYD_LOG_LEVEL = conf.CELERYD_LOG_LEVEL
CELERYD_STATE_DB = conf.CELERYD_STATE_DB
CELERYD_CONCURRENCY = conf.CELERYD_CONCURRENCY
CELERYD_PREFETCH_MULTIPLIER = conf.CELERYD_PREFETCH_MULTIPLIER
CELERYD_POOL_PUTLOCKS = conf.CELERYD_POOL_PUTLOCKS
CELERYD_POOL = conf.CELERYD_POOL
CELERYD_LISTENER = conf.CELERYD_CONSUMER
CELERYD_MEDIATOR = conf.CELERYD_MEDIATOR
CELERYD_ETA_SCHEDULER = conf.CELERYD_ETA_SCHEDULER
CELERYD_ETA_SCHEDULER_PRECISION = conf.CELERYD_ETA_SCHEDULER_PRECISION
ADMINS = conf.ADMINS
SERVER_EMAIL = conf.SERVER_EMAIL
EMAIL_HOST = conf.EMAIL_HOST
EMAIL_HOST_USER = conf.EMAIL_HOST_USER
EMAIL_HOST_PASSWORD = conf.EMAIL_HOST_PASSWORD
EMAIL_PORT = conf.EMAIL_PORT
BROKER_HOST = conf.BROKER_HOST
BROKER_PORT = conf.BROKER_PORT
BROKER_USER = conf.BROKER_USER
BROKER_PASSWORD = conf.BROKER_PASSWORD
BROKER_VHOST = conf.BROKER_VHOST
BROKER_USE_SSL = conf.BROKER_USE_SSL
BROKER_INSIST = conf.BROKER_INSIST
BROKER_CONNECTION_TIMEOUT = conf.BROKER_CONNECTION_TIMEOUT
BROKER_CONNECTION_RETRY = conf.BROKER_CONNECTION_RETRY
BROKER_CONNECTION_MAX_RETRIES = conf.BROKER_CONNECTION_MAX_RETRIES
BROKER_BACKEND = conf.BROKER_TRANSPORT
DEFAULT_QUEUE = conf.CELERY_DEFAULT_QUEUE
DEFAULT_ROUTING_KEY = conf.CELERY_DEFAULT_ROUTING_KEY
DEFAULT_EXCHANGE = conf.CELERY_DEFAULT_EXCHANGE
DEFAULT_EXCHANGE_TYPE = conf.CELERY_DEFAULT_EXCHANGE_TYPE
DEFAULT_DELIVERY_MODE = conf.CELERY_DEFAULT_DELIVERY_MODE
QUEUES = conf.CELERY_QUEUES
CREATE_MISSING_QUEUES = conf.CELERY_CREATE_MISSING_QUEUES
ROUTES = conf.CELERY_ROUTES
BROADCAST_QUEUE = conf.CELERY_BROADCAST_QUEUE
BROADCAST_EXCHANGE = conf.CELERY_BROADCAST_EXCHANGE
BROADCAST_EXCHANGE_TYPE = conf.CELERY_BROADCAST_EXCHANGE_TYPE
EVENT_SERIALIZER = conf.CELERY_EVENT_SERIALIZER
RESULT_EXCHANGE = conf.CELERY_RESULT_EXCHANGE
RESULT_EXCHANGE_TYPE = conf.CELERY_RESULT_EXCHANGE_TYPE
RESULT_SERIALIZER = conf.CELERY_RESULT_SERIALIZER
RESULT_PERSISTENT = conf.CELERY_RESULT_PERSISTENT
CELERYBEAT_LOG_LEVEL = conf.CELERYBEAT_LOG_LEVEL
CELERYBEAT_LOG_FILE = conf.CELERYBEAT_LOG_FILE
CELERYBEAT_SCHEDULER = conf.CELERYBEAT_SCHEDULER
CELERYBEAT_SCHEDULE = conf.CELERYBEAT_SCHEDULE
CELERYBEAT_SCHEDULE_FILENAME = conf.CELERYBEAT_SCHEDULE_FILENAME
CELERYBEAT_MAX_LOOP_INTERVAL = conf.CELERYBEAT_MAX_LOOP_INTERVAL
CELERYMON_LOG_LEVEL = conf.CELERYMON_LOG_LEVEL
CELERYMON_LOG_FILE = conf.CELERYMON_LOG_FILE
| WoLpH/celery | celery/conf.py | Python | bsd-3-clause | 3,960 |
import re
from urllib import parse
from collections import defaultdict
from openstates.scrape import Person, Scraper
import lxml.html
class WVPersonScraper(Scraper):
    """Scrape West Virginia legislators from the state legislature roster.

    Walks the Senate/House roster pages, follows each member link and
    yields one ``Person`` per legislator with district, party, photo and
    the contact details parsed from the member detail page.
    """

    jurisdiction = "wv"

    def scrape(self, chamber=None):
        """Yield legislators for ``chamber`` ('upper'/'lower'), or for
        both chambers when ``chamber`` is None."""
        chambers = [chamber] if chamber is not None else ["upper", "lower"]
        for chamber in chambers:
            yield from self.scrape_chamber(chamber)

    def scrape_chamber(self, chamber):
        """Yield legislators listed on one chamber's roster page."""
        if chamber == "upper":
            chamber_abbrev = "Senate1"
        else:
            chamber_abbrev = "House"
        url = "http://www.legis.state.wv.us/%s/roster.cfm" % chamber_abbrev
        page = lxml.html.fromstring(self.get(url).text)
        page.make_links_absolute(url)
        for link in page.xpath("//td/a[contains(@href, '?member=')]"):
            if not link.text:
                continue
            name = link.xpath("string()").strip()
            leg_url = self.urlescape(link.attrib["href"])
            # Skip navigation headings and unfilled seats.
            if name in [
                "Members",
                "Senate Members",
                "House Members",
                "Vacancy",
                "VACANT",
                "Vacant",
                "To Be Announced",
                "To Be Appointed",
            ]:
                continue
            # (stray debug ``print(name)`` removed: scrapers should not
            # write member names to stdout)
            yield from self.scrape_legislator(chamber, name, leg_url)

    def scrape_legislator(self, chamber, name, url):
        """Yield a single Person built from a member detail page."""
        html = self.get(url).text
        page = lxml.html.fromstring(html)
        page.make_links_absolute(url)
        # The district heading reads like "... DISTRICT 07"; take the word
        # after "DISTRICT" and strip any zero padding.
        district = (
            page.xpath('//h1[contains(., "DISTRICT")]/text()')
            .pop()
            .split()[1]
            .strip()
            .lstrip("0")
        )
        # The party letter appears in parentheses inside the first <h2>,
        # e.g. "(R - Kanawha)".
        party = page.xpath("//h2").pop().text_content()
        party = re.search(r"\((R|D|I)[ \-\]]", party).group(1)
        if party == "D":
            party = "Democratic"
        elif party == "R":
            party = "Republican"
        elif party == "I":
            party = "Independent"
        photo_url = page.xpath("//img[contains(@src, 'images/members/')]")[0].attrib[
            "src"
        ]
        leg = Person(
            name, district=district, party=party, image=photo_url, primary_org=chamber
        )
        leg.add_link(url)
        leg.add_source(url)
        self.scrape_offices(leg, page)
        yield leg

    def scrape_offices(self, legislator, doc):
        """Attach capitol- and district-office contact details parsed from
        the member page to ``legislator`` (mutates it in place)."""
        # Retrieve element that should contain all contact information for the
        # legislator and turn its text into a list.
        text = doc.xpath('//b[contains(., "Capitol Office:")]')[0]
        text = text.getparent().itertext()
        text = filter(None, [t.strip() for t in text])
        # Parse contact details into a mapping of header -> value lines.
        officedata = defaultdict(list)
        current = None
        # Initialize current_key so a value chunk arriving before any
        # header cannot raise a NameError below.
        current_key = None
        for chunk in text:
            # Skip parsing biography link.
            if chunk.lower() == "biography":
                break
            # Contact snippets should be elements with headers that end in
            # colons.
            if chunk.strip().endswith(":"):
                current_key = chunk.strip()
                current = officedata[current_key]
            elif current is not None:
                current.append(chunk)
                # Business phone is the last field we care about.
                if current_key == "Business Phone:":
                    break
        # NOTE(review): assumes the second mailto: link on the page is the
        # member's address -- confirm against the live page layout.
        email = doc.xpath('//a[contains(@href, "mailto:")]/@href')[1]
        email = email[7:]  # strip the "mailto:" prefix
        try:
            if officedata["Capitol Phone:"][0] not in ("", "NA"):
                capitol_phone = officedata["Capitol Phone:"][0]
            else:
                raise ValueError("Invalid phone number")
        except (IndexError, ValueError):
            capitol_phone = None
        if officedata["Capitol Office:"]:
            capitol_address = "\n".join(officedata["Capitol Office:"])
        else:
            capitol_address = None
        if email:
            legislator.add_contact_detail(
                type="email", value=email, note="Capitol Office"
            )
        if capitol_phone:
            legislator.add_contact_detail(
                type="voice", value=capitol_phone, note="Capitol Office"
            )
        if capitol_address:
            legislator.add_contact_detail(
                type="address", value=capitol_address, note="Capitol Office"
            )
        # If a business or home phone is listed, attempt to use the
        # home phone first, then fall back on the business phone for
        # the district office number.
        try:
            if officedata["Home Phone:"][0] not in ("", "NA"):
                district_phone = officedata["Home Phone:"][0]
            elif officedata["Business Phone:"][0] not in ("", "NA"):
                district_phone = officedata["Business Phone:"][0]
            else:
                raise ValueError("Invalid phone number")
        except (IndexError, ValueError):
            district_phone = None
        if officedata["Home:"]:
            district_address = "\n".join(officedata["Home:"])
        else:
            district_address = None
        # Add district office entry only if data exists for it.
        if district_phone:
            legislator.add_contact_detail(
                type="voice", value=district_phone, note="District Office"
            )
        if district_address:
            legislator.add_contact_detail(
                type="address", value=district_address, note="District Office"
            )

    def urlescape(self, url):
        """Return ``url`` with its path and query percent-encoded, leaving
        the URL structure (scheme, host, fragment) intact."""
        scheme, netloc, path, qs, anchor = parse.urlsplit(url)
        path = parse.quote(path, "/%")
        qs = parse.quote_plus(qs, ":&=")
        return parse.urlunsplit((scheme, netloc, path, qs, anchor))
| sunlightlabs/openstates | scrapers/wv/people.py | Python | gpl-3.0 | 5,733 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" Bibcirculation web interface """
__revision__ = "$Id$"
__lastupdated__ = """$Date$"""
# others invenio imports
from invenio.config import CFG_SITE_LANG, \
CFG_SITE_URL, \
CFG_SITE_SECURE_URL, \
CFG_ACCESS_CONTROL_LEVEL_SITE, \
CFG_WEBSESSION_DIFFERENTIATE_BETWEEN_GUESTS, \
CFG_SITE_RECORD
from invenio.webuser import getUid, page_not_authorized, isGuestUser, \
collect_user_info
from invenio.webpage import page, pageheaderonly, pagefooteronly
from invenio.search_engine import create_navtrail_links, \
guess_primary_collection_of_a_record, \
get_colID, check_user_can_view_record, \
record_exists
from invenio.urlutils import redirect_to_url, \
make_canonical_urlargd
from invenio.messages import gettext_set_language
from invenio.webinterface_handler import wash_urlargd, WebInterfaceDirectory
from invenio.websearchadminlib import get_detailed_page_tabs
from invenio.access_control_config import VIEWRESTRCOLL
from invenio.access_control_mailcookie import mail_cookie_create_authorize_action
import invenio.template
webstyle_templates = invenio.template.load('webstyle')
websearch_templates = invenio.template.load('websearch')
# bibcirculation imports
bibcirculation_templates = invenio.template.load('bibcirculation')
from invenio.bibcirculation import perform_new_request, \
perform_new_request_send, \
perform_get_holdings_information, \
perform_borrower_loans, \
perform_loanshistoricaloverview, \
display_ill_form, \
ill_register_request, \
ill_request_with_recid, \
ill_register_request_with_recid
class WebInterfaceYourLoansPages(WebInterfaceDirectory):
    """Defines the set of /yourloans pages.

    Shows the logged-in user's current loans and a historical overview,
    both guarded by login and the 'precached_useloans' permission.
    """
    _exports = ['', 'display', 'loanshistoricaloverview']
    def index(self, req, form):
        """ The function called by default: redirect the bare /yourloans
        URL to the display page, preserving the query string.
        """
        redirect_to_url(req, "%s/yourloans/display?%s" % (CFG_SITE_URL,
                                                          req.args))
    def display(self, req, form):
        """
        Displays all loans of a given user.
        @param ln: language
        @return: the rendered "Your Loans" page
        """
        argd = wash_urlargd(form, {'barcode': (str, ""),
                                   'borrower_id': (int, 0),
                                   'request_id': (int, 0)})
        # Check if user is logged
        uid = getUid(req)
        if CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
            # Site runs in restricted mode: no personal pages at all.
            return page_not_authorized(req, "%s/yourloans/display" % \
                                       (CFG_SITE_URL,),
                                       navmenuid="yourloans")
        elif uid == -1 or isGuestUser(uid):
            # Guests are bounced to login with a referer back to this page.
            return redirect_to_url(req, "%s/youraccount/login%s" % (
                CFG_SITE_SECURE_URL,
                make_canonical_urlargd({
                    'referer' : "%s/yourloans/display%s" % (
                        CFG_SITE_URL,
                        make_canonical_urlargd(argd, {})),
                    "ln" : argd['ln']}, {})), norobot=True)
        _ = gettext_set_language(argd['ln'])
        user_info = collect_user_info(req)
        if not user_info['precached_useloans']:
            return page_not_authorized(req, "../", \
                text = _("You are not authorized to use loans."))
        body = perform_borrower_loans(uid=uid,
                                      barcode=argd['barcode'],
                                      borrower_id=argd['borrower_id'],
                                      request_id=argd['request_id'],
                                      ln=argd['ln'])
        return page(title       = _("Your Loans"),
                    body        = body,
                    uid         = uid,
                    lastupdated = __lastupdated__,
                    req         = req,
                    language    = argd['ln'],
                    navmenuid   = "yourloans",
                    secure_page_p=1)
    def loanshistoricaloverview(self, req, form):
        """
        Show loans historical overview for the logged-in user.
        """
        argd = wash_urlargd(form, {})
        # Check if user is logged
        uid = getUid(req)
        if CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
            return page_not_authorized(req, "%s/yourloans/loanshistoricaloverview" % \
                                       (CFG_SITE_URL,),
                                       navmenuid="yourloans")
        elif uid == -1 or isGuestUser(uid):
            # Guests are bounced to login with a referer back to this page.
            return redirect_to_url(req, "%s/youraccount/login%s" % (
                CFG_SITE_SECURE_URL,
                make_canonical_urlargd({
                    'referer' : "%s/yourloans/loanshistoricaloverview%s" % (
                        CFG_SITE_URL,
                        make_canonical_urlargd(argd, {})),
                    "ln" : argd['ln']}, {})), norobot=True)
        _ = gettext_set_language(argd['ln'])
        user_info = collect_user_info(req)
        if not user_info['precached_useloans']:
            return page_not_authorized(req, "../", \
                text = _("You are not authorized to use loans."))
        body = perform_loanshistoricaloverview(uid=uid,
                                               ln=argd['ln'])
        return page(title       = _("Loans - historical overview"),
                    body        = body,
                    uid         = uid,
                    lastupdated = __lastupdated__,
                    req         = req,
                    language    = argd['ln'],
                    navmenuid   = "yourloans",
                    secure_page_p=1)
class WebInterfaceILLPages(WebInterfaceDirectory):
    """Defines the set of /ill pages.

    Presents the inter-library-loan (ILL) request form for books and
    registers submitted requests; all pages require a logged-in user
    with the 'precached_useloans' permission.
    """
    _exports = ['', 'display', 'register_request']
    def index(self, req, form):
        """ The function called by default: redirect the bare /ill URL to
        the display page, preserving the query string.
        """
        redirect_to_url(req, "%s/ill/display?%s" % (CFG_SITE_URL,
                                                    req.args))
    def display(self, req, form):
        """
        Display the empty ILL request form for books.
        @param ln: language
        @return: the rendered ILL form page
        """
        argd = wash_urlargd(form, {})
        # Check if user is logged
        uid = getUid(req)
        if CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
            return page_not_authorized(req, "%s/ill/display" % \
                                       (CFG_SITE_URL,),
                                       navmenuid="ill")
        elif uid == -1 or isGuestUser(uid):
            # Guests are bounced to login with a referer back to this page.
            return redirect_to_url(req, "%s/youraccount/login%s" % (
                CFG_SITE_SECURE_URL,
                make_canonical_urlargd({
                    'referer' : "%s/ill/display%s" % (
                        CFG_SITE_URL,
                        make_canonical_urlargd(argd, {})),
                    "ln" : argd['ln']}, {})), norobot=True)
        _ = gettext_set_language(argd['ln'])
        user_info = collect_user_info(req)
        if not user_info['precached_useloans']:
            return page_not_authorized(req, "../", \
                text = _("You are not authorized to use ill."))
        body = display_ill_form(ln=argd['ln'])
        return page(title       = _("Interlibrary loan request for books"),
                    body        = body,
                    uid         = uid,
                    lastupdated = __lastupdated__,
                    req         = req,
                    language    = argd['ln'],
                    navmenuid   = "ill")
    def register_request(self, req, form):
        """
        Register a submitted ILL book request and display the
        confirmation page.
        @param ln: language
        @return: the rendered confirmation page
        """
        argd = wash_urlargd(form, {'ln': (str, ""),
                                   'title': (str, ""),
                                   'authors': (str, ""),
                                   'place': (str, ""),
                                   'publisher': (str, ""),
                                   'year': (str, ""),
                                   'edition': (str, ""),
                                   'isbn': (str, ""),
                                   'period_of_interest_from': (str, ""),
                                   'period_of_interest_to': (str, ""),
                                   'additional_comments': (str, ""),
                                   'conditions': (str, ""),
                                   'only_edition': (str, ""),
                                   })
        # Check if user is logged
        uid = getUid(req)
        if CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
            return page_not_authorized(req, "%s/ill/register_request" % \
                                       (CFG_SITE_URL,),
                                       navmenuid="ill")
        elif uid == -1 or isGuestUser(uid):
            # Guests are bounced to login with a referer back to this page.
            return redirect_to_url(req, "%s/youraccount/login%s" % (
                CFG_SITE_SECURE_URL,
                make_canonical_urlargd({
                    'referer' : "%s/ill/register_request%s" % (
                        CFG_SITE_URL,
                        make_canonical_urlargd(argd, {})),
                    "ln" : argd['ln']}, {})), norobot=True)
        _ = gettext_set_language(argd['ln'])
        user_info = collect_user_info(req)
        if not user_info['precached_useloans']:
            return page_not_authorized(req, "../", \
                text = _("You are not authorized to use ill."))
        # request_type is fixed to 'book': this endpoint only serves the
        # book ILL form.
        body = ill_register_request(uid=uid,
                                    title=argd['title'],
                                    authors=argd['authors'],
                                    place=argd['place'],
                                    publisher=argd['publisher'],
                                    year=argd['year'],
                                    edition=argd['edition'],
                                    isbn=argd['isbn'],
                                    period_of_interest_from = argd['period_of_interest_from'],
                                    period_of_interest_to = argd['period_of_interest_to'],
                                    additional_comments = argd['additional_comments'],
                                    conditions = argd['conditions'],
                                    only_edition = argd['only_edition'],
                                    request_type='book',
                                    ln=argd['ln'])
        return page(title       = _("Interlibrary loan request for books"),
                    body        = body,
                    uid         = uid,
                    lastupdated = __lastupdated__,
                    req         = req,
                    language    = argd['ln'],
                    navmenuid   = "ill")
class WebInterfaceHoldingsPages(WebInterfaceDirectory):
    """Defines the set of /holdings pages.

    Per-record pages: the 'holdings' tab of a record's detailed view,
    the hold-request form and submission, and the record-bound ILL
    request form and submission.
    """
    _exports = ['', 'display', 'request', 'send', 'ill_request_with_recid', 'ill_register_request_with_recid']
    def __init__(self, recid=-1):
        # recid of the record whose holdings are shown; -1 means unset.
        self.recid = recid
    def index(self, req, form):
        """
        Redirects to display function
        """
        return self.display(req, form)
    def display(self, req, form):
        """
        Show the tab 'holdings' of the record's detailed view.
        """
        argd = wash_urlargd(form, {'do': (str, "od"),
                                   'ds': (str, "all"),
                                   'nb': (int, 100),
                                   'p': (int, 1),
                                   'voted': (int, -1),
                                   'reported': (int, -1),
                                   })
        _ = gettext_set_language(argd['ln'])
        # record_exists returns 1 (exists), -1 (deleted) or 0 (unknown).
        record_exists_p = record_exists(self.recid)
        if record_exists_p != 1:
            if record_exists_p == -1:
                msg = _("The record has been deleted.")
            else:
                msg = _("Requested record does not seem to exist.")
            msg = '<span class="quicknote">' + msg + '</span>'
            title, description, keywords = \
                   websearch_templates.tmpl_record_page_header_content(req, self.recid, argd['ln'])
            return page(title = title,
                        show_title_p = False,
                        body = msg,
                        description = description,
                        keywords = keywords,
                        uid = getUid(req),
                        language = argd['ln'],
                        req = req,
                        navmenuid='search')
        # NOTE(review): the holdings body is computed before the access
        # checks below -- confirm this leaks nothing to unauthorized users.
        body = perform_get_holdings_information(self.recid, req, argd['ln'])
        uid = getUid(req)
        user_info = collect_user_info(req)
        (auth_code, auth_msg) = check_user_can_view_record(user_info, self.recid)
        if auth_code and user_info['email'] == 'guest':
            # Restricted record + guest: send through login with an
            # authorization cookie for the record's collection.
            cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {'collection' : guess_primary_collection_of_a_record(self.recid)})
            target = '/youraccount/login' + \
                make_canonical_urlargd({'action': cookie, 'ln' : argd['ln'], 'referer' : \
                CFG_SITE_URL + user_info['uri']}, {})
            return redirect_to_url(req, target, norobot=True)
        elif auth_code:
            return page_not_authorized(req, "../", \
                text = auth_msg)
        # Build the detailed-record tab bar with 'holdings' selected.
        unordered_tabs = get_detailed_page_tabs(get_colID(guess_primary_collection_of_a_record(self.recid)),
                                                self.recid,
                                                ln=argd['ln'])
        ordered_tabs_id = [(tab_id, values['order']) for (tab_id, values) in unordered_tabs.iteritems()]
        ordered_tabs_id.sort(lambda x, y: cmp(x[1], y[1]))
        link_ln = ''
        if argd['ln'] != CFG_SITE_LANG:
            link_ln = '?ln=%s' % argd['ln']
        tabs = [(unordered_tabs[tab_id]['label'], \
                 '%s/%s/%s/%s%s' % (CFG_SITE_URL, CFG_SITE_RECORD, self.recid, tab_id, link_ln), \
                 tab_id in ['holdings'],
                 unordered_tabs[tab_id]['enabled']) \
                for (tab_id, _order) in ordered_tabs_id
                if unordered_tabs[tab_id]['visible'] == True]
        top = webstyle_templates.detailed_record_container_top(self.recid,
                                                               tabs,
                                                               argd['ln'])
        bottom = webstyle_templates.detailed_record_container_bottom(self.recid,
                                                                     tabs,
                                                                     argd['ln'])
        title = websearch_templates.tmpl_record_page_header_content(req, self.recid, argd['ln'])[0]
        navtrail = create_navtrail_links(cc=guess_primary_collection_of_a_record(self.recid), ln=argd['ln'])
        navtrail += ' &gt; <a class="navtrail" href="%s/%s/%s?ln=%s">'% (CFG_SITE_URL, CFG_SITE_RECORD, self.recid, argd['ln'])
        navtrail += title
        navtrail += '</a>'
        return pageheaderonly(title=title,
                              navtrail=navtrail,
                              uid=uid,
                              verbose=1,
                              req=req,
                              metaheaderadd = "<link rel=\"stylesheet\" href=\"%s/img/jquery-ui.css\" type=\"text/css\" />" % CFG_SITE_URL,
                              language=argd['ln'],
                              navmenuid='search',
                              navtrail_append_title_p=0) + \
                              websearch_templates.tmpl_search_pagestart(argd['ln']) + \
                              top + body + bottom + \
                              websearch_templates.tmpl_search_pageend(argd['ln']) + \
                              pagefooteronly(lastupdated=__lastupdated__, language=argd['ln'], req=req)
    # Return the same page whether we ask for /CFG_SITE_RECORD/123 or /CFG_SITE_RECORD/123/
    __call__ = index
    def request(self, req, form):
        """
        Show the new hold request form for this record.
        """
        argd = wash_urlargd(form, {'ln': (str, ""), 'barcode': (str, "")})
        _ = gettext_set_language(argd['ln'])
        uid = getUid(req)
        # NOTE(review): body is built before the login/authorization
        # checks below -- confirm nothing sensitive is computed here.
        body = perform_new_request(recid=self.recid,
                                   barcode=argd['barcode'],
                                   ln=argd['ln'])
        uid = getUid(req)
        if uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
            return page_not_authorized(req, "../holdings/request",
                                       navmenuid = 'yourbaskets')
        if isGuestUser(uid):
            if not CFG_WEBSESSION_DIFFERENTIATE_BETWEEN_GUESTS:
                return redirect_to_url(req, "%s/youraccount/login%s" % (
                    CFG_SITE_SECURE_URL,
                    make_canonical_urlargd({
                        'referer' : "%s/%s/%s/holdings/request%s" % (
                            CFG_SITE_URL,
                            CFG_SITE_RECORD,
                            self.recid,
                            make_canonical_urlargd(argd, {})),
                        "ln" : argd['ln']}, {})), norobot=True)
        user_info = collect_user_info(req)
        (auth_code, auth_msg) = check_user_can_view_record(user_info, self.recid)
        if auth_code and user_info['email'] == 'guest':
            # Restricted record + guest: route via login with an
            # authorization cookie for the record's collection.
            cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {'collection' : guess_primary_collection_of_a_record(self.recid)})
            target = '/youraccount/login' + \
                make_canonical_urlargd({'action': cookie, 'ln' : argd['ln'], 'referer' : \
                CFG_SITE_URL + user_info['uri']}, {})
            return redirect_to_url(req, target, norobot=True)
        elif auth_code:
            return page_not_authorized(req, "../", \
                text = auth_msg)
        # Build the detailed-record tab bar with 'holdings' selected.
        unordered_tabs = get_detailed_page_tabs(get_colID(guess_primary_collection_of_a_record(self.recid)),
                                                self.recid,
                                                ln=argd['ln'])
        ordered_tabs_id = [(tab_id, values['order']) for (tab_id, values) in unordered_tabs.iteritems()]
        ordered_tabs_id.sort(lambda x, y: cmp(x[1], y[1]))
        link_ln = ''
        if argd['ln'] != CFG_SITE_LANG:
            link_ln = '?ln=%s' % argd['ln']
        tabs = [(unordered_tabs[tab_id]['label'], \
                 '%s/%s/%s/%s%s' % (CFG_SITE_URL, CFG_SITE_RECORD, self.recid, tab_id, link_ln), \
                 tab_id in ['holdings'],
                 unordered_tabs[tab_id]['enabled']) \
                for (tab_id, _order) in ordered_tabs_id
                if unordered_tabs[tab_id]['visible'] == True]
        top = webstyle_templates.detailed_record_container_top(self.recid,
                                                               tabs,
                                                               argd['ln'])
        bottom = webstyle_templates.detailed_record_container_bottom(self.recid,
                                                                     tabs,
                                                                     argd['ln'])
        title = websearch_templates.tmpl_record_page_header_content(req, self.recid, argd['ln'])[0]
        navtrail = create_navtrail_links(cc=guess_primary_collection_of_a_record(self.recid), ln=argd['ln'])
        navtrail += ' &gt; <a class="navtrail" href="%s/%s/%s?ln=%s">'% (CFG_SITE_URL, CFG_SITE_RECORD, self.recid, argd['ln'])
        navtrail += title
        navtrail += '</a>'
        return pageheaderonly(title=title,
                              navtrail=navtrail,
                              uid=uid,
                              verbose=1,
                              req=req,
                              metaheaderadd = "<link rel=\"stylesheet\" href=\"%s/img/jquery-ui.css\" type=\"text/css\" />" % CFG_SITE_URL,
                              language=argd['ln'],
                              navmenuid='search',
                              navtrail_append_title_p=0) + \
                              websearch_templates.tmpl_search_pagestart(argd['ln']) + \
                              top + body + bottom + \
                              websearch_templates.tmpl_search_pageend(argd['ln']) + \
                              pagefooteronly(lastupdated=__lastupdated__, language=argd['ln'], req=req)
    def send(self, req, form):
        """
        Create a new hold request from the submitted form data.
        """
        argd = wash_urlargd(form, {'period_from': (str, ""),
                                   'period_to': (str, ""),
                                   'barcode': (str, "")
                                   })
        uid = getUid(req)
        body = perform_new_request_send(recid=self.recid,
                                        uid=uid,
                                        period_from=argd['period_from'],
                                        period_to=argd['period_to'],
                                        barcode=argd['barcode'])
        # NOTE(review): unlike the other handlers, this one renders in the
        # site default language; link_ln below is derived from this 'ln'
        # even though argd['ln'] is compared -- confirm this is intended.
        ln = CFG_SITE_LANG
        _ = gettext_set_language(ln)
        user_info = collect_user_info(req)
        (auth_code, auth_msg) = check_user_can_view_record(user_info, self.recid)
        if auth_code and user_info['email'] == 'guest':
            cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {'collection' : guess_primary_collection_of_a_record(self.recid)})
            target = '/youraccount/login' + \
                make_canonical_urlargd({'action': cookie, 'ln' : argd['ln'], 'referer' : \
                CFG_SITE_URL + user_info['uri']}, {})
            return redirect_to_url(req, target)
        elif auth_code:
            return page_not_authorized(req, "../", \
                text = auth_msg)
        # Build the detailed-record tab bar with 'holdings' selected.
        unordered_tabs = get_detailed_page_tabs(get_colID(guess_primary_collection_of_a_record(self.recid)),
                                                self.recid,
                                                ln=ln)
        ordered_tabs_id = [(tab_id, values['order']) for (tab_id, values) in unordered_tabs.iteritems()]
        ordered_tabs_id.sort(lambda x, y: cmp(x[1], y[1]))
        link_ln = ''
        if argd['ln'] != CFG_SITE_LANG:
            link_ln = '?ln=%s' % ln
        tabs = [(unordered_tabs[tab_id]['label'], \
                 '%s/%s/%s/%s%s' % (CFG_SITE_URL, CFG_SITE_RECORD, self.recid, tab_id, link_ln), \
                 tab_id in ['holdings'],
                 unordered_tabs[tab_id]['enabled']) \
                for (tab_id, _order) in ordered_tabs_id
                if unordered_tabs[tab_id]['visible'] == True]
        top = webstyle_templates.detailed_record_container_top(self.recid,
                                                               tabs,
                                                               argd['ln'])
        bottom = webstyle_templates.detailed_record_container_bottom(self.recid,
                                                                     tabs,
                                                                     argd['ln'])
        title = websearch_templates.tmpl_record_page_header_content(req, self.recid, argd['ln'])[0]
        navtrail = create_navtrail_links(cc=guess_primary_collection_of_a_record(self.recid), ln=argd['ln'])
        navtrail += ' &gt; <a class="navtrail" href="%s/%s/%s?ln=%s">'% (CFG_SITE_URL, CFG_SITE_RECORD, self.recid, argd['ln'])
        navtrail += title
        navtrail += '</a>'
        return pageheaderonly(title=title,
                              navtrail=navtrail,
                              uid=uid,
                              verbose=1,
                              req=req,
                              language=argd['ln'],
                              navmenuid='search',
                              navtrail_append_title_p=0) + \
                              websearch_templates.tmpl_search_pagestart(argd['ln']) + \
                              top + body + bottom + \
                              websearch_templates.tmpl_search_pageend(argd['ln']) + \
                              pagefooteronly(lastupdated=__lastupdated__,
                                             language=argd['ln'], req=req)
    def ill_request_with_recid(self, req, form):
        """
        Show the ILL request form pre-bound to this record.
        """
        argd = wash_urlargd(form, {'ln': (str, "")})
        _ = gettext_set_language(argd['ln'])
        uid = getUid(req)
        body = ill_request_with_recid(recid=self.recid,
                                      ln=argd['ln'])
        if uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
            return page_not_authorized(req, "../holdings/ill_request_with_recid",
                                       navmenuid = 'yourbaskets')
        if isGuestUser(uid):
            if not CFG_WEBSESSION_DIFFERENTIATE_BETWEEN_GUESTS:
                return redirect_to_url(req, "%s/youraccount/login%s" % (
                    CFG_SITE_SECURE_URL,
                    make_canonical_urlargd({
                        'referer' : "%s/%s/%s/holdings/ill_request_with_recid%s" % (
                            CFG_SITE_URL,
                            CFG_SITE_RECORD,
                            self.recid,
                            make_canonical_urlargd(argd, {})),
                        "ln" : argd['ln']}, {})))
        user_info = collect_user_info(req)
        (auth_code, auth_msg) = check_user_can_view_record(user_info, self.recid)
        if auth_code and user_info['email'] == 'guest':
            cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {'collection' : guess_primary_collection_of_a_record(self.recid)})
            target = '/youraccount/login' + \
                make_canonical_urlargd({'action': cookie, 'ln' : argd['ln'], 'referer' : \
                CFG_SITE_URL + user_info['uri']}, {})
            return redirect_to_url(req, target)
        elif auth_code:
            return page_not_authorized(req, "../", \
                text = auth_msg)
        # Build the detailed-record tab bar with 'holdings' selected.
        unordered_tabs = get_detailed_page_tabs(get_colID(guess_primary_collection_of_a_record(self.recid)),
                                                self.recid,
                                                ln=argd['ln'])
        ordered_tabs_id = [(tab_id, values['order']) for (tab_id, values) in unordered_tabs.iteritems()]
        ordered_tabs_id.sort(lambda x, y: cmp(x[1], y[1]))
        link_ln = ''
        if argd['ln'] != CFG_SITE_LANG:
            link_ln = '?ln=%s' % argd['ln']
        tabs = [(unordered_tabs[tab_id]['label'], \
                 '%s/%s/%s/%s%s' % (CFG_SITE_URL, CFG_SITE_RECORD, self.recid, tab_id, link_ln), \
                 tab_id in ['holdings'],
                 unordered_tabs[tab_id]['enabled']) \
                for (tab_id, _order) in ordered_tabs_id
                if unordered_tabs[tab_id]['visible'] == True]
        top = webstyle_templates.detailed_record_container_top(self.recid,
                                                               tabs,
                                                               argd['ln'])
        bottom = webstyle_templates.detailed_record_container_bottom(self.recid,
                                                                     tabs,
                                                                     argd['ln'])
        title = websearch_templates.tmpl_record_page_header_content(req, self.recid, argd['ln'])[0]
        navtrail = create_navtrail_links(cc=guess_primary_collection_of_a_record(self.recid), ln=argd['ln'])
        navtrail += ' &gt; <a class="navtrail" href="%s/%s/%s?ln=%s">'% (CFG_SITE_URL, CFG_SITE_RECORD, self.recid, argd['ln'])
        navtrail += title
        navtrail += '</a>'
        return pageheaderonly(title=title,
                              navtrail=navtrail,
                              uid=uid,
                              verbose=1,
                              req=req,
                              metaheaderadd = "<link rel=\"stylesheet\" href=\"%s/img/jquery-ui.css\" type=\"text/css\" />" % CFG_SITE_URL,
                              language=argd['ln'],
                              navmenuid='search',
                              navtrail_append_title_p=0) + \
                              websearch_templates.tmpl_search_pagestart(argd['ln']) + \
                              top + body + bottom + \
                              websearch_templates.tmpl_search_pageend(argd['ln']) + \
                              pagefooteronly(lastupdated=__lastupdated__, language=argd['ln'], req=req)
    def ill_register_request_with_recid(self, req, form):
        """
        Register an ILL request bound to this record and display the
        confirmation page.
        """
        argd = wash_urlargd(form, {'ln': (str, ""),
                                   'period_of_interest_from': (str, ""),
                                   'period_of_interest_to': (str, ""),
                                   'additional_comments': (str, ""),
                                   'conditions': (str, ""),
                                   'only_edition': (str, ""),
                                   })
        _ = gettext_set_language(argd['ln'])
        uid = getUid(req)
        body = ill_register_request_with_recid(recid=self.recid,
                                               uid=uid,
                                               period_of_interest_from = argd['period_of_interest_from'],
                                               period_of_interest_to = argd['period_of_interest_to'],
                                               additional_comments = argd['additional_comments'],
                                               conditions = argd['conditions'],
                                               only_edition = argd['only_edition'],
                                               ln=argd['ln'])
        uid = getUid(req)
        if uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
            return page_not_authorized(req, "../holdings/ill_request_with_recid",
                                       navmenuid = 'yourbaskets')
        if isGuestUser(uid):
            if not CFG_WEBSESSION_DIFFERENTIATE_BETWEEN_GUESTS:
                return redirect_to_url(req, "%s/youraccount/login%s" % (
                    CFG_SITE_SECURE_URL,
                    make_canonical_urlargd({
                        'referer' : "%s/%s/%s/holdings/ill_request_with_recid%s" % (
                            CFG_SITE_URL,
                            CFG_SITE_RECORD,
                            self.recid,
                            make_canonical_urlargd(argd, {})),
                        "ln" : argd['ln']}, {})))
        user_info = collect_user_info(req)
        (auth_code, auth_msg) = check_user_can_view_record(user_info, self.recid)
        if auth_code and user_info['email'] == 'guest':
            cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {'collection' : guess_primary_collection_of_a_record(self.recid)})
            target = '/youraccount/login' + \
                make_canonical_urlargd({'action': cookie, 'ln' : argd['ln'], 'referer' : \
                CFG_SITE_URL + user_info['uri']}, {})
            return redirect_to_url(req, target)
        elif auth_code:
            return page_not_authorized(req, "../", \
                text = auth_msg)
        # Build the detailed-record tab bar with 'holdings' selected.
        unordered_tabs = get_detailed_page_tabs(get_colID(guess_primary_collection_of_a_record(self.recid)),
                                                self.recid,
                                                ln=argd['ln'])
        ordered_tabs_id = [(tab_id, values['order']) for (tab_id, values) in unordered_tabs.iteritems()]
        ordered_tabs_id.sort(lambda x, y: cmp(x[1], y[1]))
        link_ln = ''
        if argd['ln'] != CFG_SITE_LANG:
            link_ln = '?ln=%s' % argd['ln']
        tabs = [(unordered_tabs[tab_id]['label'], \
                 '%s/%s/%s/%s%s' % (CFG_SITE_URL, CFG_SITE_RECORD, self.recid, tab_id, link_ln), \
                 tab_id in ['holdings'],
                 unordered_tabs[tab_id]['enabled']) \
                for (tab_id, _order) in ordered_tabs_id
                if unordered_tabs[tab_id]['visible'] == True]
        top = webstyle_templates.detailed_record_container_top(self.recid,
                                                               tabs,
                                                               argd['ln'])
        bottom = webstyle_templates.detailed_record_container_bottom(self.recid,
                                                                     tabs,
                                                                     argd['ln'])
        title = websearch_templates.tmpl_record_page_header_content(req, self.recid, argd['ln'])[0]
        navtrail = create_navtrail_links(cc=guess_primary_collection_of_a_record(self.recid), ln=argd['ln'])
        navtrail += ' &gt; <a class="navtrail" href="%s/%s/%s?ln=%s">'% (CFG_SITE_URL, CFG_SITE_RECORD, self.recid, argd['ln'])
        navtrail += title
        navtrail += '</a>'
        return pageheaderonly(title=title,
                              navtrail=navtrail,
                              uid=uid,
                              verbose=1,
                              req=req,
                              language=argd['ln'],
                              navmenuid='search',
                              navtrail_append_title_p=0) + \
                              websearch_templates.tmpl_search_pagestart(argd['ln']) + \
                              top + body + bottom + \
                              websearch_templates.tmpl_search_pageend(argd['ln']) + \
                              pagefooteronly(lastupdated=__lastupdated__, language=argd['ln'], req=req)
| NikolaYolov/invenio_backup | modules/bibcirculation/lib/bibcirculation_webinterface.py | Python | gpl-2.0 | 35,129 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1", manifest={"Tensorboard",},
)
class Tensorboard(proto.Message):
    r"""Tensorboard is a physical database that stores users'
    training metrics. A default Tensorboard is provided in each
    region of a GCP project. If needed users can also create extra
    Tensorboards in their projects.
    Attributes:
        name (str):
            Output only. Name of the Tensorboard. Format:
            ``projects/{project}/locations/{location}/tensorboards/{tensorboard}``
        display_name (str):
            Required. User provided name of this
            Tensorboard.
        description (str):
            Description of this Tensorboard.
        encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec):
            Customer-managed encryption key spec for a
            Tensorboard. If set, this Tensorboard and all
            sub-resources of this Tensorboard will be
            secured by this key.
        blob_storage_path_prefix (str):
            Output only. Consumer project Cloud Storage
            path prefix used to store blob data, which can
            either be a bucket or directory. Does not end
            with a '/'.
        run_count (int):
            Output only. The number of Runs stored in
            this Tensorboard.
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Timestamp when this Tensorboard
            was created.
        update_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Timestamp when this Tensorboard
            was last updated.
        labels (Sequence[google.cloud.aiplatform_v1.types.Tensorboard.LabelsEntry]):
            The labels with user-defined metadata to
            organize your Tensorboards.
            Label keys and values can be no longer than 64
            characters (Unicode codepoints), can only
            contain lowercase letters, numeric characters,
            underscores and dashes. International characters
            are allowed. No more than 64 user labels can be
            associated with one Tensorboard (System labels
            are excluded).
            See https://goo.gl/xmQnxf for more information
            and examples of labels. System reserved label
            keys are prefixed with
            "aiplatform.googleapis.com/" and are immutable.
        etag (str):
            Used to perform a consistent
            read-modify-write updates. If not set, a blind
            "overwrite" update happens.
    """
    # Field numbers intentionally appear out of sequence (11, 10, 5, ...):
    # they must match the upstream Tensorboard proto; never renumber them.
    name = proto.Field(proto.STRING, number=1,)
    display_name = proto.Field(proto.STRING, number=2,)
    description = proto.Field(proto.STRING, number=3,)
    encryption_spec = proto.Field(
        proto.MESSAGE, number=11, message=gca_encryption_spec.EncryptionSpec,
    )
    blob_storage_path_prefix = proto.Field(proto.STRING, number=10,)
    run_count = proto.Field(proto.INT32, number=5,)
    create_time = proto.Field(proto.MESSAGE, number=6, message=timestamp_pb2.Timestamp,)
    update_time = proto.Field(proto.MESSAGE, number=7, message=timestamp_pb2.Timestamp,)
    labels = proto.MapField(proto.STRING, proto.STRING, number=8,)
    etag = proto.Field(proto.STRING, number=9,)
__all__ = tuple(sorted(__protobuf__.manifest))
| googleapis/python-aiplatform | google/cloud/aiplatform_v1/types/tensorboard.py | Python | apache-2.0 | 4,087 |
# -*- coding: utf-8 -*-
"""
Lexer for the Groovy language.
http://groovy.codehaus.org
:copyright: 2009 Matthew Taylor
:license: BSD, see LICENSE for more details.
"""
from pygments.scanner import Scanner
from pygments.lexer import RegexLexer, include, bygroups, using, \
this
from pygments.util import get_bool_opt, get_list_opt
from pygments.token import \
Text, Comment, Operator, Keyword, Name, String, Number, Punctuation, \
Error
__all__ = ['GroovyLexer'] | rhyolight/pygments-groovy | lexer/__init__.py | Python | bsd-3-clause | 516 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.dataplex.v1",
manifest={
"StorageSystem",
"CreateEntityRequest",
"UpdateEntityRequest",
"DeleteEntityRequest",
"ListEntitiesRequest",
"ListEntitiesResponse",
"GetEntityRequest",
"ListPartitionsRequest",
"CreatePartitionRequest",
"DeletePartitionRequest",
"ListPartitionsResponse",
"GetPartitionRequest",
"Entity",
"Partition",
"Schema",
"StorageFormat",
},
)
class StorageSystem(proto.Enum):
    r"""Identifies the cloud system that manages the data storage."""
    # Numeric values are part of the wire format; do not reorder or renumber.
    STORAGE_SYSTEM_UNSPECIFIED = 0
    CLOUD_STORAGE = 1
    BIGQUERY = 2
class CreateEntityRequest(proto.Message):
    r"""Create a metadata entity request.
    Attributes:
        parent (str):
            Required. The resource name of the parent zone:
            ``projects/{project_number}/locations/{location_id}/lakes/{lake_id}/zones/{zone_id}``.
        entity (google.cloud.dataplex_v1.types.Entity):
            Required. Entity resource.
        validate_only (bool):
            Optional. Only validate the request, but do
            not perform mutations. The default is false.
    """
    # The gap at number=2 mirrors the upstream proto; keep numbers as declared.
    parent = proto.Field(proto.STRING, number=1,)
    entity = proto.Field(proto.MESSAGE, number=3, message="Entity",)
    validate_only = proto.Field(proto.BOOL, number=4,)
class UpdateEntityRequest(proto.Message):
    r"""Update a metadata entity request.
    The existing entity will be fully replaced by the entity in the
    request. The entity ID is mutable. To modify the ID, use the
    current entity ID in the request URL and specify the new ID in
    the request body.
    Attributes:
        entity (google.cloud.dataplex_v1.types.Entity):
            Required. Update description.
        validate_only (bool):
            Optional. Only validate the request, but do
            not perform mutations. The default is false.
    """
    # Field numbers start at 2 to match the upstream proto; do not renumber.
    entity = proto.Field(proto.MESSAGE, number=2, message="Entity",)
    validate_only = proto.Field(proto.BOOL, number=3,)
class DeleteEntityRequest(proto.Message):
    r"""Delete a metadata entity request.
    Attributes:
        name (str):
            Required. The resource name of the entity:
            ``projects/{project_number}/locations/{location_id}/lakes/{lake_id}/zones/{zone_id}/entities/{entity_id}``.
        etag (str):
            Required. The etag associated with the
            entity if it was previously retrieved.
    """
    # Both fields are required by the service for an optimistic-concurrency delete.
    name = proto.Field(proto.STRING, number=1,)
    etag = proto.Field(proto.STRING, number=2,)
class ListEntitiesRequest(proto.Message):
    r"""List metadata entities request.
    Attributes:
        parent (str):
            Required. The resource name of the parent zone:
            ``projects/{project_number}/locations/{location_id}/lakes/{lake_id}/zones/{zone_id}``.
        view (google.cloud.dataplex_v1.types.ListEntitiesRequest.EntityView):
            Required. Specify the entity view to make a
            partial list request.
        page_size (int):
            Optional. Maximum number of entities to
            return. The service may return fewer than this
            value. If unspecified, 100 entities will be
            returned by default. The maximum value is 500;
            larger values will be truncated to 500.
        page_token (str):
            Optional. Page token received from a previous
            ``ListEntities`` call. Provide this to retrieve the
            subsequent page. When paginating, all other parameters
            provided to ``ListEntities`` must match the call that
            provided the page token.
        filter (str):
            Optional. The following filter parameters can be added to
            the URL to limit the entities returned by the API:
            -  Entity ID: ?filter="id=entityID"
            -  Asset ID: ?filter="asset=assetID"
            -  Data path ?filter="data_path=gs://my-bucket"
            -  Is HIVE compatible: ?filter="hive_compatible=true"
            -  Is BigQuery compatible:
               ?filter="bigquery_compatible=true"
    """
    class EntityView(proto.Enum):
        r"""Entity views."""
        ENTITY_VIEW_UNSPECIFIED = 0
        TABLES = 1
        FILESETS = 2
    # Field numbers must match the upstream proto; do not renumber.
    parent = proto.Field(proto.STRING, number=1,)
    view = proto.Field(proto.ENUM, number=2, enum=EntityView,)
    page_size = proto.Field(proto.INT32, number=3,)
    page_token = proto.Field(proto.STRING, number=4,)
    filter = proto.Field(proto.STRING, number=5,)
class ListEntitiesResponse(proto.Message):
    r"""List metadata entities response.
    Attributes:
        entities (Sequence[google.cloud.dataplex_v1.types.Entity]):
            Entities in the specified parent zone.
        next_page_token (str):
            Token to retrieve the next page of results,
            or empty if there are no remaining results in
            the list.
    """
    @property
    def raw_page(self):
        # Hook used by the generated pager machinery; each response is its own page.
        return self
    entities = proto.RepeatedField(proto.MESSAGE, number=1, message="Entity",)
    next_page_token = proto.Field(proto.STRING, number=2,)
class GetEntityRequest(proto.Message):
    r"""Get metadata entity request.
    Attributes:
        name (str):
            Required. The resource name of the entity:
            ``projects/{project_number}/locations/{location_id}/lakes/{lake_id}/zones/{zone_id}/entities/{entity_id}.``
        view (google.cloud.dataplex_v1.types.GetEntityRequest.EntityView):
            Optional. Used to select the subset of entity information to
            return. Defaults to ``BASIC``.
    """
    class EntityView(proto.Enum):
        r"""Entity views for get entity partial result."""
        ENTITY_VIEW_UNSPECIFIED = 0
        BASIC = 1
        SCHEMA = 2
        # FULL is 4 (not 3) in the upstream proto; keep the gap.
        FULL = 4
    name = proto.Field(proto.STRING, number=1,)
    view = proto.Field(proto.ENUM, number=2, enum=EntityView,)
class ListPartitionsRequest(proto.Message):
    r"""List metadata partitions request.
    Attributes:
        parent (str):
            Required. The resource name of the parent entity:
            ``projects/{project_number}/locations/{location_id}/lakes/{lake_id}/zones/{zone_id}/entities/{entity_id}``.
        page_size (int):
            Optional. Maximum number of partitions to
            return. The service may return fewer than this
            value. If unspecified, 100 partitions will be
            returned by default. The maximum page size is
            500; larger values will be truncated to 500.
        page_token (str):
            Optional. Page token received from a previous
            ``ListPartitions`` call. Provide this to retrieve the
            subsequent page. When paginating, all other parameters
            provided to ``ListPartitions`` must match the call that
            provided the page token.
        filter (str):
            Optional. Filter the partitions returned to the caller using
            a key value pair expression. The filter expression supports:
            -  logical operators: AND, OR
            -  comparison operators: <, >, >=, <= ,=, !=
            -  LIKE operators:
               -  The right hand of a LIKE operator supports "." and "*"
                  for wildcard searches, for example "value1 LIKE
                  ".*oo.*"
            -  parenthetical grouping: ( )
            Sample filter expression: \`?filter="key1 < value1 OR key2 >
            value2"
            **Notes:**
            -  Keys to the left of operators are case insensitive.
            -  Partition results are sorted first by creation time, then
               by lexicographic order.
            -  Up to 20 key value filter pairs are allowed, but due to
               performance considerations, only the first 10 will be
               used as a filter.
    """
    # Field numbers must match the upstream proto; do not renumber.
    parent = proto.Field(proto.STRING, number=1,)
    page_size = proto.Field(proto.INT32, number=2,)
    page_token = proto.Field(proto.STRING, number=3,)
    filter = proto.Field(proto.STRING, number=4,)
class CreatePartitionRequest(proto.Message):
    r"""Create metadata partition request.
    Attributes:
        parent (str):
            Required. The resource name of the parent entity:
            ``projects/{project_number}/locations/{location_id}/lakes/{lake_id}/zones/{zone_id}/entities/{entity_id}``.
        partition (google.cloud.dataplex_v1.types.Partition):
            Required. Partition resource.
        validate_only (bool):
            Optional. Only validate the request, but do
            not perform mutations. The default is false.
    """
    # The gap at number=2 mirrors the upstream proto; keep numbers as declared.
    parent = proto.Field(proto.STRING, number=1,)
    partition = proto.Field(proto.MESSAGE, number=3, message="Partition",)
    validate_only = proto.Field(proto.BOOL, number=4,)
class DeletePartitionRequest(proto.Message):
    r"""Delete metadata partition request.
    Attributes:
        name (str):
            Required. The resource name of the partition. format:
            ``projects/{project_number}/locations/{location_id}/lakes/{lake_id}/zones/{zone_id}/entities/{entity_id}/partitions/{partition_value_path}``.
            The {partition_value_path} segment consists of an ordered
            sequence of partition values separated by "/". All values
            must be provided.
        etag (str):
            Optional. The etag associated with the
            partition if it was previously retrieved.
    """
    name = proto.Field(proto.STRING, number=1,)
    etag = proto.Field(proto.STRING, number=2,)
class ListPartitionsResponse(proto.Message):
    r"""List metadata partitions response.
    Attributes:
        partitions (Sequence[google.cloud.dataplex_v1.types.Partition]):
            Partitions under the specified parent entity.
        next_page_token (str):
            Token to retrieve the next page of results,
            or empty if there are no remaining results in
            the list.
    """
    @property
    def raw_page(self):
        # Hook used by the generated pager machinery; each response is its own page.
        return self
    partitions = proto.RepeatedField(proto.MESSAGE, number=1, message="Partition",)
    next_page_token = proto.Field(proto.STRING, number=2,)
class GetPartitionRequest(proto.Message):
    r"""Get metadata partition request.
    Attributes:
        name (str):
            Required. The resource name of the partition:
            ``projects/{project_number}/locations/{location_id}/lakes/{lake_id}/zones/{zone_id}/entities/{entity_id}/partitions/{partition_value_path}``.
            The {partition_value_path} segment consists of an ordered
            sequence of partition values separated by "/". All values
            must be provided.
    """
    name = proto.Field(proto.STRING, number=1,)
class Entity(proto.Message):
    r"""Represents tables and fileset metadata contained within a
    zone.
    Attributes:
        name (str):
            Output only. The resource name of the entity, of the form:
            ``projects/{project_number}/locations/{location_id}/lakes/{lake_id}/zones/{zone_id}/entities/{id}``.
        display_name (str):
            Optional. Display name must be shorter than
            or equal to 63 characters.
        description (str):
            Optional. User friendly longer description
            text. Must be shorter than or equal to 1024
            characters.
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The time when the entity was
            created.
        update_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The time when the entity was
            last updated.
        id (str):
            Required. A user-provided entity ID. It is
            mutable, and will be used as the published table
            name. Specifying a new ID in an update entity
            request will override the existing value.
            The ID must contain only letters (a-z, A-Z),
            numbers (0-9), and underscores. Must begin with
            a letter.
        etag (str):
            Optional. The etag for this entity. Required
            for update and delete requests. Must match the
            server's etag.
        type_ (google.cloud.dataplex_v1.types.Entity.Type):
            Required. Immutable. The type of entity.
        asset (str):
            Required. Immutable. The ID of the asset
            associated with the storage location containing
            the entity data. The entity must be within the
            same zone as the asset.
        data_path (str):
            Required. Immutable. The storage path of the entity data.
            For Cloud Storage data, this is the fully-qualified path to
            the entity, such as ``gs://bucket/path/to/data``. For
            BigQuery data, this is the name of the table resource, such
            as
            ``projects/project_id/datasets/dataset_id/tables/table_id``.
        data_path_pattern (str):
            Optional. The set of items within the data path constituting
            the data in the entity, represented as a glob path. Example:
            ``gs://bucket/path/to/data/**/*.csv``.
        catalog_entry (str):
            Output only. The name of the associated Data
            Catalog entry.
        system (google.cloud.dataplex_v1.types.StorageSystem):
            Required. Immutable. Identifies the storage
            system of the entity data.
        format_ (google.cloud.dataplex_v1.types.StorageFormat):
            Required. Identifies the storage format of
            the entity data. It does not apply to entities
            with data stored in BigQuery.
        compatibility (google.cloud.dataplex_v1.types.Entity.CompatibilityStatus):
            Output only. Metadata stores that the entity
            is compatible with.
        schema (google.cloud.dataplex_v1.types.Schema):
            Required. The description of the data structure and layout.
            The schema is not included in list responses. It is only
            included in ``SCHEMA`` and ``FULL`` entity views of a
            ``GetEntity`` response.
    """
    class Type(proto.Enum):
        r"""The type of entity."""
        TYPE_UNSPECIFIED = 0
        TABLE = 1
        FILESET = 2
    class CompatibilityStatus(proto.Message):
        r"""Provides compatibility information for various metadata
        stores.
        Attributes:
            hive_metastore (google.cloud.dataplex_v1.types.Entity.CompatibilityStatus.Compatibility):
                Output only. Whether this entity is
                compatible with Hive Metastore.
            bigquery (google.cloud.dataplex_v1.types.Entity.CompatibilityStatus.Compatibility):
                Output only. Whether this entity is
                compatible with BigQuery.
        """
        class Compatibility(proto.Message):
            r"""Provides compatibility information for a specific metadata
            store.
            Attributes:
                compatible (bool):
                    Output only. Whether the entity is compatible
                    and can be represented in the metadata store.
                reason (str):
                    Output only. Provides additional detail if
                    the entity is incompatible with the metadata
                    store.
            """
            compatible = proto.Field(proto.BOOL, number=1,)
            reason = proto.Field(proto.STRING, number=2,)
        hive_metastore = proto.Field(
            proto.MESSAGE, number=1, message="Entity.CompatibilityStatus.Compatibility",
        )
        bigquery = proto.Field(
            proto.MESSAGE, number=2, message="Entity.CompatibilityStatus.Compatibility",
        )
    # Field numbers are sparse (gaps at 4, 9, 17-18, 20-49) to match the
    # upstream proto; never renumber or reorder them.
    name = proto.Field(proto.STRING, number=1,)
    display_name = proto.Field(proto.STRING, number=2,)
    description = proto.Field(proto.STRING, number=3,)
    create_time = proto.Field(proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp,)
    update_time = proto.Field(proto.MESSAGE, number=6, message=timestamp_pb2.Timestamp,)
    id = proto.Field(proto.STRING, number=7,)
    etag = proto.Field(proto.STRING, number=8,)
    type_ = proto.Field(proto.ENUM, number=10, enum=Type,)
    asset = proto.Field(proto.STRING, number=11,)
    data_path = proto.Field(proto.STRING, number=12,)
    data_path_pattern = proto.Field(proto.STRING, number=13,)
    catalog_entry = proto.Field(proto.STRING, number=14,)
    system = proto.Field(proto.ENUM, number=15, enum="StorageSystem",)
    format_ = proto.Field(proto.MESSAGE, number=16, message="StorageFormat",)
    compatibility = proto.Field(proto.MESSAGE, number=19, message=CompatibilityStatus,)
    schema = proto.Field(proto.MESSAGE, number=50, message="Schema",)
class Partition(proto.Message):
    r"""Represents partition metadata contained within entity
    instances.
    Attributes:
        name (str):
            Output only. The values must be HTML URL
            encoded two times before constructing the path.
            For example, if you have a value of "US:CA",
            encoded it two times and you get "US%253ACA".
            Then if you have the 2nd value is
            "CA#Sunnyvale", encoded two times and you get
            "CA%2523Sunnyvale". The partition values path is
            "US%253ACA/CA%2523Sunnyvale". The final URL will
            be
            "https://.../partitions/US%253ACA/CA%2523Sunnyvale".
            The name field in the responses will always have
            the encoded format.
        values (Sequence[str]):
            Required. Immutable. The set of values
            representing the partition, which correspond to
            the partition schema defined in the parent
            entity.
        location (str):
            Required. Immutable. The location of the entity data within
            the partition, for example,
            ``gs://bucket/path/to/entity/key1=value1/key2=value2``. Or
            ``projects/<project_id>/datasets/<dataset_id>/tables/<table_id>``
        etag (str):
            Optional. The etag for this partition.
    """
    name = proto.Field(proto.STRING, number=1,)
    values = proto.RepeatedField(proto.STRING, number=2,)
    location = proto.Field(proto.STRING, number=3,)
    etag = proto.Field(proto.STRING, number=4,)
class Schema(proto.Message):
    r"""Schema information describing the structure and layout of the
    data.
    Attributes:
        user_managed (bool):
            Required. Whether the schema is user-managed or managed by
            the service.
            -  Set user_managed to false if you would like Dataplex to
               help you manage the schema. You will get the full service
               provided by Dataplex discovery, including new data
               discovery, schema inference and schema evolution. You can
               still provide input the schema of the entities, for
               example renaming a schema field, changing CSV or Json
               options if you think the discovered values are not as
               accurate. Dataplex will consider your input as the
               initial schema (as if they were produced by the previous
               discovery run), and will evolve schema or flag actions
               based on that.
            -  Set user_managed to true if you would like to fully manage
               the entity schema by yourself. This is useful when you
               would like to manually specify the schema for a table. In
               this case, the schema defined by the user is guaranteed
               to be kept unchanged and would not be overwritten. But
               this also means Dataplex will not provide schema
               evolution management for you. Dataplex will still be able
               to manage partition registration (i.e., keeping the list
               of partitions up to date) when Dataplex discovery is
               turned on and user_managed is set to true.
        fields (Sequence[google.cloud.dataplex_v1.types.Schema.SchemaField]):
            Optional. The sequence of fields describing
            data in table entities.
        partition_fields (Sequence[google.cloud.dataplex_v1.types.Schema.PartitionField]):
            Optional. The sequence of fields describing
            the partition structure in entities. If this
            field is empty, there are no partitions within
            the data.
        partition_style (google.cloud.dataplex_v1.types.Schema.PartitionStyle):
            Optional. The structure of paths containing
            partition data within the entity.
    """
    class Type(proto.Enum):
        r"""Type information for fields in schemas and partition schemas."""
        TYPE_UNSPECIFIED = 0
        BOOLEAN = 1
        BYTE = 2
        INT16 = 3
        INT32 = 4
        INT64 = 5
        FLOAT = 6
        DOUBLE = 7
        DECIMAL = 8
        STRING = 9
        BINARY = 10
        TIMESTAMP = 11
        DATE = 12
        TIME = 13
        RECORD = 14
        # NULL is 100 in the upstream proto; keep the gap.
        NULL = 100
    class Mode(proto.Enum):
        r"""Additional qualifiers to define field semantics."""
        MODE_UNSPECIFIED = 0
        REQUIRED = 1
        NULLABLE = 2
        REPEATED = 3
    class PartitionStyle(proto.Enum):
        r"""The structure of paths within the entity, which represent
        partitions.
        """
        PARTITION_STYLE_UNSPECIFIED = 0
        HIVE_COMPATIBLE = 1
    class SchemaField(proto.Message):
        r"""Represents a column field within a table schema.
        Attributes:
            name (str):
                Required. The name of the field. The maximum length is 767
                characters. The name must begins with a letter and not
                contains ``:`` and ``.``.
            description (str):
                Optional. User friendly field description.
                Must be less than or equal to 1024 characters.
            type_ (google.cloud.dataplex_v1.types.Schema.Type):
                Required. The type of field.
            mode (google.cloud.dataplex_v1.types.Schema.Mode):
                Required. Additional field semantics.
            fields (Sequence[google.cloud.dataplex_v1.types.Schema.SchemaField]):
                Optional. Any nested field for complex types.
        """
        name = proto.Field(proto.STRING, number=1,)
        description = proto.Field(proto.STRING, number=2,)
        type_ = proto.Field(proto.ENUM, number=3, enum="Schema.Type",)
        mode = proto.Field(proto.ENUM, number=4, enum="Schema.Mode",)
        # Nested fields are self-referential, enabling arbitrarily deep RECORDs.
        fields = proto.RepeatedField(
            proto.MESSAGE, number=10, message="Schema.SchemaField",
        )
    class PartitionField(proto.Message):
        r"""Represents a key field within the entity's partition
        structure. You could have up to 20 partition fields, but only
        the first 10 partitions have the filtering ability due to
        performance consideration.
        Attributes:
            name (str):
                Required. Partition name is editable if only
                the partition style is not HIVE compatible. The
                maximum length allowed is 767 characters.
            type_ (google.cloud.dataplex_v1.types.Schema.Type):
                Required. Immutable. The type of field.
        """
        name = proto.Field(proto.STRING, number=1,)
        type_ = proto.Field(proto.ENUM, number=2, enum="Schema.Type",)
    user_managed = proto.Field(proto.BOOL, number=1,)
    fields = proto.RepeatedField(proto.MESSAGE, number=2, message=SchemaField,)
    partition_fields = proto.RepeatedField(
        proto.MESSAGE, number=3, message=PartitionField,
    )
    partition_style = proto.Field(proto.ENUM, number=4, enum=PartitionStyle,)
class StorageFormat(proto.Message):
    r"""Describes the format of the data within its storage location.
    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.
    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
    Attributes:
        format_ (google.cloud.dataplex_v1.types.StorageFormat.Format):
            Output only. The data format associated with
            the stored data, which represents content type
            values. The value is inferred from mime type.
        compression_format (google.cloud.dataplex_v1.types.StorageFormat.CompressionFormat):
            Optional. The compression type associated
            with the stored data. If unspecified, the data
            is uncompressed.
        mime_type (str):
            Required. The mime type descriptor for the
            data. Must match the pattern {type}/{subtype}.
            Supported values: - application/x-parquet
            -  application/x-avro
            -  application/x-orc
            -  application/x-tfrecord
            -  application/json
            -  application/{subtypes}
            -  text/csv
            -  text/<subtypes>
            -  image/{image subtype}
            -  video/{video subtype}
            -  audio/{audio subtype}
        csv (google.cloud.dataplex_v1.types.StorageFormat.CsvOptions):
            Optional. Additional information about CSV
            formatted data.
            This field is a member of `oneof`_ ``options``.
        json (google.cloud.dataplex_v1.types.StorageFormat.JsonOptions):
            Optional. Additional information about JSON
            formatted data.
            This field is a member of `oneof`_ ``options``.
    """
    class Format(proto.Enum):
        r"""The specific file format of the data."""
        # Values are grouped by category (structured 1-3, semi-structured
        # 100s, unstructured 200s) per the upstream proto; do not renumber.
        FORMAT_UNSPECIFIED = 0
        PARQUET = 1
        AVRO = 2
        ORC = 3
        CSV = 100
        JSON = 101
        IMAGE = 200
        AUDIO = 201
        VIDEO = 202
        TEXT = 203
        TFRECORD = 204
        OTHER = 1000
        UNKNOWN = 1001
    class CompressionFormat(proto.Enum):
        r"""The specific compressed file format of the data."""
        COMPRESSION_FORMAT_UNSPECIFIED = 0
        GZIP = 2
        BZIP2 = 3
    class CsvOptions(proto.Message):
        r"""Describes CSV and similar semi-structured data formats.
        Attributes:
            encoding (str):
                Optional. The character encoding of the data.
                Accepts "US-ASCII", "UTF-8", and "ISO-8859-1".
                Defaults to UTF-8 if unspecified.
            header_rows (int):
                Optional. The number of rows to interpret as
                header rows that should be skipped when reading
                data rows. Defaults to 0.
            delimiter (str):
                Optional. The delimiter used to separate
                values. Defaults to ','.
            quote (str):
                Optional. The character used to quote column
                values. Accepts '"' and '''. Defaults to '"' if
                unspecified.
        """
        encoding = proto.Field(proto.STRING, number=1,)
        header_rows = proto.Field(proto.INT32, number=2,)
        delimiter = proto.Field(proto.STRING, number=3,)
        quote = proto.Field(proto.STRING, number=4,)
    class JsonOptions(proto.Message):
        r"""Describes JSON data format.
        Attributes:
            encoding (str):
                Optional. The character encoding of the data.
                Accepts "US-ASCII", "UTF-8" and "ISO-8859-1".
                Defaults to UTF-8 if not specified.
        """
        encoding = proto.Field(proto.STRING, number=1,)
    format_ = proto.Field(proto.ENUM, number=1, enum=Format,)
    compression_format = proto.Field(proto.ENUM, number=2, enum=CompressionFormat,)
    mime_type = proto.Field(proto.STRING, number=3,)
    # csv and json belong to the "options" oneof: setting one clears the other.
    csv = proto.Field(proto.MESSAGE, number=10, oneof="options", message=CsvOptions,)
    json = proto.Field(proto.MESSAGE, number=11, oneof="options", message=JsonOptions,)
__all__ = tuple(sorted(__protobuf__.manifest))
| googleapis/python-dataplex | google/cloud/dataplex_v1/types/metadata_.py | Python | apache-2.0 | 28,857 |
import os
from django.template.loader import render_to_string
from django.test.testcases import SimpleTestCase
from corehq.apps.app_manager.models import Application, Module
from corehq.apps.app_manager.util import generate_xmlns
from corehq.util.test_utils import TestFileMixin
# Expected output of XForm.get_questions(..., include_translations=True) for the
# 'case_in_form' fixture used in GetFormQuestionsTest: one dict per question, in
# document order. 'DataBindOnly' entries are hidden values; 'Trigger' entries are
# only returned when include_triggers=True is passed (see the tests below).
QUESTIONS = [
    {
        'tag': 'input',
        'repeat': None,
        'group': None,
        'constraintMsg_ref': 'question1-constraintMsg',
        'value': '/data/question1',
        'hashtagValue': '#form/question1',
        'label': 'label en ____ label en',
        'label_ref': 'question1-label',
        'translations': {
            'en': 'label en ____ label en',
            'es': 'label es ____\n____\n____',
        },
        'type': 'Text',
        'required': False,
        'relevant': ("instance('casedb')/casedb/case[@case_id=instance('casedb')/casedb/case["
                     "@case_id=instance('commcaresession')/session/data/case_id]/index/parent"
                     "]/parent_property_1 + 1 + "
                     "instance('casedb')/casedb/case[@case_id=instance('casedb')/casedb/case["
                     "@case_id=instance('commcaresession')/session/data/case_id]/index/parent"
                     "]/parent_property_1"),
        'constraint': "1 + instance('casedb')/casedb/case[@case_id=instance('commcaresession')/session/data/case_id]/child_property_1",
        'comment': None,
        'setvalue': None,
        'is_group': False,
    },
    {
        'tag': 'input',
        'repeat': None,
        'group': None,
        'value': '/data/question2',
        'hashtagValue': '#form/question2',
        'label': 'label en ____ label en',
        'label_ref': 'question2-label',
        'translations': {'en': 'label en ____ label en'},
        'type': 'Text',
        'required': False,
        'relevant': None,
        'constraint': None,
        'comment': "This is a comment",
        'setvalue': None,
        'is_group': False,
    },
    {
        'tag': 'input',
        'repeat': None,
        'group': None,
        'value': '/data/question3',
        'hashtagValue': '#form/question3',
        'label': 'no references here!',
        'label_ref': 'question3-label',
        'translations': {'en': 'no references here!'},
        'type': 'Text',
        'required': False,
        'relevant': None,
        'constraint': None,
        'comment': None,
        'setvalue': None,
        'is_group': False,
    },
    {
        'tag': 'trigger',
        'repeat': None,
        'group': None,
        'value': '/data/hi',
        'hashtagValue': '#form/hi',
        'label': 'woo',
        'label_ref': 'hi-label',
        'translations': {'en': 'woo'},
        'type': 'Trigger',
        'required': False,
        'relevant': None,
        'constraint': None,
        'comment': None,
        'setvalue': None,
        'is_group': False,
    },
    {
        'tag': 'input',
        'repeat': '/data/question15',
        'group': '/data/question15',
        'value': '/data/question15/question16',
        'hashtagValue': '#form/question15/question16',
        'label': None,
        'label_ref': 'question16-label',
        'translations': {},
        'type': 'Text',
        'required': False,
        'relevant': None,
        'constraint': '1',
        'comment': None,
        'setvalue': None,
        'is_group': False,
    },
    {
        'tag': 'select1',
        'repeat': '/data/question15',
        'group': '/data/question15',
        'options': [
            {
                'value': 'item22',
                'label': None,
                'label_ref': 'question21-item22-label',
                'translations': {},
            }
        ],
        'value': '/data/question15/question21',
        'hashtagValue': '#form/question15/question21',
        'label': None,
        'label_ref': 'question21-label',
        'translations': {},
        'type': 'Select',
        'required': False,
        'relevant': None,
        'constraint': None,
        'comment': None,
        'setvalue': None,
        'is_group': False,
    },
    {
        'tag': 'input',
        'repeat': '/data/question15',
        'group': '/data/question15',
        'value': '/data/question15/question25',
        'hashtagValue': '#form/question15/question25',
        'label': None,
        'label_ref': 'question25-label',
        'translations': {},
        'type': 'Int',
        'required': False,
        'relevant': None,
        'constraint': None,
        'comment': None,
        'setvalue': None,
        'is_group': False,
    },
    {
        'tag': 'input',
        'repeat': None,
        'group': None,
        'value': '/data/thing',
        'hashtagValue': '#form/thing',
        'label': None,
        'label_ref': 'thing-label',
        'translations': {},
        'type': 'Text',
        'required': False,
        'relevant': None,
        'constraint': None,
        'comment': None,
        'setvalue': None,
        'is_group': False,
    },
    {
        'tag': 'hidden',
        'repeat': None,
        'group': None,
        'value': '/data/datanode',
        'hashtagValue': '#form/datanode',
        'label': '#form/datanode',
        'translations': {},
        'type': 'DataBindOnly',
        'relevant': None,
        'calculate': None,
        'constraint': None,
        'comment': None,
        'setvalue': None,
    },
]
class GetFormQuestionsTest(SimpleTestCase, TestFileMixin):
    """Tests question extraction (get_questions) from app forms built from XML fixtures."""
    domain = 'test-domain'
    # TestFileMixin settings: fixtures are read from ./data next to this file.
    file_path = ('data',)
    root = os.path.dirname(__file__)
    # Show the full diff when the large expected-question structures mismatch.
    maxDiff = None
    def setUp(self):
        # Build a one-module app with two forms loaded from XML fixtures.
        self.app = Application.new_app(self.domain, "Test")
        self.app.add_module(Module.new_module("Module", 'en'))
        module = self.app.get_module(0)
        module.case_type = 'test'
        form = self.app.new_form(
            module.id,
            name="Form",
            lang='en',
            attachment=self.get_xml('case_in_form').decode('utf-8')
        )
        form_with_repeats = self.app.new_form(
            module.id,
            name="Form with repeats",
            lang='en',
            attachment=self.get_xml('form_with_repeats').decode('utf-8')
        )
        # Keep the unique ids so individual tests can look the forms up again.
        self.form_unique_id = form.unique_id
        self.form_with_repeats_unique_id = form_with_repeats.unique_id
    def test_get_questions(self):
        # Without include_triggers, labels and triggers are filtered out.
        form = self.app.get_form(self.form_unique_id)
        questions = form.wrapped_xform().get_questions(['en', 'es'], include_translations=True)
        non_label_questions = [
            q for q in QUESTIONS if q['tag'] not in ('label', 'trigger')]
        self.assertEqual(questions, non_label_questions)
    def test_get_questions_with_triggers(self):
        # With include_triggers=True the full expected QUESTIONS list comes back.
        form = self.app.get_form(self.form_unique_id)
        questions = form.wrapped_xform().get_questions(
            ['en', 'es'], include_triggers=True, include_translations=True)
        self.assertEqual(questions, QUESTIONS)
    def test_get_questions_with_repeats(self):
        """
        This test ensures that questions that start with the repeat group id
        do not get marked as repeats. For example:
            /data/repeat_name <-- repeat group path
            /data/repeat_name_count <-- question path
        Before /data/repeat_name_count would be tagged as a repeat incorrectly.
        See http://manage.dimagi.com/default.asp?234108 for context
        """
        form = self.app.get_form(self.form_with_repeats_unique_id)
        questions = form.wrapped_xform().get_questions(
            ['en'],
            include_groups=True,
        )
        # A question whose path merely shares the repeat's prefix is not in it.
        repeat_name_count = list(filter(
            lambda question: question['value'] == '/data/repeat_name_count',
            questions,
        ))[0]
        self.assertIsNone(repeat_name_count['repeat'])
        # A question nested inside the repeat must point back at the repeat.
        repeat_question = list(filter(
            lambda question: question['value'] == '/data/repeat_name/question5',
            questions,
        ))[0]
        self.assertEqual(repeat_question['repeat'], '/data/repeat_name')
    def test_blank_form(self):
        # A freshly rendered blank form template should contain no questions.
        blank_form = render_to_string("app_manager/blank_form.xml", context={
            'xmlns': generate_xmlns(),
        })
        form = self.app.new_form(self.app.get_module(0).id, 'blank', 'en')
        form.source = blank_form
        questions = form.get_questions(['en'])
        self.assertEqual([], questions)
    def test_save_to_case_in_groups(self):
        """Ensure that save to case questions have the correct group and repeat context
        when there are no other questions in that group
        """
        save_to_case_with_groups = self.app.new_form(
            self.app.get_module(0).id,
            name="Save to case in groups",
            lang='en',
            attachment=self.get_xml('save_to_case_in_groups').decode('utf-8')
        )
        questions = save_to_case_with_groups.get_questions(['en'], include_groups=True, include_triggers=True)
        group_question = [q for q in questions if q['value'] == '/data/a_group/save_to_case_in_group/case'][0]
        repeat_question = [q for q in questions if q['value'] == '/data/a_repeat/save_to_case_in_repeat/case'][0]
        self.assertEqual(group_question['group'], '/data/a_group')
        self.assertIsNone(group_question['repeat'])
        # Inside a repeat, both 'repeat' and 'group' point at the repeat path.
        self.assertEqual(repeat_question['repeat'], '/data/a_repeat')
        self.assertEqual(repeat_question['group'], '/data/a_repeat')
    def test_fixture_references(self):
        # Lookup-table (itemset) questions should expose their data source.
        form_with_fixtures = self.app.new_form(
            self.app.get_module(0).id,
            name="Form with Fixtures",
            lang='en',
            attachment=self.get_xml('form_with_fixtures').decode('utf-8')
        )
        questions = form_with_fixtures.get_questions(['en'], include_fixtures=True)
        self.assertEqual(questions[0], {
            "comment": None,
            "constraint": None,
            "data_source": {
                "instance_id": "country",
                "instance_ref": "jr://fixture/item-list:country",
                "nodeset": "instance('country')/country_list/country",
                "label_ref": "name",
                "value_ref": "id",
            },
            "group": None,
            "hashtagValue": "#form/lookup-table",
            "is_group": False,
            "label": "I'm a lookup table!",
            "label_ref": "lookup-table-label",
            "options": [],
            "relevant": None,
            "repeat": None,
            "required": False,
            "setvalue": None,
            "tag": "select1",
            "type": "Select",
            "value": "/data/lookup-table"
        })
| dimagi/commcare-hq | corehq/apps/app_manager/tests/test_get_questions.py | Python | bsd-3-clause | 10,624 |
"""
Class for encoding variable-length flanking and peptides to
fixed-size numerical matrices
"""
from __future__ import (
print_function, division, absolute_import, )
from six import string_types
from collections import namedtuple
import logging
from .encodable_sequences import EncodingError, EncodableSequences
import numpy
import pandas
# Return type of the encoders below: the encoded numeric array together with
# the length of each peptide.
EncodingResult = namedtuple("EncodingResult", ["array", "peptide_lengths"])
class FlankingEncoding(object):
    """
    Encode peptides and optionally their N- and C-flanking sequences into fixed
    size numerical matrices. Similar to EncodableSequences but with support
    for flanking sequences and the encoding scheme used by the processing
    predictor.
    Instances of this class have an immutable list of peptides with
    flanking sequences. Encodings are cached in the instances for faster
    performance when the same set of peptides needs to encoded more than once.
    """
    # Placeholder residue used both to pad flanks and to mask out
    # unsupported peptides.
    unknown_character = "X"
    def __init__(self, peptides, n_flanks, c_flanks):
        """
        Constructor. Sequences of any lengths can be passed.
        Parameters
        ----------
        peptides : list of string
            Peptide sequences
        n_flanks : list of string [same length as peptides]
            Upstream sequences
        c_flanks : list of string [same length as peptides]
            Downstream sequences
        """
        self.dataframe = pandas.DataFrame({
            "peptide": peptides,
            "n_flank": n_flanks,
            "c_flank": c_flanks,
        }, dtype=str)
        # Maps vector_encode() argument tuples to EncodingResult instances.
        self.encoding_cache = {}
    def __len__(self):
        """
        Number of peptides.
        """
        return len(self.dataframe)
    def vector_encode(
            self,
            vector_encoding_name,
            peptide_max_length,
            n_flank_length,
            c_flank_length,
            allow_unsupported_amino_acids=True,
            throw=True):
        """
        Encode variable-length sequences to a fixed-size matrix.
        Parameters
        ----------
        vector_encoding_name : string
            How to represent amino acids. One of "BLOSUM62", "one-hot", etc.
            See `amino_acid.available_vector_encodings()`.
        peptide_max_length : int
            Maximum supported peptide length.
        n_flank_length : int
            Maximum supported N-flank length
        c_flank_length : int
            Maximum supported C-flank length
        allow_unsupported_amino_acids : bool
            If True, non-canonical amino acids will be replaced with the X
            character before encoding.
        throw : bool
            Whether to raise exception on unsupported peptides
        Returns
        -------
        numpy.array with shape (num sequences, length, m)
        where
            - num sequences is number of peptides, i.e. len(self)
            - length is peptide_max_length + n_flank_length + c_flank_length
            - m is the vector encoding length (usually 21).
        """
        # Each distinct argument combination is encoded only once per instance.
        cache_key = (
            "vector_encode",
            vector_encoding_name,
            peptide_max_length,
            n_flank_length,
            c_flank_length,
            allow_unsupported_amino_acids,
            throw)
        if cache_key not in self.encoding_cache:
            result = self.encode(
                vector_encoding_name=vector_encoding_name,
                df=self.dataframe,
                peptide_max_length=peptide_max_length,
                n_flank_length=n_flank_length,
                c_flank_length=c_flank_length,
                allow_unsupported_amino_acids=allow_unsupported_amino_acids,
                throw=throw)
            self.encoding_cache[cache_key] = result
        return self.encoding_cache[cache_key]
    @staticmethod
    def encode(
            vector_encoding_name,
            df,
            peptide_max_length,
            n_flank_length,
            c_flank_length,
            allow_unsupported_amino_acids=False,
            throw=True):
        """
        Encode variable-length sequences to a fixed-size matrix.
        Helper function. Users should use `vector_encode`.
        Parameters
        ----------
        vector_encoding_name : string
        df : pandas.DataFrame
        peptide_max_length : int
        n_flank_length : int
        c_flank_length : int
        allow_unsupported_amino_acids : bool
        throw : bool
        Returns
        -------
        numpy.array
        """
        # Peptides that are empty or longer than peptide_max_length are
        # unsupported: raise (when `throw`) or warn and mask them out below.
        error_df = df.loc[
            (df.peptide.str.len() > peptide_max_length) |
            (df.peptide.str.len() < 1)
        ]
        if len(error_df) > 0:
            message = (
                "Sequence '%s' (length %d) unsupported. There are %d "
                "total peptides with this length." % (
                    error_df.iloc[0].peptide,
                    len(error_df.iloc[0].peptide),
                    len(error_df)))
            if throw:
                raise EncodingError(
                    message,
                    supported_peptide_lengths=(1, peptide_max_length + 1))
            logging.warning(message)
            # Replace invalid peptides with X's. The encoding will be set to
            # NaNs for these peptides farther below.
            # NOTE(review): this writes through to the caller's dataframe
            # (vector_encode passes self.dataframe), so the cached peptides
            # are mutated in place — confirm this side effect is intended.
            df.loc[error_df.index, "peptide"] = "X" * peptide_max_length
        # Left-pad/truncate N-flanks to exactly n_flank_length characters; the
        # guard is needed because .str.slice(-0) would keep the whole string.
        if n_flank_length > 0:
            n_flanks = df.n_flank.str.pad(
                n_flank_length,
                side="left",
                fillchar="X").str.slice(-n_flank_length).str.upper()
        else:
            n_flanks = pandas.Series([""] * len(df))
        # Right-pad/truncate C-flanks to exactly c_flank_length characters.
        c_flanks = df.c_flank.str.pad(
            c_flank_length,
            side="right",
            fillchar="X").str.slice(0, c_flank_length).str.upper()
        peptides = df.peptide.str.upper()
        # One flank+peptide+flank string per row, encoded as a single block.
        concatenated = n_flanks + peptides + c_flanks
        encoder = EncodableSequences.create(concatenated.values)
        array = encoder.variable_length_to_fixed_length_vector_encoding(
            vector_encoding_name=vector_encoding_name,
            alignment_method="right_pad",
            max_length=n_flank_length + peptide_max_length + c_flank_length,
            allow_unsupported_amino_acids=allow_unsupported_amino_acids)
        array = array.astype("float32")  # So NaNs can be used.
        if len(error_df) > 0:
            # Invalid peptides get all-NaN encodings.
            array[error_df.index] = numpy.nan
        result = EncodingResult(
            array, peptide_lengths=peptides.str.len().values)
        return result
| hammerlab/mhcflurry | mhcflurry/flanking_encoding.py | Python | apache-2.0 | 6,536 |
from backend import BackendBase, registry
class HaskellBackend(BackendBase):
    """Build/run backend for Haskell programs compiled with GHC."""

    def compile(self, argv=None, env=None):
        # Default command: compile main.hs with ghc. `env=None` replaces the
        # mutable-default `env={}` (shared dict across calls); the effective
        # default is still an empty environment mapping.
        if not argv:
            argv = ['ghc', 'main.hs']
        return self.popen(argv, env if env is not None else {})

    def run(self, argv=None, env=None):
        # Default command: execute the binary produced by compile().
        if not argv:
            argv = ['./main']
        return self.popen(argv, env if env is not None else {})
# Register this backend in the shared registry so it can be selected by the
# name 'haskell'.
registry.append({
    'name': 'haskell',
    'class': HaskellBackend,
    'description': 'the purely-functional programming language'
})
| 4poc/rccvm | rccvmd/backends/haskell.py | Python | apache-2.0 | 459 |
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WikiHow Datasets."""
import csv
import os
import re
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
_CITATION = """
@misc{koupaee2018wikihow,
title={WikiHow: A Large Scale Text Summarization Dataset},
author={Mahnaz Koupaee and William Yang Wang},
year={2018},
eprint={1810.09305},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """
WikiHow is a new large-scale dataset using the online WikiHow
(http://www.wikihow.com/) knowledge base.
There are two features:
- text: wikihow answers texts.
- headline: bold lines as summary.
There are two separate versions:
- all: consisting of the concatenation of all paragraphs as the articles and
the bold lines as the reference summaries.
- sep: consisting of each paragraph and its summary.
Download "wikihowAll.csv" and "wikihowSep.csv" from
https://github.com/mahnazkoupaee/WikiHow-Dataset and place them in manual folder
https://www.tensorflow.org/datasets/api_docs/python/tfds/download/DownloadConfig.
Train/validation/test splits are provided by the authors.
Preprocessing is applied to remove short articles
(abstract length < 0.75 article length) and clean up extra commas.
"""
_DOCUMENT = "text"
_SUMMARY = "headline"
_URLS = {
"train":
"https://raw.githubusercontent.com/mahnazkoupaee/WikiHow-Dataset/master/all_train.txt",
"validation":
"https://raw.githubusercontent.com/mahnazkoupaee/WikiHow-Dataset/master/all_val.txt",
"test":
"https://raw.githubusercontent.com/mahnazkoupaee/WikiHow-Dataset/master/all_test.txt"
}
class WikihowConfig(tfds.core.BuilderConfig):
  """BuilderConfig for Wikihow."""

  def __init__(self, *, filename=None, **kwargs):
    """BuilderConfig for Wikihow.

    Args:
      filename: filename of different configs for the dataset
        (e.g. "wikihowAll.csv" or "wikihowSep.csv").
      **kwargs: keyword arguments forwarded to super.
    """
    # Version 1.1.0 remove empty document and summary strings.
    # Version 1.2.0 add train validation test split, add cleaning & filtering.
    # Zero-argument super(): this file is Python-3-only (keyword-only args).
    super().__init__(version=tfds.core.Version("1.2.0"), **kwargs)
    self.filename = filename
class Wikihow(tfds.core.GeneratorBasedBuilder):
  """WikiHow: A Large Scale Text Summarization Dataset."""
  MANUAL_DOWNLOAD_INSTRUCTIONS = """\
  Links to files can be found on https://github.com/mahnazkoupaee/WikiHow-Dataset
  Please download both wikihowAll.csv and wikihowSep.csv.
  """
  # Two views of the same data: whole articles ("all") or one paragraph per
  # example ("sep"); each reads a different manually-downloaded CSV.
  BUILDER_CONFIGS = [
      WikihowConfig(
          name="all",
          filename="wikihowAll.csv",
          description="Use the concatenation of all paragraphs as the articles"
          " and the bold lines as the reference summaries"),
      WikihowConfig(
          name="sep",
          filename="wikihowSep.csv",
          description="use each paragraph and its summary.")
  ]
  def _info(self):
    # The "sep" config exposes two extra text features.
    feature_names = [_DOCUMENT, _SUMMARY, "title"]
    if self.builder_config.name == "sep":
      feature_names.extend(["overview", "sectionLabel"])
    return tfds.core.DatasetInfo(
        builder=self,
        description=_DESCRIPTION,
        features=tfds.features.FeaturesDict(
            {k: tfds.features.Text() for k in feature_names}),
        supervised_keys=(_DOCUMENT, _SUMMARY),
        homepage="https://github.com/mahnazkoupaee/WikiHow-Dataset",
        citation=_CITATION,
    )
  def _split_generators(self, dl_manager):
    """Returns SplitGenerators."""
    # The split is published as per-split lists of article titles; every
    # split reads the same manual CSV and keeps only its own titles.
    dl_path = dl_manager.download(_URLS)
    titles = {k: set() for k in dl_path}
    for k, path in dl_path.items():
      with tf.io.gfile.GFile(path) as f:
        for line in f:
          titles[k].add(line.strip())
    return [
        tfds.core.SplitGenerator(
            name=tfds.Split.TRAIN,
            gen_kwargs={
                "path":
                    os.path.join(dl_manager.manual_dir,
                                 self.builder_config.filename),
                "title_set":
                    titles["train"],
            },
        ),
        tfds.core.SplitGenerator(
            name=tfds.Split.VALIDATION,
            gen_kwargs={
                "path":
                    os.path.join(dl_manager.manual_dir,
                                 self.builder_config.filename),
                "title_set":
                    titles["validation"],
            },
        ),
        tfds.core.SplitGenerator(
            name=tfds.Split.TEST,
            gen_kwargs={
                "path":
                    os.path.join(dl_manager.manual_dir,
                                 self.builder_config.filename),
                "title_set":
                    titles["test"],
            },
        )
    ]
  def _generate_examples(self, path=None, title_set=None):
    """Yields examples."""
    with tf.io.gfile.GFile(path) as f:
      reader = csv.reader(f)
      headers = next(reader)
      # Fail fast if the manually-downloaded CSV is not the expected one.
      if self.builder_config.name == "all" and headers != [
          "headline", "title", "text"
      ]:
        raise ValueError("Mismatched header in WikiAll.txt")
      if self.builder_config.name == "sep" and headers != [
          "overview", "headline", "text", "sectionLabel", "title"
      ]:
        raise ValueError("Mismatched header in WikiSep.txt")
      key2id = {key: i for i, key in enumerate(headers)}
      for i, line in enumerate(reader):
        # skip empty line or insufficient line.
        if len(line) == len(key2id):
          summary = line[key2id[_SUMMARY]].strip()
          document = line[key2id[_DOCUMENT]].strip()
          summary, document = _filter_and_clean(summary, document)
          if summary and document:
            # Titles are compared with spaces removed, matching the format
            # of the official split files.
            if line[key2id["title"]].strip().replace(" ", "") in title_set:
              d = {
                  k: line[v].strip()
                  for k, v in key2id.items()
                  if k not in [_SUMMARY, _DOCUMENT]
              }
              d[_DOCUMENT] = document
              d[_SUMMARY] = summary
              yield i, d
# The function below follows the data processing of the original paper; see
# https://github.com/mahnazkoupaee/WikiHow-Dataset/blob/master/process.py
def _filter_and_clean(abstract, article):
"""Remove short article and clean up commas in abstract and article."""
# a threshold is used to remove short articles with long summaries
# as well as articles with no summary
if len(abstract) < (0.75 * len(article)):
# remove extra commas in abstracts
abstract = abstract.replace(".,", ".")
# remove extra commas in articles
article = re.sub(r"[.]+[\n]+[,]", ".\n", article)
return abstract, article
else:
return "", ""
| tensorflow/datasets | tensorflow_datasets/summarization/wikihow.py | Python | apache-2.0 | 7,225 |
__author__ = 'gulce'
import sys
from Airline import *
def canRedeem(current, goal, pathForMiles, airlinesVisited, network):
    """Depth-first search for a chain of partner airlines from current to goal.

    On success, returns True and pathForMiles contains the airline names along
    the redemption path (both endpoints included). On failure, returns False
    and pathForMiles is left as it was on entry.

    current -- name of the airline the miles are currently on
    goal -- name of the airline to transfer miles to
    pathForMiles -- list, filled in place with the successful path
    airlinesVisited -- list of already-explored names (cycle protection)
    network -- list of Airline objects exposing getName()/getPartners()
    """
    if current == goal:
        pathForMiles.append(current)
        return True
    if current in airlinesVisited:
        return False
    airlinesVisited.append(current)
    pathForMiles.append(current)
    # Locate the network node for the current airline.
    node = next((airline for airline in network
                 if airline.getName() == current), None)
    if node is None:
        # Unknown airline: backtrack. (Bug fix: the original returned without
        # removing `current`, leaving a stale entry in pathForMiles.)
        pathForMiles.pop()
        return False
    for partner in node.getPartners():
        if canRedeem(partner, goal, pathForMiles, airlinesVisited, network):
            return True
    # No partner leads to the goal: backtrack (pop, not remove-first-match).
    pathForMiles.pop()
    return False
# Driver: load the airline partner network from airlines.txt, then ask the
# user for a start and goal airline and search for a redemption path.
try:
    scannerToReadAirlines = open("airlines.txt","r")
except:
    # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt;
    # kept as-is to preserve behavior.
    print ("Could not connect to file airlines.txt")
    sys.exit(0)
if (scannerToReadAirlines != None):
    airlinesPartnersNetwork = []    # Airline objects
    airlinesPartnersNetworkpr = []  # printable versions, for display only
    for line in scannerToReadAirlines:
        lineFromFile = line.strip("\n")
        # Comma-separated airline names — presumably the airline followed by
        # its partners; confirm against the Airline constructor.
        airlineNames = lineFromFile.split(",")
        newAirline = Airline(airlineNames)
        airlinesPartnersNetwork.append(newAirline)
        airlinesPartnersNetworkpr.append(newAirline.toString())
    print(airlinesPartnersNetworkpr)
    start = input("Enter airline miles are on: ")
    goal = input("Enter goal airline: ")
    pathForMiles = []
    airlinesVisited = []
    if( canRedeem(start, goal, pathForMiles, airlinesVisited, airlinesPartnersNetwork)):
        print("Path to redeem miles: ", pathForMiles)
    else:
        print("Cannot convert miles from ", start, " to ", goal, ".");
    scannerToReadAirlines.close()
| gulcegunal/dagitik | odev02/airlineproblem.py | Python | gpl-2.0 | 2,128 |
# test builtin slice
# print slice
class A:
    """Object whose subscription simply prints the received index object."""
    def __getitem__(self, key):
        # For a[1:2:3] the interpreter builds and passes slice(1, 2, 3).
        print(key)
A()[1:2:3]  # subscription with a slice: __getitem__ receives slice(1, 2, 3)
| utopiaprince/micropython | tests/basics/builtin_slice.py | Python | mit | 107 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests lesson 03 task 12."""
# Import Python libs
import unittest
from decimal import Decimal
from fractions import Fraction
# Import student file
import task_12
class L03T12TestCase(unittest.TestCase):
    """
    Tests for lesson 03 task 12.
    Checks the four numeric constants defined in task_12, one per numeric
    type (int, float, Decimal, Fraction).
    """
    def test_intval(self):
        """
        Tests that the intval variable has the correct value.
        """
        # assertIs checks identity, not just equality (small ints are
        # cached by CPython, so INTVAL must literally be the int 1).
        self.assertIs(task_12.INTVAL, 1)
    def test_floatval(self):
        """
        Tests that the floatval variable has the correct value.
        """
        self.assertEqual(task_12.FLOATVAL, 0.1)
    def test_decval(self):
        """
        Tests that the decval variable has the correct value.
        """
        # Decimal('0.1') is an exact decimal value, unlike the float 0.1.
        self.assertEqual(task_12.DECVAL, Decimal('0.1'))
    def test_fracval(self):
        """
        Tests that the fracval variable has the correct value.
        """
        self.assertEqual(task_12.FRACVAL, Fraction(1, 10))
if __name__ == '__main__':
    # Support running this test module directly: python test_task_12.py
    unittest.main()
| gracehyemin/is210-week-03-warmup | tests/test_task_12.py | Python | mpl-2.0 | 1,021 |
import os
import sys
import re
import numpy as np
import logging
import struct
import random
import math
import wave
import shutil
import collections
import functools
import operator
import heapq
from collections import namedtuple
from contextlib import contextmanager
# special vocabulary symbols
# BOS/EOS delimit sequences and UNK replaces out-of-vocabulary tokens;
# KEEP/DEL/INS/SUB/NONE are edit-operation labels (see reverse_edits).
_BOS = '<S>'
_EOS = '</S>'
_UNK = '<UNK>'
_KEEP = '<KEEP>'
_DEL = '<DEL>'
_INS = '<INS>'
_SUB = '<SUB>'
_NONE = '<NONE>'
_START_VOCAB = [_BOS, _EOS, _UNK, _KEEP, _DEL, _INS, _SUB, _NONE]
# Integer ids of the special symbols; they mirror the order of _START_VOCAB
# and are assumed to occupy the first slots of every vocabulary (TODO confirm
# against the vocabulary-building scripts).
BOS_ID = 0
EOS_ID = 1
UNK_ID = 2
KEEP_ID = 3
DEL_ID = 4
INS_ID = 5
SUB_ID = 6
NONE_ID = 7
class FinishedTrainingException(Exception):
    """Raised to stop the training loop once training has finished."""
    def __init__(self):
        # Logging happens as a side effect of constructing the exception;
        # no message is attached (Exception.__init__ is not chained).
        debug('finished training')
class CheckpointException(Exception):
    """Control-flow signal related to checkpointing (semantics inferred from
    the name; confirm at raise sites)."""
    pass
class EvalException(Exception):
    """Control-flow signal related to evaluation (semantics inferred from
    the name; confirm at raise sites)."""
    pass
@contextmanager
def open_files(names, mode='r'):
    """ Safely open a list of files in a context manager.

    A name of None maps to standard input (for read modes) or standard
    output (otherwise); those streams are NOT closed on exit. Every file
    actually opened here is closed, even if a later open() fails.

    Example:
        >>> with open_files(['foo.txt', 'bar.csv']) as (f1, f2):
        ...     pass
    """
    files = []
    opened = []  # only the files we opened ourselves and must close
    try:
        for name_ in names:
            if name_ is None:
                # Bug fix: the previous version also closed sys.stdin/stdout
                # in the finally block, breaking any later use of them.
                file_ = sys.stdin if 'r' in mode else sys.stdout
            else:
                file_ = open(name_, mode=mode)
                opened.append(file_)
            files.append(file_)
        yield files
    finally:
        for file_ in opened:
            file_.close()
class AttrDict(dict):
    """
    Dictionary whose keys can also be read and written as attributes.

    Missing keys resolve to None instead of raising AttributeError.

    Example:
        >>> d = AttrDict(x=1, y=2)
        >>> d.x
        1
        >>> d.y = 3
    """
    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        # Alias the instance's attribute dict to the mapping itself, so
        # attribute access and item access are one and the same.
        self.__dict__ = self
    def __getattr__(self, item):
        # Only called for names not found the normal way; absent keys
        # therefore yield None rather than an AttributeError.
        return self.__dict__.get(item)
def reverse_edits(source, edits, fix=True, strict=False):
    """
    Rebuild a target word sequence by applying edit operations to `source`.

    `edits` is either a single-element list whose item is a sequence of edit
    labels ('<KEEP>', '<DEL>', '<INS>_w', '<SUB>_w', or a bare word meaning
    the insertion of that word), or several parallel sequences that zip into
    (op, word) pairs.

    :param source: list of source words
    :param fix: append the remaining (unconsumed) source words at the end
    :param strict: stop applying edits as soon as they become inconsistent
        with the source length
    :return: the reconstructed list of target words
    """
    if len(edits) == 1:  # transform list of edits as a list of (op, word) tuples
        edits = edits[0]
        for i, edit in enumerate(edits):
            if edit in (_KEEP, _DEL, _INS, _SUB):
                edit = (edit, edit)
            elif edit.startswith(_INS + '_'):
                edit = (_INS, edit[len(_INS + '_'):])
            elif edit.startswith(_SUB + '_'):
                edit = (_SUB, edit[len(_SUB + '_'):])
            else:
                # Any other label is treated as the insertion of that word.
                edit = (_INS, edit)
            edits[i] = edit
    else:
        edits = zip(*edits)
    src_words = source
    target = []
    consistent = True
    i = 0
    for op, word in edits:
        if strict and not consistent:
            break
        if op in (_DEL, _KEEP, _SUB):
            # These ops consume one source word; running past the end of the
            # source marks the edit sequence as inconsistent.
            if i >= len(src_words):
                consistent = False
                continue
            if op == _KEEP:
                target.append(src_words[i])
            elif op == _SUB:
                target.append(word)
            # _DEL consumes the source word without emitting anything.
            i += 1
        else:  # op is INS
            target.append(word)
    if fix:
        target += src_words[i:]
    return target
def initialize_vocabulary(vocabulary_path):
    """
    Initialize vocabulary from file.

    We assume the vocabulary is stored one-item-per-line, so a file:
      dog
      cat
    will result in a vocabulary {'dog': 0, 'cat': 1}, and a reversed
    vocabulary ['dog', 'cat'].

    :param vocabulary_path: path to the file containing the vocabulary.
    :raises ValueError: if the vocabulary file does not exist.
    :return: a namedtuple (vocab, reverse) where `vocab` maps token -> id and
        `reverse` is the id -> token list.
    """
    if not os.path.exists(vocabulary_path):
        # Bug fix: the message was previously passed as two separate arguments
        # to ValueError and was never interpolated.
        raise ValueError("vocabulary file %s not found" % vocabulary_path)
    with open(vocabulary_path) as f:
        rev_vocab = [line.rstrip('\n') for line in f]
    vocab = {token: idx for idx, token in enumerate(rev_vocab)}
    return namedtuple('vocab', 'vocab reverse')(vocab, rev_vocab)
def sentence_to_token_ids(sentence, vocabulary, character_level=False):
    """
    Convert a string into the list of integer ids of its tokens.

    For example, a sentence "I have a dog" is tokenized into
    ["I", "have", "a", "dog"]; with vocabulary {"I": 1, "have": 2, "a": 4,
    "dog": 7} this function returns [1, 2, 4, 7]. Tokens missing from the
    vocabulary map to UNK_ID.

    :param sentence: the sentence to convert to token ids
    :param vocabulary: dictionary mapping tokens to integers
    :param character_level: treat the sentence as a sequence of characters
        (with any trailing newline stripped) instead of whitespace tokens
    :return: list of token ids
    """
    if character_level:
        tokens = sentence.rstrip('\n')
    else:
        tokens = sentence.split()
    return [vocabulary.get(token, UNK_ID) for token in tokens]
def get_filenames(data_dir, model_dir, extensions, train_prefix, dev_prefix, vocab_prefix, name=None,
                  ref_ext=None, binary=None, decode=None, eval=None, align=None, **kwargs):
    """
    Get a bunch of file prefixes and extensions, and output the list of filenames to be used
    by the model.
    :param data_dir: directory where all the the data is stored
    :param model_dir: model directory; vocabularies are copied under <model_dir>/data*/
    :param extensions: list of file extensions, in the right order (last extension is always the target)
    :param train_prefix: name of the training corpus (usually 'train')
    :param dev_prefix: name of the dev corpus (usually 'dev')
    :param vocab_prefix: prefix of the vocab files (usually 'vocab')
    :param name: optional suffix for the model's data directory
    :param ref_ext: extension of the reference corpus used at evaluation time
    :param binary: list of booleans, one per extension; binary corpora get no vocab copy
    :param decode: test corpus for decoding (empty list means standard input)
    :param eval: test corpus for evaluation
    :param align: test corpus for alignment
    :param kwargs: ignored (lets callers pass an entire config dict)
    :return: namedtuple containing the filenames
    """
    train_path = os.path.join(data_dir, train_prefix)
    dev_path = [os.path.join(data_dir, prefix) for prefix in dev_prefix]
    train = ['{}.{}'.format(train_path, ext) for ext in extensions]
    dev_extensions = list(extensions)
    # The dev reference may use its own extension, distinct from the target's.
    if ref_ext is not None and ref_ext != extensions[-1]:
        dev_extensions.append(ref_ext)
    dev = [['{}.{}'.format(path, ext) for ext in dev_extensions] for path in dev_path]
    vocab_path = os.path.join(data_dir, vocab_prefix)
    vocab_src = ['{}.{}'.format(vocab_path, ext) for ext in extensions]
    data = 'data' if name is None else 'data_{}'.format(name)
    vocab_path = os.path.join(model_dir, data, 'vocab')
    vocab = ['{}.{}'.format(vocab_path, ext) for ext in extensions]
    os.makedirs(os.path.dirname(vocab_path), exist_ok=True)
    binary = binary or [False] * len(vocab)
    # Copy text vocabularies into the model dir so the model is self-contained.
    for src, dest, binary_ in zip(vocab_src, vocab, binary):
        if not binary_ and not os.path.exists(dest):
            debug('copying vocab to {}'.format(dest))
            shutil.copy(src, dest)
    exts = list(extensions)
    if decode is not None:  # empty list means we decode from standard input
        test = decode
        exts.pop(-1)  # when decoding there is no target-side file
    elif eval is not None:
        if ref_ext is not None:
            exts[-1] = ref_ext
        test = eval or dev_prefix[:1]
    else:
        test = align or dev_prefix[:1]
    # A single name is treated as a corpus prefix (unless it is an existing
    # file given to decode) and expanded with the extensions.
    if len(test) == 1 and not (decode and os.path.exists(test[0])):
        corpus_path = os.path.join(data_dir, test[0]) if not os.path.dirname(test[0]) else test[0]
        test = ['{}.{}'.format(corpus_path, ext) for ext in exts]
    filenames = namedtuple('filenames', ['train', 'dev', 'test', 'vocab'])
    return filenames(train, dev, test, vocab)
def read_dataset(paths, extensions, vocabs, max_size=None, character_level=None, sort_by_length=False,
                 max_seq_len=None, from_position=None, binary=None):
    """
    Read aligned corpora into memory, mapping text lines to token ids.

    :param paths: list of filenames, one corpus per extension
    :param extensions: file extensions, used as keys into `character_level`
        and `max_seq_len`
    :param vocabs: vocabularies as returned by initialize_vocabulary
    :param max_size: stop after this many examples (one shard)
    :param character_level: dict mapping extension -> bool for char-level corpora
    :param sort_by_length: sort the result by sequence lengths
    :param max_seq_len: dict mapping extension -> maximum sequence length
    :param from_position: per-file positions to resume reading from
    :param binary: list of booleans; binary feature inputs are kept as-is
        (NOTE(review): a None value would raise in the zip() below — callers
        appear to always pass a list; confirm)
    :return: (dataset, positions) where positions allow resuming the read
    """
    data_set = []
    if from_position is not None:
        debug('reading from position: {}'.format(from_position))
    line_reader = read_lines_from_position(paths, from_position=from_position, binary=binary)
    character_level = character_level or {}
    positions = None
    for inputs, positions in line_reader:
        if len(data_set) > 0 and len(data_set) % 100000 == 0:
            debug(" lines read: {}".format(len(data_set)))
        # Binary inputs are already numeric; text inputs are tokenized here.
        lines = [
            input_ if binary_ else
            sentence_to_token_ids(input_, vocab.vocab, character_level=character_level.get(ext))
            for input_, vocab, binary_, ext in zip(inputs, vocabs, binary, extensions)
        ]
        if not all(lines):  # skip empty inputs
            continue
        # skip lines that are too long
        if max_seq_len and any(len(line) > max_seq_len[ext] for line, ext in zip(lines, extensions)):
            continue
        data_set.append(lines)
        if max_size and len(data_set) >= max_size:
            break
    debug('files: {}'.format(' '.join(paths)))
    debug('lines reads: {}'.format(len(data_set)))
    if sort_by_length:
        data_set.sort(key=lambda lines: list(map(len, lines)))
    return data_set, positions
def random_batch_iterator(data, batch_size):
    """
    Endless iterator over uniformly sampled batches.

    Each step draws `batch_size` distinct items from `data` (without
    replacement within a single batch) and yields them as a list.

    :param data: the dataset to sample batches from
    :param batch_size: the size of a batch
    :return: an iterator which yields random batches (indefinitely)
    """
    draw = random.sample
    while True:
        yield draw(data, batch_size)
def basic_batch_iterator(data, batch_size, shuffle=False, allow_smaller=True):
    """
    Yield consecutive batches of `batch_size` items from `data`.

    :param data: list of data points (shuffled in place when `shuffle`)
    :param batch_size: number of data points per batch
    :param allow_smaller: also yield a final, smaller batch when len(data)
        is not a multiple of batch_size
    """
    if shuffle:
        random.shuffle(data)
    full_batches, remainder = divmod(len(data), batch_size)
    total = full_batches + (1 if allow_smaller and remainder else 0)
    for start in range(0, total * batch_size, batch_size):
        yield data[start:start + batch_size]
def cycling_batch_iterator(data, batch_size, shuffle=True, allow_smaller=True):
    """
    Cycle through a dataset forever, yielding batches; `data` is reshuffled
    at the start of every new pass when `shuffle` is True.

    :param data: the dataset to segment into batches
    :param batch_size: the size of a batch
    :return: an iterator which yields batches (indefinitely)
    """
    while True:
        yield from basic_batch_iterator(data, batch_size, shuffle=shuffle,
                                        allow_smaller=allow_smaller)
def read_ahead_batch_iterator(data, batch_size, read_ahead=10, shuffle=True, allow_smaller=True,
                              mode='standard', cycle=True, crash_test=False, **kwargs):
    """
    Same iterator as `cycling_batch_iterator`, except that it reads a number of batches
    at once, and sorts their content according to their size.
    This is useful for training, where all the sequences in one batch need to be padded
    to the same length as the longest sequence in the batch.
    :param data: the dataset to segment into batches
    :param batch_size: the size of a batch
    :param read_ahead: number of batches to read ahead of time and sort (larger numbers
        mean faster training, but less random behavior)
    :param mode: 'random' samples batches with replacement instead of cycling
    :param cycle: iterate over the data indefinitely (epoch after epoch)
        instead of a single pass
    :param crash_test: yield the same worst-case batch forever, to probe for
        out-of-memory crashes up front
    :return: an iterator which yields batches (indefinitely when `cycle`)
    """
    if not cycle:
        iterator = basic_batch_iterator(data, batch_size, shuffle=shuffle, allow_smaller=allow_smaller)
    elif mode == 'random':
        iterator = random_batch_iterator(data, batch_size)
    else:
        iterator = cycling_batch_iterator(data, batch_size, shuffle=shuffle, allow_smaller=allow_smaller)
    if crash_test:
        # Build one batch from the longest sequences of the first two streams
        # and yield it forever: if this fits in memory, regular batches will.
        n = batch_size // 2
        dummy_batch = heapq.nlargest(n, data, key=lambda p: len(p[0]))
        dummy_batch += heapq.nlargest(batch_size - n, data, key=lambda p: len(p[1]))
        while True:
            yield dummy_batch
    if read_ahead is None or read_ahead <= 1:
        # Nothing to read ahead: defer to the underlying iterator (endless
        # when cycling; otherwise the loop below terminates immediately).
        yield from iterator
    while True:
        batches = []
        for batch in iterator:
            batches.append(batch)
            if len(batches) >= read_ahead:
                break
        # Pool the read-ahead batches and sort examples by the length of
        # their last element so each batch needs less padding.
        data_ = sorted(sum(batches, []), key=lambda lines: len(lines[-1]))
        batches = [data_[i * batch_size:(i + 1) * batch_size] for i in range(read_ahead)]
        batches = [batch for batch in batches if batch]  # filter empty batches
        if not any(batches):
            break
        if shuffle:  # TODO: enable shuffling here without epoch shuffling
            random.shuffle(batches)
        for batch in batches:
            yield batch
def get_batch_iterator(paths, extensions, vocabs, batch_size, max_size=None, character_level=None,
                       sort_by_length=False, max_seq_len=None, read_ahead=10, shuffle=True,
                       binary=None, mode='standard', crash_test=False):
    """
    Build a (possibly sharded) training batch iterator over the given corpora.

    When the corpus holds at most `max_size` lines it is loaded once and
    cycled in memory; otherwise shards of `max_size` lines are read lazily,
    restarting from the beginning of the files after the last shard.

    :return: (batch iterator, line count of the last — target-side — file)
    """
    read_shard = functools.partial(read_dataset,
        paths=paths, extensions=extensions, vocabs=vocabs, max_size=max_size, max_seq_len=max_seq_len,
        character_level=character_level, sort_by_length=sort_by_length, binary=binary)
    batch_iterator = functools.partial(read_ahead_batch_iterator, batch_size=batch_size, read_ahead=read_ahead,
                                       shuffle=shuffle, mode=mode, crash_test=crash_test)
    # FIXME: crash test only for first shard
    with open(paths[-1]) as f:  # count lines
        line_count = sum(1 for _ in f)
    debug('total line count: {}'.format(line_count))
    shard, position = read_shard()
    if not max_size or line_count <= max_size:
        # training set is small enough to fit entirely into memory (single shard)
        return batch_iterator(shard), line_count
    else:
        # One pass over each in-memory shard, then read the next shard.
        batch_iterator = functools.partial(batch_iterator, cycle=False)
        def generator(position, shard):
            while True:
                if len(shard) < max_size:
                    # last shard, start again from the beginning of the dataset
                    position = None
                size = 0
                for batch in batch_iterator(shard):
                    size += len(batch)
                    yield batch
                    if size >= len(shard):  # cycle through this shard only once, then read next shard
                        shard, position = read_shard(from_position=position)
                        break
        return generator(position, shard), line_count
def get_batches(data, batch_size, batches=0, allow_smaller=True):
    """
    Shuffle `data` in place and cut it into a number of fixed-size batches.

    This is meant for smaller datasets that fit entirely in memory (e.g. dev
    sets); for large training sets prefer the lazy batch iterators above.

    :param data: the dataset to segment into batches (a list of data points)
    :param batch_size: the size of a batch
    :param batches: number of batches to return (0 or an out-of-range value
        means "as many as possible")
    :param allow_smaller: allow the last batch to be smaller
    :return: a list of batches (lists of up to `batch_size` data points)
    """
    if allow_smaller:
        max_batches = int(math.ceil(len(data) / batch_size))
    else:
        max_batches = len(data) // batch_size
    if not 1 <= batches <= max_batches:
        batches = max_batches
    random.shuffle(data)
    return [data[i * batch_size:(i + 1) * batch_size] for i in range(batches)]
def read_binary_features(filename, from_position=None):
    """
    Read a binary file containing vector features (e.g. audio MFCCs).

    The file starts with a (entry count, dimension) int32 header, followed by
    one numpy array of shape (frames, dim) per entry. Such files are produced
    by `scripts/speech/extract-audio-features.py` or `scripts/speech/extract.py`.

    :param filename: path to the binary feature file
    :param from_position: optional file offset to resume reading from
    :return: generator of (list of frame arrays, file position) pairs
    """
    with open(filename, 'rb') as f:
        entry_count, _ = np.load(f)  # header: number of entries, feature dim
        if from_position is not None:
            f.seek(from_position)
        for _ in range(entry_count):
            try:
                entry = np.load(f)
            except OSError:
                # Best-effort: skip entries that cannot be read.
                continue
            yield list(entry), f.tell()
def read_lines(paths, binary=None):
    """
    Open several aligned corpora and iterate over them in parallel.

    A path of None reads from standard input; a True entry in `binary` reads
    the corresponding file as binary feature records (see
    read_binary_features), yielding only the feature arrays.
    """
    binary = binary or [False] * len(paths)
    streams = []
    for path, is_binary in zip(paths, binary):
        if path is None:
            streams.append(sys.stdin)
        elif is_binary:
            streams.append(map(operator.itemgetter(0), read_binary_features(path)))
        else:
            streams.append(open(path))
    return zip(*streams)
def read_text_from_position(filename, from_position=None):
    """
    Yield (line, file position) pairs from a text file, optionally resuming
    from a position previously returned by this generator.
    """
    with open(filename) as f:
        if from_position is not None:
            f.seek(from_position)
        # readline() returns '' exactly at end of file.
        for line in iter(f.readline, ''):
            yield line, f.tell()
def read_lines_from_position(paths, from_position=None, binary=None):
    """
    Read several aligned corpora in parallel, yielding (lines, positions)
    tuples so that reading can later resume where it stopped.

    :param paths: list of filenames
    :param from_position: per-file offsets to resume from (or None)
    :param binary: per-file booleans selecting binary feature reading
    """
    count = len(paths)
    binary = binary or [False] * count
    from_position = from_position or [None] * count
    readers = []
    for path, is_binary, position in zip(paths, binary, from_position):
        if is_binary:
            readers.append(read_binary_features(path, position))
        else:
            readers.append(read_text_from_position(path, position))
    for items in zip(*readers):
        yield tuple(zip(*items))
def create_logger(log_file=None):
    """
    Initialize the module-wide logger and return it.

    A stream handler (stderr, the StreamHandler default) is always attached;
    when `log_file` is given, a file handler is attached as well and parent
    directories are created as needed.

    Note: every call adds new handlers, so calling this more than once
    duplicates log output (pre-existing behavior, kept as-is).

    :param log_file: also log to this file, or only to the console if None
    :return: the configured logger
    """
    formatter = logging.Formatter(fmt='%(asctime)s %(message)s', datefmt='%m/%d %H:%M:%S')
    logger = logging.getLogger(__name__)
    if log_file is not None:
        log_dir = os.path.dirname(log_file)
        if log_dir:
            # Bug fix: os.makedirs('') raised FileNotFoundError when log_file
            # had no directory component (e.g. 'train.log').
            os.makedirs(log_dir, exist_ok=True)
        handler = logging.FileHandler(log_file)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    return logger
def log(msg, level=logging.INFO):
    """Emit *msg* through the module logger at the given level."""
    logger = logging.getLogger(__name__)
    logger.log(level, msg)
def debug(msg):
    """Shortcut for :func:`log` at DEBUG level."""
    log(msg, level=logging.DEBUG)
def warn(msg):
    """Shortcut for :func:`log` at WARN level."""
    log(msg, level=logging.WARN)
def heatmap(xlabels=None, ylabels=None, weights=None, output_file=None, reverse=False):
    """
    Draw a heatmap showing the alignment between two sequences.

    :param xlabels: input words
    :param ylabels: output words
    :param weights: numpy array of shape (len(xlabels), len(ylabels))
    :param output_file: write the figure to this file, or show it into a window if None
    :param reverse: swap the two axes (transposing `weights` accordingly)
    """
    import matplotlib
    from matplotlib import pyplot as plt
    if reverse and not ylabels:
        matplotlib.rcParams.update({'font.size': 18})
    def prettify(token):
        # Undo the XML escaping and BPE '@@' markers used in the corpus files.
        # NOTE(review): the escaped keys below were garbled to bare characters
        # in a previous revision (e.g. '"' mapped to itself), which also broke
        # the file's syntax; restored to the HTML entities they decode.
        token_mapping = {
            '&quot;': '"',
            '&apos;': '\'',
            '&amp;': '&',
            '@@': '_'
        }
        for x, y in token_mapping.items():
            token = token.replace(x, y)
        return token
    xlabels = xlabels or []
    ylabels = ylabels or []
    xlabels = list(map(prettify, xlabels))
    ylabels = list(map(prettify, ylabels))
    if reverse:
        xlabels, ylabels = ylabels, xlabels
        weights = weights.T
    fig, ax = plt.subplots()
    plt.autoscale(enable=True, axis='x', tight=True)
    ax.pcolor(weights, cmap=plt.cm.Greys)
    ax.set_frame_on(False)
    # put the major ticks at the middle of each cell
    ax.set_yticks(np.arange(weights.shape[0]) + 0.5, minor=False)
    ax.set_xticks(np.arange(weights.shape[1]) + 0.5, minor=False)
    ax.invert_yaxis()
    ax.xaxis.tick_top()
    ax.set_xticklabels(xlabels, minor=False)
    ax.set_yticklabels(ylabels, minor=False)
    ax.tick_params(axis='both', which='both', length=0)
    if not reverse:
        plt.xticks(rotation=90)
    plt.xticks(fontsize=18)
    plt.yticks(fontsize=18)
    plt.subplots_adjust(wspace=0, hspace=0)
    if not reverse or ylabels:
        plt.tight_layout()
    ax.set_aspect('equal')
    ax.grid(True)
    # scale the figure with the number of labels so cells stay readable
    xsize = max(2.0 + len(xlabels) / 3, 8.0)
    ysize = max(2.0 + len(ylabels) / 3, 8.0)
    fig.set_size_inches(xsize, ysize, forward=True)
    if output_file is None:
        plt.show()
    else:
        plt.savefig(output_file, bbox_inches='tight')
def alignment_to_text(xlabels=None, ylabels=None, weights=None, output_file=None):
    """
    Dump an alignment matrix as tab-separated text.

    :param xlabels: input words, written as the header row
    :param ylabels: output words, one per row
    :param weights: matrix indexed as weights[row][column] with rows matching
        `ylabels` and columns matching `xlabels`
    :param output_file: image file name; its 'svg'/'jpg' substring is replaced
        by 'txt' to build the text file name
    """
    path = output_file.replace('svg', 'txt').replace('jpg', 'txt')
    with open(path, 'w') as txt_file:
        txt_file.write(' \t' + '\t'.join(xlabels) + '\n')
        for i, label in enumerate(ylabels):
            cells = ''.join('\t' + str(weights[i][j]) for j in range(len(xlabels)))
            txt_file.write(label + cells + '\n')
| eske/seq2seq | translate/utils.py | Python | apache-2.0 | 20,579 |
from django.contrib import admin
from django.shortcuts import redirect
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from waldur_core.core import admin as core_admin
from . import models, tasks
class ProfileAdmin(core_admin.ExtraActionsMixin, admin.ModelAdmin):
    """Read-only admin for FreeIPA profiles with bulk synchronization actions."""

    list_display = ('username', 'user', 'is_active', 'agreement_date')
    readonly_fields = ('username', 'user', 'is_active', 'agreement_date')
    list_filter = ('is_active',)
    search_fields = ('username',)

    def has_add_permission(self, request, obj=None):
        # Profiles are provisioned by the FreeIPA backend, never created by hand.
        return False

    def has_delete_permission(self, request, obj=None):
        # Only staff users may delete profiles.
        return request.user.is_staff

    def get_extra_actions(self):
        """Extra admin actions rendered by ExtraActionsMixin."""
        return [
            self.sync_groups,
            self.sync_names,
            self.sync_gecos,
        ]

    def sync_groups(self, request):
        """Schedule asynchronous synchronization of FreeIPA groups."""
        tasks.schedule_sync()
        self.message_user(request, _('Groups synchronization has been scheduled.'))
        return redirect(reverse('admin:waldur_freeipa_profile_changelist'))

    def sync_names(self, request):
        """Schedule asynchronous synchronization of user names."""
        tasks.schedule_sync_names()
        self.message_user(request, _('Names synchronization has been scheduled.'))
        return redirect(reverse('admin:waldur_freeipa_profile_changelist'))

    def sync_gecos(self, request):
        """Schedule asynchronous synchronization of GECOS fields."""
        tasks.schedule_sync_gecos()
        self.message_user(request, _('GECOS synchronization has been scheduled.'))
        return redirect(reverse('admin:waldur_freeipa_profile_changelist'))
# Expose FreeIPA profiles in the Django admin through the custom ProfileAdmin.
admin.site.register(models.Profile, ProfileAdmin)
| opennode/nodeconductor-assembly-waldur | src/waldur_freeipa/admin.py | Python | mit | 1,621 |
from __future__ import absolute_import
import csv
import functools
import logging
import os
import sys
import tempfile
import warnings
from pip._vendor import pkg_resources
from pip.compat import uses_pycache, WINDOWS, cache_from_source
from pip.compat import get_stdlib
from pip.exceptions import UninstallationError
from pip.locations import (
bin_py, bin_user,
)
from pip.utils import (
rmtree, ask, dist_in_usersite, is_local,
egg_link_path, FakeFile,
renames, normalize_path, dist_is_local,
)
from pip.utils.deprecation import RemovedInPip10Warning
from pip.utils.logging import indent_log
logger = logging.getLogger(__name__)
def _script_names(dist, script_name, is_gui):
    """Return the fully qualified paths of the wrapper files created by
    {console,gui}_scripts for ``dist``'s entry point ``script_name``.
    """
    script_dir = bin_user if dist_in_usersite(dist) else bin_py
    exe = os.path.join(script_dir, script_name)
    names = [exe]
    if WINDOWS:
        # Windows installs additional launcher artifacts next to the script.
        names.extend([exe + '.exe', exe + '.exe.manifest'])
        names.append(exe + ('-script.pyw' if is_gui else '-script.py'))
    return names
def _unique(fn):
@functools.wraps(fn)
def unique(*args, **kw):
seen = set()
for item in fn(*args, **kw):
if item not in seen:
seen.add(item)
yield item
return unique
@_unique
def uninstallation_paths(dist):
    """
    Yield all the uninstallation paths for dist based on RECORD-without-.pyc

    Yield paths to all the files in RECORD. For each .py file in RECORD, add
    the .pyc in the same directory.

    UninstallPathSet.add() takes care of the __pycache__ .pyc.
    """
    reader = csv.reader(FakeFile(dist.get_metadata_lines('RECORD')))
    for row in reader:
        path = os.path.join(dist.location, row[0])
        yield path
        if path.endswith('.py'):
            # also schedule the legacy same-directory .pyc for removal
            directory, filename = os.path.split(path)
            yield os.path.join(directory, filename[:-3] + '.pyc')
class UninstallPathSet(object):
    """A set of file paths to be removed in the uninstallation of a
    requirement."""
    def __init__(self, dist):
        # Files accepted for removal.
        self.paths = set()
        # Paths we refuse to touch because they lie outside the environment.
        self._refuse = set()
        # Maps a .pth file path to the UninstallPthEntries editing it.
        self.pth = {}
        self.dist = dist
        # Temporary stash directory for removed files; enables rollback().
        self.save_dir = None
        self._moved_paths = []
    def _permitted(self, path):
        """
        Return True if the given path is one we are permitted to
        remove/modify, False otherwise.
        """
        return is_local(path)
    def add(self, path):
        """Schedule the file ``path`` for removal (if it exists and is local)."""
        head, tail = os.path.split(path)
        # we normalize the head to resolve parent directory symlinks, but not
        # the tail, since we only want to uninstall symlinks, not their targets
        path = os.path.join(normalize_path(head), os.path.normcase(tail))
        if not os.path.exists(path):
            return
        if self._permitted(path):
            self.paths.add(path)
        else:
            self._refuse.add(path)
        # __pycache__ files can show up after 'installed-files.txt' is created,
        # due to imports
        if os.path.splitext(path)[1] == '.py' and uses_pycache:
            self.add(cache_from_source(path))
    def add_pth(self, pth_file, entry):
        """Schedule removal of ``entry`` from the .pth file ``pth_file``."""
        pth_file = normalize_path(pth_file)
        if self._permitted(pth_file):
            if pth_file not in self.pth:
                self.pth[pth_file] = UninstallPthEntries(pth_file)
            self.pth[pth_file].add(entry)
        else:
            self._refuse.add(pth_file)
    def compact(self, paths):
        """Compact a path set to contain the minimal number of paths
        necessary to contain all paths in the set. If /a/path/ and
        /a/path/to/a/file.txt are both in the set, leave only the
        shorter path."""
        short_paths = set()
        # Shortest paths first, so ancestors are seen before descendants.
        for path in sorted(paths, key=len):
            if not any([
                (path.startswith(shortpath) and
                 path[len(shortpath.rstrip(os.path.sep))] == os.path.sep)
                for shortpath in short_paths]):
                short_paths.add(path)
        return short_paths
    def _stash(self, path):
        """Return the location under ``save_dir`` where ``path`` is stashed."""
        return os.path.join(
            self.save_dir, os.path.splitdrive(path)[1].lstrip(os.path.sep))
    def remove(self, auto_confirm=False):
        """Remove paths in ``self.paths`` with confirmation (unless
        ``auto_confirm`` is True)."""
        if not self.paths:
            logger.info(
                "Can't uninstall '%s'. No files were found to uninstall.",
                self.dist.project_name,
            )
            return
        logger.info(
            'Uninstalling %s-%s:',
            self.dist.project_name, self.dist.version
        )
        with indent_log():
            paths = sorted(self.compact(self.paths))
            if auto_confirm:
                response = 'y'
            else:
                for path in paths:
                    logger.info(path)
                response = ask('Proceed (y/n)? ', ('y', 'n'))
            if self._refuse:
                logger.info('Not removing or modifying (outside of prefix):')
                for path in self.compact(self._refuse):
                    logger.info(path)
            if response == 'y':
                # Files are moved (not deleted) into a temporary directory so
                # the operation can be rolled back.
                self.save_dir = tempfile.mkdtemp(suffix='-uninstall',
                                                 prefix='pip-')
                for path in paths:
                    new_path = self._stash(path)
                    logger.debug('Removing file or directory %s', path)
                    self._moved_paths.append(path)
                    renames(path, new_path)
                for pth in self.pth.values():
                    pth.remove()
                logger.info(
                    'Successfully uninstalled %s-%s',
                    self.dist.project_name, self.dist.version
                )
    def rollback(self):
        """Rollback the changes previously made by remove()."""
        if self.save_dir is None:
            logger.error(
                "Can't roll back %s; was not uninstalled",
                self.dist.project_name,
            )
            return False
        logger.info('Rolling back uninstall of %s', self.dist.project_name)
        for path in self._moved_paths:
            tmp_path = self._stash(path)
            logger.debug('Replacing %s', path)
            renames(tmp_path, path)
        for pth in self.pth.values():
            pth.rollback()
    def commit(self):
        """Remove temporary save dir: rollback will no longer be possible."""
        if self.save_dir is not None:
            rmtree(self.save_dir)
            self.save_dir = None
            self._moved_paths = []
    @classmethod
    def from_dist(cls, dist):
        """Build the UninstallPathSet for ``dist``, covering every known
        install layout (.egg-info, distutils, .egg, .dist-info, develop)."""
        dist_path = normalize_path(dist.location)
        if not dist_is_local(dist):
            logger.info(
                "Not uninstalling %s at %s, outside environment %s",
                dist.key,
                dist_path,
                sys.prefix,
            )
            return cls(dist)
        if dist_path in get_stdlib():
            logger.info(
                "Not uninstalling %s at %s, as it is in the standard library.",
                dist.key,
                dist_path,
            )
            return cls(dist)
        paths_to_remove = cls(dist)
        develop_egg_link = egg_link_path(dist)
        develop_egg_link_egg_info = '{0}.egg-info'.format(
            pkg_resources.to_filename(dist.project_name))
        egg_info_exists = dist.egg_info and os.path.exists(dist.egg_info)
        # Special case for distutils installed package
        distutils_egg_info = getattr(dist._provider, 'path', None)
        # Uninstall cases order do matter as in the case of 2 installs of the
        # same package, pip needs to uninstall the currently detected version
        if (egg_info_exists and dist.egg_info.endswith('.egg-info') and
                not dist.egg_info.endswith(develop_egg_link_egg_info)):
            # if dist.egg_info.endswith(develop_egg_link_egg_info), we
            # are in fact in the develop_egg_link case
            paths_to_remove.add(dist.egg_info)
            if dist.has_metadata('installed-files.txt'):
                for installed_file in dist.get_metadata(
                        'installed-files.txt').splitlines():
                    path = os.path.normpath(
                        os.path.join(dist.egg_info, installed_file)
                    )
                    paths_to_remove.add(path)
            # FIXME: need a test for this elif block
            # occurs with --single-version-externally-managed/--record outside
            # of pip
            elif dist.has_metadata('top_level.txt'):
                if dist.has_metadata('namespace_packages.txt'):
                    namespaces = dist.get_metadata('namespace_packages.txt')
                else:
                    namespaces = []
                for top_level_pkg in [
                        p for p
                        in dist.get_metadata('top_level.txt').splitlines()
                        if p and p not in namespaces]:
                    path = os.path.join(dist.location, top_level_pkg)
                    paths_to_remove.add(path)
                    paths_to_remove.add(path + '.py')
                    paths_to_remove.add(path + '.pyc')
                    paths_to_remove.add(path + '.pyo')
        elif distutils_egg_info:
            warnings.warn(
                "Uninstalling a distutils installed project ({0}) has been "
                "deprecated and will be removed in a future version. This is "
                "due to the fact that uninstalling a distutils project will "
                "only partially uninstall the project.".format(
                    dist.project_name),
                RemovedInPip10Warning,
            )
            paths_to_remove.add(distutils_egg_info)
        elif dist.location.endswith('.egg'):
            # package installed by easy_install
            # We cannot match on dist.egg_name because it can slightly vary
            # i.e. setuptools-0.6c11-py2.6.egg vs setuptools-0.6rc11-py2.6.egg
            paths_to_remove.add(dist.location)
            easy_install_egg = os.path.split(dist.location)[1]
            easy_install_pth = os.path.join(os.path.dirname(dist.location),
                                            'easy-install.pth')
            paths_to_remove.add_pth(easy_install_pth, './' + easy_install_egg)
        elif egg_info_exists and dist.egg_info.endswith('.dist-info'):
            for path in uninstallation_paths(dist):
                paths_to_remove.add(path)
        elif develop_egg_link:
            # develop egg
            with open(develop_egg_link, 'r') as fh:
                link_pointer = os.path.normcase(fh.readline().strip())
            assert (link_pointer == dist.location), (
                'Egg-link %s does not match installed location of %s '
                '(at %s)' % (link_pointer, dist.project_name, dist.location)
            )
            paths_to_remove.add(develop_egg_link)
            easy_install_pth = os.path.join(os.path.dirname(develop_egg_link),
                                            'easy-install.pth')
            paths_to_remove.add_pth(easy_install_pth, dist.location)
        else:
            logger.debug(
                'Not sure how to uninstall: %s - Check: %s',
                dist, dist.location)
        # find distutils scripts= scripts
        if dist.has_metadata('scripts') and dist.metadata_isdir('scripts'):
            for script in dist.metadata_listdir('scripts'):
                if dist_in_usersite(dist):
                    bin_dir = bin_user
                else:
                    bin_dir = bin_py
                paths_to_remove.add(os.path.join(bin_dir, script))
                if WINDOWS:
                    paths_to_remove.add(os.path.join(bin_dir, script) + '.bat')
        # find console_scripts
        _scripts_to_remove = []
        console_scripts = dist.get_entry_map(group='console_scripts')
        for name in console_scripts.keys():
            _scripts_to_remove.extend(_script_names(dist, name, False))
        # find gui_scripts
        gui_scripts = dist.get_entry_map(group='gui_scripts')
        for name in gui_scripts.keys():
            _scripts_to_remove.extend(_script_names(dist, name, True))
        for s in _scripts_to_remove:
            paths_to_remove.add(s)
        return paths_to_remove
class UninstallPthEntries(object):
    """Removes (and restores on rollback) entries from a .pth file."""
    def __init__(self, pth_file):
        if not os.path.isfile(pth_file):
            raise UninstallationError(
                "Cannot remove entries from nonexistent file %s" % pth_file
            )
        self.file = pth_file
        # Entries scheduled for removal.
        self.entries = set()
        # Original file content, saved by remove() so rollback() can restore it.
        self._saved_lines = None
    def add(self, entry):
        """Schedule ``entry`` for removal from the .pth file."""
        entry = os.path.normcase(entry)
        # On Windows, os.path.normcase converts the entry to use
        # backslashes. This is correct for entries that describe absolute
        # paths outside of site-packages, but all the others use forward
        # slashes.
        if WINDOWS and not os.path.splitdrive(entry)[0]:
            entry = entry.replace('\\', '/')
        self.entries.add(entry)
    def remove(self):
        """Rewrite the .pth file without the scheduled entries."""
        logger.debug('Removing pth entries from %s:', self.file)
        with open(self.file, 'rb') as fh:
            # windows uses '\r\n' with py3k, but uses '\n' with py2.x
            lines = fh.readlines()
            self._saved_lines = lines
        if any(b'\r\n' in line for line in lines):
            endline = '\r\n'
        else:
            endline = '\n'
        for entry in self.entries:
            try:
                logger.debug('Removing entry: %s', entry)
                lines.remove((entry + endline).encode("utf-8"))
            except ValueError:
                # Entry not present in the file; nothing to remove.
                pass
        with open(self.file, 'wb') as fh:
            fh.writelines(lines)
    def rollback(self):
        """Restore the .pth file content saved by remove()."""
        if self._saved_lines is None:
            logger.error(
                'Cannot roll back changes to %s, none were made', self.file
            )
            return False
        logger.debug('Rolling %s back to previous state', self.file)
        with open(self.file, 'wb') as fh:
            fh.writelines(self._saved_lines)
        return True
| sigmavirus24/pip | pip/req/req_uninstall.py | Python | mit | 14,576 |
from __future__ import absolute_import
from sentry import tsdb, ratelimits
from sentry.api.serializers import serialize
from sentry.plugins.base import Plugin
from sentry.plugins.base.configuration import react_plugin_config
from sentry.plugins.status import PluginStatus
class DataForwardingPlugin(Plugin):
    """Base plugin that forwards processed events to an external service.

    Subclasses implement :meth:`forward_event`; rate limiting and payload
    serialization are handled here.
    """
    status = PluginStatus.BETA

    def configure(self, project, request):
        return react_plugin_config(self, project, request)

    def has_project_conf(self):
        return True

    def get_rate_limit(self):
        # number of requests, number of seconds (window)
        return (50, 1)

    def forward_event(self, event, payload):
        """
        Forward the event and return a boolean if it was successful.

        NOTE: the signature now matches the call in post_process(), which
        always passes both the event and the serialized payload; previously
        it was declared with only ``payload`` and could never be satisfied.
        """
        raise NotImplementedError

    def get_event_payload(self, event):
        return serialize(event)

    def get_plugin_type(self):
        return 'data-forwarding'

    def post_process(self, event, **kwargs):
        rl_key = '{}:{}'.format(
            self.conf_key,
            event.project.organization_id,
        )
        # limit segment to 50 requests/second
        limit, window = self.get_rate_limit()
        if limit and window and ratelimits.is_limited(rl_key, limit=limit, window=window):
            return
        payload = self.get_event_payload(event)
        success = self.forward_event(event, payload)
        if success is False:
            # TODO(dcramer): record failure
            pass
        tsdb.incr(tsdb.models.project_total_forwarded, event.project.id, count=1)
| JackDanger/sentry | src/sentry/plugins/bases/data_forwarding.py | Python | bsd-3-clause | 1,548 |
# Copyright (c) 2012 Ian C. Good
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
"""Utilities to make logging consistent and easy for WSGI-style requests and
responses as well as more general HTTP logs.
"""
from __future__ import absolute_import
from functools import partial
from .log import logline
__all__ = ['HttpLogger']
class HttpLogger(object):
    """Provides a limited set of log methods that :mod:`slimta` packages may
    use. This prevents free-form logs from mixing in with standard, machine-
    parseable logs.

    :param log: :py:class:`logging.Logger` object to log through.

    """

    def __init__(self, log):
        self.log = partial(logline, log.debug, 'http')

    def _get_method_from_environ(self, environ):
        # WSGI stores the verb under REQUEST_METHOD; normalize to upper-case.
        return environ['REQUEST_METHOD'].upper()

    def _get_path_from_environ(self, environ):
        return environ.get('PATH_INFO', None)

    def _get_headers_from_environ(self, environ):
        # Rebuild (name, value) header tuples from the flattened WSGI keys.
        headers = []
        for key, value in environ.items():
            if key == 'CONTENT_TYPE':
                headers.append(('Content-Type', value))
            elif key == 'CONTENT_LENGTH':
                headers.append(('Content-Length', value))
            elif key.startswith('HTTP_'):
                words = key.split('_')[1:]
                headers.append(('-'.join(w.capitalize() for w in words), value))
        return headers

    def wsgi_request(self, environ):
        """Logs a WSGI-style request, pulling the method, path and headers
        out of ``environ`` and delegating to :meth:`.request`.

        :param environ: The environment data.

        """
        self.request(environ,
                     self._get_method_from_environ(environ),
                     self._get_path_from_environ(environ),
                     self._get_headers_from_environ(environ),
                     is_client=False)

    def wsgi_response(self, environ, status, headers):
        """Logs a WSGI-style response by delegating to :meth:`.response`.

        :param environ: The environment data.
        :param status: The status line given to the client, e.g.
                       ``404 Not Found``.
        :param headers: The headers returned in the response.

        """
        self.response(environ, status, headers, is_client=False)

    def request(self, conn, method, path, headers, is_client=True):
        """Logs an HTTP request.

        :param conn: Any object identifying the connection; pass the same
                     object to the matching :meth:`.response` call.
        :param method: The request method string.
        :param path: The path string.
        :param headers: A list of ``(name, value)`` header tuples given in
                        the request.
        :param is_client: Whether the log line is a client- or server-side
                          request.

        """
        operation = 'client_request' if is_client else 'server_request'
        self.log(id(conn), operation, method=method, path=path,
                 headers=headers)

    def response(self, conn, status, headers, is_client=True):
        """Logs an HTTP response.

        :param conn: Any object identifying the connection; pass the same
                     object to the matching :meth:`.request` call.
        :param status: The status string of the response, e.g. ``200 OK``.
        :param headers: A list of ``(name, value)`` header tuples given in
                        the response.
        :param is_client: Whether the log line is a client- or server-side
                          response.

        """
        operation = 'client_response' if is_client else 'server_response'
        self.log(id(conn), operation, status=status, headers=headers)
# vim:et:fdm=marker:sts=4:sw=4:ts=4
| slimta/python-slimta | slimta/logging/http.py | Python | mit | 5,161 |
from django.db import models
from simple_history.models import HistoricalRecords
from django.core.exceptions import ValidationError
from adminsortable.models import SortableMixin
from academics.models import Enrollment, Grade
# Create your models here.
class Ethnicity(models.Model):
    """A selectable student ethnicity option."""
    ethnicity = models.CharField(max_length=200)
    history = HistoricalRecords()  # django-simple-history audit trail
    class Meta:
        ordering = ['ethnicity']
    def __str__(self):
        return self.ethnicity
class SeatingStudent(models.Model):
    """Seating-chart view of an enrolled student, with allergy/ethnicity data."""
    # (stored value, human label); the empty string means no allergy
    ALLERGYCHOICES = (('', 'No Allergies'), ('ALLERGY', 'Allergy'), ('EPIPEN', 'Allergy (EpiPen)'))
    enrollment = models.ForeignKey(Enrollment)
    ethnicity = models.ForeignKey(Ethnicity, null=True, blank=True)
    # max_length is computed from the longest stored choice value
    food_allergy = models.CharField(max_length=max([len(a[0]) for a in ALLERGYCHOICES]),
        choices=ALLERGYCHOICES, verbose_name="Food allergy status", default="", blank=True)
    #Some properties so I don't have to dig 8 levels in to get basic information,
    #also to make porting easier
    @property
    def first_name(self):
        # Prefer the nickname when one is set.
        if self.enrollment.student.nickname:
            return self.enrollment.student.nickname
        return self.enrollment.student.first_name
    @property
    def last_name(self):
        return self.enrollment.student.last_name
    @property
    def gender(self):
        return self.enrollment.student.gender
    @property
    def flaggedName(self):
        """Display name with allergy markers: ``**`` = allergy, ``**E`` = EpiPen."""
        if self.food_allergy == "ALLERGY":
            template = "{first:} {last:}**"
        elif self.food_allergy == "EPIPEN":
            template = "{first:} {last:}**E"
        else:
            template = "{first:} {last:}"
        return template.format(first=self.first_name, last=self.last_name)
    class Meta:
        ordering = ['enrollment__student__last_name', 'enrollment__student__first_name']
    def __str__(self):
        return str(self.enrollment.student)
class MealTime(SortableMixin):
    """A meal sitting, defining which grades and residency types it serves."""
    name = models.CharField(max_length=200)
    history = HistoricalRecords()  # django-simple-history audit trail
    include_grades = models.ManyToManyField(Grade)
    include_boarding_students = models.BooleanField(default=False)
    include_day_students = models.BooleanField(default=False)
    order = models.PositiveIntegerField(default=0, editable=False, db_index=True)  # adminsortable sort key
    class Meta:
        ordering = ['order']
    def allStudents(self):
        """Return the SeatingStudent queryset eligible for this meal time.

        NOTE: returns None (not an empty queryset) when neither boarding nor
        day students are included — callers must handle that case.
        """
        students = SeatingStudent.objects.filter(enrollment__grade__in=self.include_grades.all())
        #Boarding only
        if self.include_boarding_students and not self.include_day_students:
            #Exclude day students
            students = students.exclude(enrollment__boarder=False)
        #Day only
        elif not self.include_boarding_students and self.include_day_students:
            #Exclude boarding students
            students = students.exclude(enrollment__boarder=True)
        #No students
        elif not self.include_boarding_students and not self.include_day_students:
            students = None
        #Boarding and day students, existing queryset
        else:
            pass
        return students
    def __str__(self):
        return str(self.name)
class Table(models.Model):
    """A physical dining table available at one or more meal times."""
    description = models.CharField(max_length=200)
    for_meals = models.ManyToManyField(MealTime)
    capacity = models.IntegerField()  # total number of seats at the table
    history = HistoricalRecords()  # django-simple-history audit trail
    def __str__(self):
        return "Table %s (%s)" % (self.description, ", ".join(map(str, self.for_meals.all())))
class SeatFiller(models.Model):
    """Blocks out seats at a table for given meal times (optionally labeled)."""
    description = models.CharField(max_length=200, blank=True)
    seats = models.IntegerField()  # number of seats consumed at the table
    table = models.ForeignKey(Table)
    meal_time = models.ManyToManyField(MealTime)
    display = models.BooleanField(default=False)  # show the description on printed charts
    def clean(self):
        # A filler must either take up seats or be displayed (with a label);
        # otherwise it has no observable effect.
        if not self.seats and not self.display:
            raise ValidationError("No seats are being taken up and the entry isn't being displayed. What point does it serve?")
        if self.display and not self.description:
            raise ValidationError("Description must not be blank if the seat filler is being displayed")
    history = HistoricalRecords()  # django-simple-history audit trail
    def __str__(self):
        if self.description:
            return self.description
        if self.id:
            return "SeatFiller %d" % self.id
        return "SeatFiller"
class PinnedStudent(models.Model):
    """Pins a student to a fixed table for a given meal time."""
    student = models.ForeignKey(SeatingStudent)
    table = models.ForeignKey(Table)
    meal_time = models.ForeignKey(MealTime)
    history = HistoricalRecords()  # django-simple-history audit trail
    class Meta:
        # A student can be pinned to at most one table per meal time.
        unique_together = (('student', 'meal_time'), )
    def __str__(self):
        # ``self.student`` is a SeatingStudent, which already renders as the
        # underlying academic student. The previous ``self.student.student.name``
        # raised AttributeError: SeatingStudent has no ``student`` attribute.
        return "%s to %s for %s" % (self.student, self.table.description, self.meal_time.name)
class TableAssignment(models.Model):
    """A generated seating assignment of one student to a table for a meal."""
    meal_time = models.ForeignKey(MealTime)
    student = models.ForeignKey(SeatingStudent)
    table = models.ForeignKey(Table)
    waitor = models.BooleanField(default=False)  # student acts as the table's waiter
    history = HistoricalRecords()  # django-simple-history audit trail
    class Meta:
        # One assignment per student per meal time.
        unique_together = (('meal_time', 'student'), )
        permissions = (
            ("view_table_assignments", "Can view table assignments"),
            ("edit_table_assignments", "Can edit table assignments"),
        )
class Layout(models.Model):
    """A named print layout pairing up to two meal times side by side."""
    name = models.CharField(max_length=25)
    left_print = models.ForeignKey(MealTime, related_name="+")
    right_print = models.ForeignKey(MealTime, blank=True, null=True, related_name="+")
    def __str__(self):
        # The previous revision had extraction junk fused onto this line,
        # which broke the statement; restored to a plain return.
        return self.name
# -*- coding: utf-8 -*-
"""
A simple microservice framework for ZMQ Messaging.
"""
import weakref
import logging
import threading
import six
import binascii
import collections
import uuid
from ..exc import DispatcherError
from .protocol import ZMQCauldronErrorResponse
from .thread import ZMQThread, ZMQThreadError
# Single-byte sentinel frames used by the messaging layer — presumably
# FRAMEBLANK marks an empty value and FRAMEFAIL a failed one; confirm
# against the protocol module.
FRAMEBLANK = six.binary_type(b"\x01")
FRAMEFAIL = six.binary_type(b"\x02")
# Empty frame, conventionally used as an envelope delimiter in multipart
# ZMQ messages.
FRAMEDELIMITER = six.binary_type(b"")
__all__ = ['ZMQMicroservice', 'FRAMEBLANK', 'FRAMEFAIL', 'FRAMEDELIMITER']
class ZMQMicroservice(ZMQThread):
    """A ZMQ Responder tool."""
    def __init__(self, context, address, name="microservice", timeout=5):
        super(ZMQMicroservice, self).__init__(name=six.text_type(name), context=context)
        self.timeout = float(timeout)  # presumably seconds — confirm against ZMQThread usage
        self.address = address  # ZMQ endpoint this responder uses
    def check(self, timeout=0.1):
        """Check for errors."""
        try:
            super(ZMQMicroservice, self).check(timeout)
        except ZMQThreadError as exc:
            # Re-raise internal thread errors as the public DispatcherError.
            raise DispatcherError(exc.msg)
    def thread_target(self):
        """Main function should call respond."""
        self.respond()
    def handle(self, message):
        """Handle a message, raising an error if appropriate."""
        try:
            # Dispatch to a ``handle_<command>`` method on this instance.
            method_name = "handle_{0:s}".format(message.command)
            if not hasattr(self, method_name):
                message.raise_error_response("Bad command '{0:s}'!".format(message.command))
            response_payload = getattr(self, method_name)(message)
        except ZMQCauldronErrorResponse as e:
            # Already packaged as an error message; return it directly.
            return e.message
        except Exception as e:
            # Unexpected failure: log it and convert into an error response.
            self.log.exception("Error handling '{0}': {1!r}".format(message.command, e))
            return message.error_response("{0!r}".format(e))
        else:
            response = message.response(response_payload)
            return response
| alexrudy/Cauldron | Cauldron/zmq/microservice.py | Python | bsd-3-clause | 1,919 |
"""Manual plugin."""
import os
import logging
import pipes
import shutil
import signal
import subprocess
import sys
import tempfile
import time
import zope.component
import zope.interface
from acme import challenges
from letsencrypt import errors
from letsencrypt import interfaces
from letsencrypt.plugins import common
logger = logging.getLogger(__name__)
class ManualAuthenticator(common.Plugin):
"""Manual Authenticator.
.. todo:: Support for `~.challenges.DVSNI`.
"""
zope.interface.implements(interfaces.IAuthenticator)
zope.interface.classProvides(interfaces.IPluginFactory)
description = "Manual Authenticator"
MESSAGE_TEMPLATE = """\
Make sure your web server displays the following content at
{uri} before continuing:
{achall.token}
Content-Type header MUST be set to {ct}.
If you don't have HTTP server configured, you can run the following
command on the target server (as root):
{command}
"""
# "cd /tmp/letsencrypt" makes sure user doesn't serve /root,
# separate "public_html" ensures that cert.pem/key.pem are not
# served and makes it more obvious that Python command will serve
# anything recursively under the cwd
HTTP_TEMPLATE = """\
mkdir -p {root}/public_html/{response.URI_ROOT_PATH}
cd {root}/public_html
echo -n {validation} > {response.URI_ROOT_PATH}/{encoded_token}
# run only once per server:
python -c "import BaseHTTPServer, SimpleHTTPServer; \\
SimpleHTTPServer.SimpleHTTPRequestHandler.extensions_map = {{'': '{ct}'}}; \\
s = BaseHTTPServer.HTTPServer(('', {port}), SimpleHTTPServer.SimpleHTTPRequestHandler); \\
s.serve_forever()" """
"""Non-TLS command template."""
# https://www.piware.de/2011/01/creating-an-https-server-in-python/
HTTPS_TEMPLATE = """\
mkdir -p {root}/public_html/{response.URI_ROOT_PATH}
cd {root}/public_html
echo -n {validation} > {response.URI_ROOT_PATH}/{encoded_token}
# run only once per server:
openssl req -new -newkey rsa:4096 -subj "/" -days 1 -nodes -x509 -keyout ../key.pem -out ../cert.pem
python -c "import BaseHTTPServer, SimpleHTTPServer, ssl; \\
SimpleHTTPServer.SimpleHTTPRequestHandler.extensions_map = {{'': '{ct}'}}; \\
s = BaseHTTPServer.HTTPServer(('', {port}), SimpleHTTPServer.SimpleHTTPRequestHandler); \\
s.socket = ssl.wrap_socket(s.socket, keyfile='../key.pem', certfile='../cert.pem'); \\
s.serve_forever()" """
"""TLS command template.
According to the ACME specification, "the ACME server MUST ignore
the certificate provided by the HTTPS server", so the first command
generates temporary self-signed certificate.
"""
    def __init__(self, *args, **kwargs):
        super(ManualAuthenticator, self).__init__(*args, **kwargs)
        # Pick the plain-HTTP command unless SimpleHTTP-over-TLS is enabled.
        self.template = (self.HTTP_TEMPLATE if self.config.no_simple_http_tls
                         else self.HTTPS_TEMPLATE)
        # In test mode serve from a throwaway directory; otherwise a fixed /tmp path.
        self._root = (tempfile.mkdtemp() if self.conf("test-mode")
                      else "/tmp/letsencrypt")
        self._httpd = None  # subprocess.Popen handle when test mode runs the server
    @classmethod
    def add_parser_arguments(cls, add):
        # CLI flag: run the manual command automatically in a subprocess.
        add("test-mode", action="store_true",
            help="Test mode. Executes the manual command in subprocess. "
            "Requires openssl to be installed unless --no-simple-http-tls.")
    def prepare(self):  # pylint: disable=missing-docstring,no-self-use
        # No setup needed; the user performs every step manually.
        pass  # pragma: no cover
    def more_info(self):  # pylint: disable=missing-docstring,no-self-use
        # NOTE(review): the .replace("\n", "") strips newlines without adding
        # spaces, fusing adjacent words (e.g. "HTTPserver"); also "privilidged"
        # is a typo — both are user-visible strings, so fixing them changes
        # runtime output. Flagged rather than changed here.
        return """\
This plugin requires user's manual intervention in setting up a HTTP
server for solving SimpleHTTP challenges and thus does not need to be
run as a privilidged process. Alternatively shows instructions on how
to use Python's built-in HTTP server and, in case of HTTPS, openssl
binary for temporary key/certificate generation.""".replace("\n", "")
    def get_chall_pref(self, domain):
        # pylint: disable=missing-docstring,no-self-use,unused-argument
        # Only SimpleHTTP challenges are supported by this plugin.
        return [challenges.SimpleHTTP]
    def perform(self, achalls):  # pylint: disable=missing-docstring
        # Handle each challenge independently; see _perform_single for details.
        responses = []
        # TODO: group achalls by the same socket.gethostbyname(_ex)
        # and prompt only once per server (one "echo -n" per domain)
        for achall in achalls:
            responses.append(self._perform_single(achall))
        return responses
    def _perform_single(self, achall):
        """Solve a single SimpleHTTP challenge.

        Returns the challenge response on success, ``False`` when the
        manual command could not be started / failed (test mode only),
        and ``None`` when the served response did not verify.
        """
        # same path for each challenge response would be easier for
        # users, but will not work if multiple domains point at the
        # same server: default command doesn't support virtual hosts
        response, validation = achall.gen_response_and_validation(
            tls=(not self.config.no_simple_http_tls))
        # Fill the shell command template with this challenge's data.
        command = self.template.format(
            root=self._root, achall=achall, response=response,
            validation=pipes.quote(validation.json_dumps()),
            encoded_token=achall.chall.encode("token"),
            ct=response.CONTENT_TYPE, port=(
                response.port if self.config.simple_http_port is None
                else self.config.simple_http_port))
        if self.conf("test-mode"):
            logger.debug("Test mode. Executing the manual command: %s", command)
            try:
                self._httpd = subprocess.Popen(
                    command,
                    # don't care about setting stdout and stderr,
                    # we're in test mode anyway
                    shell=True,
                    # "preexec_fn" is UNIX specific, but so is "command"
                    # (os.setsid lets cleanup() kill the whole group)
                    preexec_fn=os.setsid)
            except OSError as error:  # ValueError should not happen!
                logger.debug(
                    "Couldn't execute manual command: %s", error, exc_info=True)
                return False
            logger.debug("Manual command running as PID %s.", self._httpd.pid)
            # give it some time to bootstrap, before we try to verify
            # (cert generation in case of simpleHttpS might take time)
            time.sleep(4)  # XXX
            if self._httpd.poll() is not None:
                raise errors.Error("Couldn't execute manual command")
        else:
            # Interactive mode: show the command and block until the
            # user has set up the server themselves.
            self._notify_and_wait(self.MESSAGE_TEMPLATE.format(
                achall=achall, response=response,
                uri=response.uri(achall.domain, achall.challb.chall),
                ct=response.CONTENT_TYPE, command=command))
        if response.simple_verify(
                achall.chall, achall.domain,
                achall.account_key.public_key(), self.config.simple_http_port):
            return response
        else:
            if self.conf("test-mode") and self._httpd.poll() is not None:
                # simply verify cause command failure...
                return False
            return None
    def _notify_and_wait(self, message):  # pylint: disable=no-self-use
        """Print *message* and block until the user presses ENTER."""
        # TODO: IDisplay wraps messages, breaking the command
        #answer = zope.component.getUtility(interfaces.IDisplay).notification(
        #    message=message, height=25, pause=True)
        sys.stdout.write(message)
        # NOTE: raw_input is Python 2 only (input() on Python 3).
        raw_input("Press ENTER to continue")
    def cleanup(self, achalls):
        # pylint: disable=missing-docstring,no-self-use,unused-argument
        if self.conf("test-mode"):
            assert self._httpd is not None, (
                "cleanup() must be called after perform()")
            if self._httpd.poll() is None:
                logger.debug("Terminating manual command process")
                # Kill the whole process group created via os.setsid in
                # _perform_single (the command may have spawned children).
                os.killpg(self._httpd.pid, signal.SIGTERM)
            else:
                logger.debug("Manual command process already terminated "
                             "with %s code", self._httpd.returncode)
            # Remove the temporary web root created in __init__.
            shutil.rmtree(self._root)
| solidgoldbomb/letsencrypt | letsencrypt/plugins/manual.py | Python | apache-2.0 | 7,692 |
# coding: utf-8
# (c) 2015-11-23 Teruhisa Okada
import netCDF4
import numpy as np
import matplotlib.pyplot as plt
import shutil
import os.path
import romspy
romspy.cmap('jet')
def get_vstd(ncfile_ens, ncfile_tru, N, vnames, plot=False):
    """Compute per-variable standard deviations across an ensemble.

    ncfile_ens -- path template for member files, formatted with the
                  member index (``ncfile_ens.format(i)``).
    ncfile_tru -- reference ("truth") file the members are compared to.
    N          -- number of ensemble members.
    vnames     -- variable names to process; only 4-D variables are used
                  (presumably (time, level, y, x) -- ROMS convention,
                  TODO confirm).
    plot       -- if True, show surface/bottom maps of each std field.

    Returns a dict mapping variable name -> 3-D numpy array holding the
    std over members of (last time step minus the reference).

    NOTE(review): variables in *vnames* that are not 4-D are silently
    skipped and absent from the result; the datasets are never closed.
    """
    ini = netCDF4.Dataset(ncfile_tru, 'r')
    var = {}
    for i in range(N):
        ncfile = ncfile_ens.format(i)
        print ncfile, vnames
        nc = netCDF4.Dataset(ncfile, 'r')
        for vname in vnames:
            v = nc.variables[vname]
            vini = ini.variables[vname]
            if v.ndim == 4:
                if i == 0:
                    # Allocate the ensemble buffer on the first member.
                    tend, kend, jend, iend = v.shape
                    var[vname] = np.zeros([N, kend, jend, iend])
                # Anomaly of the last time step w.r.t. the reference.
                var[vname][i,:,:,:] = v[-1,:,:,:] - vini[-1,:,:,:]
    vstd = {}
    for vname in var.keys():
        # Standard deviation over the ensemble (axis 0 = member index).
        vstd[vname] = np.std(var[vname], axis=0)
        if plot is True:
            plt.figure(figsize=[12,5])
            plt.subplot(121)
            vmax = np.max(vstd[vname])
            vmin = np.min(vstd[vname])
            # Level 19 is labeled "surface", level 0 "bottom" below.
            plt.pcolor(vstd[vname][19,:,:], vmax=vmax, vmin=vmin)
            plt.colorbar()
            plt.title('surface '+vname)
            plt.subplot(122)
            plt.pcolor(vstd[vname][0,:,:], vmax=vmax, vmin=vmin)
            plt.colorbar()
            plt.title('bottom '+vname)
            plt.show()
    return vstd
def make_std_file(stdfile, vstd, base=None):
    """Write the std fields in *vstd* into the netCDF file *stdfile*.

    If *base* is given it is copied to *stdfile* first, so the output
    inherits the base file's structure (dimensions, other variables).
    """
    if base is not None:
        print base, '=>', stdfile
        shutil.copyfile(base, stdfile)
    for vname in vstd.keys():
        # t=-1: overwrite the last time record of each variable.
        romspy.edit_nc_var(stdfile, vname, vstd[vname], t=-1)
if __name__ == '__main__':
    # Experiment selector; each branch sets the ensemble/reference/output
    # paths and the ensemble size for one case.
    case = '21_NLS3'
    if case == '1_NLS2':
        # NOTE(review): label '1_NLS2' never matches `case` as set above,
        # and the paths below say 'case21/NLS2' -- the intended label is
        # possibly '21_NLS2'; confirm before relying on this branch.
        ncfile_ens = '/home/okada/ism-i/apps/OB500P/case21/NLS2/runs/run{}/ob500_rst.nc'
        ncfile_tru = '/home/okada/ism-i/apps/OB500P/case21/NLS2/ob500_rst.nc'
        ncfile_std = '/home/okada/ism-i/data/ob500_std_i_case21_NLS2_100.nc'
        N = 100
    elif case == '21_NLS3':
        ncfile_ens = '/home/okada/ism-i/apps/OB500P/case21/NLS3/runs/run{}/ob500_rst.nc'
        ncfile_tru= '/home/okada/ism-i/apps/OB500P/case21/NLS3/ob500_rst.nc'
        ncfile_std = '/home/okada/ism-i/data/ob500_std_i_case21_NLS3_100.nc'
        N = 100
    # Variables for which the ensemble spread is computed.
    vnames = ['temp', 'salt', 'chlorophyll', 'oxygen', 'NH4', 'NO3', 'PO4', 'LdetritusN', 'SdetritusN', 'LdetritusP', 'SdetritusP', 'phytoplankton', 'zooplankton']
    #vnames = ['salt']
    vstd = get_vstd(ncfile_ens, ncfile_tru, N, vnames)  # , plot=True)
    basefile = '/home/okada/ism-i/data/ob500_ini_zero_0101_grd-11_3.nc'
    make_std_file(ncfile_std, vstd, base=basefile)
| okadate/romspy | romspy/make/make_std_file_ensemble.py | Python | mit | 2,618 |
# Copyright 2014 Hewlett-Packard Development Company, L.P
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.identity import base
from tempest.common.utils import data_utils
from tempest import test
class UsersV3TestJSON(base.BaseIdentityV3AdminTest):
    """Tests for listing and fetching users through the v3 identity API."""
    _interface = 'json'

    def _list_users_with_params(self, params, key, expected, not_expected):
        """List users filtered by *params* and check the result.

        ``expected`` must appear in the listing and ``not_expected``
        must not; both are compared on the ``key`` field.
        """
        _, body = self.client.get_users(params)
        fetched = [user[key] for user in body]
        self.assertIn(expected[key], fetched)
        self.assertNotIn(not_expected[key], fetched)

    @classmethod
    def resource_setup(cls):
        super(UsersV3TestJSON, cls).resource_setup()
        name_for_email = data_utils.rand_name('test_user')
        password = data_utils.rand_name('pass')
        cls.alt_email = name_for_email + '@testmail.tm'
        cls.data.setup_test_domain()
        # User living in the test domain (enabled by default).
        _, cls.domain_enabled_user = cls.client.create_user(
            data_utils.rand_name('test_user'), password=password,
            email=cls.alt_email, domain_id=cls.data.domain['id'])
        cls.data.v3_users.append(cls.domain_enabled_user)
        # Default-domain user, created disabled.
        _, cls.non_domain_enabled_user = cls.client.create_user(
            data_utils.rand_name('test_user'), password=password,
            email=cls.alt_email, enabled=False)
        cls.data.v3_users.append(cls.non_domain_enabled_user)

    @test.attr(type='gate')
    def test_list_user_domains(self):
        # Filtering by domain_id yields only that domain's users.
        self._list_users_with_params({'domain_id': self.data.domain['id']},
                                     'domain_id',
                                     self.domain_enabled_user,
                                     self.non_domain_enabled_user)

    @test.attr(type='gate')
    def test_list_users_with_not_enabled(self):
        # Filtering by enabled=False yields only disabled users.
        self._list_users_with_params({'enabled': False},
                                     'enabled',
                                     self.non_domain_enabled_user,
                                     self.domain_enabled_user)

    @test.attr(type='gate')
    def test_list_users_with_name(self):
        # Filtering by name yields only the matching user.
        self._list_users_with_params({'name': self.domain_enabled_user['name']},
                                     'name',
                                     self.domain_enabled_user,
                                     self.non_domain_enabled_user)

    @test.attr(type='gate')
    def test_list_users(self):
        # An unfiltered listing must contain every user we created.
        _, body = self.client.get_users()
        fetched_ids = [u['id'] for u in body]
        missing_users = [u['id'] for u in self.data.v3_users
                         if u['id'] not in fetched_ids]
        self.assertEqual(0, len(missing_users),
                         "Failed to find user %s in fetched list" %
                         ', '.join(missing_users))

    @test.attr(type='gate')
    def test_get_user(self):
        # Fetching a single user returns the details it was created with.
        expected = self.data.v3_users[0]
        _, user = self.client.get_user(expected['id'])
        self.assertEqual(expected['id'], user['id'])
        self.assertEqual(expected['name'], user['name'])
        self.assertEqual(self.alt_email, user['email'])
        self.assertEqual(self.data.domain['id'], user['domain_id'])
class UsersV3TestXML(UsersV3TestJSON):
    # Re-run all of the JSON test cases over the XML API interface.
    _interface = 'xml'
| queria/my-tempest | tempest/api/identity/admin/v3/test_list_users.py | Python | apache-2.0 | 4,289 |
#!/usr/bin/env python
# Copyright (c) 2015, Robot Control and Pattern Recognition Group,
# Institute of Control and Computation Engineering
# Warsaw University of Technology
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Warsaw University of Technology nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYright HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: Dawid Seredynski
#
import PyKDL
import xml.etree.ElementTree as ET
class CollisionModel:
    """Self-collision model of a robot, read from URDF/SRDF files.

    Per link it stores simplified collision primitives (spheres and
    capsules) and, from the SRDF, the list of link pairs that still
    need to be checked against each other (pairs not disabled there).
    """
    class Collision:
        # One collision primitive attached to a link.
        def __init__(self):
            self.T_L_O = None    # primitive pose in the link frame (PyKDL.Frame)
            self.type = None     # "sphere" or "capsule"
            self.radius = None
            self.length = None   # capsules only
    class Link:
        # A robot link and its (possibly absent) collision primitives.
        def __init__(self):
            self.name = None
            self.col = None      # list of Collision, or None if link has none
    def readUrdfSrdf(self, urdf_filename, srdf_filename):
        """Populate links, link_map, disabled/enabled collision pairs."""
        def parseGeometryCollision2Element(col, geometry_elem):
            # Fill *col* from a <geometry> child (<sphere> or <capsule>).
            for child in geometry_elem:
                if child.tag == "sphere":
                    col.type = "sphere"
                    col.radius = float(child.attrib["radius"])
                elif child.tag == "capsule":
                    col.type = "capsule"
                    col.radius = float(child.attrib["radius"])
                    col.length = float(child.attrib["length"])
                else:
                    print "ERROR: parseGeometryCollision2Element: unknown element:", child.tag
        def parseCollision2Element(link, collision2_elem):
            # Parse one <self_collision_checking> element into a Collision.
            col = CollisionModel.Collision()
            for child in collision2_elem:
                if child.tag == "origin":
                    # rpy/xyz attributes are space-separated floats.
                    rpy_str = child.attrib["rpy"].split()
                    xyz_str = child.attrib["xyz"].split()
                    col.T_L_O = PyKDL.Frame( PyKDL.Rotation.RPY(float(rpy_str[0]), float(rpy_str[1]), float(rpy_str[2])), PyKDL.Vector(float(xyz_str[0]), float(xyz_str[1]), float(xyz_str[2])) )
                elif child.tag == "geometry":
                    parseGeometryCollision2Element(col, child)
            return col
        def parseLinkElement(link_elem):
            # Parse one <link>, collecting its collision primitives.
            link = CollisionModel.Link()
            link.name = link_elem.attrib["name"]
            for child in link_elem:
                if child.tag == "self_collision_checking":
                    col = parseCollision2Element(link, child)
                    if link.col == None:
                        link.col = []
                    link.col.append( col )
            return link
        # read the urdf file for convex collision primitives
        self.links = []
        self.link_map = {}
        tree = ET.parse(urdf_filename)
        root = tree.getroot()
        for child in root:
            if child.tag == "link":
                link = parseLinkElement(child)
                self.links.append( link )
                self.link_map[link.name] = link
        # read the srdf file (pairs explicitly excluded from checking)
        tree = ET.parse(srdf_filename)
        root = tree.getroot()
        self.disabled_collision_pairs = []
        for child in root:
            if child.tag == "disable_collisions":
                self.disabled_collision_pairs.append( (child.attrib["link1"], child.attrib["link2"]) )
        # Enumerate all unordered pairs of links that both have collision
        # geometry and are not disabled (in either orientation).
        self.collision_pairs = []
        for link1_idx in range(len(self.links)):
            if self.links[link1_idx].col == None or len(self.links[link1_idx].col) == 0:
                continue
            for link2_idx in range(link1_idx+1, len(self.links)):
                if self.links[link2_idx].col == None or len(self.links[link2_idx].col) == 0:
                    continue
                pair1 = (self.links[link1_idx].name, self.links[link2_idx].name)
                pair2 = (self.links[link2_idx].name, self.links[link1_idx].name)
                if not pair1 in self.disabled_collision_pairs and not pair2 in self.disabled_collision_pairs:
                    self.collision_pairs.append(pair1)
    def __init__(self):
        # All attributes are filled in by readUrdfSrdf().
        self.links = None
        self.link_map = None
        self.disabled_collision_pairs = None
        self.collision_pairs = None
| dseredyn/velma_planners | scripts/collision_model.py | Python | gpl-2.0 | 5,365 |
"""
Copyright 2015 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from tempest.lib.exceptions import CommandFailed
from designateclient.functionaltests.base import BaseDesignateTest
from designateclient.functionaltests.datagen import random_a_recordset_name
from designateclient.functionaltests.datagen import random_zone_name
from designateclient.functionaltests.v2.fixtures import RecordsetFixture
from designateclient.functionaltests.v2.fixtures import ZoneFixture
class TestRecordset(BaseDesignateTest):
    """Functional CRUD tests for recordsets via the designate client."""
    def setUp(self):
        """Create a zone plus one A recordset and sanity-check its fields."""
        super(TestRecordset, self).setUp()
        self.ensure_tld_exists('com')
        # Fresh zone to hold the recordset under test.
        self.zone = self.useFixture(ZoneFixture(
            name=random_zone_name(),
            email='test@example.com',
        )).zone
        name = random_a_recordset_name(self.zone.name)
        self.recordset = self.useFixture(RecordsetFixture(
            zone_id=self.zone.id,
            name=name,
            records='1.2.3.4',
            description='An a recordset',
            type='A',
            ttl=1234,
        )).recordset
        # NOTE: the client returns fields as strings, hence '1234' below.
        self.assertEqual(self.recordset.name, name)
        self.assertEqual(self.recordset.records, '1.2.3.4')
        self.assertEqual(self.recordset.description, 'An a recordset')
        self.assertEqual(self.recordset.type, 'A')
        self.assertEqual(self.recordset.ttl, '1234')
    def test_recordset_list(self):
        # Listing must at least include the recordset created in setUp.
        rsets = self.clients.recordset_list(self.zone.id)
        self.assertGreater(len(rsets), 0)
    def test_recordset_create_and_show(self):
        # "show" must return the same data the fixture created.
        rset = self.clients.recordset_show(self.zone.id, self.recordset.id)
        self.assertTrue(hasattr(self.recordset, 'action'))
        self.assertTrue(hasattr(rset, 'action'))
        self.assertEqual(self.recordset.created_at, rset.created_at)
        self.assertEqual(self.recordset.description, rset.description)
        self.assertEqual(self.recordset.id, rset.id)
        self.assertEqual(self.recordset.name, rset.name)
        self.assertEqual(self.recordset.records, rset.records)
        self.assertEqual(self.recordset.status, rset.status)
        self.assertEqual(self.recordset.ttl, rset.ttl)
        self.assertEqual(self.recordset.type, rset.type)
        self.assertEqual(self.recordset.updated_at, rset.updated_at)
        self.assertEqual(self.recordset.version, rset.version)
        self.assertEqual(self.recordset.zone_id, self.zone.id)
    def test_recordset_delete(self):
        # Deletion is asynchronous: the server acknowledges with a
        # PENDING DELETE action rather than removing it immediately.
        rset = self.clients.recordset_delete(self.zone.id, self.recordset.id)
        self.assertEqual(rset.action, 'DELETE')
        self.assertEqual(rset.status, 'PENDING')
    def test_recordset_set(self):
        # "set" updates records, ttl and description in one call.
        rset = self.clients.recordset_set(
            self.zone.id,
            self.recordset.id,
            records='2.3.4.5',
            ttl=2345,
            description='Updated description',
        )
        self.assertEqual(rset.records, '2.3.4.5')
        self.assertEqual(rset.ttl, '2345')
        self.assertEqual(rset.description, 'Updated description')
    def test_recordset_set_clear_ttl_and_description(self):
        # no_ttl/no_description clear the fields (shown as 'None').
        rset = self.clients.recordset_set(
            self.zone.id,
            self.recordset.id,
            no_description=True,
            no_ttl=True,
        )
        self.assertEqual(rset.description, 'None')
        self.assertEqual(rset.ttl, 'None')
class TestRecordsetNegative(BaseDesignateTest):
    """Error cases for the recordset CLI commands."""

    def test_invalid_option_on_recordset_create(self):
        # An unrecognized CLI flag must make the command fail.
        cmd = ('recordset create de47d30b-41c5-4e38-b2c5-e0b908e19ec7 '
               'aaa.desig.com. --type A --records 1.2.3.4 '
               '--invalid "not valid"')
        self.assertRaises(CommandFailed, self.clients.openstack, cmd)

    def test_invalid_recordset_command(self):
        # An unknown recordset subcommand must fail as well.
        self.assertRaises(CommandFailed, self.clients.openstack,
                          'recordset hopefullynotvalid')
| openstack/python-designateclient | designateclient/functionaltests/v2/test_recordsets.py | Python | apache-2.0 | 4,339 |
def getMinimumDifference(a, b):
    """Per-index anagram distance between the word lists *a* and *b*.

    Words are paired from the front (``zip``). For each pair of
    equal-length words, the result holds the total number of character
    occurrences that differ between them (sum over characters of the
    absolute count difference); pairs of unequal length, and positions
    beyond the shorter list, are reported as -1.

    Generalized from the original: character counting uses
    collections.Counter, so any characters are handled (the manual
    26-slot table raised IndexError on non-lowercase input).
    """
    from collections import Counter

    ans = [-1] * max(len(a), len(b))
    if not a or not b:
        return ans
    for i, (w1, w2) in enumerate(zip(a, b)):
        if len(w1) != len(w2):
            continue  # words of different length can never be anagrams
        c1 = Counter(w1)
        c2 = Counter(w2)
        ans[i] = sum(abs(c1[ch] - c2[ch]) for ch in c1.keys() | c2.keys())
    return ans
| TanakritBenz/leetcode-adventure | min_diff_to_make_anagram.py | Python | gpl-2.0 | 557 |
# -*- coding: utf-8 -*-
"""Tests of Beautiful Soup as a whole."""
from pdb import set_trace
import logging
import unittest
import sys
import tempfile
from bs4 import (
BeautifulSoup,
BeautifulStoneSoup,
)
from bs4.element import (
CharsetMetaAttributeValue,
ContentMetaAttributeValue,
SoupStrainer,
NamespacedAttribute,
)
import bs4.dammit
from bs4.dammit import (
EntitySubstitution,
UnicodeDammit,
EncodingDetector,
)
from bs4.testing import (
SoupTest,
skipIf,
)
import warnings
try:
from bs4.builder import LXMLTreeBuilder, LXMLTreeBuilderForXML
LXML_PRESENT = True
except ImportError as e:
LXML_PRESENT = False
PYTHON_3_PRE_3_2 = (sys.version_info[0] == 3 and sys.version_info < (3,2))
class TestConstructor(SoupTest):
    """Sanity checks for feeding various kinds of markup to the parser."""

    def test_short_unicode_input(self):
        # A short non-ASCII document must survive parsing intact.
        markup = "<h1>éé</h1>"
        self.assertEqual("éé", self.soup(markup).h1.string)

    def test_embedded_null(self):
        # NUL characters inside text are preserved, not stripped.
        markup = "<h1>foo\0bar</h1>"
        self.assertEqual("foo\0bar", self.soup(markup).h1.string)

    def test_exclude_encodings(self):
        # With UTF-8 ruled out, detection falls back to windows-1252.
        utf8_bytes = "Räksmörgås".encode("utf-8")
        parsed = self.soup(utf8_bytes, exclude_encodings=["utf-8"])
        self.assertEqual("windows-1252", parsed.original_encoding)
class TestWarnings(SoupTest):
    """Warnings issued for dubious BeautifulSoup constructor arguments.

    NOTE(review): a second class also named TestWarnings is defined
    later in this module and shadows this one, so these tests are
    currently never collected; the duplicate should be renamed.
    """

    def _assert_no_parser_specified(self, s, is_there=True):
        # Renamed from _no_parser_specified: the test methods below call
        # self._assert_no_parser_specified(), which previously raised
        # AttributeError instead of checking the warning text.
        v = s.startswith(BeautifulSoup.NO_PARSER_SPECIFIED_WARNING[:80])
        self.assertTrue(v)

    def test_warning_if_no_parser_specified(self):
        with warnings.catch_warnings(record=True) as w:
            soup = self.soup("<a><b></b></a>")
        msg = str(w[0].message)
        self._assert_no_parser_specified(msg)

    def test_warning_if_parser_specified_too_vague(self):
        with warnings.catch_warnings(record=True) as w:
            soup = self.soup("<a><b></b></a>", "html")
        msg = str(w[0].message)
        self._assert_no_parser_specified(msg)

    def test_no_warning_if_explicit_parser_specified(self):
        with warnings.catch_warnings(record=True) as w:
            soup = self.soup("<a><b></b></a>", "html.parser")
        self.assertEqual([], w)

    def test_parseOnlyThese_renamed_to_parse_only(self):
        with warnings.catch_warnings(record=True) as w:
            soup = self.soup("<a><b></b></a>", parseOnlyThese=SoupStrainer("b"))
        msg = str(w[0].message)
        self.assertTrue("parseOnlyThese" in msg)
        self.assertTrue("parse_only" in msg)
        self.assertEqual(b"<b></b>", soup.encode())

    def test_fromEncoding_renamed_to_from_encoding(self):
        with warnings.catch_warnings(record=True) as w:
            utf8 = b"\xc3\xa9"
            soup = self.soup(utf8, fromEncoding="utf8")
        msg = str(w[0].message)
        self.assertTrue("fromEncoding" in msg)
        self.assertTrue("from_encoding" in msg)
        self.assertEqual("utf8", soup.original_encoding)

    def test_unrecognized_keyword_argument(self):
        self.assertRaises(
            TypeError, self.soup, "<a>", no_such_argument=True)
class TestFilenameAndUrlWarnings(SoupTest):
    """Warnings issued when the markup looks like a filename or a URL.

    Renamed from TestWarnings: under the old name this class shadowed
    the TestWarnings class defined earlier in the module, so that
    class's tests were silently never run.
    """

    def test_disk_file_warning(self):
        filehandle = tempfile.NamedTemporaryFile()
        filename = filehandle.name
        try:
            with warnings.catch_warnings(record=True) as w:
                soup = self.soup(filename)
            msg = str(w[0].message)
            self.assertTrue("looks like a filename" in msg)
        finally:
            filehandle.close()

        # The file no longer exists, so Beautiful Soup will no longer issue the warning.
        with warnings.catch_warnings(record=True) as w:
            soup = self.soup(filename)
        self.assertEqual(0, len(w))

    def test_url_warning_with_bytes_url(self):
        with warnings.catch_warnings(record=True) as warning_list:
            soup = self.soup(b"http://www.crummybytes.com/")
        # Be aware this isn't the only warning that can be raised during
        # execution..
        self.assertTrue(any("looks like a URL" in str(w.message)
                            for w in warning_list))

    def test_url_warning_with_unicode_url(self):
        with warnings.catch_warnings(record=True) as warning_list:
            # note - this url must differ from the bytes one otherwise
            # python's warnings system swallows the second warning
            soup = self.soup("http://www.crummyunicode.com/")
        self.assertTrue(any("looks like a URL" in str(w.message)
                            for w in warning_list))

    def test_url_warning_with_bytes_and_space(self):
        # A string containing a space is not mistaken for a URL.
        with warnings.catch_warnings(record=True) as warning_list:
            soup = self.soup(b"http://www.crummybytes.com/ is great")
        self.assertFalse(any("looks like a URL" in str(w.message)
                             for w in warning_list))

    def test_url_warning_with_unicode_and_space(self):
        with warnings.catch_warnings(record=True) as warning_list:
            soup = self.soup("http://www.crummyuncode.com/ is great")
        self.assertFalse(any("looks like a URL" in str(w.message)
                             for w in warning_list))
class TestSelectiveParsing(SoupTest):
    """Tests for parsing only part of a document with a SoupStrainer."""

    def test_parse_with_soupstrainer(self):
        # Only <b> tags (with their contents) should survive the parse.
        markup = "No<b>Yes</b><a>No<b>Yes <c>Yes</c></b>"
        only_b_tags = SoupStrainer("b")
        soup = self.soup(markup, parse_only=only_b_tags)
        self.assertEqual(soup.encode(), b"<b>Yes</b><b>Yes <c>Yes</c></b>")
class TestEntitySubstitution(unittest.TestCase):
    """Standalone tests of the EntitySubstitution class.

    NOTE(review): several expected-value literals in this file had been
    entity-decoded (e.g. asserting substitute_xml("AT&T") == "AT&T",
    which contradicts the test's own name); the escaped expectations
    are restored below.
    """

    def setUp(self):
        self.sub = EntitySubstitution

    def test_simple_html_substitution(self):
        # Unicode characters corresponding to named HTML entities
        # are substituted, and no others.
        s = "foo\u2200\N{SNOWMAN}\u00f5bar"
        self.assertEqual(self.sub.substitute_html(s),
                         "foo&forall;\N{SNOWMAN}&otilde;bar")

    def test_smart_quote_substitution(self):
        # MS smart quotes are a common source of frustration, so we
        # give them a special test.
        quotes = b"\x91\x92foo\x93\x94"
        dammit = UnicodeDammit(quotes)
        self.assertEqual(self.sub.substitute_html(dammit.markup),
                         "&lsquo;&rsquo;foo&ldquo;&rdquo;")

    def test_xml_converstion_includes_no_quotes_if_make_quoted_attribute_is_false(self):
        s = 'Welcome to "my bar"'
        self.assertEqual(self.sub.substitute_xml(s, False), s)

    def test_xml_attribute_quoting_normally_uses_double_quotes(self):
        self.assertEqual(self.sub.substitute_xml("Welcome", True),
                         '"Welcome"')
        self.assertEqual(self.sub.substitute_xml("Bob's Bar", True),
                         '"Bob\'s Bar"')

    def test_xml_attribute_quoting_uses_single_quotes_when_value_contains_double_quotes(self):
        s = 'Welcome to "my bar"'
        self.assertEqual(self.sub.substitute_xml(s, True),
                         "'Welcome to \"my bar\"'")

    def test_xml_attribute_quoting_escapes_single_quotes_when_value_contains_both_single_and_double_quotes(self):
        s = 'Welcome to "Bob\'s Bar"'
        self.assertEqual(
            self.sub.substitute_xml(s, True),
            '"Welcome to &quot;Bob\'s Bar&quot;"')

    def test_xml_quotes_arent_escaped_when_value_is_not_being_quoted(self):
        quoted = 'Welcome to "Bob\'s Bar"'
        self.assertEqual(self.sub.substitute_xml(quoted), quoted)

    def test_xml_quoting_handles_angle_brackets(self):
        self.assertEqual(
            self.sub.substitute_xml("foo<bar>"),
            "foo&lt;bar&gt;")

    def test_xml_quoting_handles_ampersands(self):
        self.assertEqual(self.sub.substitute_xml("AT&T"), "AT&amp;T")

    def test_xml_quoting_including_ampersands_when_they_are_part_of_an_entity(self):
        # substitute_xml escapes even ampersands that begin an entity.
        self.assertEqual(
            self.sub.substitute_xml("&Aacute;T&T"),
            "&amp;Aacute;T&amp;T")

    def test_xml_quoting_ignoring_ampersands_when_they_are_part_of_an_entity(self):
        # ...while substitute_xml_containing_entities leaves them alone.
        self.assertEqual(
            self.sub.substitute_xml_containing_entities("&Aacute;T&T"),
            "&Aacute;T&amp;T")

    def test_quotes_not_html_substituted(self):
        """There's no need to do this except inside attribute values."""
        text = 'Bob\'s "bar"'
        self.assertEqual(self.sub.substitute_html(text), text)
class TestEncodingConversion(SoupTest):
    # Test Beautiful Soup's ability to decode and encode from various
    # encodings.
    def setUp(self):
        super(TestEncodingConversion, self).setUp()
        self.unicode_data = '<html><head><meta charset="utf-8"/></head><body><foo>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</foo></body></html>'
        self.utf8_data = self.unicode_data.encode("utf-8")
        # Just so you know what it looks like.
        self.assertEqual(
            self.utf8_data,
            b'<html><head><meta charset="utf-8"/></head><body><foo>Sacr\xc3\xa9 bleu!</foo></body></html>')
    def test_ascii_in_unicode_out(self):
        # ASCII input is converted to Unicode. The original_encoding
        # attribute is set to 'utf-8', a superset of ASCII.
        chardet = bs4.dammit.chardet_dammit
        logging.disable(logging.WARNING)
        try:
            def noop(str):
                return None
            # Disable chardet, which will realize that the ASCII is ASCII.
            bs4.dammit.chardet_dammit = noop
            ascii = b"<foo>a</foo>"
            soup_from_ascii = self.soup(ascii)
            unicode_output = soup_from_ascii.decode()
            self.assertTrue(isinstance(unicode_output, str))
            self.assertEqual(unicode_output, self.document_for(ascii.decode()))
            self.assertEqual(soup_from_ascii.original_encoding.lower(), "utf-8")
        finally:
            # Restore global state even if the assertions above fail.
            logging.disable(logging.NOTSET)
            bs4.dammit.chardet_dammit = chardet
    def test_unicode_in_unicode_out(self):
        # Unicode input is left alone. The original_encoding attribute
        # is not set.
        soup_from_unicode = self.soup(self.unicode_data)
        self.assertEqual(soup_from_unicode.decode(), self.unicode_data)
        self.assertEqual(soup_from_unicode.foo.string, 'Sacr\xe9 bleu!')
        self.assertEqual(soup_from_unicode.original_encoding, None)
    def test_utf8_in_unicode_out(self):
        # UTF-8 input is converted to Unicode. The original_encoding
        # attribute is set.
        soup_from_utf8 = self.soup(self.utf8_data)
        self.assertEqual(soup_from_utf8.decode(), self.unicode_data)
        self.assertEqual(soup_from_utf8.foo.string, 'Sacr\xe9 bleu!')
    def test_utf8_out(self):
        # The internal data structures can be encoded as UTF-8.
        soup_from_unicode = self.soup(self.unicode_data)
        self.assertEqual(soup_from_unicode.encode('utf-8'), self.utf8_data)
    @skipIf(
        PYTHON_3_PRE_3_2,
        "Bad HTMLParser detected; skipping test of non-ASCII characters in attribute name.")
    def test_attribute_name_containing_unicode_characters(self):
        # Non-ASCII attribute names must round-trip through encode().
        markup = '<div><a \N{SNOWMAN}="snowman"></a></div>'
        self.assertEqual(self.soup(markup).div.encode("utf8"), markup.encode("utf8"))
class TestUnicodeDammit(unittest.TestCase):
    """Standalone tests of UnicodeDammit.

    NOTE(review): the expected values of the smart_quotes_to="xml" and
    smart_quotes_to="html" tests had been entity-decoded (making them
    identical to the plain-Unicode case); the entity-escaped
    expectations are restored below.
    """

    def test_unicode_input(self):
        markup = "I'm already Unicode! \N{SNOWMAN}"
        dammit = UnicodeDammit(markup)
        self.assertEqual(dammit.unicode_markup, markup)

    def test_smart_quotes_to_unicode(self):
        markup = b"<foo>\x91\x92\x93\x94</foo>"
        dammit = UnicodeDammit(markup)
        self.assertEqual(
            dammit.unicode_markup, "<foo>\u2018\u2019\u201c\u201d</foo>")

    def test_smart_quotes_to_xml_entities(self):
        markup = b"<foo>\x91\x92\x93\x94</foo>"
        dammit = UnicodeDammit(markup, smart_quotes_to="xml")
        self.assertEqual(
            dammit.unicode_markup, "<foo>&#x2018;&#x2019;&#x201C;&#x201D;</foo>")

    def test_smart_quotes_to_html_entities(self):
        markup = b"<foo>\x91\x92\x93\x94</foo>"
        dammit = UnicodeDammit(markup, smart_quotes_to="html")
        self.assertEqual(
            dammit.unicode_markup, "<foo>&lsquo;&rsquo;&ldquo;&rdquo;</foo>")

    def test_smart_quotes_to_ascii(self):
        markup = b"<foo>\x91\x92\x93\x94</foo>"
        dammit = UnicodeDammit(markup, smart_quotes_to="ascii")
        self.assertEqual(
            dammit.unicode_markup, """<foo>''""</foo>""")

    def test_detect_utf8(self):
        utf8 = b"Sacr\xc3\xa9 bleu! \xe2\x98\x83"
        dammit = UnicodeDammit(utf8)
        self.assertEqual(dammit.original_encoding.lower(), 'utf-8')
        self.assertEqual(dammit.unicode_markup, 'Sacr\xe9 bleu! \N{SNOWMAN}')

    def test_convert_hebrew(self):
        hebrew = b"\xed\xe5\xec\xf9"
        dammit = UnicodeDammit(hebrew, ["iso-8859-8"])
        self.assertEqual(dammit.original_encoding.lower(), 'iso-8859-8')
        self.assertEqual(dammit.unicode_markup, '\u05dd\u05d5\u05dc\u05e9')

    def test_dont_see_smart_quotes_where_there_are_none(self):
        utf_8 = b"\343\202\261\343\203\274\343\202\277\343\202\244 Watch"
        dammit = UnicodeDammit(utf_8)
        self.assertEqual(dammit.original_encoding.lower(), 'utf-8')
        self.assertEqual(dammit.unicode_markup.encode("utf-8"), utf_8)

    def test_ignore_inappropriate_codecs(self):
        utf8_data = "Räksmörgås".encode("utf-8")
        dammit = UnicodeDammit(utf8_data, ["iso-8859-8"])
        self.assertEqual(dammit.original_encoding.lower(), 'utf-8')

    def test_ignore_invalid_codecs(self):
        utf8_data = "Räksmörgås".encode("utf-8")
        for bad_encoding in ['.utf8', '...', 'utF---16.!']:
            dammit = UnicodeDammit(utf8_data, [bad_encoding])
            self.assertEqual(dammit.original_encoding.lower(), 'utf-8')

    def test_exclude_encodings(self):
        # This is UTF-8.
        utf8_data = "Räksmörgås".encode("utf-8")

        # But if we exclude UTF-8 from consideration, the guess is
        # Windows-1252.
        dammit = UnicodeDammit(utf8_data, exclude_encodings=["utf-8"])
        self.assertEqual(dammit.original_encoding.lower(), 'windows-1252')

        # And if we exclude that, there is no valid guess at all.
        dammit = UnicodeDammit(
            utf8_data, exclude_encodings=["utf-8", "windows-1252"])
        self.assertEqual(dammit.original_encoding, None)

    def test_encoding_detector_replaces_junk_in_encoding_name_with_replacement_character(self):
        detected = EncodingDetector(
            b'<?xml version="1.0" encoding="UTF-\xdb" ?>')
        encodings = list(detected.encodings)
        assert 'utf-\N{REPLACEMENT CHARACTER}' in encodings

    def test_detect_html5_style_meta_tag(self):
        # All the attribute-quoting variants of <meta charset> work.
        for data in (
            b'<html><meta charset="euc-jp" /></html>',
            b"<html><meta charset='euc-jp' /></html>",
            b"<html><meta charset=euc-jp /></html>",
            b"<html><meta charset=euc-jp/></html>"):
            dammit = UnicodeDammit(data, is_html=True)
            self.assertEqual(
                "euc-jp", dammit.original_encoding)

    def test_last_ditch_entity_replacement(self):
        # This is a UTF-8 document that contains bytestrings
        # completely incompatible with UTF-8 (ie. encoded with some other
        # encoding).
        #
        # Since there is no consistent encoding for the document,
        # Unicode, Dammit will eventually encode the document as UTF-8
        # and encode the incompatible characters as REPLACEMENT
        # CHARACTER.
        #
        # If chardet is installed, it will detect that the document
        # can be converted into ISO-8859-1 without errors. This happens
        # to be the wrong encoding, but it is a consistent encoding, so the
        # code we're testing here won't run.
        #
        # So we temporarily disable chardet if it's present.
        doc = b"""\357\273\277<?xml version="1.0" encoding="UTF-8"?>
<html><b>\330\250\330\252\330\261</b>
<i>\310\322\321\220\312\321\355\344</i></html>"""
        chardet = bs4.dammit.chardet_dammit
        logging.disable(logging.WARNING)
        try:
            def noop(str):
                return None
            bs4.dammit.chardet_dammit = noop
            dammit = UnicodeDammit(doc)
            self.assertEqual(True, dammit.contains_replacement_characters)
            self.assertTrue("\ufffd" in dammit.unicode_markup)

            soup = BeautifulSoup(doc, "html.parser")
            self.assertTrue(soup.contains_replacement_characters)
        finally:
            logging.disable(logging.NOTSET)
            bs4.dammit.chardet_dammit = chardet

    def test_byte_order_mark_removed(self):
        # A document written in UTF-16LE will have its byte order marker stripped.
        data = b'\xff\xfe<\x00a\x00>\x00\xe1\x00\xe9\x00<\x00/\x00a\x00>\x00'
        dammit = UnicodeDammit(data)
        self.assertEqual("<a>áé</a>", dammit.unicode_markup)
        self.assertEqual("utf-16le", dammit.original_encoding)

    def test_detwingle(self):
        # Here's a UTF8 document.
        utf8 = ("\N{SNOWMAN}" * 3).encode("utf8")

        # Here's a Windows-1252 document.
        windows_1252 = (
            "\N{LEFT DOUBLE QUOTATION MARK}Hi, I like Windows!"
            "\N{RIGHT DOUBLE QUOTATION MARK}").encode("windows_1252")

        # Through some unholy alchemy, they've been stuck together.
        doc = utf8 + windows_1252 + utf8

        # The document can't be turned into UTF-8:
        self.assertRaises(UnicodeDecodeError, doc.decode, "utf8")

        # Unicode, Dammit thinks the whole document is Windows-1252,
        # and decodes it into "☃☃☃“Hi, I like Windows!”☃☃☃"

        # But if we run it through fix_embedded_windows_1252, it's fixed:
        fixed = UnicodeDammit.detwingle(doc)
        self.assertEqual(
            "☃☃☃“Hi, I like Windows!”☃☃☃", fixed.decode("utf8"))

    def test_detwingle_ignores_multibyte_characters(self):
        # Each of these characters has a UTF-8 representation ending
        # in \x93. \x93 is a smart quote if interpreted as
        # Windows-1252. But our code knows to skip over multibyte
        # UTF-8 characters, so they'll survive the process unscathed.
        for tricky_unicode_char in (
            "\N{LATIN SMALL LIGATURE OE}", # 2-byte char '\xc5\x93'
            "\N{LATIN SUBSCRIPT SMALL LETTER X}", # 3-byte char '\xe2\x82\x93'
            "\xf0\x90\x90\x93", # This is a CJK character, not sure which one.
            ):
            input = tricky_unicode_char.encode("utf8")
            self.assertTrue(input.endswith(b'\x93'))
            output = UnicodeDammit.detwingle(input)
            self.assertEqual(output, input)
class TestNamedspacedAttribute(SoupTest):
    """Tests for the NamespacedAttribute string subclass."""
    def test_name_may_be_none(self):
        """With no name, the attribute renders as just its prefix."""
        attr = NamespacedAttribute("xmlns", None)
        self.assertEqual(attr, "xmlns")
    def test_attribute_is_equivalent_to_colon_separated_string(self):
        """prefix:name is the plain-string form of the attribute."""
        attr = NamespacedAttribute("a", "b")
        self.assertEqual("a:b", attr)
    def test_attributes_are_equivalent_if_prefix_and_name_identical(self):
        """Equality compares prefix and name but ignores the namespace."""
        attr = NamespacedAttribute("a", "b", "c")
        same = NamespacedAttribute("a", "b", "c")
        self.assertEqual(attr, same)
        # The actual namespace is not considered...
        no_namespace = NamespacedAttribute("a", "b", None)
        self.assertEqual(attr, no_namespace)
        # ...but name and prefix are.
        other_name = NamespacedAttribute("a", "z", "c")
        self.assertNotEqual(attr, other_name)
        other_prefix = NamespacedAttribute("z", "b", "c")
        self.assertNotEqual(attr, other_prefix)
class TestAttributeValueWithCharsetSubstitution(unittest.TestCase):
    """Tests for attribute values that rewrite themselves on re-encoding."""
    # BUG FIX: both tests were named test_content_meta_attribute_value, so
    # the second definition shadowed the first and the charset test was
    # silently never collected or run.
    def test_charset_meta_attribute_value(self):
        """A charset value re-encodes to the name of the new encoding."""
        value = CharsetMetaAttributeValue("euc-jp")
        self.assertEqual("euc-jp", value)
        self.assertEqual("euc-jp", value.original_value)
        self.assertEqual("utf8", value.encode("utf8"))
    def test_content_meta_attribute_value(self):
        """A content-type value rewrites its charset= parameter."""
        value = ContentMetaAttributeValue("text/html; charset=euc-jp")
        self.assertEqual("text/html; charset=euc-jp", value)
        self.assertEqual("text/html; charset=euc-jp", value.original_value)
        self.assertEqual("text/html; charset=utf8", value.encode("utf8"))
| tedlaz/pyted | sms/bs4/tests/test_soup.py | Python | gpl-3.0 | 20,313 |
# -*- coding: utf-8 -*-
import json
import sys
import subprocess
# git pretty-format string that renders each commit as one JSON-ish object;
# passed to `git show` by GitFrontend.get_rev_msg (used by to_console).
TEMPLATE = """\
--pretty=format:'{"commit":"%H","author":"%an <%ae>","date":"%ad","message":"%B"},'
"""
class GitFrontend(object):
    """Thin wrapper around the ``git`` command line for one repository."""
    def __init__(self, git_path):
        # Working directory in which every git command is executed.
        self.git_path = git_path
    def get_rev_msg(self, rev, format):
        """Return `git show` output for *rev* rendered with *format*.

        *format* is a git pretty-format placeholder string (e.g. '%H').
        """
        cmd = [
            'git', 'show', '--no-patch', '--date=iso', '--pretty=format:%s' % format, rev
        ]
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, cwd=self.git_path)
        out, err = p.communicate()
        out = safe_decode(out)
        return out
    def get_rev_msg_obj(self, rev):
        """Return a dict describing *rev*: hash, date, author email, message.

        NOTE(review): this runs `git show` four times per revision; a single
        call with a combined format string would be much faster.
        """
        r = {}
        r['revnum'] = rev
        r['node'] = self.get_rev_msg(rev, '%H')
        r['date'] = self.get_rev_msg(rev, '%ad')
        r['email'] = self.get_rev_msg(rev, '%ae')
        r['desc'] = self.get_rev_msg(rev, '%B')
        return r
    def get_changelog(self):
        """Return all commit hashes, oldest first (one `git log` call)."""
        cmd = ['git', 'log', '--date-order', '--reverse', '--pretty=format:%H']
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, cwd=self.git_path)
        out, err = p.communicate()
        out = safe_decode(out)
        return [l.strip() for l in out.splitlines() if l.strip()]
def safe_decode(s):
    """Decode a byte string to unicode (Python 2 code: uses ``unicode``).

    Already-unicode input is returned untouched; otherwise utf-8 is tried
    first, then Windows 'mbcs', and finally utf-8 with replacement chars.
    NOTE(review): 'mbcs' only exists on Windows; elsewhere unicode(s, 'mbcs')
    raises LookupError, which this except clause does not catch — confirm
    the script is meant for Windows (the mbcs hint suggests so).
    """
    if isinstance(s, unicode):
        return s
    for e in ('utf-8', 'mbcs'):
        try:
            return unicode(s, e)
        except UnicodeDecodeError:
            pass
    return unicode(s, 'utf-8', 'replace')
def to_console(git_path):
    """Print every commit rendered with TEMPLATE to stdout (Python 2)."""
    f = GitFrontend(git_path)
    # Lazily render each revision so huge histories are streamed.
    messages = (f.get_rev_msg(rev, TEMPLATE) for rev in f.get_changelog())
    for m in messages:
        print m
def to_json(git_path, outfile):
    """Dump every commit of the repository at *git_path* into *outfile*.

    The output is a JSON object ``{'messages': [...]}`` where each entry
    is the dict produced by ``GitFrontend.get_rev_msg_obj``.
    """
    f = GitFrontend(git_path)
    # BUG FIX: fetch the changelog once; the original called get_changelog()
    # twice, spawning a second `git log` just to learn the length.
    revs = f.get_changelog()
    message_count = len(revs)
    messages = (f.get_rev_msg_obj(r) for r in revs)
    class StreamArray(list):
        # json's pure-Python encoder (active because indent= is given)
        # iterates list subclasses via __iter__ and emptiness via __len__,
        # so the generator above is streamed instead of materialized.
        def __len__(self):
            return message_count
        def __iter__(self):
            return messages
    # BUG FIX: the file handle was previously opened inline and never closed.
    with open(outfile, 'w') as fp:
        json.dump({'messages': StreamArray()}, fp, indent=4)
if __name__ == '__main__':
    # Expect exactly: script.py <repository_path> <output.json>
    try:
        repo_path, outfile = sys.argv[1:3]
    except (ValueError, IndexError):
        print('Usage:\n {} repository_path output.json'.format(sys.argv[0]))
        sys.exit(-1)
    to_json(repo_path, outfile)
    # to_console(repo_path)
| transcode-de/bitbucket_issue_migration | gitlog2json.py | Python | gpl-3.0 | 2,279 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2003-2006 Donald N. Allingham
# Copyright (C) 2009-2010 Gary Burton
# Copyright (C) 2010 Nick Hall
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#-------------------------------------------------------------------------
#
# internationalization
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.get_translation().gettext
#-------------------------------------------------------------------------
#
# gramps modules
#
#-------------------------------------------------------------------------
from ..views.treemodels.placemodel import PlaceListModel
from .baseselector import BaseSelector
#-------------------------------------------------------------------------
#
# SelectPlace
#
#-------------------------------------------------------------------------
class SelectPlace(BaseSelector):
    """Selector dialog listing all places in the database."""
    def _local_init(self):
        """Set the config keys used to persist this dialog's geometry."""
        self.width_key = 'interface.place-sel-width'
        self.height_key = 'interface.place-sel-height'
    def get_window_title(self):
        """Return the translated dialog title."""
        return _("Select Place")
    def get_model_class(self):
        """Return the tree model class used to list places."""
        return PlaceListModel
    def get_column_titles(self):
        """Describe the columns as (title, width, type, model index) tuples."""
        layout = (
            (_('Title'), 350, 0),
            (_('ID'), 75, 1),
            (_('Street'), 75, 2),
            (_('Locality'), 75, 3),
            (_('City'), 75, 4),
            (_('County'), 75, 5),
            (_('State'), 75, 6),
            (_('Country'), 75, 7),
            # Index 8 (longitude/latitude range) is deliberately skipped.
            (_('Parish'), 75, 9),
        )
        return [(title, width, BaseSelector.TEXT, index)
                for title, width, index in layout]
    def get_from_handle_func(self):
        """Return the database lookup used to fetch a selected place."""
        return self.db.get_place_from_handle
| Forage/Gramps | gramps/gui/selectors/selectplace.py | Python | gpl-2.0 | 2,617 |
import dj_database_url
from .common import *
# Heroku deployment settings layered on top of settings.common.
DEBUG = True
# Read DATABASE_URL from the environment (Heroku Postgres) and keep
# connections alive for up to 500 seconds.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Serve gzip-compressed, cache-busted static files through WhiteNoise.
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
| shivamMg/malvo | malvo/settings/heroku.py | Python | gpl-3.0 | 231 |
import base64
import os
from urllib.parse import parse_qs, urlencode

import requests
from flask import Flask, request, send_from_directory, redirect, render_template
from github import Github

from .add_files import add_files, make_file_contents
# GitHub OAuth endpoints (web application flow).
GITHUB_AUTH_URL = 'https://github.com/login/oauth/authorize'
GITHUB_TOKEN_URL = 'https://github.com/login/oauth/access_token'
# Upstream repository that receives the auto-generated pull requests.
HACKLIST_REPO = 'dotastro/hacks-collector'
# OAuth app credentials; a missing variable fails fast with KeyError.
CLIENT_ID = os.environ['CLIENT_ID']
CLIENT_SECRET = os.environ['CLIENT_SECRET']
app = Flask(__name__)
@app.route("/")
def index():
    """Send every visitor straight into the GitHub OAuth flow."""
    authorize_url = github_authorize()
    return redirect(authorize_url)
def github_authorize():
    """Return the GitHub OAuth authorization URL for this application.

    Requests the 'repo' scope so the app can fork the hack-list repository
    and open pull requests on the user's behalf.
    """
    params = {'client_id': CLIENT_ID,
              'scope': 'repo'}
    # Build the query string with the stdlib instead of constructing (and
    # immediately discarding) a full requests.Request just to read .url.
    return '{0}?{1}'.format(GITHUB_AUTH_URL, urlencode(params))
@app.route("/submit", methods=['GET'])
def submit():
    """OAuth callback: exchange the temporary code for an access token."""
    payload = {'client_id': CLIENT_ID,
               'client_secret': CLIENT_SECRET,
               'code': request.args['code']}
    response = requests.post(GITHUB_TOKEN_URL, data=payload)
    # GitHub answers with a form-encoded body: access_token=...&...
    token = parse_qs(response.text)['access_token'][0]
    return render_template('form-validation.html', access_token=token)
@app.route("/assets/<path:filename>")
def assets(filename):
    """Serve static form assets from the form_assets directory."""
    asset_dir = 'form_assets'
    return send_from_directory(asset_dir, filename)
@app.route("/create", methods=['POST'])
def create_file():
    """Turn the submitted hack form into a pull request on the hack list.

    Steps: optionally store an uploaded picture, fork the upstream repo,
    commit a YAML entry (plus the picture) on a fresh branch, open a PR,
    and render a confirmation page linking to it.
    """
    token = request.form['access_token']
    # Slugified title; also used as branch name and file basename.
    title = request.form['title'].lower().replace(' ', '-')
    dotastronumber = request.form['dotastronumber']
    files = {}
    # Process file upload
    if 'pic' in request.files and request.files['pic'].filename != "":
        content = request.files['pic'].stream.read()
        mimetype = request.files['pic'].mimetype
        if mimetype.startswith('image/'):
            extension = mimetype.split('/')[1]
        else:
            raise Exception("Unknown mimetype: {0}".format(mimetype))
        # GitHub's contents API expects binary payloads base64-encoded.
        content = base64.encodebytes(content).decode('ascii')
        image_filename = "{0}.{1}".format(title, extension)
        files["dotastro{}/{}".format(dotastronumber, image_filename)] = content, 'base64'
    else:
        image_filename = ""
    gh = Github(token)
    main_repo = gh.get_repo(HACKLIST_REPO)
    user_repo = gh.get_user().create_fork(main_repo)
    branches = [b.name for b in user_repo.get_branches()]
    # Find a branch name not already taken: title, title-1, title-2, ...
    newbranchname = title
    if newbranchname in branches:
        i = 1
        newbranchname = title + '-' + str(i)
        while newbranchname in branches:
            i += 1
            newbranchname = title + '-' + str(i)
    filename = title + '.yml'
    files['dotastro{}/{}'.format(dotastronumber, filename)] = make_file_contents(request, image_filename), 'utf-8'
    add_files(user_repo, newbranchname,
              'Auto-generated entry for "{}"'.format(filename), files)
    prtitle = 'Added entry for hack "{}"'.format(request.form['title'])
    prbody = 'This is a PR auto-generated by a form to record information about the dotAstronomy {} hack "{}"'.format(dotastronumber, request.form['title'])
    base = main_repo.default_branch
    # head is "<username>:<branch>" so the PR comes from the user's fork.
    head = gh.get_user().login + ':' + newbranchname
    pr = main_repo.create_pull(title=prtitle, body=prbody, base=base, head=head)
    # Link to the new branch inside the fork on the confirmation page.
    pr_branch_name = pr.head.label.split(':')[1]
    pr_branch_url = pr.head.repo.html_url + '/tree/' + pr_branch_name
    return render_template('done.html', pr_url=pr.html_url,
                           pr_branch_name=pr_branch_name,
                           pr_branch_url=pr_branch_url)
| dotastro/hack-list-submission-app | hack_submission/webapp.py | Python | mit | 3,486 |
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from selenium import webdriver
import sys
class FunctionalTest(StaticLiveServerTestCase):
    """Shared plumbing for Selenium-driven functional tests."""
    @classmethod
    def setUpClass(cls):
        """Target --liveserver=<host> when given, else a local test server."""
        liveserver_arg = next(
            (arg for arg in sys.argv if 'liveserver' in arg), None)
        if liveserver_arg is not None:
            cls.server_url = 'http://' + liveserver_arg.split('=')[1]
            return
        super().setUpClass()
        cls.server_url = cls.live_server_url
    @classmethod
    def tearDownClass(cls):
        """Shut the local server down only if we actually started one."""
        if cls.server_url == cls.live_server_url:
            super().tearDownClass()
    def setUp(self):
        """Open a fresh Firefox session with a short implicit wait."""
        self.browser = webdriver.Firefox()
        self.browser.implicitly_wait(3)
    def tearDown(self):
        """Close the browser after each test."""
        self.browser.quit()
    def check_for_row_in_list_table(self, row_text):
        """Assert that the to-do table contains a row with *row_text*."""
        table = self.browser.find_element_by_id('id_list_table')
        row_texts = [
            row.text for row in table.find_elements_by_tag_name('tr')]
        self.assertIn(row_text, row_texts)
    def get_item_input_box(self):
        """Return the new-item text input element."""
        return self.browser.find_element_by_id('id_text')
| gajimenezmaggiora/gus-django | functional_tests/base.py | Python | mit | 1,086 |
#!/usr/bin/env python
import sys, os
from datetime import datetime
import argparse
import config
# HydroSHEDS tiles, keyed "<zone>/<tile>", covering each study area.
haiti_tiles = [
	"CA/n15w070",
	"CA/n15w075"
]
# Seam entries: [tile name, bbox] pairs for which HAND must be regenerated
# across tile boundaries; the bbox is passed to `gdalwarp -te` below, so it
# is [xmin, ymin, xmax, ymax] in degrees.
haiti_seams = [
	[
		"AF/n15w071",
		[-71, 15, -69, 20]
	]
]
namibia_tiles = [
	"AF/s20e015",
	"AF/s20e020",
	"AF/s20e025",
	"AF/s25e015",
	"AF/s25e020",
	"AF/s25e025"
]
# Given these tiles we want to generate HAND for theses seams
namibia_seams = [
	[
		"AF/s25e019",
		[19, -25, 21, -15]
	],
	[
		"AF/s25e024",
		[24, -25, 26, -15]
	],
	[
		"AF/s21e015",
		[15, -21, 30, -19]
	]
]
# Paths and area selection come from the project-wide config module.
hand_dir = config.HANDS_DIR #"/shared/production/proddata"
hydrosheds_dir = config.HYDROSHEDS_DIR #os.path.join(hand_dir, "HydroSHEDS" )
area = config.HANDS_AREA
zone = config.HANDS_ZONE
dem_vrt = os.path.join(hand_dir, area + "_dem.vrt" )
dir_vrt = os.path.join(hand_dir, area + "_dir.vrt" )
# NOTE(review): eval() resolves "<area>_seams"/"<area>_tiles" from this
# module's globals; a dict lookup would be safer — confirm HANDS_AREA is
# always one of the hard-coded area names above.
seams = eval(area + "_seams")
tile_list = eval(area + "_tiles")
# Make sure the vrts exist
# Build the area-wide DEM virtual mosaic from the per-tile GeoTIFFs.
if( not os.path.exists(dem_vrt)):
	print "vrt does not exist", dem_vrt
	cmd = "gdalbuildvrt " + dem_vrt
	files = " "
	for name in tile_list:
		ar = name.split('/')
		zone = ar[0]
		tile = ar[1]
		dir_file = os.path.join(hydrosheds_dir, zone, tile, tile+"_dem_bil", tile+"_dem_4326.tif" )
		files += dir_file + " "
	cmd += files
	print str(datetime.now()), cmd
	err = os.system(cmd)
# Same for the flow-direction mosaic.
if( not os.path.exists(dir_vrt)):
	print "vrt does not exist", dir_vrt
	cmd = "gdalbuildvrt " + dir_vrt
	files = " "
	for name in tile_list:
		ar = name.split('/')
		zone = ar[0]
		tile = ar[1]
		dir_file = os.path.join(hydrosheds_dir, zone, tile, tile+"_dir_bil", tile+"_dir_4326.tif" )
		files += dir_file + " "
	cmd += files
	print str(datetime.now()), cmd
	err = os.system(cmd)
# For every seam tile: carve its DEM/DIR rasters out of the area mosaics,
# then run the external hand.py tool to compute HAND for it.
for s in seams:
	tileName = s[0]
	tileBBox = s[1]
	ar = tileName.split('/')
	zone = ar[0]
	tile = ar[1]
	folder = os.path.join(hydrosheds_dir, zone, tile )
	if( not os.path.exists(folder)):
		os.mkdir(folder)
	dem_folder = os.path.join(folder, tile+"_dem_bil" )
	if( not os.path.exists(dem_folder)):
		os.mkdir(dem_folder)
	dir_folder = os.path.join(folder, tile+"_dir_bil" )
	if( not os.path.exists(dir_folder)):
		os.mkdir(dir_folder)
	# Clip the DEM mosaic to the seam's bounding box.
	tile_dem_data = os.path.join(dem_folder, tile+"_dem_4326.tif" )
	if( not os.path.exists(tile_dem_data)):
		cmd = "gdalwarp -te "+ " ".join(str(x) for x in tileBBox) + " " + dem_vrt + " " + tile_dem_data
		print cmd
		err = os.system(cmd)
	# Clip the flow-direction mosaic likewise.
	tile_dir_data = os.path.join(dir_folder, tile+"_dir_4326.tif" )
	if( not os.path.exists(tile_dir_data)):
		cmd = "gdalwarp -te "+ " ".join(str(x) for x in tileBBox) + " " + dir_vrt + " " + tile_dir_data
		print cmd
		err = os.system(cmd)
	# Compute HAND for the seam tile via the external hand.py script.
	hand_data = os.path.join(hand_dir, tile+"_hand.tif" )
	if( not os.path.exists(hand_data)):
		cmd = "hand.py -m " + str(config.HANDS_HEIGHT) + " --zone " + zone + " --tile " + tile + " --proj 4326 -v"
		print cmd
		err = os.system(cmd)
#
# Now we need to mosaic it back together
#
# Merge every per-tile HAND raster (regular tiles + seams) into one file,
# then LZW-compress it and delete the uncompressed intermediate.
mergedFileName = os.path.join(hand_dir, config.HANDS_AREA + "_hand_merged.tif" )
mergedLZWFileName = os.path.join(hand_dir, config.HANDS_AREA + "_hand_merged_lzw.tif" )
cmd = "gdalwarp "
for name in tile_list:
	fullName = os.path.join(hand_dir, name+"_hand.tif" )
	cmd += fullName + " "
for s in seams:
	name = s[0]
	fullName = os.path.join(hand_dir, name+"_hand.tif" )
	cmd += fullName + " "
cmd += mergedFileName
print cmd
err = os.system(cmd)
cmd = "gdal_translate -co compress=lzw " + mergedFileName + " " + mergedLZWFileName
print cmd
err = os.system(cmd)
# Cleanup
cmd = "rm " + mergedFileName
print cmd
err = os.system(cmd)
| vightel/FloodMapsWorkshop | python/hand_overlap.py | Python | apache-2.0 | 3,569 |
#!/usr/bin/python
import os,sys,time
# Base /16 network to sweep; third and fourth octets are appended below.
ipaddr = "128.114."
# NOTE(review): x/y are the octets actually scanned; the xrange counters
# i/j only bound the iteration counts (255 and 254 respectively).
x = 0
y = 0
for i in xrange(255,0,-1):
	for j in xrange(255,1,-1):
		print "scanning: " + ipaddr + str(x) + "." + str(y)
		# Append each reverse-lookup (`host`) result to hosts.txt.
		Z = "host " + ipaddr + str(x) + "." + str(y) + " >> hosts.txt"
		os.system(Z)
		y += 1
	x+= 1
	# NOTE(review): y restarts at 1 after the first pass but started at 0
	# for x == 0, so hosts .0.254 and .x.0 are never scanned — confirm.
	y = 1
| lthurlow/Network-Grapher | proj/st1_host.py | Python | mit | 295 |
# pylint: disable=R0903,W0232,missing-docstring, useless-object-inheritance
"""test detection of method which could be a function"""
from __future__ import print_function
# Pylint functional-test fixture: one method that uses ``self`` and one
# that does not (the latter carries the expected-message marker).
class Toto(object):
    """bla bal abl"""
    def __init__(self):
        self.aaa = 2
    def regular_method(self):
        """this method is a real method since it access to self"""
        self.function_method()
    def function_method(self): # [no-self-use]
        """this method isn' a real method since it doesn't need self"""
        print('hello')
# Fixture: abstract methods raising NotImplementedError must not be flagged.
class Base(object):
    """an abstract class"""
    def __init__(self):
        self.aaa = 2
    def check(self, arg):
        """an abstract method, could not be a function"""
        raise NotImplementedError
# Fixture: overriding an inherited method keeps it a real method.
class Sub(Base):
    """a concrete class"""
    def check(self, arg):
        """a concrete method, could not be a function since it need
        polymorphism benefits
        """
        return arg == 0
# Fixture: base class whose method genuinely needs ``self``.
class Super(object):
    """same as before without abstract"""
    attr = 1
    def method(self):
        """regular"""
        print(self.attr)
# Fixture: overridden methods and dunder methods are exempt from
# no-self-use even when they ignore ``self``.
class Sub1(Super):
    """override method with need for self"""
    def method(self):
        """no i can not be a function"""
        print(42)
    def __len__(self):
        """no i can not be a function"""
        print(42)
    def __cmp__(self, other):
        """no i can not be a function"""
        print(42)
    def __copy__(self):
        return 24
    def __getstate__(self):
        return 42
# Fixture: properties must not be reported even without ``self`` access.
class Prop(object):
    @property
    def count(self):
        """Don't emit no-self-use for properties.
        They can't be functions and they can be part of an
        API specification.
        """
        return 42
| kczapla/pylint | pylint/test/functional/no_self_use.py | Python | gpl-2.0 | 1,735 |
# -*- coding:utf-8 -*-
# Copyright (c) 2009-2016 - Simon Conseil
# Copyright (c) 2013 - Christophe-Marie Duquesne
# Copyright (c) 2014 - Jonas Kaufmann
# Copyright (c) 2015 - François D.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from __future__ import absolute_import, print_function
import fnmatch
import logging
import multiprocessing
import os
import random
import sys
import zipfile
from click import progressbar, get_terminal_size
from collections import defaultdict
from datetime import datetime
from itertools import cycle
from os.path import isfile, join, splitext
from . import image, video, signals
from .compat import PY2, UnicodeMixin, strxfrm, url_quote, text_type, pickle
from .image import process_image, get_exif_tags, get_exif_data, get_size
from .settings import get_thumb
from .utils import (Devnull, copy, check_or_create_dir, url_from_path,
read_markdown, cached_property, is_valid_html5_video,
get_mime)
from .video import process_video
from .writer import Writer
class Media(UnicodeMixin):
    """Base Class for media files.
    Attributes:
    - ``type``: ``"image"`` or ``"video"``.
    - ``filename``: Filename of the resized image.
    - ``thumbnail``: Location of the corresponding thumbnail image.
    - ``big``: If not None, location of the unmodified image.
    - ``exif``: If not None contains a dict with the most common tags. For more
        information, see :ref:`simple-exif-data`.
    - ``raw_exif``: If not ``None``, it contains the raw EXIF tags.
    """
    # Subclasses override these with their media type and file extensions.
    type = ''
    extensions = ()
    def __init__(self, filename, path, settings):
        self.src_filename = self.filename = self.url = filename
        self.path = path
        self.settings = settings
        self.ext = os.path.splitext(filename)[1].lower()
        self.src_path = join(settings['source'], path, filename)
        self.dst_path = join(settings['destination'], path, filename)
        self.thumb_name = get_thumb(self.settings, self.filename)
        self.thumb_path = join(settings['destination'], path, self.thumb_name)
        self.logger = logging.getLogger(__name__)
        self._get_metadata()
        signals.media_initialized.send(self)
    def __repr__(self):
        return "<%s>(%r)" % (self.__class__.__name__, str(self))
    def __unicode__(self):
        # Media are identified by their path relative to the gallery root.
        return join(self.path, self.filename)
    @property
    def big(self):
        """Path to the original image, if ``keep_orig`` is set (relative to the
        album directory). Copy the file if needed.
        """
        if self.settings['keep_orig']:
            s = self.settings
            if s['use_orig']:
                # The image *is* the original, just use it
                return self.filename
            orig_path = join(s['destination'], self.path, s['orig_dir'])
            check_or_create_dir(orig_path)
            big_path = join(orig_path, self.src_filename)
            if not isfile(big_path):
                copy(self.src_path, big_path,
                     symlink=s['orig_link'])
            return url_from_path(join(s['orig_dir'], self.src_filename))
    @property
    def thumbnail(self):
        """Path to the thumbnail image (relative to the album directory)."""
        if not isfile(self.thumb_path):
            # if thumbnail is missing (if settings['make_thumbs'] is False)
            # NOTE(review): if self.type is neither 'image' nor 'video',
            # ``generator`` stays unbound and the call below raises — the
            # subclasses always set type, but confirm no other type exists.
            if self.type == 'image':
                generator = image.generate_thumbnail
            elif self.type == 'video':
                generator = video.generate_thumbnail
            self.logger.debug('Generating thumbnail for %r', self)
            # Prefer the processed output file; fall back to the source.
            path = (self.dst_path if os.path.exists(self.dst_path)
                    else self.src_path)
            try:
                generator(path, self.thumb_path, self.settings['thumb_size'],
                          self.settings['thumb_video_delay'],
                          fit=self.settings['thumb_fit'])
            except Exception as e:
                self.logger.error('Failed to generate thumbnail: %s', e)
                return
        return url_from_path(self.thumb_name)
    def _get_metadata(self):
        """ Get image metadata from filename.md: title, description, meta."""
        self.description = ''
        self.meta = {}
        self.title = ''
        descfile = splitext(self.src_path)[0] + '.md'
        if isfile(descfile):
            meta = read_markdown(descfile)
            # read_markdown yields keys like title/description/meta.
            for key, val in meta.items():
                setattr(self, key, val)
    def _get_file_date(self):
        """Return the source file's mtime as a naive local datetime."""
        stat = os.stat(self.src_path)
        return datetime.fromtimestamp(stat.st_mtime)
class Image(Media):
    """Gather all informations on an image file."""
    type = 'image'
    extensions = ('.jpg', '.jpeg', '.png', '.gif')
    @cached_property
    def date(self):
        # Prefer the EXIF capture date; fall back to the file's mtime.
        return self.exif and self.exif.get('dateobj', None) or self._get_file_date()
    @cached_property
    def exif(self):
        # Simplified EXIF dict (JPEG only); None for other formats.
        return (get_exif_tags(self.raw_exif)
                if self.raw_exif and self.ext in ('.jpg', '.jpeg') else None)
    @cached_property
    def raw_exif(self):
        # Raw EXIF tags (JPEG only); implicitly None when unreadable.
        try:
            return (get_exif_data(self.src_path)
                    if self.ext in ('.jpg', '.jpeg') else None)
        except Exception:
            self.logger.warning(u'Could not read EXIF data from %s',
                                self.src_path)
    @cached_property
    def size(self):
        # Dimensions of the resized output image.
        return get_size(self.dst_path)
    @cached_property
    def thumb_size(self):
        # Dimensions of the generated thumbnail.
        return get_size(self.thumb_path)
    def has_location(self):
        """Return True if the EXIF data contains GPS coordinates."""
        return self.exif is not None and 'gps' in self.exif
class Video(Media):
    """Gather all informations on a video file."""
    type = 'video'
    extensions = ('.mov', '.avi', '.mp4', '.webm', '.ogv', '.3gp')
    def __init__(self, filename, path, settings):
        super(Video, self).__init__(filename, path, settings)
        base, ext = splitext(filename)
        self.src_filename = filename
        # Videos always date from the file's mtime (no EXIF equivalent).
        self.date = self._get_file_date()
        # Unless the original is kept as-is AND already HTML5-playable,
        # point filename/url/dst_path at the transcoded output file.
        if not settings['use_orig'] or not is_valid_html5_video(ext):
            video_format = settings['video_format']
            ext = '.' + video_format
            self.filename = self.url = base + ext
            self.mime = get_mime(ext)
            self.dst_path = join(settings['destination'], path, base + ext)
        else:
            self.mime = get_mime(ext)
class Album(UnicodeMixin):
    """Gather all informations on an album.
    Attributes:
    :var description_file: Name of the Markdown file which gives information
        on an album
    :ivar index_url: URL to the index page.
    :ivar output_file: Name of the output HTML file
    :ivar meta: Meta data from the Markdown file.
    :ivar description: description from the Markdown file.
    For details how to annotate your albums with meta data, see
    :doc:`album_information`.
    """
    description_file = "index.md"
    def __init__(self, path, settings, dirnames, filenames, gallery):
        self.path = path
        self.name = path.split(os.path.sep)[-1]
        self.gallery = gallery
        self.settings = settings
        self.subdirs = dirnames
        self.output_file = settings['output_filename']
        self._thumbnail = None
        # '.' denotes the gallery root itself.
        if path == '.':
            self.src_path = settings['source']
            self.dst_path = settings['destination']
        else:
            self.src_path = join(settings['source'], path)
            self.dst_path = join(settings['destination'], path)
        self.logger = logging.getLogger(__name__)
        self._get_metadata()
        # optionally add index.html to the URLs
        self.url_ext = self.output_file if settings['index_in_url'] else ''
        self.index_url = url_from_path(os.path.relpath(
            settings['destination'], self.dst_path)) + '/' + self.url_ext
        #: List of all medias in the album (:class:`~sigal.gallery.Image` and
        #: :class:`~sigal.gallery.Video`).
        self.medias = medias = []
        self.medias_count = defaultdict(int)
        # Wrap each recognised file in an Image/Video; skip everything else.
        for f in filenames:
            ext = splitext(f)[1]
            if ext.lower() in Image.extensions:
                media = Image(f, self.path, settings)
            elif ext.lower() in Video.extensions:
                media = Video(f, self.path, settings)
            else:
                continue
            self.medias_count[media.type] += 1
            medias.append(media)
        signals.album_initialized.send(self)
    def __repr__(self):
        return "<%s>(path=%r, title=%r)" % (self.__class__.__name__, self.path,
                                            self.title)
    def __unicode__(self):
        return (u"{} : ".format(self.path) +
                ', '.join("{} {}s".format(count, _type)
                          for _type, count in self.medias_count.items()))
    def __len__(self):
        return len(self.medias)
    def __iter__(self):
        return iter(self.medias)
    def _get_metadata(self):
        """Get album metadata from `description_file` (`index.md`):
        -> title, thumbnail image, description
        """
        descfile = join(self.src_path, self.description_file)
        self.description = ''
        self.meta = {}
        # default: get title from directory name
        self.title = os.path.basename(self.path if self.path != '.'
                                      else self.src_path)
        if isfile(descfile):
            meta = read_markdown(descfile)
            for key, val in meta.items():
                setattr(self, key, val)
        try:
            self.author = self.meta['author'][0]
        except KeyError:
            self.author = self.settings.get('author')
    def create_output_directories(self):
        """Create output directories for thumbnails and original images."""
        check_or_create_dir(self.dst_path)
        if self.medias:
            check_or_create_dir(join(self.dst_path,
                                     self.settings['thumb_dir']))
        if self.medias and self.settings['keep_orig']:
            self.orig_path = join(self.dst_path, self.settings['orig_dir'])
            check_or_create_dir(self.orig_path)
    def sort_subdirs(self, albums_sort_attr):
        """Sort the sub-album directory names in place.

        *albums_sort_attr* is either an Album attribute name or a
        "meta.<key>" reference into the album's Markdown metadata; an
        empty value sorts by locale-aware directory name.
        """
        if self.subdirs:
            if albums_sort_attr:
                root_path = self.path if self.path != '.' else ''
                if albums_sort_attr.startswith("meta."):
                    meta_key = albums_sort_attr.split(".", 1)[1]
                    key = lambda s: strxfrm(
                        self.gallery.albums[join(root_path, s)].meta.get(meta_key, [''])[0])
                else:
                    key = lambda s: strxfrm(getattr(
                        self.gallery.albums[join(root_path, s)], albums_sort_attr))
            else:
                key = strxfrm
            self.subdirs.sort(key=key,
                              reverse=self.settings['albums_sort_reverse'])
        signals.albums_sorted.send(self)
    def sort_medias(self, medias_sort_attr):
        """Sort this album's media in place by date, meta key or attribute."""
        if self.medias:
            if medias_sort_attr == 'date':
                # Media without a date sort as "now", i.e. last.
                key = lambda s: s.date or datetime.now()
            elif medias_sort_attr.startswith('meta.'):
                meta_key = medias_sort_attr.split(".", 1)[1]
                key = lambda s: strxfrm(s.meta.get(meta_key, [''])[0])
            else:
                key = lambda s: strxfrm(getattr(s, medias_sort_attr))
            self.medias.sort(key=key,
                             reverse=self.settings['medias_sort_reverse'])
        signals.medias_sorted.send(self)
    @property
    def images(self):
        """List of images (:class:`~sigal.gallery.Image`)."""
        for media in self.medias:
            if media.type == 'image':
                yield media
    @property
    def videos(self):
        """List of videos (:class:`~sigal.gallery.Video`)."""
        for media in self.medias:
            if media.type == 'video':
                yield media
    @property
    def albums(self):
        """List of :class:`~sigal.gallery.Album` objects for each
        sub-directory.
        """
        root_path = self.path if self.path != '.' else ''
        return [self.gallery.albums[join(root_path, path)]
                for path in self.subdirs]
    @property
    def url(self):
        """URL of the album, relative to its parent."""
        # encode + quote handles non-ASCII directory names (py2/py3 compat).
        url = self.name.encode('utf-8')
        return url_quote(url) + '/' + self.url_ext
    @property
    def thumbnail(self):
        """Path to the thumbnail of the album.

        Fallback order: cached value, Markdown 'thumbnail' metadata, first
        landscape image, first media with a thumbnail, then the first
        sub-album that has one.
        """
        if self._thumbnail:
            # stop if it is already set
            return url_from_path(self._thumbnail)
        # Test the thumbnail from the Markdown file.
        thumbnail = self.meta.get('thumbnail', [''])[0]
        if thumbnail and isfile(join(self.src_path, thumbnail)):
            self._thumbnail = join(self.name, get_thumb(self.settings,
                                                        thumbnail))
            self.logger.debug("Thumbnail for %r : %s", self, self._thumbnail)
            return url_from_path(self._thumbnail)
        else:
            # find and return the first landscape image
            for f in self.medias:
                ext = splitext(f.filename)[1]
                if ext.lower() in Image.extensions:
                    # Use f.size if available as it is quicker (in cache), but
                    # fallback to the size of src_path if dst_path is missing
                    size = f.size
                    if size is None:
                        size = get_size(f.src_path)
                    if size['width'] > size['height']:
                        self._thumbnail = join(self.name, f.thumbnail)
                        self.logger.debug(
                            "Use 1st landscape image as thumbnail for %r :"
                            " %s", self, self._thumbnail)
                        return url_from_path(self._thumbnail)
            # else simply return the 1st media file
            if not self._thumbnail and self.medias:
                for media in self.medias:
                    if media.thumbnail is not None:
                        self._thumbnail = join(self.name, media.thumbnail)
                        break
                else:
                    self.logger.warning("No thumbnail found for %r", self)
                    return None
                self.logger.debug("Use the 1st image as thumbnail for %r : %s",
                                  self, self._thumbnail)
                return url_from_path(self._thumbnail)
            # use the thumbnail of their sub-directories
            if not self._thumbnail:
                for path, album in self.gallery.get_albums(self.path):
                    if album.thumbnail:
                        self._thumbnail = join(self.name, album.thumbnail)
                        self.logger.debug(
                            "Using thumbnail from sub-directory for %r : %s",
                            self, self._thumbnail)
                        return url_from_path(self._thumbnail)
        self.logger.error('Thumbnail not found for %r', self)
        return None
    @property
    def random_thumbnail(self):
        """Thumbnail of a randomly chosen media; falls back to .thumbnail."""
        try :
            return url_from_path(join(self.name, random.choice(self.medias).thumbnail))
        except IndexError:
            return self.thumbnail
    @property
    def breadcrumb(self):
        """List of ``(url, title)`` tuples defining the current breadcrumb
        path.
        """
        if self.path == '.':
            return []
        path = self.path
        breadcrumb = [((self.url_ext or '.'), self.title)]
        # Walk up the directory tree until the gallery root is reached.
        while True:
            path = os.path.normpath(os.path.join(path, '..'))
            if path == '.':
                break
            url = (url_from_path(os.path.relpath(path, self.path)) + '/' +
                   self.url_ext)
            breadcrumb.append((url, self.gallery.albums[path].title))
        breadcrumb.reverse()
        return breadcrumb
    @property
    def show_map(self):
        """Check if we have at least one photo with GPS location in the album
        """
        return any(image.has_location() for image in self.images)
    @cached_property
    def zip(self):
        """Make a ZIP archive with all media files and return its path.
        If the ``zip_gallery`` setting is set, it contains the location of a
        zip archive with all original images of the corresponding directory.
        """
        zip_gallery = self.settings['zip_gallery']
        if zip_gallery and len(self) > 0:
            zip_gallery = zip_gallery.format(album=self)
            archive_path = join(self.dst_path, zip_gallery)
            if self.settings.get('zip_skip_if_exists', False) and isfile(archive_path):
                self.logger.debug("Archive %s already created, passing", archive_path)
                return zip_gallery
            archive = zipfile.ZipFile(archive_path, 'w', allowZip64=True)
            # 'orig' archives the source files, otherwise the processed ones.
            attr = ('src_path' if self.settings['zip_media_format'] == 'orig'
                    else 'dst_path')
            for p in self:
                path = getattr(p, attr)
                try:
                    archive.write(path, os.path.split(path)[1])
                except OSError as e:
                    # NOTE(review): logger.warn is deprecated; warning()
                    # is the canonical spelling.
                    self.logger.warn('Failed to add %s to the ZIP: %s', p, e)
            archive.close()
            self.logger.debug('Created ZIP archive %s', archive_path)
            return zip_gallery
class Gallery(object):
def __init__(self, settings, ncpu=None):
self.settings = settings
self.logger = logging.getLogger(__name__)
self.stats = defaultdict(int)
self.init_pool(ncpu)
check_or_create_dir(settings['destination'])
# Build the list of directories with images
albums = self.albums = {}
src_path = self.settings['source']
ignore_dirs = settings['ignore_directories']
ignore_files = settings['ignore_files']
progressChars = cycle(["/", "-", "\\", "|"])
if self.logger.getEffectiveLevel() >= logging.WARNING:
self.progressbar_target = None
else:
self.progressbar_target = Devnull()
for path, dirs, files in os.walk(src_path, followlinks=True,
topdown=False):
if self.logger.getEffectiveLevel() >= logging.WARNING:
print("\rCollecting albums " + next(progressChars), end="")
relpath = os.path.relpath(path, src_path)
# Test if the directory match the ignore_dirs settings
if ignore_dirs and any(fnmatch.fnmatch(relpath, ignore)
for ignore in ignore_dirs):
self.logger.info('Ignoring %s', relpath)
continue
# Remove files that match the ignore_files settings
if ignore_files:
files_path = {join(relpath, f) for f in files}
for ignore in ignore_files:
files_path -= set(fnmatch.filter(files_path, ignore))
self.logger.debug('Files before filtering: %r', files)
files = [os.path.split(f)[1] for f in files_path]
self.logger.debug('Files after filtering: %r', files)
# Remove sub-directories that have been ignored in a previous
# iteration (as topdown=False, sub-directories are processed before
# their parent
for d in dirs[:]:
path = join(relpath, d) if relpath != '.' else d
if path not in albums.keys():
dirs.remove(d)
album = Album(relpath, settings, dirs, files, self)
if not album.medias and not album.albums:
self.logger.info('Skip empty album: %r', album)
else:
album.create_output_directories()
albums[relpath] = album
with progressbar(albums.values(), label="Sorting albums",
file=self.progressbar_target) as progress_albums:
for album in progress_albums:
album.sort_subdirs(settings['albums_sort_attr'])
with progressbar(albums.values(), label="Sorting media",
file=self.progressbar_target) as progress_albums:
for album in progress_albums:
album.sort_medias(settings['medias_sort_attr'])
self.logger.debug('Albums:\n%r', albums.values())
signals.gallery_initialized.send(self)
    @property
    def title(self):
        """Title of the gallery.

        Falls back to the root album's title when the ``title`` setting
        is empty/falsy.
        """
        return self.settings['title'] or self.albums['.'].title
def init_pool(self, ncpu):
try:
cpu_count = multiprocessing.cpu_count()
except NotImplementedError:
cpu_count = 1
if ncpu is None:
ncpu = cpu_count
else:
try:
ncpu = int(ncpu)
except ValueError:
self.logger.error('ncpu should be an integer value')
ncpu = cpu_count
self.logger.info("Using %s cores", ncpu)
if ncpu > 1:
self.pool = multiprocessing.Pool(processes=ncpu)
else:
self.pool = None
def get_albums(self, path):
"""Return the list of all sub-directories of path."""
for name in self.albums[path].subdirs:
subdir = os.path.normpath(join(path, name))
yield subdir, self.albums[subdir]
for subname, album in self.get_albums(subdir):
yield subname, self.albums[subdir]
    def build(self, force=False):
        """Create the image gallery.

        Collects the processing tasks from every album, runs them either
        through the multiprocessing pool (``self.pool``) or sequentially,
        removes media that failed, then writes the HTML pages if the
        ``write_html`` setting is enabled.
        """
        if not self.albums:
            self.logger.warning("No albums found.")
            return

        def log_func(x):
            # 63 is the total length of progressbar, label, percentage, etc
            available_length = get_terminal_size()[0] - 64
            if x and available_length > 10:
                text = text_type(x.name)[:available_length]
                if PY2:
                    text = text.encode('utf-8')
                return text
            else:
                return ""

        try:
            with progressbar(self.albums.values(), label="Collecting files",
                             item_show_func=log_func, show_eta=False,
                             file=self.progressbar_target) as albums:
                media_list = [f for album in albums
                              for f in self.process_dir(album, force=force)]
        except KeyboardInterrupt:
            sys.exit('Interrupted')

        bar_opt = {'label': "Processing files",
                   'show_pos': True,
                   'file': self.progressbar_target}
        failed_files = []

        if self.pool:
            try:
                with progressbar(length=len(media_list), **bar_opt) as bar:
                    # imap_unordered: results arrive as workers finish,
                    # not in submission order.
                    for res in self.pool.imap_unordered(worker, media_list):
                        if res:
                            failed_files.append(res)
                        next(bar)
                self.pool.close()
                self.pool.join()
            except KeyboardInterrupt:
                self.pool.terminate()
                sys.exit('Interrupted')
            except pickle.PicklingError:
                self.logger.critical(
                    "Failed to process files with the multiprocessing feature."
                    " This can be caused by some module import or object "
                    "defined in the settings file, which can't be serialized.",
                    exc_info=True)
                sys.exit('Abort')
        else:
            # No pool: process sequentially in this process.
            with progressbar(media_list, **bar_opt) as medias:
                for media_item in medias:
                    res = process_file(media_item)
                    if res:
                        failed_files.append(res)

        if failed_files:
            self.remove_files(failed_files)
        print('')

        if self.settings['write_html']:
            writer = Writer(self.settings, index_title=self.title)
            for album in self.albums.values():
                writer.write(album)
        signals.gallery_build.send(self)
def remove_files(self, files):
self.logger.error('Some files have failed to be processed:')
for path, filename in files:
self.logger.error(' - %s/%s', path, filename)
album = self.albums[path]
for f in album.medias:
if f.filename == filename:
self.stats[f.type + '_failed'] += 1
album.medias.remove(f)
break
self.logger.error('You can run sigal in verbose (--verbose) or debug '
'(--debug) mode to get more details.')
def process_dir(self, album, force=False):
"""Process a list of images in a directory."""
for f in album:
if isfile(f.dst_path) and not force:
self.logger.info("%s exists - skipping", f.filename)
self.stats[f.type + '_skipped'] += 1
else:
self.stats[f.type] += 1
yield (f.type, f.path, f.filename, f.src_path, album.dst_path,
self.settings)
def process_file(args):
    """Run the appropriate processor on one media task tuple.

    ``args`` is ``(ftype, path, filename, src_path, dst_path, settings)``.
    Returns ``(path, filename)`` when processing failed (non-zero status
    from the processor) so the parent process can report it, else None.
    """
    ftype = args[0]
    processor = process_image if ftype == 'image' else process_video
    failed = processor(*args[3:])
    return args[1:3] if failed else None
def worker(args):
    """Multiprocessing entry point: process one media task tuple.

    KeyboardInterrupt is swallowed here so a Ctrl-C in the parent can
    terminate the pool cleanly instead of every worker printing a
    traceback.
    """
    try:
        return process_file(args)
    except KeyboardInterrupt:
        pass
| xouillet/sigal | sigal/gallery.py | Python | mit | 26,861 |
# from datetime import datetime
#
# def get_seconds():
# """Return current seconds"""
# return datetime.now().second
#
#
# get_seconds()
#
# print(get_seconds.__doc__)
# print(get_seconds.__name__)
#=========================================================
# def split_tags(tag_string):
# tag_list = []
# for tag in tag_string.split(','):
# tag_list.append(tag.strip())
# return tag_list
#
# print(split_tags('python, coursera, mooc'))
#
# def add(x:int, y:int) -> int:
# return x + y
#
# print(add(10, 11))
# print(add('still ', 'works'))
#=========================================================
# def extender(source_list, extend_list):
# source_list.extend(extend_list)
#
#
# values = [1, 2, 3]
# extender(values, [4, 5, 6])
# print(values)
#
#
# def replacer(source_tuple, replace_with):
# source_tuple = replace_with
#
#
# user_info = ('Guido', '31/01')
# replacer(user_info, ('Larry', '27/09'))
# print(user_info)
#=========================================================
name_list = ['John', 'Bill', 'Amy']
print(*name_list)
def printer(**kwargs):
    """Print the type of the kwargs mapping, then each key/value pair."""
    print(type(kwargs))
    for name, val in kwargs.items():
        print(f'{name}: {val}')
print(printer(a=10, b =11))
| Foxfanmedium/python_training | OnlineCoursera/mail_ru/Python_1/Week_2/7_functions.py | Python | apache-2.0 | 1,245 |
import sys
from setuptools import setup
# Package metadata gathered in a dict so it can be inspected or tweaked
# before being handed to setuptools.
options = dict(
    name='igor',
    version='1.1.1',
    author='nvms',
    packages=['igor'],
    entry_points={
        # Installs an ``igor`` console command wired to igor.__main__:main.
        'console_scripts': [
            'igor = igor.__main__:main'
        ]
    },
    classifiers=[
        'Development Status :: 3 - Alpha'
    ]
)

setup(**options)
| nvms/igor | setup.py | Python | mit | 329 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/core/util/memmapped_file_system.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/core/util/memmapped_file_system.proto',
package='tensorflow',
syntax='proto3',
serialized_pb=_b('\n0tensorflow/core/util/memmapped_file_system.proto\x12\ntensorflow\"C\n#MemmappedFileSystemDirectoryElement\x12\x0e\n\x06offset\x18\x01 \x01(\x04\x12\x0c\n\x04name\x18\x02 \x01(\t\"`\n\x1cMemmappedFileSystemDirectory\x12@\n\x07\x65lement\x18\x01 \x03(\x0b\x32/.tensorflow.MemmappedFileSystemDirectoryElementB\x03\xf8\x01\x01\x62\x06proto3')
)
_MEMMAPPEDFILESYSTEMDIRECTORYELEMENT = _descriptor.Descriptor(
name='MemmappedFileSystemDirectoryElement',
full_name='tensorflow.MemmappedFileSystemDirectoryElement',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='offset', full_name='tensorflow.MemmappedFileSystemDirectoryElement.offset', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name', full_name='tensorflow.MemmappedFileSystemDirectoryElement.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=64,
serialized_end=131,
)
_MEMMAPPEDFILESYSTEMDIRECTORY = _descriptor.Descriptor(
name='MemmappedFileSystemDirectory',
full_name='tensorflow.MemmappedFileSystemDirectory',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='element', full_name='tensorflow.MemmappedFileSystemDirectory.element', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=133,
serialized_end=229,
)
_MEMMAPPEDFILESYSTEMDIRECTORY.fields_by_name['element'].message_type = _MEMMAPPEDFILESYSTEMDIRECTORYELEMENT
DESCRIPTOR.message_types_by_name['MemmappedFileSystemDirectoryElement'] = _MEMMAPPEDFILESYSTEMDIRECTORYELEMENT
DESCRIPTOR.message_types_by_name['MemmappedFileSystemDirectory'] = _MEMMAPPEDFILESYSTEMDIRECTORY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
MemmappedFileSystemDirectoryElement = _reflection.GeneratedProtocolMessageType('MemmappedFileSystemDirectoryElement', (_message.Message,), dict(
DESCRIPTOR = _MEMMAPPEDFILESYSTEMDIRECTORYELEMENT,
__module__ = 'tensorflow.core.util.memmapped_file_system_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.MemmappedFileSystemDirectoryElement)
))
_sym_db.RegisterMessage(MemmappedFileSystemDirectoryElement)
MemmappedFileSystemDirectory = _reflection.GeneratedProtocolMessageType('MemmappedFileSystemDirectory', (_message.Message,), dict(
DESCRIPTOR = _MEMMAPPEDFILESYSTEMDIRECTORY,
__module__ = 'tensorflow.core.util.memmapped_file_system_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.MemmappedFileSystemDirectory)
))
_sym_db.RegisterMessage(MemmappedFileSystemDirectory)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\370\001\001'))
# @@protoc_insertion_point(module_scope)
| ryfeus/lambda-packs | Tensorflow_Pandas_Numpy/source3.6/tensorflow/core/util/memmapped_file_system_pb2.py | Python | mit | 4,351 |
#!/usr/bin/env python
import sys
import time
import logging
import websocket
import ujson as json
import set_path
from tilde.core.settings import settings
logging.basicConfig(level=logging.INFO)
START_TIME = time.time()
class RespHandler(object):
    """Callback handlers for the test websocket client.

    All handlers are classmethods so they can be handed unbound to
    ``websocket.WebSocketApp`` (which calls them with the socket as the
    only positional argument) without instantiating this class.
    """

    @classmethod
    def on_open(cls, ws):
        """Log in as soon as the connection is established."""
        logging.debug("Client connected")
        to_send = {'act': 'login'}
        ws.send(json.dumps(to_send))

    @classmethod
    def on_message(cls, ws, message):
        """After a successful login, request a sleep; then shut down.

        The second response (anything but ``login``) marks the end of
        the round-trip: log the elapsed time and exit the process.
        """
        logging.debug("Client got: %s" % message)
        message = json.loads(message)
        if message['act'] == 'login':
            to_send = {'act': 'sleep', 'req': 4}
            ws.send(json.dumps(to_send))
        else:
            logging.info("Client done in: %1.2f sc" % (time.time() - START_TIME))
            ws.close()
            sys.exit(0)

    @classmethod
    def on_error(cls, ws, error):
        """Log any transport-level error."""
        logging.error(error)

    @classmethod
    def on_close(cls, ws):
        """Ensure the socket is closed when the connection ends."""
        logging.debug("Client finished")
        ws.close()
if __name__ == "__main__":
websocket.enableTrace(False)
ws = websocket.WebSocketApp("ws://localhost:%s/websocket" % settings['webport'],
on_open = RespHandler.on_open,
on_message = RespHandler.on_message,
on_error = RespHandler.on_error,
on_close = RespHandler.on_close)
ws.run_forever()
| tilde-lab/tilde | tests/berlinium/asleep_client.py | Python | mit | 1,445 |
"""
Scoring Metrics for KDD Cup 2012, Track 2
Reads in a solution/subission files
Scores on the following three metrics:
-NWMAE
-WRMSE
-AUC
Author: Ben Hamner (kdd2012@benhamner.com)
"""
def scoreElementwiseMetric(num_clicks, num_impressions, predicted_ctr, elementwise_metric):
    """Impression-weighted average of an elementwise error metric.

    Parameters
    ----------
    num_clicks : list of click counts
    num_impressions : list of impression counts (used as weights)
    predicted_ctr : list of predicted click-through rates
    elementwise_metric : callable(clicks, impressions, p_ctr) -> float
        per-instance error, e.g. squared or absolute error

    Returns
    -------
    float : the weighted mean of the metric over the whole set
    """
    weighted_total = 0.0
    total_weight = 0.0
    for clicks, impressions, p_ctr in zip(num_clicks, num_impressions, predicted_ctr):
        weighted_total += impressions * elementwise_metric(clicks, impressions, p_ctr)
        total_weight += impressions
    return weighted_total / total_weight
def scoreWRMSE(num_clicks, num_impressions, predicted_ctr):
    """Weighted Root Mean Squared Error (WRMSE).

    Parameters
    ----------
    num_clicks : list of click counts
    num_impressions : list of impression counts
    predicted_ctr : list of predicted click-through rates

    Returns
    -------
    float : sqrt of the impression-weighted mean squared error
    """
    import math

    def squared_error(clicks, impressions, p_ctr):
        return math.pow(clicks / impressions - p_ctr, 2.0)

    wmse = scoreElementwiseMetric(num_clicks, num_impressions, predicted_ctr,
                                  squared_error)
    return math.sqrt(wmse)
def scoreNWMAE(num_clicks, num_impressions, predicted_ctr):
    """Normalized weighted mean absolute error.

    Parameters
    ----------
    num_clicks : list of click counts
    num_impressions : list of impression counts
    predicted_ctr : list of predicted click-through rates

    Returns
    -------
    float : the impression-weighted mean absolute error
    """
    def abs_error(clicks, impressions, p_ctr):
        return abs(clicks / impressions - p_ctr)

    return scoreElementwiseMetric(num_clicks, num_impressions, predicted_ctr,
                                  abs_error)
def scoreClickAUC(num_clicks, num_impressions, predicted_ctr):
    """Area under the ROC curve (AUC) for click rates.

    Instances are ranked by predicted CTR (descending); clicks are the
    positives and non-click impressions the negatives.  Instances that
    share the same predicted CTR are treated as a single bucket, which
    contributes a trapezoid to the area (the standard tie correction).

    Parameters
    ----------
    num_clicks : list of click counts
    num_impressions : list of impression counts
    predicted_ctr : list of predicted click-through rates

    Returns
    -------
    float : AUC in [0, 1]
    """
    order = sorted(range(len(predicted_ctr)),
                   key=lambda idx: predicted_ctr[idx], reverse=True)
    area = 0.0
    clicks_so_far = 0.0
    clicks_at_bucket_start = 0.0
    bucket_no_clicks = 0.0
    total_no_clicks = 0.0
    # Seed with a value guaranteed to differ from the first prediction so
    # the first iteration starts a fresh bucket.
    previous_ctr = predicted_ctr[order[0]] + 1.0
    for idx in order:
        if previous_ctr != predicted_ctr[idx]:
            # Close the previous bucket: trapezoid over its negatives.
            area += (clicks_so_far + clicks_at_bucket_start) * bucket_no_clicks / 2.0
            clicks_at_bucket_start = clicks_so_far
            bucket_no_clicks = 0.0
            previous_ctr = predicted_ctr[idx]
        misses = num_impressions[idx] - num_clicks[idx]
        bucket_no_clicks += misses
        total_no_clicks += misses
        clicks_so_far += num_clicks[idx]
    # Close the final bucket, then normalize by positives * negatives.
    area += (clicks_so_far + clicks_at_bucket_start) * bucket_no_clicks / 2.0
    return area / (clicks_so_far * total_no_clicks)
def read_solution_file(f_sol_name):
    """
    Reads in a solution file

    Parameters
    ----------
    f_sol_name : solution file name

    Returns
    -------
    num_clicks : a list of clicks
    num_impressions : a list of impressions
    """
    num_clicks = []
    num_impressions = []
    i = 0
    # Fix: open the file in a context manager so the handle is always
    # closed (the original leaked it).
    with open(f_sol_name) as f_sol:
        for line in f_sol:
            fields = line.strip().split(",")
            try:
                clicks = float(fields[0])
                impressions = float(fields[1])
            except (ValueError, IndexError) as e:
                # The first unparsable line is assumed to be the header
                # and skipped silently; later ones are reported.
                if i != 0:
                    print("parse error at line: %d" % i)
                    print(e)
                continue
            num_clicks.append(clicks)
            num_impressions.append(impressions)
            i += 1
    print("submission length=%d" % i)
    return (num_clicks, num_impressions)
def read_submission_file(f_sub_name):
    """
    Reads in a submission file

    Parameters
    ----------
    f_sub_name : submission file name

    Returns
    -------
    predicted_ctr : a list of predicted click-through rates
    """
    predicted_ctr = []
    # Fix: open the file in a context manager so the handle is always
    # closed (the original leaked it).
    with open(f_sub_name) as f_sub:
        for line in f_sub:
            fields = line.strip().split(",")
            # Only the first column carries the predicted CTR.
            predicted_ctr.append(float(fields[0]))
    return predicted_ctr
def main():
    """Command-line entry point: score a submission against a solution.

    Usage: python scoreKDD.py solution_file.csv submission_file.csv
    Prints the AUC, NWMAE and WRMSE metrics to stdout.
    """
    import sys
    if len(sys.argv) != 3:
        print("Usage: python scoreKDD.py solution_file.csv submission_file.csv")
        sys.exit(2)
    num_clicks, num_impressions = read_solution_file(sys.argv[1])
    predicted_ctr = read_submission_file(sys.argv[2])
    print("num_clicks : %d" % len(num_clicks))
    print("num_impressions : %d" % len(num_impressions))
    print("num_predicted_ctrs: %d" % len(predicted_ctr))
    auc = scoreClickAUC(num_clicks, num_impressions, predicted_ctr)
    print("AUC : %f" % auc)
    nwmae = scoreNWMAE(num_clicks, num_impressions, predicted_ctr)
    print("NWMAE: %f" % nwmae)
    wrmse = scoreWRMSE(num_clicks, num_impressions, predicted_ctr)
    print("WRMSE: %f" % wrmse)

if __name__=="__main__":
    main()
| naritta/hivemall | resources/examples/kddtrack2/scoreKDD.py | Python | apache-2.0 | 5,940 |
# Copyright 2015 The Tornado Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Asynchronous queues for coroutines. These classes are very similar
to those provided in the standard library's `asyncio package
<https://docs.python.org/3/library/asyncio-queue.html>`_.
.. warning::
Unlike the standard library's `queue` module, the classes defined here
are *not* thread-safe. To use these queues from another thread,
use `.IOLoop.add_callback` to transfer control to the `.IOLoop` thread
before calling any queue methods.
"""
from __future__ import absolute_import, division, print_function
import collections
import heapq
from tornado import gen, ioloop
from tornado.concurrent import Future, future_set_result_unless_cancelled
from tornado.locks import Event
__all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'QueueFull', 'QueueEmpty']
class QueueEmpty(Exception):
    """Raised by `.Queue.get_nowait` when the queue has no items.

    Mirrors the standard library's ``queue.Empty``.
    """
    pass
class QueueFull(Exception):
    """Raised by `.Queue.put_nowait` when a queue is at its maximum size.

    Mirrors the standard library's ``queue.Full``.
    """
    pass
def _set_timeout(future, timeout):
    """Make ``future`` fail with ``gen.TimeoutError`` at ``timeout``.

    No-op when ``timeout`` is falsy.  The scheduled IOLoop timeout is
    removed as soon as the future completes for any reason.
    """
    if not timeout:
        return
    io_loop = ioloop.IOLoop.current()

    def on_timeout():
        if not future.done():
            future.set_exception(gen.TimeoutError())

    timeout_handle = io_loop.add_timeout(timeout, on_timeout)
    future.add_done_callback(
        lambda _: io_loop.remove_timeout(timeout_handle))
class _QueueIterator(object):
    # Adapter returned by Queue.__aiter__: lets ``async for`` drain a
    # Queue by turning each __anext__ into a Queue.get() future.  The
    # iteration never raises StopAsyncIteration; it simply waits.
    def __init__(self, q):
        self.q = q

    def __anext__(self):
        return self.q.get()
class Queue(object):
    """Coordinate producer and consumer coroutines.

    If maxsize is 0 (the default) the queue size is unbounded.

    .. testcode::

        from tornado import gen
        from tornado.ioloop import IOLoop
        from tornado.queues import Queue

        q = Queue(maxsize=2)

        async def consumer():
            async for item in q:
                try:
                    print('Doing work on %s' % item)
                    await gen.sleep(0.01)
                finally:
                    q.task_done()

        async def producer():
            for item in range(5):
                await q.put(item)
                print('Put %s' % item)

        async def main():
            # Start consumer without waiting (since it never finishes).
            IOLoop.current().spawn_callback(consumer)
            await producer()  # Wait for producer to put all tasks.
            await q.join()  # Wait for consumer to finish all tasks.
            print('Done')

        IOLoop.current().run_sync(main)

    .. testoutput::

        Put 0
        Put 1
        Doing work on 0
        Put 2
        Doing work on 1
        Put 3
        Doing work on 2
        Put 4
        Doing work on 3
        Doing work on 4
        Done

    In versions of Python without native coroutines (before 3.5),
    ``consumer()`` could be written as::

        @gen.coroutine
        def consumer():
            while True:
                item = yield q.get()
                try:
                    print('Doing work on %s' % item)
                    yield gen.sleep(0.01)
                finally:
                    q.task_done()

    .. versionchanged:: 4.3
       Added ``async for`` support in Python 3.5.

    """
    def __init__(self, maxsize=0):
        if maxsize is None:
            raise TypeError("maxsize can't be None")
        if maxsize < 0:
            raise ValueError("maxsize can't be negative")
        self._maxsize = maxsize
        self._init()
        self._getters = collections.deque([])  # Futures waiting in get().
        self._putters = collections.deque([])  # Pairs of (item, Future) waiting in put().
        self._unfinished_tasks = 0
        # Set whenever there are no unfinished tasks; join() waits on it.
        self._finished = Event()
        self._finished.set()

    @property
    def maxsize(self):
        """Number of items allowed in the queue."""
        return self._maxsize

    def qsize(self):
        """Number of items in the queue."""
        return len(self._queue)

    def empty(self):
        """Return True if the queue currently holds no items."""
        return not self._queue

    def full(self):
        """Return True if the queue has reached ``maxsize`` items.

        An unbounded queue (``maxsize == 0``) is never full.
        """
        if self.maxsize == 0:
            return False
        else:
            return self.qsize() >= self.maxsize

    def put(self, item, timeout=None):
        """Put an item into the queue, perhaps waiting until there is room.

        Returns a Future, which raises `tornado.util.TimeoutError` after a
        timeout.

        ``timeout`` may be a number denoting a time (on the same
        scale as `tornado.ioloop.IOLoop.time`, normally `time.time`), or a
        `datetime.timedelta` object for a deadline relative to the
        current time.
        """
        future = Future()
        try:
            self.put_nowait(item)
        except QueueFull:
            # Queue is full: park (item, future) until a consumer makes room.
            self._putters.append((item, future))
            _set_timeout(future, timeout)
        else:
            future.set_result(None)
        return future

    def put_nowait(self, item):
        """Put an item into the queue without blocking.

        If no free slot is immediately available, raise `QueueFull`.
        """
        self._consume_expired()
        if self._getters:
            # A consumer is already waiting: hand the item straight over.
            assert self.empty(), "queue non-empty, why are getters waiting?"
            getter = self._getters.popleft()
            self.__put_internal(item)
            future_set_result_unless_cancelled(getter, self._get())
        elif self.full():
            raise QueueFull
        else:
            self.__put_internal(item)

    def get(self, timeout=None):
        """Remove and return an item from the queue.

        Returns a Future which resolves once an item is available, or raises
        `tornado.util.TimeoutError` after a timeout.

        ``timeout`` may be a number denoting a time (on the same
        scale as `tornado.ioloop.IOLoop.time`, normally `time.time`), or a
        `datetime.timedelta` object for a deadline relative to the
        current time.
        """
        future = Future()
        try:
            future.set_result(self.get_nowait())
        except QueueEmpty:
            # Nothing available yet: park the future until a producer puts.
            self._getters.append(future)
            _set_timeout(future, timeout)
        return future

    def get_nowait(self):
        """Remove and return an item from the queue without blocking.

        Return an item if one is immediately available, else raise
        `QueueEmpty`.
        """
        self._consume_expired()
        if self._putters:
            # A producer is blocked on a full queue: accept its item now
            # that we are about to free a slot, and wake it up.
            assert self.full(), "queue not full, why are putters waiting?"
            item, putter = self._putters.popleft()
            self.__put_internal(item)
            future_set_result_unless_cancelled(putter, None)
            return self._get()
        elif self.qsize():
            return self._get()
        else:
            raise QueueEmpty

    def task_done(self):
        """Indicate that a formerly enqueued task is complete.

        Used by queue consumers. For each `.get` used to fetch a task, a
        subsequent call to `.task_done` tells the queue that the processing
        on the task is complete.

        If a `.join` is blocking, it resumes when all items have been
        processed; that is, when every `.put` is matched by a `.task_done`.

        Raises `ValueError` if called more times than `.put`.
        """
        if self._unfinished_tasks <= 0:
            raise ValueError('task_done() called too many times')
        self._unfinished_tasks -= 1
        if self._unfinished_tasks == 0:
            self._finished.set()

    def join(self, timeout=None):
        """Block until all items in the queue are processed.

        Returns a Future, which raises `tornado.util.TimeoutError` after a
        timeout.
        """
        return self._finished.wait(timeout)

    def __aiter__(self):
        return _QueueIterator(self)

    # These three are overridable in subclasses.
    def _init(self):
        # Default backing store: a FIFO deque.
        self._queue = collections.deque()

    def _get(self):
        return self._queue.popleft()

    def _put(self, item):
        self._queue.append(item)
    # End of the overridable methods.

    def __put_internal(self, item):
        # Every stored item is one more unfinished task for join()/task_done().
        self._unfinished_tasks += 1
        self._finished.clear()
        self._put(item)

    def _consume_expired(self):
        # Remove timed-out waiters.
        while self._putters and self._putters[0][1].done():
            self._putters.popleft()
        while self._getters and self._getters[0].done():
            self._getters.popleft()

    def __repr__(self):
        return '<%s at %s %s>' % (
            type(self).__name__, hex(id(self)), self._format())

    def __str__(self):
        return '<%s %s>' % (type(self).__name__, self._format())

    def _format(self):
        # Human-readable summary used by __repr__/__str__.
        result = 'maxsize=%r' % (self.maxsize, )
        if getattr(self, '_queue', None):
            result += ' queue=%r' % self._queue
        if self._getters:
            result += ' getters[%s]' % len(self._getters)
        if self._putters:
            result += ' putters[%s]' % len(self._putters)
        if self._unfinished_tasks:
            result += ' tasks=%s' % self._unfinished_tasks
        return result
class PriorityQueue(Queue):
    """A `.Queue` that retrieves entries in priority order, lowest first.

    Entries are typically tuples like ``(priority number, data)``.

    .. testcode::

        from tornado.queues import PriorityQueue

        q = PriorityQueue()
        q.put((1, 'medium-priority item'))
        q.put((0, 'high-priority item'))
        q.put((10, 'low-priority item'))

        print(q.get_nowait())
        print(q.get_nowait())
        print(q.get_nowait())

    .. testoutput::

        (0, 'high-priority item')
        (1, 'medium-priority item')
        (10, 'low-priority item')
    """
    def _init(self):
        # Backing store is a binary min-heap managed via ``heapq``.
        self._queue = []

    def _put(self, item):
        heapq.heappush(self._queue, item)

    def _get(self):
        # Pops the smallest (= highest-priority) entry.
        return heapq.heappop(self._queue)
class LifoQueue(Queue):
    """A `.Queue` that retrieves the most recently put items first.

    .. testcode::

        from tornado.queues import LifoQueue

        q = LifoQueue()
        q.put(3)
        q.put(2)
        q.put(1)

        print(q.get_nowait())
        print(q.get_nowait())
        print(q.get_nowait())

    .. testoutput::

        1
        2
        3
    """
    def _init(self):
        # Backing store is a plain list used as a stack.
        self._queue = []

    def _put(self, item):
        self._queue.append(item)

    def _get(self):
        # Pops from the top of the stack (most recent item).
        return self._queue.pop()
| hhru/tornado | tornado/queues.py | Python | apache-2.0 | 10,980 |
#
# Copyright 2014 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Converters for producing hardware sensor data sample messages from
notification events.
"""
from oslo import messaging
from oslo_config import cfg
from ceilometer.agent import plugin_base
from ceilometer.openstack.common import log
from ceilometer import sample
LOG = log.getLogger(__name__)
OPTS = [
cfg.StrOpt('ironic_exchange',
default='ironic',
help='Exchange name for Ironic notifications.'),
]
cfg.CONF.register_opts(OPTS)
# Map unit name to SI
UNIT_MAP = {
'Watts': 'W',
'Volts': 'V',
}
def validate_reading(data):
    """Some sensors read "Disabled".

    Returns False for that placeholder value so callers skip the sample.
    """
    return data != 'Disabled'
def transform_id(data):
    """Normalize a sensor ID: lowercase, with spaces turned into underscores."""
    lowered = data.lower()
    return lowered.replace(' ', '_')
def parse_reading(data):
    """Split an IPMI reading string into ``(volume, unit)``.

    The reading looks like ``"<number> [...] <unit>"``; the unit is
    mapped to its SI symbol via ``UNIT_MAP`` when known.  Raises
    `InvalidSensorData` when the string cannot be parsed.
    """
    try:
        raw_volume, remainder = data.split(' ', 1)
        unit = remainder.rsplit(' ', 1)[-1]
        return float(raw_volume), UNIT_MAP.get(unit, unit)
    except ValueError:
        raise InvalidSensorData('unable to parse sensor reading: %s' %
                                data)
class InvalidSensorData(ValueError):
    # Raised (and caught in process_notification) when a sensor payload
    # is missing expected keys or its reading cannot be parsed.
    pass
class SensorNotification(plugin_base.NotificationBase):
    """A generic class for extracting samples from sensor data notifications.

    A notification message can contain multiple samples from multiple
    sensors, all with the same basic structure: the volume for the sample
    is found as part of the value of a 'Sensor Reading' key. The unit
    is in the same value.

    Subclasses exist solely to allow flexibility with stevedore configuration.
    """

    event_types = ['hardware.ipmi.*']
    # Payload section name (e.g. 'Temperature'); set by each subclass.
    metric = None

    @staticmethod
    def get_targets(conf):
        """oslo.messaging.TargetS for this plugin."""
        return [messaging.Target(topic=topic,
                                 exchange=conf.ironic_exchange)
                for topic in conf.notification_topics]

    def _get_sample(self, message):
        # Yield each sensor's payload dict for this metric type; an
        # absent section yields nothing.
        try:
            return (payload for _, payload
                    in message['payload'][self.metric].items())
        except KeyError:
            return []

    @staticmethod
    def _package_payload(message, payload):
        # Wrap one sensor payload with the notification metadata that
        # sample.Sample.from_notification expects.
        # NOTE(chdent): How much of the payload should we keep?
        payload['node'] = message['payload']['node_uuid']
        info = {'publisher_id': message['publisher_id'],
                'timestamp': message['payload']['timestamp'],
                'event_type': message['payload']['event_type'],
                'user_id': message['payload'].get('user_id'),
                'project_id': message['payload'].get('project_id'),
                'payload': payload}
        return info

    def process_notification(self, message):
        """Read and process a notification.

        The guts of a message are in dict value of a 'payload' key
        which then itself has a payload key containing a dict of
        multiple sensor readings.

        If expected keys in the payload are missing or values
        are not in the expected form for transformations,
        KeyError and ValueError are caught and the current
        sensor payload is skipped.
        """
        payloads = self._get_sample(message['payload'])
        for payload in payloads:
            try:
                # Provide a fallback resource_id in case parts are missing.
                resource_id = 'missing id'
                try:
                    resource_id = '%(nodeid)s-%(sensorid)s' % {
                        'nodeid': message['payload']['node_uuid'],
                        'sensorid': transform_id(payload['Sensor ID'])
                    }
                except KeyError as exc:
                    raise InvalidSensorData('missing key in payload: %s' % exc)
                info = self._package_payload(message, payload)
                try:
                    sensor_reading = info['payload']['Sensor Reading']
                except KeyError as exc:
                    raise InvalidSensorData(
                        "missing 'Sensor Reading' in payload"
                    )
                # Skip sensors that report the 'Disabled' placeholder.
                if validate_reading(sensor_reading):
                    volume, unit = parse_reading(sensor_reading)
                    yield sample.Sample.from_notification(
                        name='hardware.ipmi.%s' % self.metric.lower(),
                        type=sample.TYPE_GAUGE,
                        unit=unit,
                        volume=volume,
                        resource_id=resource_id,
                        message=info,
                        user_id=info['user_id'],
                        project_id=info['project_id'])
            except InvalidSensorData as exc:
                # Bad payloads are logged and skipped; the remaining
                # sensors in the notification are still processed.
                LOG.warn(
                    'invalid sensor data for %(resource)s: %(error)s' %
                    dict(resource=resource_id, error=exc)
                )
                continue
# Concrete per-metric plugins: each selects its section of the IPMI
# notification payload via ``metric`` (wired up through stevedore).
class TemperatureSensorNotification(SensorNotification):
    metric = 'Temperature'


class CurrentSensorNotification(SensorNotification):
    metric = 'Current'


class FanSensorNotification(SensorNotification):
    metric = 'Fan'


class VoltageSensorNotification(SensorNotification):
    metric = 'Voltage'
| Juniper/ceilometer | ceilometer/ipmi/notifications/ironic.py | Python | apache-2.0 | 5,705 |
#!/usr/bin/python
import os
from xml.sax.saxutils import escape
def read_file(filename):
    """Return the full contents of *filename* as a single string."""
    with open(filename, "r") as source:
        return source.read()
def build_template(src_template_path, build_script_path, dst_template_path):
    """Fill the '[INSERT BUILD SCRIPT HERE]' placeholder of a template
    plist with the XML-escaped build script and write the result.

    NOTE: Python 2 syntax (print statement) -- this script must run
    under a Python 2 interpreter.
    """
    print "Rebuilding %s" % dst_template_path
    template = read_file(src_template_path).split('[INSERT BUILD SCRIPT HERE]')
    build_script = read_file(build_script_path)
    # Escape &, < and > so the script is valid inside the plist XML.
    result = template[0] + escape(build_script) + template[1]
    with open(dst_template_path, "w") as f:
        f.write(result)
def build_fake_template():
    """Regenerate the fake-framework Xcode template from its source plist."""
    here = os.path.dirname(os.path.realpath(__file__))
    src_template = here + "/src/FakeFrameworkTemplateInfo.plist"
    build_script = here + "/src/BuildFW.py"
    dst_template = os.path.abspath(
        here + "/../Fake Framework/Templates/Framework & Library/"
               "Fake Static iOS Framework.xctemplate/TemplateInfo.plist")
    build_template(src_template, build_script, dst_template)
def build_real_template():
    """Regenerate the real-framework Xcode template from its source plist."""
    here = os.path.dirname(os.path.realpath(__file__))
    src_template = here + "/src/RealFrameworkTemplateInfo.plist"
    build_script = here + "/src/BuildFW.py"
    dst_template = os.path.abspath(
        here + "/../Real Framework/Templates/Framework & Library/"
               "Static iOS Framework.xctemplate/TemplateInfo.plist")
    build_template(src_template, build_script, dst_template)
if __name__ == "__main__":
build_fake_template()
build_real_template()
| DZamataev/TelegramAppKit | iOS-Universal-Framework/devel/build.py | Python | mit | 1,543 |
#!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 1997-2015 California Institute of Technology.
# License: 3-clause BSD. The full license text is available at:
# - http://trac.mystic.cacr.caltech.edu/project/mystic/browser/mystic/LICENSE
from __future__ import with_statement
import os
import sys
# set version numbers
stable_version = '0.2a1'
target_version = '0.2a2'
is_release = False

# check if easy_install is available; fall back to plain distutils
try:
# import __force_distutils__ #XXX: uncomment to force use of distutills
    from setuptools import setup
    has_setuptools = True
except ImportError:
    from distutils.core import setup
    has_setuptools = False

# generate version number
if os.path.exists('mystic/info.py'):
    # is a source distribution, so use existing version
    #from mystic.info import this_version #FIXME?
    this_version = stable_version
elif stable_version == target_version:
    # we are building a stable release
    this_version = stable_version
else:
    # we are building a development distribution (PEP 440 ".dev0" suffix)
    this_version = target_version + '.dev0'
    if is_release:
        # stamp release-candidate builds with today's date (YYYYMMDD)
        from datetime import date
        today = "".join(date.isoformat(date.today()).split('-'))
        this_version += "-" + today

# get the license info (embedded verbatim in the package metadata)
with open('LICENSE') as file:
    license_text = file.read()
# generate the readme text; this exact string is written out as the README
# file below and also substituted into the setup() call.
long_description = \
"""------------------------------------------------------
mystic: a simple model-independent inversion framework
------------------------------------------------------
The mystic framework provides a collection of optimization algorithms
and tools that allows the user to more robustly (and readily) solve
optimization problems. All optimization algorithms included in mystic
provide workflow at the fitting layer, not just access to the algorithms
as function calls. Mystic gives the user fine-grained power to both
monitor and steer optimizations as the fit processes are running.
Where possible, mystic optimizers share a common interface, and thus can
be easily swapped without the user having to write any new code. Mystic
solvers all conform to a solver API, thus also have common method calls
to configure and launch an optimization job. For more details, see
`mystic.abstract_solver`. The API also makes it easy to bind a favorite
3rd party solver into the mystic framework.
By providing a robust interface designed to allow the user to easily
configure and control solvers, mystic reduces the barrier to implementing
a target fitting problem as stable code. Thus the user can focus on
building their physical models, and not spend time hacking together an
interface to optimization code.
Mystic is in the early development stages, and any user feedback is
highly appreciated. Contact Mike McKerns [mmckerns at caltech dot edu]
with comments, suggestions, and any bugs you may find. A list of known
issues is maintained at http://dev.danse.us/trac/mystic/query.
Major Features
==============
Mystic provides a stock set of configurable, controllable solvers with::
    - a common interface
    - the ability to impose solver-independent bounds constraints
    - the ability to apply solver-independent monitors
    - the ability to configure solver-independent termination conditions
    - a control handler yielding: [pause, continue, exit, and user_callback]
    - ease in selecting initial conditions: [initial_guess, random]
    - ease in selecting mutation strategies (for differential evolution)
To get up and running quickly, mystic also provides infrastructure to::
    - easily generate a fit model (several example models are included)
    - configure and auto-generate a cost function from a model
    - extend fit jobs to parallel & distributed resources
    - couple models with optimization parameter constraints [COMING SOON]
Current Release
===============
The latest stable release version is mystic-%(relver)s. You can download it here.
The latest stable version of mystic is always available at:
    http://dev.danse.us/trac/mystic
Development Release
===================
If you like living on the edge, and don't mind the promise
of a little instability, you can get the latest development
release with all the shiny new features at:
    http://dev.danse.us/packages.
Installation
============
Mystic is packaged to install from source, so you must
download the tarball, unzip, and run the installer::
    [download]
    $ tar -xvzf mystic-%(thisver)s.tgz
    $ cd mystic-%(thisver)s
    $ python setup.py build
    $ python setup.py install
You will be warned of any missing dependencies and/or settings
after you run the "build" step above. Mystic depends on dill, numpy
and sympy, so you should install them first. There are several
functions within mystic where scipy is used if it is available;
however, scipy is an optional dependency. Having matplotlib installed
is necessary for running several of the examples, and you should
probably go get it even though it's not required. Matplotlib is
also required by mystic's "analysis viewers".
Alternately, mystic can be installed with easy_install::
    [download]
    $ easy_install -f . mystic
For Windows users, source code and examples are available in zip format.
A binary installer is also provided::
    [download]
    [double-click]
Requirements
============
Mystic requires::
    - python, version >= 2.5, version < 3.0
    - numpy, version >= 1.0
    - sympy, version >= 0.6.7
    - dill, version >= 0.2.3
    - klepto, version >= 0.1.1
Optional requirements::
    - setuptools, version >= 0.6
    - matplotlib, version >= 0.91
    - scipy, version >= 0.6.0
    - pathos, version >= 0.2a1.dev0
    - pyina, version >= 0.2a1.dev0
Usage Notes
===========
Probably the best way to get started is to look at a few of the
examples provided within mystic. See `mystic.examples` for a
set of scripts that demonstrate the configuration and launching of
optimization jobs for one of the sample models in `mystic.models`.
Many of the included examples are standard optimization test problems.
Instructions on building a new model are in `mystic.models.abstract_model`.
Mystic provides base classes for two types of models::
    - AbstractFunction [evaluates f(x) for given evaluation points x]
    - AbstractModel [generates f(x,p) for given coefficients p]
It is, however, not necessary to use the base classes in your own model.
Mystic also provides some convenience functions to help you build a
model instance and a cost function instance on-the-fly. For more
information, see `mystic.mystic.forward_model`.
All mystic solvers are highly configurable, and provide a robust set of
methods to help customize the solver for your particular optimization
problem. For each solver, a minimal interface is also provided for users
who prefer to configure their solvers in a single function call. For more
information, see `mystic.mystic.abstract_solver` for the solver API, and
each of the individual solvers for their minimal (non-API compliant)
interface.
Mystic extends the solver API to parallel computing by providing a solver
class that utilizes the parallel map-reduce algorithm. Mystic includes
a set of defaults in `mystic.mystic.python_map` that mirror the behavior
of serial python and the built-in python map function. Mystic solvers
built on map-reduce can utilize the distributed and parallel tools provided
by the `pathos` package, and thus with little new code solvers are
extended to high-performance computing. For more information, see
`mystic.mystic.abstract_map_solver`, `mystic.mystic.abstract_ensemble_solver`,
and the pathos documentation at http://dev.danse.us/trac/pathos.
Important classes and functions are found here::
    - mystic.mystic.solvers [solver optimization algorithms]
    - mystic.mystic.termination [solver termination conditions]
    - mystic.mystic.strategy [solver population mutation strategies]
    - mystic.mystic.monitors [optimization monitors]
    - mystic.mystic.tools [function wrappers, etc]
    - mystic.mystic.forward_model [cost function generator]
    - mystic.models [a collection of standard models]
    - mystic.math [some mathematical functions and tools]
Solver and model API definitions are found here::
    - mystic.mystic.abstract_solver [the solver API definition]
    - mystic.mystic.abstract_map_solver [the parallel solver API]
    - mystic.mystic.abstract_ensemble_solver [the ensemble solver API]
    - mystic.models.abstract_model [the model API definition]
License
=======
Mystic is distributed under a 3-clause BSD license.
    >>> import mystic
    >>> print mystic.license()
Citation
========
If you use mystic to do research that leads to publication,
we ask that you acknowledge use of mystic by citing the
following in your publication::
    M.M. McKerns, L. Strand, T. Sullivan, A. Fang, M.A.G. Aivazis,
    "Building a framework for predictive science", Proceedings of
    the 10th Python in Science Conference, 2011;
    http://arxiv.org/pdf/1202.1056
    Michael McKerns, Patrick Hung, and Michael Aivazis,
    "mystic: a simple model-independent inversion framework", 2009- ;
    http://dev.danse.us/trac/mystic
More Information
================
Please see http://dev.danse.us/trac/mystic or http://arxiv.org/pdf/1202.1056 for further information.
""" % {'relver' : stable_version, 'thisver' : this_version}
# write readme file (regenerated from long_description on every build;
# NOTE: `file` shadows the Python 2 builtin)
with open('README', 'w') as file:
    file.write(long_description)
# generate 'info' file contents
def write_info_py(filename='mystic/info.py'):
    """Write the generated ``mystic/info.py`` module, embedding the current
    version numbers, README text, and license text as module constants."""
    header = """# THIS FILE GENERATED FROM SETUP.PY
this_version = '%(this_version)s'
stable_version = '%(stable_version)s'
readme = '''%(long_description)s'''
license = '''%(license_text)s'''
"""
    substitutions = {'this_version': this_version,
                     'stable_version': stable_version,
                     'long_description': long_description,
                     'license_text': license_text}
    with open(filename, 'w') as info_file:
        info_file.write(header % substitutions)
    return
# write info file
write_info_py()
# build the 'setup' call
setup_code = """
setup(name='mystic',
version='%s',
description='a simple interactive inversion analysis framework',
long_description = '''%s''',
author = 'Mike McKerns',
maintainer = 'Mike McKerns',
maintainer_email = 'mmckerns@caltech.edu',
license = 'BSD',
platforms = ['any'],
url = 'http://www.cacr.caltech.edu/~mmckerns',
classifiers = ('Intended Audience :: Developers',
'Programming Language :: Python',
'Topic :: Physics Programming'),
packages = ['mystic','mystic.models','mystic.math','mystic.cache'],
package_dir = {'mystic':'mystic','mystic.models':'models',
'mystic.math':'_math','mystic.cache':'cache'},
""" % (target_version, long_description)
# add dependencies
# NOTE: sys.version_info[:2] is a tuple, so it must be compared against a
# 2-tuple. The original code compared it to (2.6) -- just the float 2.6,
# not a tuple -- which made the branch always False on Python 2 (mixed-type
# comparison) and a TypeError on Python 3.
if sys.version_info[:2] < (2, 6):
    # older pythons get a capped numpy requirement
    numpy_version = '>=1.0, <1.8.0'
    sympy_version = '>=0.6.7'#, <0.7.1'
else:
    numpy_version = '>=1.0'
    sympy_version = '>=0.6.7'#, <0.7.4'
dill_version = '>=0.2.3'
klepto_version = '>=0.1.1'
scipy_version = '>=0.6.0'
matplotlib_version = '>=0.91'
if has_setuptools:
    # setuptools-only keywords (silently ignored by plain distutils)
    setup_code += """
      zip_safe=False,
      dependency_links = ['http://dev.danse.us/packages/'],
      install_requires = ('numpy%s', 'sympy%s', 'klepto%s', 'dill%s'),
""" % (numpy_version, sympy_version, klepto_version, dill_version)
# add the scripts, and close 'setup' call
setup_code += """
      scripts=['scripts/mystic_log_reader.py',
               'scripts/mystic_model_plotter.py',
               'scripts/support_convergence.py',
               'scripts/support_hypercube.py',
               'scripts/support_hypercube_measures.py',
               'scripts/support_hypercube_scenario.py'])
"""
# exec the 'setup' code
# NOTE(review): Python 2 `exec` statement syntax (and `print` statements
# below); this file targets python >= 2.5, < 3.0 per its own README text.
exec setup_code
# if dependencies are missing, print a warning (but do not fail the install)
try:
    import numpy
    import sympy
    import klepto
    import dill
    #import scipy
    #import matplotlib #XXX: has issues being zip_safe
except ImportError:
    print "\n***********************************************************"
    print "WARNING: One of the following dependencies is unresolved:"
    print "    numpy %s" % numpy_version
    print "    sympy %s" % sympy_version
    print "    klepto %s" % klepto_version
    print "    dill %s" % dill_version
    print "    scipy %s (optional)" % scipy_version
    print "    matplotlib %s (optional)" % matplotlib_version
    print "***********************************************************\n"
if __name__=='__main__':
    pass
# end of file
| jcfr/mystic | setup.py | Python | bsd-3-clause | 12,858 |
# ===========================================================================
# eXe config
# Copyright 2004-2006, University of Auckland
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ===========================================================================
"""
The StandAlone config overrides the Config class with Standalone specific
configuration
"""
import sys, os
from exe.engine.config import Config
from exe.engine.path import Path
# ===========================================================================
class StandaloneConfig(Config):
    """
    Config specialisation for the ready-to-run (standalone) eXe build:
    every data directory lives alongside the executable itself.
    """

    def _overrideDefaultVals(self):
        """
        Point all configurable paths at the directory containing the
        executable, so the application is fully self-contained.
        """
        self.exePath = Path(sys.argv[0])
        # sys.argv[0] may be the executable file itself; use its folder
        if self.exePath.isfile():
            self.exePath = self.exePath.dirname()
        base = self.exePath
        # Override the default settings
        self.webDir = base
        self.configDir = base / 'config'
        self.localeDir = base / 'locale'
        self.stylesDir = Path(base / 'style').abspath()
        self.styles = []
        self.lastDir = base

    def _getConfigPathOptions(self):
        """
        Return the only candidate location for the standalone config file.
        """
        return [self.configDir / 'exe.conf']
| mclois/iteexe | exe/engine/standaloneconfig.py | Python | gpl-2.0 | 2,184 |
from qtpy.QtWidgets import QDialog
from qtpy import QtGui
from addie.utilities import load_ui
class SaveSqDialogMessageDialog(QDialog):
    """Informational dialog shown when saving S(Q); displays a static
    explanatory image loaded from the Qt resource file."""

    def __init__(self, main_window=None):
        self.main_window = main_window
        QDialog.__init__(self, parent=main_window)
        self.ui = load_ui('save_sq_information_dialog.ui', baseinstance=self)
        self.init_widgets()

    def init_widgets(self):
        """Load the explanatory screenshot into the message label."""
        pixmap = QtGui.QPixmap(":/preview/save_sq_selection_image.png")
        self.ui.message_label.setPixmap(pixmap)
| neutrons/FastGR | addie/calculate_gr/save_sq_dialog_message.py | Python | mit | 504 |
import logging
import sys
import os
import unittest
class OsConfLoggingTest(unittest.TestCase):
def tearDown(self):
for key in os.environ.keys():
if key.startswith('SIPPERS_LOGGING'):
os.environ.pop(key)
def test_default_level_is_logging(self):
from sippers import logger
reload(sys.modules['sippers'])
self.assertEqual(logger.level, logging.INFO)
def test_config_level_from_environment(self):
os.environ['SIPPERS_LOGGING_LEVEL'] = 'DEBUG'
# Reload the module to recreate sippers.logger
reload(sys.modules['sippers'])
from sippers import logger
self.assertEqual(logger.level, logging.DEBUG)
def test_config_file_from_environement(self):
import tempfile
logfile = tempfile.mkstemp(prefix='sippers-test-')[1]
os.environ['SIPPERS_LOGGING_LOGFILE'] = logfile
# Reload the module to recreate sippers.logger
reload(sys.modules['sippers'])
from sippers import logger
logger.info('Foo')
with open(logfile, 'r') as f:
logcontent = f.read()
self.assertRegexpMatches(logcontent, "\[[0-9]{4}-[0-9]{2}-[0-9]{2} "
"[0-9]{2}:[0-9]{2}:[0-9]{2},[0-9]{3}\] INFO .*: Foo")
| gisce/sippers | tests/tests_logging.py | Python | gpl-3.0 | 1,270 |
from flask_webapi import status
from unittest import TestCase
class TestStatus(TestCase):
    """Tests for the status-code predicates in ``flask_webapi.status``.

    Each predicate accepts one inclusive hundred-code band (informational
    100-199, success 200-299, redirect 300-399, client error 400-499,
    server error 500-599). range() upper bounds are exclusive, so each loop
    runs to band_end + 1 -- the original loops stopped one code short and
    never exercised 199/299/399/499/599.
    """

    def test_is_informational(self):
        self.assertFalse(status.is_informational(99))
        self.assertFalse(status.is_informational(200))
        for code in range(100, 200):
            self.assertTrue(status.is_informational(code))

    def test_is_success(self):
        self.assertFalse(status.is_success(199))
        self.assertFalse(status.is_success(300))
        for code in range(200, 300):
            self.assertTrue(status.is_success(code))

    def test_is_redirect(self):
        self.assertFalse(status.is_redirect(299))
        self.assertFalse(status.is_redirect(400))
        for code in range(300, 400):
            self.assertTrue(status.is_redirect(code))

    def test_is_client_error(self):
        self.assertFalse(status.is_client_error(399))
        self.assertFalse(status.is_client_error(500))
        for code in range(400, 500):
            self.assertTrue(status.is_client_error(code))

    def test_is_server_error(self):
        self.assertFalse(status.is_server_error(499))
        self.assertFalse(status.is_server_error(600))
        for code in range(500, 600):
            self.assertTrue(status.is_server_error(code))
| viniciuschiele/flask-webapi | tests/test_status.py | Python | mit | 1,233 |
# -*- code: utf-8 -*-
from __future__ import unicode_literals
# Submodules are imported for their import-time side effects (presumably
# view/route registration -- confirm against the modules themselves), hence
# the pyflakes "unused import" suppressions; they are re-exported via __all__.
from . import index  # noqa pyflakes:ignore
from . import channel  # noqa pyflakes:ignore
from . import article  # noqa pyflakes:ignore
from . import account  # noqa pyflakes:ignore
__all__ = ['index', 'channel', 'article', 'account']
| qisanstudio/qsapp-riitc | src/riitc/views/__init__.py | Python | mit | 305 |
import os
import sys
import json
import socket
import logging
import traceback
from datetime import datetime
import amqp
import redis
from kuyruk import signals
# Default Redis connection settings; presumably merged into kuyruk's config
# object (Requeue reads kuyruk.config.REDIS_* below) -- confirm with kuyruk's
# config-loading mechanism.
CONFIG = {
    "REDIS_HOST": "localhost",
    "REDIS_PORT": 6379,
    "REDIS_DB": 0,
    "REDIS_PASSWORD": None,
}
# Name of the Redis hash mapping task id -> JSON failure record.
REDIS_KEY = "failed_tasks"
logger = logging.getLogger(__name__)
class Requeue(object):
    """Kuyruk extension that records failed tasks in Redis and can
    republish them to their original queues later."""

    def __init__(self, kuyruk):
        self.kuyruk = kuyruk
        self.kuyruk.extensions["requeue"] = self
        cfg = kuyruk.config
        self.redis = redis.StrictRedis(
            host=cfg.REDIS_HOST,
            port=cfg.REDIS_PORT,
            db=cfg.REDIS_DB,
            password=cfg.REDIS_PASSWORD)
        # When the sentry extension is loaded, hook its signal instead so
        # failures it captures are recorded here as well.
        if "sentry" in kuyruk.extensions:
            failure_signal = kuyruk.extensions["sentry"].on_exception
        else:
            failure_signal = signals.worker_failure
        failure_signal.connect(self._handle_failure, sender=kuyruk, weak=False)

    def _handle_failure(self, sender, description=None, task=None,
                        args=None, kwargs=None, exc_info=None, worker=None,
                        queue=None, **extra):
        """Signal handler: store a JSON record of the failure in Redis,
        keyed by the task description's id."""
        etype = exc_info[0]
        record = {
            "description": description,
            "queue": queue,
            "worker_hostname": socket.gethostname(),
            "worker_pid": os.getpid(),
            "worker_cmd": ' '.join(sys.argv),
            "worker_timestamp": datetime.utcnow().isoformat()[:19],
            "exception_traceback": traceback.format_exception(*exc_info),
            "exception_type": "%s.%s" % (etype.__module__, etype.__name__),
        }
        self.redis.hset(REDIS_KEY, description['id'], json.dumps(record))

    def requeue_failed_tasks(self):
        """Republish every recorded failure, reusing a single channel."""
        tasks = self.redis.hvals(REDIS_KEY)
        with self.kuyruk.channel() as channel:
            for raw in tasks:
                failed = json.loads(raw.decode('utf-8'))
                logger.info("Requeueing task: %r", failed)
                self.requeue_task(failed, channel=channel)
        logger.info("%i failed tasks have been requeued.", len(tasks))

    def requeue_task(self, failed, channel=None):
        """Republish one failed task, opening a channel when none is given."""
        if not channel:
            with self.kuyruk.channel() as fresh_channel:
                _requeue_failed_task(failed, fresh_channel, self.redis)
        else:
            _requeue_failed_task(failed, channel, self.redis)
def _requeue_failed_task(failed, channel, redis):
    """Publish one recorded failure back onto its original queue and drop
    the record from the Redis hash of failed tasks."""
    description = failed['description']
    queue_name = failed['queue']
    # Track how many times this task has been requeued.
    description['requeue_count'] = description.get('requeue_count', 0) + 1
    message = amqp.Message(body=json.dumps(description))
    channel.queue_declare(queue_name, durable=True, auto_delete=False)
    channel.basic_publish(message, exchange="", routing_key=queue_name)
    redis.hdel(REDIS_KEY, description['id'])
def requeue(kuyruk, args):
    """Command entry point: requeue all failed tasks recorded in Redis."""
    Requeue(kuyruk).requeue_failed_tasks()


# Command registration tuple: (handler, help text, None) -- presumably
# consumed by kuyruk's CLI; confirm the third element's meaning there.
help_text = "requeue failed tasks"
command = (requeue, help_text, None)
| cenkalti/kuyruk-requeue | kuyruk_requeue.py | Python | mit | 3,030 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.