| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
"""Post data to S3."""
import boto3
import os
import pickle
from src.config import CACHE_PATH, FINDINGS_S3_BUCKET
from src.single_layer_network import list_findings
from src.training_data import load_all_training_tiles, tag_with_locations
from src.training_visualization import render_results_for_analysis
def post_findings_to_s3(raster_data_paths, model, training_info, bands, render_results):
"""Aggregate findings from all NAIPs into a pickled list, post to S3."""
findings = []
for path in raster_data_paths:
labels, images = load_all_training_tiles(path, bands)
if len(labels) == 0 or len(images) == 0:
print("WARNING, there is a borked naip image file")
continue
false_positives, fp_images = list_findings(labels, images, model)
        filename = path.split('/')[-1]
print("FINDINGS: {} false pos of {} tiles, from {}".format(
len(false_positives), len(images), filename))
if render_results:
# render JPEGs showing findings
render_results_for_analysis([path], false_positives, fp_images, training_info['bands'],
training_info['tile_size'])
        # combine findings for all NAIP images analyzed for the region
        findings.extend(tag_with_locations(fp_images, false_positives,
                                           training_info['tile_size'],
                                           training_info['naip_state']))
# dump combined findings to disk as a pickle
try:
os.mkdir(CACHE_PATH + training_info['naip_state'])
    except OSError:
        pass  # directory already exists
naip_path_in_cache_dir = training_info['naip_state'] + '/' + 'findings.pickle'
local_path = CACHE_PATH + naip_path_in_cache_dir
    with open(local_path, 'wb') as outfile:
pickle.dump(findings, outfile)
# push pickle to S3
s3_client = boto3.client('s3')
s3_client.upload_file(local_path, FINDINGS_S3_BUCKET, naip_path_in_cache_dir)
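# Illustrative sketch (assumption, not part of this module): reading the pickled
# findings back from S3. ``read_findings_from_s3`` is a hypothetical helper; the S3
# key mirrors naip_path_in_cache_dir used by the upload above.
def read_findings_from_s3(naip_state, local_path='/tmp/findings.pickle'):
    """Download and unpickle the findings posted by post_findings_to_s3."""
    s3_client = boto3.client('s3')
    s3_client.download_file(FINDINGS_S3_BUCKET, naip_state + '/findings.pickle', local_path)
    with open(local_path, 'rb') as infile:
        return pickle.load(infile)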
| andrewljohnson/Deep-Trails | src/s3_client_deeposm.py | Python | mit | 2,080 |
from django.db import models
from datetime import datetime
from django.contrib.auth.signals import user_logged_in, user_logged_out
from django.contrib.auth.models import User
import urllib, hashlib, binascii
class Message(models.Model):
user = models.CharField(max_length=200)
message = models.TextField(max_length=200)
time = models.DateTimeField(auto_now_add=True)
gravatar = models.CharField(max_length=300)
def __unicode__(self):
return self.user
# def save(self):
# if self.time == None:
# self.time = datetime.now()
# super(Message, self).save()
def generate_avatar(email):
    # Build a Gravatar URL from the MD5 hash of the lower-cased email address,
    # falling back to an auto-generated "identicon" image.
    url = "http://www.gravatar.com/avatar/"
    url += hashlib.md5(email.lower()).hexdigest()
    url += '?d=identicon'
    return url
def hash_username(username):
    # Derive a stable integer id for the user from a CRC32 of the username.
    return binascii.crc32(username)
class ChatUser(models.Model):
user = models.OneToOneField(User)
userID = models.IntegerField()
username = models.CharField(max_length=300)
is_chat_user = models.BooleanField(default=False)
gravatar_url = models.CharField(max_length=300)
last_accessed = models.DateTimeField(auto_now_add=True)
User.profile = property(lambda u: ChatUser.objects.get_or_create(
    user=u,
    defaults={'gravatar_url': generate_avatar(u.email),
              'username': u.username,
              'userID': hash_username(u.username)})[0])
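# Illustrative usage sketch (assumption, not part of the original app): reading the
# monkey-patched ``User.profile`` property from a view, which fetches or lazily
# creates the related ChatUser row. ``chat_profile`` is a hypothetical view.
import json
from django.http import HttpResponse

def chat_profile(request):
    chat_user = request.user.profile  # ChatUser fetched or created by the property above
    payload = {'username': chat_user.username, 'gravatar': chat_user.gravatar_url}
    return HttpResponse(json.dumps(payload), content_type='application/json')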
| bardia-heydarinejad/Graph | chat/models.py | Python | mit | 1,269 |
# -*- coding: utf-8 -*-
import abc
class walker(object):
"""Basic API specification of the walker object.
"""
__metaclass__ = abc.ABCMeta
def __init__(self):
"""Instantiate a walker object.
"""
return None
@abc.abstractmethod
def get_position(self):
"""Return the postiton of the walker.
The return array is provided in the format [x1, y1, z1, x2, ...].
Returns
---------
numpy.ndarray
A one-dimensional array of the current coordinates.
"""
return None
@abc.abstractmethod
def set_position(self, config):
"""Set the configuration of the system.
Takes an input array in the form of [x1, y1, z1, x2,..].
Parameters
--------------
        config : numpy.ndarray
            A one-dimensional numpy array of system coordinates.
"""
return None
@abc.abstractmethod
def get_velocity(self):
"""Return the velocities of the system.
        The velocities are returned in a format that matches the get_position() routine, specifically [v_x1, v_y1, v_z1, v_x2, v_y2, v_z2, ...] where v_x1 represents the x component of the velocity on the first particle, etc.
Returns
------------
numpy.ndarray
A one dimensional numpy array of the current velocities.
"""
return None
@abc.abstractmethod
def set_velocity(self, velocity):
"""Set the velocities of the system.
Takes a one dimensional array of the velocities in the format [v_x1, v_y1, v_z1, v_x2, v_y2, v_z2, ...] where v_x1 represents the x component of the velocity on the first particle, etc.
Parameters
-------------
velocity : numpy.ndarray
            A one-dimensional numpy array of velocities.
"""
return None
@abc.abstractmethod
def draw_velocity(self, distType='uniform'):
"""Draw a new value of the velocities.
        Redraws velocities from a specified distribution.
Parameters
------------
distType : string, optional
Specifies the type of distribution from which to draw the velocities. Currently supports 'uniform' and 'gaussian'.
"""
return None
@abc.abstractmethod
def reverse_velocity(self, multFactor=-1.0):
"""Reverse the velocity of the walker.
Sets the velocity to multFactor * vel.
Parameters
------------
multFactor : float
Factor to scale the velocities. Takes -1.0 as default.
"""
return None
@abc.abstractmethod
def equilibrate(self, center, restraint, numSteps):
"""
"""
return None
@abc.abstractmethod
def get_colvars(self):
"""Return the location of the walker in the collective variable space.
Returns
---------
numpy.ndarray
A one-dimensional numpy array of the current collective variables.
"""
return None
@abc.abstractmethod
def add_colvars(self):
"""
"""
return None
@abc.abstractmethod
def propagate(self, nSteps):
"""Integrate the dynamics of the model forward in time.
Parameters
-----------
nSteps : int
The number of time steps to integrate forward in time.
"""
return None
@abc.abstractmethod
def close(self):
"""Destroy the walker.
"""
return None
@abc.abstractmethod
def set_temperature(self, temp):
"""Set the temperature of the system.
"""
return None
@abc.abstractmethod
def set_timestep(self, timestep):
"""Set the number of timesteps.
"""
return None
@abc.abstractmethod
def get_time(self):
"""Return the time in number of model timesteps.
"""
return None
@abc.abstractmethod
def set_time(self, time):
"""Set the time in number of model timesteps.
"""
return None
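# Illustrative sketch (assumption, not part of the toolkit): a minimal concrete walker
# implementing the abstract API above with plain numpy arrays. The class name
# ``RandomWalker`` and its free-flight dynamics are hypothetical.
import numpy as np

class RandomWalker(walker):
    def __init__(self, num_particles=1, timestep=1.0, temperature=1.0):
        self._x = np.zeros(3 * num_particles)
        self._v = np.zeros(3 * num_particles)
        self._dt = timestep
        self._temp = temperature
        self._time = 0

    def get_position(self):
        return self._x.copy()

    def set_position(self, config):
        self._x = np.asarray(config, dtype=float).copy()

    def get_velocity(self):
        return self._v.copy()

    def set_velocity(self, velocity):
        self._v = np.asarray(velocity, dtype=float).copy()

    def draw_velocity(self, distType='uniform'):
        if distType == 'gaussian':
            self._v = np.random.normal(0.0, np.sqrt(self._temp), self._x.shape)
        else:
            self._v = np.random.uniform(-1.0, 1.0, self._x.shape)

    def reverse_velocity(self, multFactor=-1.0):
        self._v *= multFactor

    def equilibrate(self, center, restraint, numSteps):
        self.propagate(numSteps)  # no restraint in this toy model

    def get_colvars(self):
        return self._x.copy()  # identity mapping: coordinates are the collective variables

    def add_colvars(self):
        pass

    def propagate(self, nSteps):
        for _ in range(nSteps):
            self._x += self._v * self._dt
            self._time += 1

    def close(self):
        pass

    def set_temperature(self, temp):
        self._temp = temp

    def set_timestep(self, timestep):
        self._dt = timestep

    def get_time(self):
        return self._time

    def set_time(self, time):
        self._time = time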
| jtempkin/enhanced-sampling-toolkit | est/walker/walker_base.py | Python | mit | 4,100 |
#!/usr/bin/env python
# Copyright (c) 2014-2018 Florian Brucker (www.florianbrucker.de)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import errno
import logging
from logging.handlers import SysLogHandler
import os
import os.path
import signal
import socket
import sys
import threading
import time
from daemon import DaemonContext
from pid import PidFile
import setproctitle
__version__ = '0.6.0'
__all__ = ['find_syslog', 'Service']
# Custom log level below logging.DEBUG for logging internal debug
# messages.
SERVICE_DEBUG = logging.DEBUG - 1
def _detach_process():
"""
Detach daemon process.
Forks the current process into a parent and a detached child. The
child process resides in its own process group, has no controlling
terminal attached and is cleaned up by the init process.
Returns ``True`` for the parent and ``False`` for the child.
"""
# To detach from our process group we need to call ``setsid``. We
# can only do that if we aren't a process group leader. Therefore
# we fork once, which makes sure that the new child process is not
# a process group leader.
pid = os.fork()
if pid > 0:
# Parent process
# Use waitpid to "collect" the child process and avoid Zombies
os.waitpid(pid, 0)
return True
os.setsid()
    # We now fork a second time and let the second fork's parent exit.
# This makes the second fork's child process an orphan. Orphans are
# cleaned up by the init process, so we won't end up with a zombie.
# In addition, the second fork's child is no longer a session
# leader and can therefore never acquire a controlling terminal.
pid = os.fork()
if pid > 0:
os._exit(os.EX_OK)
return False
class _PIDFile(object):
"""
A lock file that stores the PID of the owning process.
The PID is stored when the lock is acquired, not when it is created.
"""
def __init__(self, path):
self._path = path
self._lock = None
def _make_lock(self):
directory, filename = os.path.split(self._path)
return PidFile(filename,
directory,
register_term_signal_handler=False,
register_atexit=False)
def acquire(self):
self._make_lock().create()
def release(self):
self._make_lock().close()
try:
os.remove(self._path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def read_pid(self):
"""
Return the PID of the process owning the lock.
Returns ``None`` if no lock is present.
"""
try:
with open(self._path, 'r') as f:
s = f.read().strip()
if not s:
return None
return int(s)
except IOError as e:
if e.errno == errno.ENOENT:
return None
raise
def find_syslog():
"""
Find Syslog.
Returns Syslog's location on the current system in a form that can
be passed on to :py:class:`logging.handlers.SysLogHandler`::
handler = SysLogHandler(address=find_syslog(),
facility=SysLogHandler.LOG_DAEMON)
"""
for path in ['/dev/log', '/var/run/syslog']:
if os.path.exists(path):
return path
return ('127.0.0.1', 514)
def _block(predicate, timeout):
"""
Block until a predicate becomes true.
``predicate`` is a function taking no arguments. The call to
``_block`` blocks until ``predicate`` returns a true value. This
is done by polling ``predicate``.
``timeout`` is either ``True`` (block indefinitely) or a timeout
in seconds.
The return value is the value of the predicate after the
timeout.
"""
if timeout:
if timeout is True:
timeout = float('Inf')
timeout = time.time() + timeout
while not predicate() and time.time() < timeout:
time.sleep(0.1)
return predicate()
class Service(object):
"""
A background service.
This class provides the basic framework for running and controlling
a background daemon. This includes methods for starting the daemon
    (including things like proper setup of a detached daemon process),
checking whether the daemon is running, asking the daemon to
terminate and for killing the daemon should that become necessary.
.. py:attribute:: logger
A :py:class:`logging.Logger` instance.
.. py:attribute:: files_preserve
A list of file handles that should be preserved by the daemon
process. File handles of built-in Python logging handlers
attached to :py:attr:`logger` are automatically preserved.
"""
def __init__(self, name, pid_dir='/var/run', signals=None):
"""
Constructor.
``name`` is a string that identifies the daemon. The name is
used for the name of the daemon process, the PID file and for
the messages to syslog.
``pid_dir`` is the directory in which the PID file is stored.
        ``signals`` is a list of operating system signals that should be available
for use with :py:meth:`.send_signal`, :py:meth:`.got_signal`,
:py:meth:`.wait_for_signal`, and :py:meth:`.check_signal`. Note
that SIGTERM is always supported, and that SIGTTIN, SIGTTOU, and
SIGTSTP are never supported.
"""
self.name = name
self.pid_file = _PIDFile(os.path.join(pid_dir, name + '.pid'))
self._signal_events = {int(s): threading.Event()
for s in ((signals or []) + [signal.SIGTERM])}
self.logger = logging.getLogger(name)
if not self.logger.handlers:
self.logger.addHandler(logging.NullHandler())
self.files_preserve = []
def _debug(self, msg):
"""
Log an internal debug message.
        Logs a debug message with the :py:data:`SERVICE_DEBUG` logging
level.
"""
self.logger.log(SERVICE_DEBUG, msg)
def _get_logger_file_handles(self):
"""
Find the file handles used by our logger's handlers.
"""
handles = []
for handler in self.logger.handlers:
# The following code works for logging's SysLogHandler,
# StreamHandler, SocketHandler, and their subclasses.
for attr in ['sock', 'socket', 'stream']:
try:
handle = getattr(handler, attr)
if handle:
handles.append(handle)
break
except AttributeError:
continue
return handles
def is_running(self):
"""
Check if the daemon is running.
"""
pid = self.get_pid()
if pid is None:
return False
# The PID file may still exist even if the daemon isn't running,
# for example if it has crashed.
try:
os.kill(pid, 0)
except OSError as e:
if e.errno == errno.ESRCH:
# In this case the PID file shouldn't have existed in
# the first place, so we remove it
self.pid_file.release()
return False
# We may also get an exception if we're not allowed to use
# kill on the process, but that means that the process does
# exist, which is all we care about here.
return True
def get_pid(self):
"""
Get PID of daemon process or ``None`` if daemon is not running.
"""
return self.pid_file.read_pid()
def _get_signal_event(self, s):
'''
Get the event for a signal.
Checks if the signal has been enabled and raises a
``ValueError`` if not.
'''
try:
return self._signal_events[int(s)]
except KeyError:
raise ValueError('Signal {} has not been enabled'.format(s))
def send_signal(self, s):
"""
Send a signal to the daemon process.
The signal must have been enabled using the ``signals``
parameter of :py:meth:`Service.__init__`. Otherwise, a
``ValueError`` is raised.
"""
self._get_signal_event(s) # Check if signal has been enabled
pid = self.get_pid()
if not pid:
raise ValueError('Daemon is not running.')
os.kill(pid, s)
def got_signal(self, s):
"""
Check if a signal was received.
The signal must have been enabled using the ``signals``
parameter of :py:meth:`Service.__init__`. Otherwise, a
``ValueError`` is raised.
Returns ``True`` if the daemon process has received the signal
(for example because :py:meth:`stop` was called in case of
SIGTERM, or because :py:meth:`send_signal` was used) and
``False`` otherwise.
.. note::
This function always returns ``False`` for enabled signals
when it is not called from the daemon process.
"""
return self._get_signal_event(s).is_set()
def clear_signal(self, s):
"""
Clears the state of a signal.
The signal must have been enabled using the ``signals``
parameter of :py:meth:`Service.__init__`. Otherwise, a
``ValueError`` is raised.
"""
self._get_signal_event(s).clear()
def wait_for_signal(self, s, timeout=None):
"""
Wait until a signal has been received.
The signal must have been enabled using the ``signals``
parameter of :py:meth:`Service.__init__`. Otherwise, a
``ValueError`` is raised.
This function blocks until the daemon process has received the
signal (for example because :py:meth:`stop` was called in case
of SIGTERM, or because :py:meth:`send_signal` was used).
If ``timeout`` is given and not ``None`` it specifies a timeout
for the block.
The return value is ``True`` if the signal was received and
``False`` otherwise (the latter occurs if a timeout was given
and the signal was not received).
.. warning::
This function blocks indefinitely (or until the given
timeout) for enabled signals when it is not called from the
daemon process.
"""
return self._get_signal_event(s).wait(timeout)
def got_sigterm(self):
"""
Check if SIGTERM signal was received.
Returns ``True`` if the daemon process has received the SIGTERM
signal (for example because :py:meth:`stop` was called).
.. note::
This function always returns ``False`` when it is not called
from the daemon process.
"""
return self.got_signal(signal.SIGTERM)
def wait_for_sigterm(self, timeout=None):
"""
Wait until a SIGTERM signal has been received.
This function blocks until the daemon process has received the
SIGTERM signal (for example because :py:meth:`stop` was called).
If ``timeout`` is given and not ``None`` it specifies a timeout
for the block.
The return value is ``True`` if SIGTERM was received and
``False`` otherwise (the latter only occurs if a timeout was
given and the signal was not received).
.. warning::
This function blocks indefinitely (or until the given
timeout) when it is not called from the daemon process.
"""
return self.wait_for_signal(signal.SIGTERM, timeout)
def stop(self, block=False):
"""
Tell the daemon process to stop.
Sends the SIGTERM signal to the daemon process, requesting it
to terminate.
If ``block`` is true then the call blocks until the daemon
process has exited. This may take some time since the daemon
        process will complete its on-going activities before
shutting down. ``block`` can either be ``True`` (in which case
it blocks indefinitely) or a timeout in seconds.
The return value is ``True`` if the daemon process has been
stopped and ``False`` otherwise.
.. versionadded:: 0.3
The ``block`` parameter
"""
self.send_signal(signal.SIGTERM)
return _block(lambda: not self.is_running(), block)
def kill(self, block=False):
"""
Kill the daemon process.
Sends the SIGKILL signal to the daemon process, killing it. You
probably want to try :py:meth:`stop` first.
If ``block`` is true then the call blocks until the daemon
process has exited. ``block`` can either be ``True`` (in which
case it blocks indefinitely) or a timeout in seconds.
Returns ``True`` if the daemon process has (already) exited and
``False`` otherwise.
The PID file is always removed, whether the process has already
exited or not. Note that this means that subsequent calls to
:py:meth:`is_running` and :py:meth:`get_pid` will behave as if
the process has exited. If you need to be sure that the process
has already exited, set ``block`` to ``True``.
.. versionadded:: 0.5.1
The ``block`` parameter
"""
pid = self.get_pid()
if not pid:
raise ValueError('Daemon is not running.')
try:
os.kill(pid, signal.SIGKILL)
return _block(lambda: not self.is_running(), block)
except OSError as e:
if e.errno == errno.ESRCH:
raise ValueError('Daemon is not running.')
raise
finally:
self.pid_file.release()
def start(self, block=False):
"""
Start the daemon process.
The daemon process is started in the background and the calling
process returns.
Once the daemon process is initialized it calls the
:py:meth:`run` method.
If ``block`` is true then the call blocks until the daemon
process has started. ``block`` can either be ``True`` (in which
case it blocks indefinitely) or a timeout in seconds.
The return value is ``True`` if the daemon process has been
started and ``False`` otherwise.
.. versionadded:: 0.3
The ``block`` parameter
"""
pid = self.get_pid()
if pid:
raise ValueError('Daemon is already running at PID %d.' % pid)
# The default is to place the PID file into ``/var/run``. This
# requires root privileges. Since not having these is a common
# problem we check a priori whether we can create the lock file.
try:
self.pid_file.acquire()
finally:
self.pid_file.release()
# Clear previously received SIGTERMs. This must be done before
# the calling process returns so that the calling process can
# call ``stop`` directly after ``start`` returns without the
# signal being lost.
self.clear_signal(signal.SIGTERM)
if _detach_process():
# Calling process returns
return _block(lambda: self.is_running(), block)
# Daemon process continues here
self._debug('Daemon has detached')
def on_signal(s, frame):
self._debug('Received signal {}'.format(s))
self._signal_events[int(s)].set()
def runner():
try:
# We acquire the PID as late as possible, since its
# existence is used to verify whether the service
# is running.
self.pid_file.acquire()
self._debug('PID file has been acquired')
self._debug('Calling `run`')
self.run()
self._debug('`run` returned without exception')
except Exception as e:
self.logger.exception(e)
except SystemExit:
self._debug('`run` called `sys.exit`')
try:
self.pid_file.release()
self._debug('PID file has been released')
except Exception as e:
self.logger.exception(e)
os._exit(os.EX_OK) # FIXME: This seems redundant
try:
setproctitle.setproctitle(self.name)
self._debug('Process title has been set')
files_preserve = (self.files_preserve +
self._get_logger_file_handles())
signal_map = {s: on_signal for s in self._signal_events}
signal_map.update({
signal.SIGTTIN: None,
signal.SIGTTOU: None,
signal.SIGTSTP: None,
})
with DaemonContext(
detach_process=False,
signal_map=signal_map,
files_preserve=files_preserve):
self._debug('Daemon context has been established')
# Python's signal handling mechanism only forwards signals to
# the main thread and only when that thread is doing something
# (e.g. not when it's waiting for a lock, etc.). If we use the
# main thread for the ``run`` method this means that we cannot
# use the synchronization devices from ``threading`` for
# communicating the reception of SIGTERM to ``run``. Hence we
# use a separate thread for ``run`` and make sure that the
# main loop receives signals. See
# https://bugs.python.org/issue1167930
thread = threading.Thread(target=runner)
thread.start()
while thread.is_alive():
time.sleep(1)
except Exception as e:
self.logger.exception(e)
# We need to shutdown the daemon process at this point, because
# otherwise it will continue executing from after the original
# call to ``start``.
os._exit(os.EX_OK)
def run(self):
"""
Main daemon method.
This method is called once the daemon is initialized and
running. Subclasses should override this method and provide the
implementation of the daemon's functionality. The default
implementation does nothing and immediately returns.
Once this method returns the daemon process automatically exits.
Typical implementations therefore contain some kind of loop.
The daemon may also be terminated by sending it the SIGTERM
signal, in which case :py:meth:`run` should terminate after
performing any necessary clean up routines. You can use
:py:meth:`got_sigterm` and :py:meth:`wait_for_sigterm` to
check whether SIGTERM has been received.
"""
pass
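# Illustrative usage sketch (assumption, not part of this module; it would normally
# live in the caller's own script). ``SleepService`` and the ``/tmp`` pid_dir are
# hypothetical.
class SleepService(Service):
    def run(self):
        # Loop until stop() delivers SIGTERM, as described in the run() docstring above.
        while not self.got_sigterm():
            self.logger.info('Still running')
            self.wait_for_sigterm(timeout=5)

def _example_control():
    service = SleepService('sleep_service', pid_dir='/tmp')
    if service.is_running():
        service.stop(block=True)
    else:
        service.start(block=True)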
| torfuspolymorphus/service | src/service/__init__.py | Python | mit | 20,051 |
__author__ = 'katharine'
class ToolError(Exception):
pass
class MissingSDK(ToolError):
pass
class MissingEmulatorError(MissingSDK):
pass
class BuildError(ToolError):
pass
class PebbleProjectException(ToolError):
pass
class InvalidProjectException(PebbleProjectException):
pass
class InvalidJSONException(PebbleProjectException):
pass
class OutdatedProjectException(PebbleProjectException):
pass
class SDKInstallError(ToolError):
pass
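# Illustrative sketch (assumption, not part of this module): the hierarchy lets callers
# catch a specific failure or fall back to the broad ToolError base class.
def demo_error_handling():
    try:
        raise MissingEmulatorError("no emulator available for this SDK")
    except MissingSDK as error:
        print("Install the SDK first: {}".format(error))
    except ToolError as error:
        print("Tool failure: {}".format(error))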
| gregoiresage/pebble-tool | pebble_tool/exceptions.py | Python | mit | 487 |
from rpython.rlib.rarithmetic import intmask
from rpython.rlib.objectmodel import specialize
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.jit.backend.llsupport.descr import CallDescr
class UnsupportedKind(Exception):
pass
def get_call_descr_dynamic(cpu, cif_description, extrainfo):
"""Get a call descr from the given CIF_DESCRIPTION"""
ffi_result = cif_description.rtype
try:
reskind = get_ffi_type_kind(cpu, ffi_result)
argkinds = [get_ffi_type_kind(cpu, cif_description.atypes[i])
for i in range(cif_description.nargs)]
except UnsupportedKind:
return None
if reskind == 'v':
result_size = 0
else:
result_size = intmask(ffi_result.c_size)
argkinds = ''.join(argkinds)
return CallDescr(argkinds, reskind, is_ffi_type_signed(ffi_result),
result_size, extrainfo, ffi_flags=cif_description.abi)
def get_ffi_type_kind(cpu, ffi_type):
from rpython.rlib.jit_libffi import types
kind = types.getkind(ffi_type)
if ((not cpu.supports_floats and kind == 'f') or
(not cpu.supports_longlong and kind == 'L') or
(not cpu.supports_singlefloats and kind == 'S') or
kind == '*' or kind == '?'):
raise UnsupportedKind("Unsupported kind '%s'" % kind)
if kind == 'u':
kind = 'i'
return kind
def is_ffi_type_signed(ffi_type):
from rpython.rlib.jit_libffi import types
kind = types.getkind(ffi_type)
return kind != 'u'
@specialize.memo()
def _get_ffi2descr_dict(cpu):
def entry(letter, TYPE):
return (letter, cpu.arraydescrof(rffi.CArray(TYPE)), rffi.sizeof(TYPE))
#
d = {('v', 0): ('v', None, 1)}
if cpu.supports_floats:
d[('f', 0)] = entry('f', lltype.Float)
if cpu.supports_singlefloats:
d[('S', 0)] = entry('i', lltype.SingleFloat)
for SIGNED_TYPE in [rffi.SIGNEDCHAR,
rffi.SHORT,
rffi.INT,
rffi.LONG,
rffi.LONGLONG]:
key = ('i', rffi.sizeof(SIGNED_TYPE))
kind = 'i'
if key[1] > rffi.sizeof(lltype.Signed):
if not cpu.supports_longlong:
continue
key = ('L', 0)
kind = 'f'
d[key] = entry(kind, SIGNED_TYPE)
for UNSIGNED_TYPE in [rffi.UCHAR,
rffi.USHORT,
rffi.UINT,
rffi.ULONG,
rffi.ULONGLONG]:
key = ('u', rffi.sizeof(UNSIGNED_TYPE))
if key[1] > rffi.sizeof(lltype.Signed):
continue
d[key] = entry('i', UNSIGNED_TYPE)
return d
def get_arg_descr(cpu, ffi_type):
from rpython.rlib.jit_libffi import types
kind = types.getkind(ffi_type)
if kind == 'i' or kind == 'u':
size = rffi.getintfield(ffi_type, 'c_size')
else:
size = 0
return _get_ffi2descr_dict(cpu)[kind, size]
def calldescr_dynamic_for_tests(cpu, atypes, rtype, abiname='FFI_DEFAULT_ABI'):
from rpython.rlib import clibffi
from rpython.rlib.jit_libffi import CIF_DESCRIPTION, FFI_TYPE_PP
from rpython.jit.codewriter.effectinfo import EffectInfo
#
p = lltype.malloc(CIF_DESCRIPTION, len(atypes),
flavor='raw', immortal=True)
p.abi = getattr(clibffi, abiname)
p.nargs = len(atypes)
p.rtype = rtype
p.atypes = lltype.malloc(FFI_TYPE_PP.TO, len(atypes),
flavor='raw', immortal=True)
for i in range(len(atypes)):
p.atypes[i] = atypes[i]
return cpu.calldescrof_dynamic(p, EffectInfo.MOST_GENERAL)
| oblique-labs/pyVM | rpython/jit/backend/llsupport/ffisupport.py | Python | mit | 3,664 |
"""
This module is used to fake the original hyperion functions
Created on 27.11.2014
@author: Fabian Hertwig
"""
import imp
import json_client
ledCount = 0
# the data as set in the hypercon application
horizontal = 0
vertical = 0
first_led_offset = 0
clockwise_direction = False
corner_leds = False
# the dictionary the hyperion effect will access
args = {}
_ledData = None
_abort = False
""" helper functions """
def init(horizontal_led_num, vertical_led_num, first_led_offset_num, leds_in_clockwise_direction, has_corner_leds):
"""
Initialise the fake hyperion configuration. The values should be identical to your hyperion configuration.
:param horizontal_led_num: the number of your horizontal leds
:param vertical_led_num: the number of your vertical leds
:param first_led_offset_num: the offset value
:param leds_in_clockwise_direction: boolean: are your leds set up clockwise or not
:param has_corner_leds: boolean: are there corner leds
"""
global ledCount, _ledData, horizontal, vertical, first_led_offset, clockwise_direction, corner_leds
ledCount = (2 * horizontal_led_num) + (2 * vertical_led_num)
horizontal = horizontal_led_num
vertical = vertical_led_num
first_led_offset = first_led_offset_num
clockwise_direction = leds_in_clockwise_direction
corner_leds = has_corner_leds
    _ledData = bytearray(ledCount * 3)  # three zeroed bytes (r, g, b) per led
def set_abort(abort_hyperion):
global _abort
_abort = abort_hyperion
def get_led_data():
led_data_copy = bytearray()
if _ledData:
imp.acquire_lock()
led_data_copy = bytearray(_ledData)
imp.release_lock()
return led_data_copy
""" fake hyperion functions """
def abort():
return _abort
def set_color(led_data):
global _ledData
imp.acquire_lock()
_ledData = bytearray(led_data)
imp.release_lock()
json_client.send_led_data(led_data)
def setColor(led_data):
set_color(led_data)
""" cant overload functions in python """
# def setColor(red, green, blue):
# acquire_lock()
# for i in range(len(_ledData) / 3):
# _ledData[3*i] = red
# _ledData[3*i + 1] = green
# _ledData[3*i + 2] = blue
# release_lock()
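# Illustrative sketch (assumption, not part of this module): how an effect script could
# drive the fake hyperion during development. ``_example_effect`` and the layout values
# are hypothetical.
def _example_effect(frame_delay=0.05):
    import time
    init(horizontal_led_num=28, vertical_led_num=16, first_led_offset_num=0,
         leds_in_clockwise_direction=True, has_corner_leds=False)
    while not abort():
        set_color(bytearray([255, 0, 0] * ledCount))  # one solid red frame
        time.sleep(frame_delay)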
| siam28/hyperion_effects_dev_kit | hyperion.py | Python | mit | 2,270 |
#!/usr/bin/env python
#
# Sfml
#
# The sfml installer.
#
# Author O Wasalski - 07/06/2012 <wasalski@berkeley.edu> : First revision, new file
# Author P G Jones - 23/06/2012 <p.g.jones@qmul.ac.uk> : Refactor of Package Structure
# Author P G Jones - 22/09/2012 <p.g.jones@qmul.ac.uk> : Major refactor of snoing.
####################################################################################################
import localpackage
import installmode
import os
class Sfml(localpackage.LocalPackage):
""" Base sfml installer package."""
def __init__(self, name, system, tar_name):
""" Initialise sfml with the tar_name."""
super(Sfml, self).__init__(name, system)
self._tar_name = tar_name
self.set_install_mode(installmode.Graphical) # Only graphical installation
def get_dependencies(self):
""" Return the required dependencies."""
return ["cmake", "pthread", "opengl", "xlib", "xrandr", "freetype", "glew", "glut", "jpeg",
"sndfile", "openal"]
def _is_downloaded(self):
""" Has the tar file been downloaded."""
return self._system.file_exists(self._tar_name)
def _is_installed(self):
""" Has sfml been installed."""
lib_dir = os.path.join(self.get_install_path(), "lib")
libs = ["audio", "graphics", "network", "system", "window"]
installed = True
for lib in libs:
installed = installed and self._system.library_exists("libsfml-%s" % lib, lib_dir)
return installed
def _download(self):
""" Download the tar file."""
self._system.download_file(
"https://github.com/LaurentGomila/SFML/tarball/" + self._tar_name)
def _install(self):
""" Install sfml."""
self._system.untar_file(self._tar_name, self.get_install_path(), 1)
cmake_command = "cmake"
if self._dependency_paths["cmake"] is not None: # Special cmake installed
cmake_command = "%s/bin/cmake" % self._dependency_paths["cmake"]
self._system.execute_command(cmake_command, ["-DCMAKE_INSTALL_PREFIX:PATH=$PWD"],
cwd=self.get_install_path())
self._system.execute_command("make", cwd=self.get_install_path())
| mjmottram/snoing | packages/sfml.py | Python | mit | 2,259 |
# coding=utf-8
from decimal import Decimal
from .. import BaseProvider
localized = True
class Provider(BaseProvider):
"""
land_coords data extracted from geonames.org, under the Creative Commons Attribution 3.0 License.
Coordinates are in decimal format for mapping purposes.
Country code is in Alpha 2 format (https://www.nationsonline.org/oneworld/country_code_list.htm).
Timezones are canonical (https://en.wikipedia.org/wiki/List_of_tz_database_time_zones).
"""
land_coords = (
("42.50729", "1.53414", "les Escaldes", "AD", "Europe/Andorra"),
("36.21544", "65.93249", "Sar-e Pul", "AF", "Asia/Kabul"),
("40.49748", "44.7662", "Hrazdan", "AM", "Asia/Yerevan"),
("-11.78333", "19.91667", "Luena", "AO", "Africa/Luanda"),
("-37.32167", "-59.13316", "Tandil", "AR", "America/Argentina/Buenos_Aires"),
("-34.74785", "-58.70072", "Pontevedra", "AR", "America/Argentina/Buenos_Aires"),
("-34.64966", "-58.38341", "Barracas", "AR", "America/Argentina/Buenos_Aires"),
("-54.8", "-68.3", "Ushuaia", "AR", "America/Argentina/Ushuaia"),
("-31.25033", "-61.4867", "Rafaela", "AR", "America/Argentina/Cordoba"),
("-31.4488", "-60.93173", "Esperanza", "AR", "America/Argentina/Cordoba"),
("-34.64167", "-60.47389", "Chacabuco", "AR", "America/Argentina/Buenos_Aires"),
("-27.4338", "-65.61427", "Aguilares", "AR", "America/Argentina/Tucuman"),
("47.05", "15.46667", "Sankt Peter", "AT", "Europe/Vienna"),
("48.25", "16.4", "Floridsdorf", "AT", "Europe/Vienna"),
("-31.95224", "115.8614", "Perth", "AU", "Australia/Perth"),
("-37.9", "145.18333", "Wheelers Hill", "AU", "Australia/Melbourne"),
("-33.88096", "151.07986", "Strathfield", "AU", "Australia/Sydney"),
("-34.88422", "150.60036", "Nowra", "AU", "Australia/Sydney"),
("-25.54073", "152.70493", "Maryborough", "AU", "Australia/Brisbane"),
("-34.28853", "146.05093", "Griffith", "AU", "Australia/Sydney"),
("-33.79176", "151.08057", "Eastwood", "AU", "Australia/Sydney"),
("-37.88333", "145.06667", "Carnegie", "AU", "Australia/Melbourne"),
("-33.75881", "150.99292", "Baulkham Hills", "AU", "Australia/Sydney"),
("-27.50578", "153.10236", "Carindale", "AU", "Australia/Brisbane"),
("-32.05251", "115.88782", "Willetton", "AU", "Australia/Perth"),
("-38.16604", "145.13643", "Frankston South", "AU", "Australia/Melbourne"),
("38.45598", "48.87498", "Astara", "AZ", "Asia/Baku"),
("41.09246", "45.36561", "Qazax", "AZ", "Asia/Baku"),
("44.75874", "19.21437", "Bijeljina", "BA", "Europe/Sarajevo"),
("23.9028", "89.11943", "Kushtia", "BD", "Asia/Dhaka"),
("22.83957", "91.84128", "Manikchari", "BD", "Asia/Dhaka"),
("50.8", "3.16667", "Wevelgem", "BE", "Europe/Brussels"),
("51.12794", "4.21372", "Temse", "BE", "Europe/Brussels"),
("50.71229", "4.52529", "Rixensart", "BE", "Europe/Brussels"),
("50.74497", "3.20639", "Mouscron", "BE", "Europe/Brussels"),
("51.24197", "4.82313", "Lille", "BE", "Europe/Brussels"),
("51.03427", "5.37429", "Houthalen", "BE", "Europe/Brussels"),
("50.56149", "4.69889", "Gembloux", "BE", "Europe/Brussels"),
("50.88506", "4.07601", "Denderleeuw", "BE", "Europe/Brussels"),
("51.21187", "4.25633", "Beveren", "BE", "Europe/Brussels"),
("41.57439", "24.71204", "Smolyan", "BG", "Europe/Sofia"),
("43.4125", "23.225", "Montana", "BG", "Europe/Sofia"),
("42.7", "27.25", "Aytos", "BG", "Europe/Sofia"),
("8.88649", "2.59753", "Tchaourou", "BJ", "Africa/Porto-Novo"),
("-21.44345", "-65.71875", "Tupiza", "BO", "America/La_Paz"),
("-0.71667", "-48.52333", "Soure", "BR", "America/Belem"),
("-8.05389", "-34.88111", "Recife", "BR", "America/Recife"),
("-4.42472", "-41.45861", "Pedro II", "BR", "America/Fortaleza"),
("-3.14306", "-58.44417", "Itacoatiara", "BR", "America/Manaus"),
("-4.16694", "-40.7475", "Guaraciaba do Norte", "BR", "America/Fortaleza"),
("-8.66667", "-35.71667", "Catende", "BR", "America/Recife"),
("-8.28333", "-35.03333", "Cabo", "BR", "America/Recife"),
("-4.24444", "-42.29444", "Barras", "BR", "America/Fortaleza"),
("-3.20333", "-52.20639", "Altamira", "BR", "America/Santarem"),
("-20.87306", "-48.29694", "Viradouro", "BR", "America/Sao_Paulo"),
("-22.97056", "-46.99583", "Valinhos", "BR", "America/Sao_Paulo"),
("-10.95817", "-38.79084", "Tucano", "BR", "America/Bahia"),
("-28.81833", "-52.51028", "Soledade", "BR", "America/Sao_Paulo"),
("-23.44361", "-51.87389", "Sarandi", "BR", "America/Sao_Paulo"),
("-22.45667", "-47.53028", "Santa Gertrudes", "BR", "America/Sao_Paulo"),
("-11.48472", "-37.93278", "Rio Real", "BR", "America/Bahia"),
("-19.32556", "-41.25528", "Resplendor", "BR", "America/Sao_Paulo"),
("-26.22861", "-52.67056", "Pato Branco", "BR", "America/Sao_Paulo"),
("-25.42944", "-50.00639", "Palmeira", "BR", "America/Sao_Paulo"),
("-12.91667", "-39.25", "Muritiba", "BR", "America/Bahia"),
("-21.41222", "-42.19667", "Miracema", "BR", "America/Sao_Paulo"),
("-28.44917", "-52.2", "Marau", "BR", "America/Sao_Paulo"),
("-22.92306", "-53.13722", "Loanda", "BR", "America/Sao_Paulo"),
("-10.91722", "-37.65", "Lagarto", "BR", "America/Maceio"),
("-19.72806", "-50.19556", "Iturama", "BR", "America/Sao_Paulo"),
("-21.205", "-41.88778", "Itaperuna", "BR", "America/Sao_Paulo"),
("-20.25333", "-43.80139", "Itabirito", "BR", "America/Sao_Paulo"),
("-28.24", "-48.67028", "Imbituba", "BR", "America/Sao_Paulo"),
("-22.53722", "-42.98194", "Guapimirim", "BR", "America/Sao_Paulo"),
("-19.7625", "-44.31389", "Esmeraldas", "BR", "America/Sao_Paulo"),
("-25.42778", "-49.27306", "Curitiba", "BR", "America/Sao_Paulo"),
("-14.66463", "-52.35558", "Nova Xavantina", "BR", "America/Cuiaba"),
("-29.2975", "-51.50361", "Carlos Barbosa", "BR", "America/Sao_Paulo"),
("-15.675", "-38.94722", "Canavieiras", "BR", "America/Bahia"),
("-17.74431", "-48.62789", "Caldas Novas", "BR", "America/Sao_Paulo"),
("-23.7975", "-48.59278", "Buri", "BR", "America/Sao_Paulo"),
("-10.90889", "-37.03861", "Barra dos Coqueiros", "BR", "America/Maceio"),
("-22.57306", "-47.1725", "Artur Nogueira", "BR", "America/Sao_Paulo"),
("-10.91111", "-37.07167", "Aracaju", "BR", "America/Maceio"),
("-21.42917", "-45.94722", "Alfenas", "BR", "America/Sao_Paulo"),
("-8.76194", "-63.90389", "Porto Velho", "BR", "America/Porto_Velho"),
("-21.44236", "27.46153", "Tonota", "BW", "Africa/Gaborone"),
("55.1904", "30.2049", "Vitebsk", "BY", "Europe/Minsk"),
("53.5942", "25.8191", "Novogrudok", "BY", "Europe/Minsk"),
("52.4089", "31.3237", "Dobrush", "BY", "Europe/Minsk"),
("45.43341", "-73.86586", "Beaconsfield", "CA", "America/Toronto"),
("46.23899", "-63.13414", "Charlottetown", "CA", "America/Halifax"),
("45.4473", "-73.75335", "Dorval", "CA", "America/Toronto"),
("49.88307", "-119.48568", "Kelowna", "CA", "America/Vancouver"),
("43.86682", "-79.2663", "Markham", "CA", "America/Toronto"),
("42.8334", "-80.38297", "Norfolk County", "CA", "America/Toronto"),
("45.44868", "-73.81669", "Pointe-Claire", "CA", "America/Toronto"),
("45.40008", "-73.58248", "Sainte-Catherine", "CA", "America/Toronto"),
("53.51684", "-113.3187", "Sherwood Park", "CA", "America/Edmonton"),
("50.26729", "-119.27337", "Vernon", "CA", "America/Vancouver"),
("46.1351", "-60.1831", "Sydney", "CA", "America/Glace_Bay"),
("0.76755", "24.43973", "Yangambi", "CD", "Africa/Lubumbashi"),
("-8.73508", "24.99798", "Kamina", "CD", "Africa/Lubumbashi"),
("0.49113", "29.47306", "Beni", "CD", "Africa/Lubumbashi"),
("-4.5833", "15.16554", "Kasangulu", "CD", "Africa/Kinshasa"),
("4.94273", "15.87735", "Carnot", "CF", "Africa/Bangui"),
("-4.26613", "15.28318", "Brazzaville", "CG", "Africa/Brazzaville"),
("46.18396", "6.10237", "Onex", "CH", "Europe/Zurich"),
("47.30997", "8.52462", "Adliswil", "CH", "Europe/Zurich"),
("5.84752", "-5.682", "Lakota", "CI", "Africa/Abidjan"),
("5.27247", "-3.59625", "Bonoua", "CI", "Africa/Abidjan"),
("-33.59217", "-70.6996", "San Bernardo", "CL", "America/Santiago"),
("-30.60106", "-71.19901", "Ovalle", "CL", "America/Santiago"),
("-32.45242", "-71.23106", "La Ligua", "CL", "America/Santiago"),
("-36.9256", "-73.02841", "Chiguayante", "CL", "America/Santiago"),
("4.96667", "10.7", "Tonga", "CM", "Africa/Douala"),
("3.51667", "11.5", "Mbalmayo", "CM", "Africa/Douala"),
("4.2475", "9.00472", "Idenao", "CM", "Africa/Douala"),
("46.51872", "86.00214", "Hoxtolgay", "CN", "Asia/Urumqi"),
("36.81667", "117.81667", "Zhoucun", "CN", "Asia/Shanghai"),
("34.86472", "117.55417", "Zaozhuang", "CN", "Asia/Shanghai"),
("23.73333", "114.68333", "Heyuan", "CN", "Asia/Shanghai"),
("34.65918", "109.22921", "Yanliang", "CN", "Asia/Shanghai"),
("38.40917", "112.73333", "Xinzhou", "CN", "Asia/Shanghai"),
("33.78333", "114.51667", "Wacheng", "CN", "Asia/Shanghai"),
("27.85", "112.9", "Xiangtan", "CN", "Asia/Shanghai"),
("37.19723", "122.05228", "Tianfu", "CN", "Asia/Shanghai"),
("34.85", "117.33333", "Taozhuang", "CN", "Asia/Shanghai"),
("35.64889", "117.27583", "Sishui", "CN", "Asia/Shanghai"),
("27.34089", "117.4831", "Shaowu", "CN", "Asia/Shanghai"),
("37.30553", "120.82747", "Zhuangyuan", "CN", "Asia/Shanghai"),
("35.50056", "117.63083", "Pingyi", "CN", "Asia/Shanghai"),
("27.92333", "118.53333", "Pucheng", "CN", "Asia/Shanghai"),
("24.28859", "116.11768", "Meizhou", "CN", "Asia/Shanghai"),
("37.65181", "120.33063", "Longgang", "CN", "Asia/Shanghai"),
("23.29549", "113.82465", "Licheng", "CN", "Asia/Shanghai"),
("36.19278", "117.65694", "Laiwu", "CN", "Asia/Shanghai"),
("30.35028", "112.19028", "Jingzhou", "CN", "Asia/Shanghai"),
("32.50611", "120.14278", "Jiangyan", "CN", "Asia/Shanghai"),
("30.24706", "115.04814", "Huangshi", "CN", "Asia/Shanghai"),
("37.73222", "115.70111", "Hengshui", "CN", "Asia/Shanghai"),
("28.88162", "120.03308", "Guli", "CN", "Asia/Shanghai"),
("23.02677", "113.13148", "Foshan", "CN", "Asia/Shanghai"),
("35.85", "117.7", "Dongdu", "CN", "Asia/Shanghai"),
("32.54278", "111.50861", "Danjiangkou", "CN", "Asia/Shanghai"),
("35.20889", "111.73861", "Changzhi", "CN", "Asia/Shanghai"),
("34.56861", "105.89333", "Beidao", "CN", "Asia/Shanghai"),
("29.98869", "122.20488", "Zhoushan", "CN", "Asia/Shanghai"),
("40.66482", "122.22833", "Yingkou", "CN", "Asia/Shanghai"),
("46.08333", "122.08333", "Ulanhot", "CN", "Asia/Shanghai"),
("45.35", "126.28333", "Shuangcheng", "CN", "Asia/Shanghai"),
("41.09822", "120.74792", "Nanpiao", "CN", "Asia/Shanghai"),
("41.27194", "123.17306", "Liaoyang", "CN", "Asia/Shanghai"),
("41.94175", "123.50266", "Hushitai", "CN", "Asia/Shanghai"),
("40.85158", "122.74754", "Haicheng", "CN", "Asia/Shanghai"),
("42.64031", "125.51176", "Dongfeng", "CN", "Asia/Shanghai"),
("45.75279", "130.57211", "Boli", "CN", "Asia/Shanghai"),
("31.64615", "120.74221", "Changshu City", "CN", "Asia/Shanghai"),
("7.83389", "-72.47417", "Villa del Rosario", "CO", "America/Bogota"),
("6.46838", "-73.26022", "Socorro", "CO", "America/Bogota"),
("8.79577", "-75.69947", "San Carlos", "CO", "America/Bogota"),
("10.98778", "-74.95472", "Puerto Colombia", "CO", "America/Bogota"),
("4.73245", "-74.26419", "Madrid", "CO", "America/Bogota"),
("5.20856", "-74.73584", "Honda", "CO", "America/Bogota"),
("10.15031", "-73.9614", "El Copey", "CO", "America/Bogota"),
("3.8801", "-77.03116", "Buenaventura", "CO", "America/Bogota"),
("5.6561", "-75.87877", "Andes", "CO", "America/Bogota"),
("9.92787", "-84.13722", "San Rafael", "CR", "America/Costa_Rica"),
("10.63504", "-85.43772", "Liberia", "CR", "America/Costa_Rica"),
("23.15678", "-81.24441", "Varadero", "CU", "America/Havana"),
("20.14298", "-77.43532", "Media Luna", "CU", "America/Havana"),
("23.04419", "-82.00919", "Jaruco", "CU", "America/Havana"),
("22.98212", "-80.58556", "Corralillo", "CU", "America/Havana"),
("23.0072", "-82.4017", "Boyeros", "CU", "America/Havana"),
("50.50301", "13.63617", "Most", "CZ", "Europe/Prague"),
("50.23271", "12.87117", "Karlovy Vary", "CZ", "Europe/Prague"),
("51.04962", "12.1369", "Zeitz", "DE", "Europe/Berlin"),
("52.59319", "13.32127", "Wittenau", "DE", "Europe/Berlin"),
("50.82709", "6.9747", "Wesseling", "DE", "Europe/Berlin"),
("50.9803", "11.32903", "Weimar", "DE", "Europe/Berlin"),
("52.86147", "9.5926", "Walsrode", "DE", "Europe/Berlin"),
("51.88333", "8.51667", "Verl", "DE", "Europe/Berlin"),
("48.07667", "8.64409", "Trossingen", "DE", "Europe/Berlin"),
("48.78232", "9.17702", "Stuttgart", "DE", "Europe/Berlin"),
("53.59337", "9.47629", "Stade", "DE", "Europe/Berlin"),
("50.80019", "7.20769", "Siegburg", "DE", "Europe/Berlin"),
("51.21667", "6.26667", "Schwalmtal", "DE", "Europe/Berlin"),
("54.52156", "9.5586", "Schleswig", "DE", "Europe/Berlin"),
("50.72043", "11.34046", "Rudolstadt", "DE", "Europe/Berlin"),
("48.49144", "9.20427", "Reutlingen", "DE", "Europe/Berlin"),
("51.20219", "7.36027", "Radevormwald", "DE", "Europe/Berlin"),
("48.46458", "9.22796", "Pfullingen", "DE", "Europe/Berlin"),
("51.30001", "13.10984", "Oschatz", "DE", "Europe/Berlin"),
("51.47805", "6.8625", "Oberhausen", "DE", "Europe/Berlin"),
("50.23805", "8.86704", "Nidderau", "DE", "Europe/Berlin"),
("48.73218", "11.18709", "Neuburg an der Donau", "DE", "Europe/Berlin"),
("47.98372", "10.18527", "Memmingen", "DE", "Europe/Berlin"),
("50.80904", "8.77069", "Marburg an der Lahn", "DE", "Europe/Berlin"),
("49.5099", "6.74549", "Losheim", "DE", "Europe/Berlin"),
("48.52961", "12.16179", "Landshut", "DE", "Europe/Berlin"),
("51.19139", "6.51352", "Korschenbroich", "DE", "Europe/Berlin"),
("52.2", "8.63333", "Kirchlengern", "DE", "Europe/Berlin"),
("50.23019", "8.77155", "Karben", "DE", "Europe/Berlin"),
("50.09019", "8.4493", "Hofheim am Taunus", "DE", "Europe/Berlin"),
("52.61131", "13.31783", "Hermsdorf", "DE", "Europe/Berlin"),
("48.35149", "8.96317", "Hechingen", "DE", "Europe/Berlin"),
("53.63333", "9.85", "Halstenbek", "DE", "Europe/Berlin"),
("52.21099", "7.02238", "Gronau", "DE", "Europe/Berlin"),
("52.47774", "10.5511", "Gifhorn", "DE", "Europe/Berlin"),
("48.06919", "11.37703", "Gauting", "DE", "Europe/Berlin"),
("48.35693", "10.98461", "Friedberg", "DE", "Europe/Berlin"),
("51.168", "7.973", "Finnentrop", "DE", "Europe/Berlin"),
("49.13645", "8.91229", "Eppingen", "DE", "Europe/Berlin"),
("48.28259", "9.72749", "Ehingen", "DE", "Europe/Berlin"),
("52.4581", "13.28702", "Dahlem", "DE", "Europe/Berlin"),
("51.08468", "7.11393", "Burscheid", "DE", "Europe/Berlin"),
("49.03685", "8.70745", "Bretten", "DE", "Europe/Berlin"),
("49.68369", "8.61839", "Bensheim", "DE", "Europe/Berlin"),
("53.94313", "10.30215", "Bad Segeberg", "DE", "Europe/Berlin"),
("50.64336", "7.2278", "Bad Honnef", "DE", "Europe/Berlin"),
("49.97704", "9.15214", "Aschaffenburg", "DE", "Europe/Berlin"),
("48.21644", "9.02596", "Albstadt", "DE", "Europe/Berlin"),
("52.53048", "13.29371", "Charlottenburg-Nord", "DE", "Europe/Berlin"),
("53.6052", "10.03988", "Barmbek-Nord", "DE", "Europe/Berlin"),
("11.15583", "42.7125", "'Ali Sabieh", "DJ", "Africa/Djibouti"),
("55.67938", "12.53463", "Frederiksberg", "DK", "Europe/Copenhagen"),
("18.20854", "-71.10077", "Santa Cruz de Barahona", "DO", "America/Santo_Domingo"),
("36.76639", "3.47717", "Boumerdas", "DZ", "Africa/Algiers"),
("36.72544", "3.55665", "Thenia", "DZ", "Africa/Algiers"),
("34.15429", "3.50309", "Messaad", "DZ", "Africa/Algiers"),
("35.21222", "2.31889", "Ksar Chellala", "DZ", "Africa/Algiers"),
("35.06544", "1.04945", "Frenda", "DZ", "Africa/Algiers"),
("36.06386", "4.62744", "El Achir", "DZ", "Africa/Algiers"),
("36.76775", "2.95924", "Cheraga", "DZ", "Africa/Algiers"),
("36.27462", "4.85668", "Bordj Zemoura", "DZ", "Africa/Algiers"),
("36.61954", "4.08282", "Beni Douala", "DZ", "Africa/Algiers"),
("-2.13404", "-79.59415", "Milagro", "EC", "America/Guayaquil"),
("-2.90055", "-79.00453", "Cuenca", "EC", "America/Guayaquil"),
("59.37722", "28.19028", "Narva", "EE", "Europe/Tallinn"),
("26.67319", "31.4976", "Juhaynah", "EG", "Africa/Cairo"),
("31.20176", "29.91582", "Alexandria", "EG", "Africa/Cairo"),
("39.96348", "-4.83076", "Talavera de la Reina", "ES", "Europe/Madrid"),
("37.35813", "-6.03731", "San Juan de Aznalfarache", "ES", "Europe/Madrid"),
("38.68712", "-4.10734", "Puertollano", "ES", "Europe/Madrid"),
("38.38479", "-0.76773", "Novelda", "ES", "Europe/Madrid"),
("27.76056", "-15.58602", "Maspalomas", "ES", "Atlantic/Canary"),
("38.47917", "-1.325", "Jumilla", "ES", "Europe/Madrid"),
("38.96667", "-0.18333", "Gandia", "ES", "Europe/Madrid"),
("38.10558", "-1.86343", "Caravaca", "ES", "Europe/Madrid"),
("37.49073", "-2.77259", "Baza", "ES", "Europe/Madrid"),
("42.64685", "-5.55835", "Villaquilambre", "ES", "Europe/Madrid"),
("42.06166", "-1.60452", "Tudela", "ES", "Europe/Madrid"),
("40.42386", "-3.53261", "San Fernando de Henares", "ES", "Europe/Madrid"),
("41.15612", "1.10687", "Reus", "ES", "Europe/Madrid"),
("41.91738", "3.1631", "Palafrugell", "ES", "Europe/Madrid"),
("43.32686", "-2.98884", "Leioa", "ES", "Europe/Madrid"),
("43.31667", "-2.68333", "Gernika-Lumo", "ES", "Europe/Madrid"),
("43.48961", "-8.2194", "Ferrol", "ES", "Europe/Madrid"),
("41.63976", "2.35739", "Cardedeu", "ES", "Europe/Madrid"),
("40.70995", "0.57856", "Amposta", "ES", "Europe/Madrid"),
("37.13548", "-3.67029", "Las Gabias", "ES", "Europe/Madrid"),
("42.8139", "-1.64295", "Segundo Ensanche", "ES", "Europe/Madrid"),
("41.41204", "2.18247", "el Camp de l'Arpa del Clot", "ES", "Europe/Madrid"),
("11.85", "38.01667", "Debre Tabor", "ET", "Africa/Addis_Ababa"),
("6.03333", "37.55", "Arba Minch", "ET", "Africa/Addis_Ababa"),
("65.84811", "24.14662", "Tornio", "FI", "Europe/Helsinki"),
("60.18427", "24.95034", "Kallio", "FI", "Europe/Helsinki"),
("60.2052", "24.6522", "Espoo", "FI", "Europe/Helsinki"),
("45.51667", "4.86667", "Vienne", "FR", "Europe/Paris"),
("44.92801", "4.8951", "Valence", "FR", "Europe/Paris"),
("44.80477", "-0.59543", "Talence", "FR", "Europe/Paris"),
("48.77644", "2.29026", "Sceaux", "FR", "Europe/Paris"),
("50.75", "2.25", "Saint-Omer", "FR", "Europe/Paris"),
("45.69558", "4.7934", "Saint-Genis-Laval", "FR", "Europe/Paris"),
("48.8765", "2.18967", "Rueil-Malmaison", "FR", "Europe/Paris"),
("48", "-4.1", "Quimper", "FR", "Europe/Paris"),
("43.11667", "1.6", "Pamiers", "FR", "Europe/Paris"),
("46.32313", "-0.45877", "Niort", "FR", "Europe/Paris"),
("43.61092", "3.87723", "Montpellier", "FR", "Europe/Paris"),
("48.98333", "2.61667", "Mitry-Mory", "FR", "Europe/Paris"),
("48.86667", "2.08333", "Marly-le-Roi", "FR", "Europe/Paris"),
("46.67535", "5.55575", "Lons-le-Saunier", "FR", "Europe/Paris"),
("43.32393", "5.4584", "Les Olives", "FR", "Europe/Paris"),
("48.8222", "2.12213", "Le Chesnay", "FR", "Europe/Paris"),
("48.90472", "2.2469", "La Garenne-Colombes", "FR", "Europe/Paris"),
("48.98994", "2.1699", "Herblay", "FR", "Europe/Paris"),
("48.98693", "2.44892", "Gonesse", "FR", "Europe/Paris"),
("48.79325", "2.29275", "Fontenay-aux-Roses", "FR", "Europe/Paris"),
("49.28669", "1.00288", "Elbeuf", "FR", "Europe/Paris"),
("43.71032", "-1.05366", "Dax", "FR", "Europe/Paris"),
("43.61058", "1.33467", "Colomiers", "FR", "Europe/Paris"),
("43.83125", "5.03586", "Cavaillon", "FR", "Europe/Paris"),
("45.73333", "4.91667", "Bron", "FR", "Europe/Paris"),
("48.90982", "2.45012", "Bobigny", "FR", "Europe/Paris"),
("48.77275", "5.16108", "Bar-le-Duc", "FR", "Europe/Paris"),
("43.67681", "4.63031", "Arles", "FR", "Europe/Paris"),
("41.91886", "8.73812", "Ajaccio", "FR", "Europe/Paris"),
("43.2907", "5.4384", "Marseille 11", "FR", "Europe/Paris"),
("-1.63333", "13.58357", "Franceville", "GA", "Africa/Libreville"),
("53.19146", "-2.52398", "Winsford", "GB", "Europe/London"),
("51.26", "-2.1875", "Westbury", "GB", "Europe/London"),
("51.84819", "1.26738", "Walton-on-the-Naze", "GB", "Europe/London"),
("52.41667", "0.75", "Thetford", "GB", "Europe/London"),
("51.39323", "0.47713", "Strood", "GB", "Europe/London"),
("50.79205", "-1.08593", "Southsea", "GB", "Europe/London"),
("53.78333", "-1.06667", "Selby", "GB", "Europe/London"),
("55.82885", "-4.21376", "Rutherglen", "GB", "Europe/London"),
("53.00974", "-3.05814", "Rhosllanerchrugog", "GB", "Europe/London"),
("53.83333", "-2.98333", "Poulton-le-Fylde", "GB", "Europe/London"),
("50.11861", "-5.53715", "Penzance", "GB", "Europe/London"),
("50.82882", "-0.32247", "Lancing", "GB", "Europe/London"),
("51.40148", "-1.32471", "Newbury", "GB", "Europe/London"),
("53.49389", "-1.29243", "Mexborough", "GB", "Europe/London"),
("50.75767", "-1.5443", "Lymington", "GB", "Europe/London"),
("53.69786", "-2.68758", "Leyland", "GB", "Europe/London"),
("53.7446", "-0.33525", "Kingston upon Hull", "GB", "Europe/London"),
("57.47908", "-4.22398", "Inverness", "GB", "Europe/London"),
("51.62907", "-0.74934", "High Wycombe", "GB", "Europe/London"),
("51.38673", "0.30367", "Hartley", "GB", "Europe/London"),
("52.66277", "-2.01111", "Great Wyrley", "GB", "Europe/London"),
("53.38333", "-0.76667", "Gainsborough", "GB", "Europe/London"),
("50.7236", "-3.52751", "Exeter", "GB", "Europe/London"),
("52.68333", "0.93333", "East Dereham", "GB", "Europe/London"),
("51.35084", "-1.99421", "Devizes", "GB", "Europe/London"),
("50.76306", "-1.29772", "Cowes", "GB", "Europe/London"),
("51.78967", "1.15597", "Clacton-on-Sea", "GB", "Europe/London"),
("53.46506", "-1.47217", "Chapletown", "GB", "Europe/London"),
("51.64316", "-0.36053", "Bushey", "GB", "Europe/London"),
("52.48173", "-2.12139", "Brierley Hill", "GB", "Europe/London"),
("53.81667", "-3.05", "Blackpool", "GB", "Europe/London"),
("53.0233", "-1.48119", "Belper", "GB", "Europe/London"),
("51.65", "-0.2", "Barnet", "GB", "Europe/London"),
("56.56317", "-2.58736", "Arbroath", "GB", "Europe/London"),
("57.14369", "-2.09814", "Aberdeen", "GB", "Europe/London"),
("51.39148", "-0.29825", "Surbiton", "GB", "Europe/London"),
("51.42708", "-0.91979", "Lower Earley", "GB", "Europe/London"),
("55.82737", "-4.0573", "Viewpark", "GB", "Europe/London"),
("41.82143", "41.77921", "Kobuleti", "GE", "Asia/Tbilisi"),
("5.30383", "-1.98956", "Tarkwa", "GH", "Africa/Accra"),
("7.06273", "-1.4001", "Mampong", "GH", "Africa/Accra"),
("6.46346", "-2.31938", "Bibiani", "GH", "Africa/Accra"),
("13.56667", "-15.6", "Farafenni", "GM", "Africa/Banjul"),
("9.535", "-13.68778", "Camayenne", "GN", "Africa/Conakry"),
("14.93333", "-91.11667", "Chichicastenango", "GT", "America/Guatemala"),
("22.37066", "114.10479", "Tsuen Wan", "HK", "Asia/Hong_Kong"),
("15.48131", "-86.57415", "Olanchito", "HN", "America/Tegucigalpa"),
("43.50891", "16.43915", "Split", "HR", "Europe/Zagreb"),
("18.65297", "-72.09391", "Thomazeau", "HT", "America/Port-au-Prince"),
("18.57677", "-72.22625", "Croix-des-Bouquets", "HT", "America/Port-au-Prince"),
("3.3285", "99.1625", "Tebingtinggi", "ID", "Asia/Jakarta"),
("3.7278", "98.6738", "Labuhan Deli", "ID", "Asia/Jakarta"),
("-7.51611", "109.05389", "Wangon", "ID", "Asia/Jakarta"),
("3.31332", "117.59152", "Tarakan", "ID", "Asia/Makassar"),
("-6.91806", "106.92667", "Sukabumi", "ID", "Asia/Jakarta"),
("-1.26424", "104.09701", "Simpang", "ID", "Asia/Jakarta"),
("-7.0981", "109.3243", "Randudongkal", "ID", "Asia/Jakarta"),
("0.51667", "101.44167", "Pekanbaru", "ID", "Asia/Jakarta"),
("-7.01833", "107.60389", "Pameungpeuk", "ID", "Asia/Jakarta"),
("-8.43333", "114.33333", "Muncar", "ID", "Asia/Jakarta"),
("-3.5403", "118.9707", "Majene", "ID", "Asia/Makassar"),
("-6.8048", "110.8405", "Kudus", "ID", "Asia/Jakarta"),
("-7.81667", "112.01667", "Kediri", "ID", "Asia/Jakarta"),
("-1.6", "103.61667", "Jambi City", "ID", "Asia/Jakarta"),
("-7.57897", "112.23109", "Diwek", "ID", "Asia/Jakarta"),
("-6.48167", "106.85417", "Cibinong", "ID", "Asia/Jakarta"),
("-7.73379", "113.69785", "Besuki", "ID", "Asia/Jakarta"),
("-1.26753", "116.82887", "Balikpapan", "ID", "Asia/Makassar"),
("-7.54972", "110.71639", "Ngemplak", "ID", "Asia/Jakarta"),
("53.53333", "-7.35", "An Muileann gCearr", "IE", "Europe/Dublin"),
("53.43333", "-7.95", "Athlone", "IE", "Europe/Dublin"),
("31.92923", "34.86563", "Ramla", "IL", "Asia/Jerusalem"),
("32.05971", "34.8732", "Ganei Tikva", "IL", "Asia/Jerusalem"),
("31.39547", "34.75699", "Rahat", "IL", "Asia/Jerusalem"),
("18.87813", "72.93924", "Uran", "IN", "Asia/Kolkata"),
("10.58806", "77.24779", "Udumalaippettai", "IN", "Asia/Kolkata"),
("9.82564", "78.25795", "Tiruppuvanam", "IN", "Asia/Kolkata"),
("25.49043", "85.94001", "Teghra", "IN", "Asia/Kolkata"),
("12.04161", "75.35927", "Talipparamba", "IN", "Asia/Kolkata"),
("26.11527", "86.59509", "Supaul", "IN", "Asia/Kolkata"),
("34.08565", "74.80555", "Srinagar", "IN", "Asia/Kolkata"),
("25.92493", "73.66633", "Sojat", "IN", "Asia/Kolkata"),
("14.62072", "74.83554", "Sirsi", "IN", "Asia/Kolkata"),
("25.13915", "73.06784", "Sheoganj", "IN", "Asia/Kolkata"),
("11.50526", "77.23826", "Sathyamangalam", "IN", "Asia/Kolkata"),
("21.46527", "83.97573", "Sambalpur", "IN", "Asia/Kolkata"),
("25.87498", "86.59611", "Saharsa", "IN", "Asia/Kolkata"),
("12.95629", "78.27539", "Robertsonpet", "IN", "Asia/Kolkata"),
("26.44931", "91.61356", "Rangia", "IN", "Asia/Kolkata"),
("33.37526", "74.3092", "Rajaori", "IN", "Asia/Kolkata"),
("24.81757", "84.63445", "Rafiganj", "IN", "Asia/Kolkata"),
("18.51957", "73.85535", "Pune", "IN", "Asia/Kolkata"),
("11.93381", "79.82979", "Puducherry", "IN", "Asia/Kolkata"),
("28.71271", "77.656", "Pilkhua", "IN", "Asia/Kolkata"),
("10.12268", "77.54372", "Periyakulam", "IN", "Asia/Kolkata"),
("31.28092", "74.85849", "Patti", "IN", "Asia/Kolkata"),
("20.88098", "75.11937", "Parola", "IN", "Asia/Kolkata"),
("23.07492", "88.28637", "Pandua", "IN", "Asia/Kolkata"),
("18.18158", "76.03889", "Osmanabad", "IN", "Asia/Kolkata"),
("25.6439", "77.9129", "Narwar", "IN", "Asia/Kolkata"),
("30.81383", "75.16878", "Moga", "IN", "Asia/Kolkata"),
("28.98002", "77.70636", "Meerut", "IN", "Asia/Kolkata"),
("11.12018", "76.11996", "Manjeri", "IN", "Asia/Kolkata"),
("30.21121", "74.4818", "Malaut", "IN", "Asia/Kolkata"),
("25.92127", "86.79271", "Madhipura", "IN", "Asia/Kolkata"),
("24.05979", "77.40858", "Leteri", "IN", "Asia/Kolkata"),
("21.34222", "71.30633", "Kundla", "IN", "Asia/Kolkata"),
("22.75218", "72.68533", "Kheda", "IN", "Asia/Kolkata"),
("23.1959", "86.51499", "Kenda", "IN", "Asia/Kolkata"),
("29.21399", "78.95693", "Kashipur", "IN", "Asia/Kolkata"),
("11.00599", "77.5609", "Kangayam", "IN", "Asia/Kolkata"),
("22.88783", "84.13864", "Jashpurnagar", "IN", "Asia/Kolkata"),
("26.2649", "81.54855", "Jais", "IN", "Asia/Kolkata"),
("16.06213", "76.0586", "Hungund", "IN", "Asia/Kolkata"),
("29.22254", "79.5286", "Haldwani", "IN", "Asia/Kolkata"),
("26.76628", "83.36889", "Gorakhpur", "IN", "Asia/Kolkata"),
("12.25282", "79.41727", "Gingee", "IN", "Asia/Kolkata"),
("21.53889", "71.57737", "Gariadhar", "IN", "Asia/Kolkata"),
("15.73628", "75.96976", "Gajendragarh", "IN", "Asia/Kolkata"),
("17.54907", "82.85749", "Elamanchili", "IN", "Asia/Kolkata"),
("19.21667", "73.08333", "Dombivli", "IN", "Asia/Kolkata"),
("22.19303", "88.18466", "Diamond Harbour", "IN", "Asia/Kolkata"),
("12.1277", "78.15794", "Dharmapuri", "IN", "Asia/Kolkata"),
("25.75728", "75.37991", "Deoli", "IN", "Asia/Kolkata"),
("14.46693", "75.92694", "Davangere", "IN", "Asia/Kolkata"),
("25.66795", "85.83636", "Dalsingh Sarai", "IN", "Asia/Kolkata"),
("15.5439", "73.7553", "Calangute", "IN", "Asia/Kolkata"),
("27.9247", "78.40102", "Chharra", "IN", "Asia/Kolkata"),
("32.55531", "76.12647", "Chamba", "IN", "Asia/Kolkata"),
("20.88197", "85.83334", "Bhuban", "IN", "Asia/Kolkata"),
("19.30157", "72.85107", "Bhayandar", "IN", "Asia/Kolkata"),
("15.45144", "78.14797", "Betamcherla", "IN", "Asia/Kolkata"),
("26.32293", "91.00632", "Barpeta", "IN", "Asia/Kolkata"),
("28.92694", "78.23456", "Bachhraon", "IN", "Asia/Kolkata"),
("21.59983", "71.21169", "Amreli", "IN", "Asia/Kolkata"),
("10.10649", "76.35484", "Alwaye", "IN", "Asia/Kolkata"),
("24.41288", "76.56719", "Aklera", "IN", "Asia/Kolkata"),
("23.49668", "86.68363", "Adra", "IN", "Asia/Kolkata"),
("22.4711", "88.1453", "Pujali", "IN", "Asia/Kolkata"),
("22.10194", "85.37752", "Barbil", "IN", "Asia/Kolkata"),
("17.34769", "78.55757", "Lal Bahadur Nagar", "IN", "Asia/Kolkata"),
("23.18", "88.58", "Aistala", "IN", "Asia/Kolkata"),
("9.57046", "76.32756", "Kalavoor", "IN", "Asia/Kolkata"),
("32.61603", "44.02488", "Karbala", "IQ", "Asia/Baghdad"),
("35.6803", "51.0193", "Shahre Jadide Andisheh", "IR", "Asia/Tehran"),
("36.64852", "51.49621", "Nowshahr", "IR", "Asia/Tehran"),
("33.14447", "47.3799", "Darreh Shahr", "IR", "Asia/Tehran"),
("33.86419", "48.26258", "Aleshtar", "IR", "Asia/Tehran"),
("32.65246", "51.67462", "Isfahan", "IR", "Asia/Tehran"),
("38.07789", "13.44275", "Villabate", "IT", "Europe/Rome"),
("36.92574", "14.72443", "Ragusa", "IT", "Europe/Rome"),
("37.51803", "15.00913", "Misterbianco", "IT", "Europe/Rome"),
("37.49223", "15.07041", "Catania", "IT", "Europe/Rome"),
("37.31065", "13.57661", "Agrigento", "IT", "Europe/Rome"),
("43.78956", "7.60872", "Ventimiglia", "IT", "Europe/Rome"),
("44.89784", "8.86374", "Tortona", "IT", "Europe/Rome"),
("40.87329", "14.43865", "Somma Vesuviana", "IT", "Europe/Rome"),
("40.72586", "8.55552", "Sassari", "IT", "Europe/Rome"),
("45.39402", "9.29109", "San Giuliano Milanese", "IT", "Europe/Rome"),
("42.67164", "14.01481", "Roseto degli Abruzzi", "IT", "Europe/Rome"),
("45.78071", "12.84052", "Portogruaro", "IT", "Europe/Rome"),
("43.1122", "12.38878", "Perugia", "IT", "Europe/Rome"),
("45.44694", "8.62118", "Novara", "IT", "Europe/Rome"),
("45.50369", "11.412", "Montecchio Maggiore-Alte Ceccato", "IT", "Europe/Rome"),
("40.55851", "17.80774", "Mesagne", "IT", "Europe/Rome"),
("45.79377", "8.88104", "Malnate", "IT", "Europe/Rome"),
("42.22718", "14.39024", "Lanciano", "IT", "Europe/Rome"),
("45.53069", "9.40531", "Gorgonzola", "IT", "Europe/Rome"),
("40.53123", "17.58522", "Francavilla Fontana", "IT", "Europe/Rome"),
("43.62558", "13.39954", "Falconara Marittima", "IT", "Europe/Rome"),
("45.9836", "12.70038", "Cordenons", "IT", "Europe/Rome"),
("44.31771", "9.32241", "Chiavari", "IT", "Europe/Rome"),
("44.59445", "11.04979", "Castelfranco Emilia", "IT", "Europe/Rome"),
("41.55947", "14.66737", "Campobasso", "IT", "Europe/Rome"),
("41.24264", "16.50104", "Bisceglie", "IT", "Europe/Rome"),
("41.72063", "12.6723", "Ariccia", "IT", "Europe/Rome"),
("40.92298", "14.30935", "Afragola", "IT", "Europe/Rome"),
("40.87363", "14.34085", "Volla", "IT", "Europe/Rome"),
("18.00747", "-76.78319", "New Kingston", "JM", "America/Jamaica"),
("35.8", "137.23333", "Gero", "JP", "Asia/Tokyo"),
("34.61667", "135.6", "Yao", "JP", "Asia/Tokyo"),
("34.75856", "136.13108", "Ueno-ebisumachi", "JP", "Asia/Tokyo"),
("34.81667", "137.4", "Toyokawa", "JP", "Asia/Tokyo"),
("34.4833", "136.84186", "Toba", "JP", "Asia/Tokyo"),
("36.65", "138.31667", "Suzaka", "JP", "Asia/Tokyo"),
("34.9", "137.5", "Shinshiro", "JP", "Asia/Tokyo"),
("35.06667", "135.21667", "Sasayama", "JP", "Asia/Tokyo"),
("36", "139.55722", "Okegawa", "JP", "Asia/Tokyo"),
("36.53333", "136.61667", "Nonoichi", "JP", "Asia/Tokyo"),
("36.75965", "137.36215", "Namerikawa", "JP", "Asia/Tokyo"),
("35", "136.51667", "Komono", "JP", "Asia/Tokyo"),
("33.4425", "129.96972", "Karatsu", "JP", "Asia/Tokyo"),
("35.30889", "139.55028", "Kamakura", "JP", "Asia/Tokyo"),
("34.25", "135.31667", "Iwade", "JP", "Asia/Tokyo"),
("35.82756", "137.95378", "Ina", "JP", "Asia/Tokyo"),
("33.3213", "130.94098", "Hita", "JP", "Asia/Tokyo"),
("36.24624", "139.07204", "Fujioka", "JP", "Asia/Tokyo"),
("36.33011", "138.89585", "Annaka", "JP", "Asia/Tokyo"),
("35.815", "139.6853", "Shimotoda", "JP", "Asia/Tokyo"),
("39.46667", "141.95", "Yamada", "JP", "Asia/Tokyo"),
("37.56667", "140.11667", "Inawashiro", "JP", "Asia/Tokyo"),
("43.82634", "144.09638", "Motomachi", "JP", "Asia/Tokyo"),
("44.35056", "142.45778", "Nayoro", "JP", "Asia/Tokyo"),
("41.77583", "140.73667", "Hakodate", "JP", "Asia/Tokyo"),
("35.48199", "137.02166", "Minokamo", "JP", "Asia/Tokyo"),
("0.03813", "36.36339", "Nyahururu", "KE", "Africa/Nairobi"),
("3.11988", "35.59642", "Lodwar", "KE", "Africa/Nairobi"),
("0.46005", "34.11169", "Busia", "KE", "Africa/Nairobi"),
("40.93333", "73", "Jalal-Abad", "KG", "Asia/Bishkek"),
("13.65805", "102.56365", "Paoy Paet", "KH", "Asia/Phnom_Penh"),
("36.82167", "128.63083", "Eisen", "KR", "Asia/Seoul"),
("37.1759", "128.9889", "T’aebaek", "KR", "Asia/Seoul"),
("36.20389", "127.08472", "Nonsan", "KR", "Asia/Seoul"),
("37.65639", "126.835", "Goyang-si", "KR", "Asia/Seoul"),
("36.6009", "126.665", "Hongseong", "KR", "Asia/Seoul"),
("34.8825", "128.62667", "Sinhyeon", "KR", "Asia/Seoul"),
("47.83333", "59.6", "Shalqar", "KZ", "Asia/Aqtobe"),
("47.46657", "84.87144", "Zaysan", "KZ", "Asia/Almaty"),
("44.85278", "65.50917", "Kyzylorda", "KZ", "Asia/Qyzylorda"),
("43.41949", "77.0202", "Otegen Batyra", "KZ", "Asia/Almaty"),
("6.84019", "79.87116", "Dehiwala-Mount Lavinia", "LK", "Asia/Colombo"),
("6.9909", "79.883", "Hendala", "LK", "Asia/Colombo"),
("7.57944", "-8.53778", "New Yekepa", "LR", "Africa/Monrovia"),
("55.25", "24.75", "Ukmerge", "LT", "Europe/Vilnius"),
("54.39635", "24.04142", "Alytus", "LT", "Europe/Vilnius"),
("30.75545", "20.22625", "Ajdabiya", "LY", "Africa/Tripoli"),
("24.96334", "10.18003", "Ghat", "LY", "Africa/Tripoli"),
("33.92866", "-6.90656", "Temara", "MA", "Africa/Casablanca"),
("33.42585", "-6.00137", "Oulmes", "MA", "Africa/Casablanca"),
("34.31", "-2.16", "Jerada", "MA", "Africa/Casablanca"),
("33.43443", "-5.22126", "Azrou", "MA", "Africa/Casablanca"),
("48.15659", "28.28489", "Soroca", "MD", "Europe/Chisinau"),
("42.28639", "18.84", "Budva", "ME", "Europe/Podgorica"),
("-22.9", "44.53333", "Sakaraha", "MG", "Indian/Antananarivo"),
("-21.15", "46.58333", "Ikalamavony", "MG", "Indian/Antananarivo"),
("-19.65", "47.31667", "Antanifotsy", "MG", "Indian/Antananarivo"),
("-17.83333", "48.41667", "Ambatondrazaka", "MG", "Indian/Antananarivo"),
("42", "21.32778", "Saraj", "MK", "Europe/Skopje"),
("41.92361", "20.91361", "Bogovinje", "MK", "Europe/Skopje"),
("12.74409", "-8.07257", "Kati", "ML", "Africa/Bamako"),
("14.0823", "98.19151", "Dawei", "MM", "Asia/Yangon"),
("16.68911", "98.50893", "Myawadi", "MM", "Asia/Yangon"),
("17.30858", "97.01124", "Kyaikto", "MM", "Asia/Yangon"),
("47.90771", "106.88324", "Ulan Bator", "MN", "Asia/Ulaanbaatar"),
("14.67751", "-60.94228", "Le Robert", "MQ", "America/Martinique"),
("35.89972", "14.51472", "Valletta", "MT", "Europe/Malta"),
("-13.7804", "34.4587", "Salima", "MW", "Africa/Blantyre"),
("16.75973", "-93.11308", "Tuxtla", "MX", "America/Mexico_City"),
("19.8173", "-97.35992", "Teziutlan", "MX", "America/Mexico_City"),
("21.28306", "-89.66123", "Progreso", "MX", "America/Merida"),
("17.06542", "-96.72365", "Oaxaca", "MX", "America/Mexico_City"),
("25.87972", "-97.50417", "Heroica Matamoros", "MX", "America/Matamoros"),
("19.32932", "-98.1664", "Contla", "MX", "America/Mexico_City"),
("17.94979", "-94.91386", "Acayucan", "MX", "America/Mexico_City"),
("19.32889", "-99.32556", "San Lorenzo Acopilco", "MX", "America/Mexico_City"),
("20.22816", "-103.5687", "Zacoalco de Torres", "MX", "America/Mexico_City"),
("20.74122", "-100.44843", "Santa Rosa Jauregui", "MX", "America/Mexico_City"),
("20.21322", "-100.88023", "Salvatierra", "MX", "America/Mexico_City"),
("19.64745", "-102.04897", "Paracho de Verduzco", "MX", "America/Mexico_City"),
("20.28527", "-103.42897", "Jocotepec", "MX", "America/Mexico_City"),
("21.01858", "-101.2591", "Guanajuato", "MX", "America/Mexico_City"),
("22.49396", "-105.36369", "Acaponeta", "MX", "America/Mazatlan"),
("19.04222", "-98.11889", "Casa Blanca", "MX", "America/Mexico_City"),
("1.6561", "103.6032", "Kulai", "MY", "Asia/Kuala_Lumpur"),
("5.90702", "116.10146", "Donggongon", "MY", "Asia/Kuching"),
("4.88441", "101.96857", "Gua Musang", "MY", "Asia/Kuala_Lumpur"),
("5.4709", "100.24529", "Batu Feringgi", "MY", "Asia/Kuala_Lumpur"),
("4.02219", "101.02083", "Teluk Intan", "MY", "Asia/Kuala_Lumpur"),
("1.6", "103.81667", "Ulu Tiram", "MY", "Asia/Kuala_Lumpur"),
("2.2139", "102.3278", "Kampung Ayer Molek", "MY", "Asia/Kuala_Lumpur"),
("-23.85972", "35.34722", "Maxixe", "MZ", "Africa/Maputo"),
("-21.98333", "16.91667", "Okahandja", "NA", "Africa/Windhoek"),
("13.70727", "9.15013", "Mirriah", "NE", "Africa/Niamey"),
("4.92675", "6.26764", "Yenagoa", "NG", "Africa/Lagos"),
("6.8485", "3.64633", "Shagamu", "NG", "Africa/Lagos"),
("7.6", "4.18333", "Olupona", "NG", "Africa/Lagos"),
("6.15038", "6.83042", "Nkpor", "NG", "Africa/Lagos"),
("6.45407", "3.39467", "Lagos", "NG", "Africa/Lagos"),
("9.58126", "8.2926", "Kafanchan", "NG", "Africa/Lagos"),
("7.62789", "4.74161", "Ilesa", "NG", "Africa/Lagos"),
("7.50251", "5.06258", "Igbara-Odo", "NG", "Africa/Lagos"),
("11.86064", "9.0027", "Gaya", "NG", "Africa/Lagos"),
("7.65649", "4.92235", "Efon-Alaaye", "NG", "Africa/Lagos"),
("10.61285", "12.19458", "Biu", "NG", "Africa/Lagos"),
("12.74482", "4.52514", "Argungu", "NG", "Africa/Lagos"),
("13.48082", "-86.58208", "Somoto", "NI", "America/Managua"),
("11.84962", "-86.19903", "Jinotepe", "NI", "America/Managua"),
("52.09", "5.23333", "Zeist", "NL", "Europe/Amsterdam"),
("51.65333", "5.2875", "Vught", "NL", "Europe/Amsterdam"),
("51.44889", "5.51978", "Tongelre", "NL", "Europe/Amsterdam"),
("51.95838", "4.47124", "Schiebroek", "NL", "Europe/Amsterdam"),
("52.31333", "6.92917", "Oldenzaal", "NL", "Europe/Amsterdam"),
("52.26083", "7.00417", "Losser", "NL", "Europe/Amsterdam"),
("53.16167", "6.76111", "Hoogezand", "NL", "Europe/Amsterdam"),
("52.57583", "6.61944", "Hardenberg", "NL", "Europe/Amsterdam"),
("52.71083", "5.74861", "Emmeloord", "NL", "Europe/Amsterdam"),
("51.955", "5.22778", "Culemborg", "NL", "Europe/Amsterdam"),
("52.14", "5.58472", "Barneveld", "NL", "Europe/Amsterdam"),
("68.79833", "16.54165", "Harstad", "NO", "Europe/Oslo"),
("-44.39672", "171.25364", "Timaru", "NZ", "Pacific/Auckland"),
("-38.65333", "178.00417", "Gisborne", "NZ", "Pacific/Auckland"),
("8.88988", "-79.62603", "Veracruz", "PA", "America/Panama"),
("9.15093", "-79.62098", "Chilibre", "PA", "America/Panama"),
("-3.74912", "-73.25383", "Iquitos", "PE", "America/Lima"),
("-16.25", "-69.08333", "Yunguyo", "PE", "America/Lima"),
("-15.21194", "-75.11028", "Minas de Marcona", "PE", "America/Lima"),
("-11.94306", "-76.70944", "Chosica", "PE", "America/Lima"),
("-5.85746", "144.23058", "Mount Hagen", "PG", "Pacific/Port_Moresby"),
("6.33444", "124.95278", "Tupi", "PH", "Asia/Manila"),
("10.7375", "122.9666", "Talisay", "PH", "Asia/Manila"),
("12.97389", "123.99333", "Sorsogon", "PH", "Asia/Manila"),
("9.3337", "122.8637", "Santa Catalina", "PH", "Asia/Manila"),
("12.35275", "121.06761", "San Jose", "PH", "Asia/Manila"),
("6.95194", "121.96361", "Recodo", "PH", "Asia/Manila"),
("14.66", "120.56528", "Pilar", "PH", "Asia/Manila"),
("10.20898", "123.758", "Naga", "PH", "Asia/Manila"),
("12.37169", "123.62494", "Masbate", "PH", "Asia/Manila"),
("16.0438", "120.4861", "Manaoag", "PH", "Asia/Manila"),
("10.13361", "124.84472", "Maasin", "PH", "Asia/Manila"),
("16.455", "120.5875", "La Trinidad", "PH", "Asia/Manila"),
("9.6531", "124.3697", "Jagna", "PH", "Asia/Manila"),
("14.8361", "120.97844", "Guyong", "PH", "Asia/Manila"),
("8.56697", "123.33471", "Dipolog", "PH", "Asia/Manila"),
("10.31672", "123.89071", "Cebu City", "PH", "Asia/Manila"),
("14.14989", "121.3152", "Calauan", "PH", "Asia/Manila"),
("15.72892", "120.57224", "Burgos", "PH", "Asia/Manila"),
("14.95472", "120.89694", "Baliuag", "PH", "Asia/Manila"),
("14.62578", "121.12251", "Antipolo", "PH", "Asia/Manila"),
("27.52948", "68.75915", "Khairpur Mir’s", "PK", "Asia/Karachi"),
("26.9423", "68.11759", "Tharu Shah", "PK", "Asia/Karachi"),
("31.82539", "72.54064", "Sillanwali", "PK", "Asia/Karachi"),
("31.71667", "73.38333", "Sangla Hill", "PK", "Asia/Karachi"),
("30.29184", "71.67164", "Qadirpur Ran", "PK", "Asia/Karachi"),
("31.96258", "73.97117", "Naushahra Virkan", "PK", "Asia/Karachi"),
("32.57756", "71.52847", "Mianwali", "PK", "Asia/Karachi"),
("27.55898", "68.21204", "Larkana", "PK", "Asia/Karachi"),
("30.46907", "70.96699", "Kot Addu", "PK", "Asia/Karachi"),
("30.76468", "74.12286", "Kanganpur", "PK", "Asia/Karachi"),
("25.95533", "68.88871", "Jhol", "PK", "Asia/Karachi"),
("29.69221", "72.54566", "Hasilpur", "PK", "Asia/Karachi"),
("32.17629", "75.06583", "Fazilpur", "PK", "Asia/Karachi"),
("32.87533", "71.57118", "Daud Khel", "PK", "Asia/Karachi"),
("25.80565", "68.49143", "Bhit Shah", "PK", "Asia/Karachi"),
("29.38242", "70.91106", "Alipur", "PK", "Asia/Karachi"),
("51.14942", "15.00835", "Zgorzelec", "PL", "Europe/Warsaw"),
("54.58048", "16.86194", "Ustka", "PL", "Europe/Warsaw"),
("50.5107", "18.30056", "Strzelce Opolskie", "PL", "Europe/Warsaw"),
("54.60528", "18.34717", "Reda", "PL", "Europe/Warsaw"),
("50.20528", "19.27498", "Jaworzno", "PL", "Europe/Warsaw"),
("50.86079", "17.4674", "Brzeg", "PL", "Europe/Warsaw"),
("18.42745", "-67.15407", "Aguadilla", "PR", "America/Puerto_Rico"),
("18.03496", "-66.8499", "Yauco", "PR", "America/Puerto_Rico"),
("31.78336", "35.23388", "East Jerusalem", "PS", "Asia/Hebron"),
("38.72706", "-9.24671", "Carnaxide", "PT", "Europe/Lisbon"),
("37.08819", "-8.2503", "Albufeira", "PT", "Europe/Lisbon"),
("41.20485", "-8.33147", "Paredes", "PT", "Europe/Lisbon"),
("41.1053", "-7.32097", "Custoias", "PT", "Europe/Lisbon"),
("37.74615", "-25.66689", "Ponta Delgada", "PT", "Atlantic/Azores"),
("-20.88231", "55.4504", "Saint-Denis", "RE", "Indian/Reunion"),
("44.43579", "26.01649", "Sector 6", "RO", "Europe/Bucharest"),
("44.22639", "22.53083", "Negotin", "RS", "Europe/Belgrade"),
("44.97639", "19.61222", "Sremska Mitrovica", "RS", "Europe/Belgrade"),
("53.53395", "33.72798", "Zhukovka", "RU", "Europe/Moscow"),
("46.7055", "38.2739", "Yeysk", "RU", "Europe/Moscow"),
("44.98901", "38.94324", "Yablonovskiy", "RU", "Europe/Moscow"),
("56.03361", "35.96944", "Volokolamsk", "RU", "Europe/Moscow"),
("57.97472", "33.2525", "Valday", "RU", "Europe/Moscow"),
("56.85836", "35.90057", "Tver", "RU", "Europe/Moscow"),
("55.62047", "37.49338", "Tyoply Stan", "RU", "Europe/Moscow"),
("54.90083", "38.07083", "Stupino", "RU", "Europe/Moscow"),
("55.63711", "37.38115", "Solntsevo", "RU", "Europe/Moscow"),
("59.80917", "30.38167", "Shushary", "RU", "Europe/Moscow"),
("64.5635", "39.8302", "Severodvinsk", "RU", "Europe/Moscow"),
("51.78771", "56.36091", "Saraktash", "RU", "Asia/Yekaterinburg"),
("53.95278", "32.86389", "Roslavl’", "RU", "Europe/Moscow"),
("51.40944", "46.04833", "Privolzhskiy", "RU", "Europe/Saratov"),
("61.78491", "34.34691", "Petrozavodsk", "RU", "Europe/Moscow"),
("53.37596", "51.3452", "Otradnyy", "RU", "Europe/Samara"),
("54.48147", "53.47103", "Oktyabr’skiy", "RU", "Asia/Yekaterinburg"),
("43.96222", "43.63417", "Novopavlovsk", "RU", "Europe/Moscow"),
("53.53041", "43.67663", "Nizhniy Lomov", "RU", "Europe/Moscow"),
("55.38752", "36.73307", "Naro-Fominsk", "RU", "Europe/Moscow"),
("50.06", "43.2379", "Mikhaylovka", "RU", "Europe/Volgograd"),
("55.64776", "38.02486", "Malakhovka", "RU", "Europe/Moscow"),
("55.85", "37.56667", "Likhobory", "RU", "Europe/Moscow"),
("51.4781", "57.3552", "Kuvandyk", "RU", "Asia/Yekaterinburg"),
("44.92934", "37.99117", "Krymsk", "RU", "Europe/Moscow"),
("54.03876", "43.91385", "Kovylkino", "RU", "Europe/Moscow"),
("60.02427", "30.28491", "Kolomyagi", "RU", "Europe/Moscow"),
("53.93361", "37.92792", "Kireyevsk", "RU", "Europe/Moscow"),
("54.84444", "38.16694", "Kashira", "RU", "Europe/Moscow"),
("58.7002", "59.4839", "Kachkanar", "RU", "Asia/Yekaterinburg"),
("43.35071", "46.10925", "Gudermes", "RU", "Europe/Moscow"),
("57.30185", "39.85331", "Gavrilov-Yam", "RU", "Europe/Moscow"),
("53.59782", "34.33825", "Dyat’kovo", "RU", "Europe/Moscow"),
("58.1908", "40.17171", "Danilov", "RU", "Europe/Moscow"),
("42.819", "47.1192", "Buynaksk", "RU", "Europe/Moscow"),
("53.77166", "38.12408", "Bogoroditsk", "RU", "Europe/Moscow"),
("54.39304", "53.26023", "Bavly", "RU", "Europe/Moscow"),
("55.39485", "43.83992", "Arzamas", "RU", "Europe/Moscow"),
("54.8421", "46.5813", "Alatyr’", "RU", "Europe/Moscow"),
("58.63667", "59.80222", "Lesnoy", "RU", "Asia/Yekaterinburg"),
("55.8736", "85.4265", "Yashkino", "RU", "Asia/Novokuznetsk"),
("58.04254", "65.27258", "Tavda", "RU", "Asia/Yekaterinburg"),
("55.54028", "89.20083", "Sharypovo", "RU", "Asia/Krasnoyarsk"),
("53.30972", "83.62389", "Novosilikatnyy", "RU", "Asia/Barnaul"),
("58.23583", "92.48278", "Lesosibirsk", "RU", "Asia/Krasnoyarsk"),
("56.11281", "69.49015", "Ishim", "RU", "Asia/Yekaterinburg"),
("56.9083", "60.8019", "Beryozovsky", "RU", "Asia/Yekaterinburg"),
("55.75556", "60.70278", "Ozersk", "RU", "Asia/Yekaterinburg"),
("51.82721", "107.60627", "Ulan-Ude", "RU", "Asia/Irkutsk"),
("45.47885", "133.42825", "Lesozavodsk", "RU", "Asia/Vladivostok"),
("65.93381", "111.4834", "Aykhal", "RU", "Asia/Yakutsk"),
("53.14657", "140.72287", "Nikolayevsk-on-Amure", "RU", "Asia/Vladivostok"),
("60.97944", "76.92421", "Izluchinsk", "RU", "Asia/Yekaterinburg"),
("-1.9487", "30.4347", "Rwamagana", "RW", "Africa/Kigali"),
("27.0174", "49.62251", "Al Jubayl", "SA", "Asia/Riyadh"),
("11.8659", "34.3869", "Ar Ruseris", "SD", "Africa/Khartoum"),
("61.72744", "17.10558", "Hudiksvall", "SE", "Europe/Stockholm"),
("59.33333", "18.28333", "Boo", "SE", "Europe/Stockholm"),
("48.8449", "17.22635", "Skalica", "SK", "Europe/Bratislava"),
("48.43174", "17.8031", "Hlohovec", "SK", "Europe/Bratislava"),
("8.48714", "-13.2356", "Freetown", "SL", "Africa/Freetown"),
("-0.35817", "42.54536", "Kismayo", "SO", "Africa/Mogadishu"),
("9.89206", "43.38531", "Baki", "SO", "Africa/Mogadishu"),
("13.73417", "-89.71472", "Sonzacate", "SV", "America/El_Salvador"),
("13.70167", "-89.10944", "Ilopango", "SV", "America/El_Salvador"),
("34.5624", "38.28402", "Tadmur", "SY", "Asia/Damascus"),
("35.95664", "36.7138", "Binnish", "SY", "Asia/Damascus"),
("12.18441", "18.69303", "Mongo", "TD", "Africa/Ndjamena"),
("15.46063", "99.89166", "Thap Than", "TH", "Asia/Bangkok"),
("8.43333", "99.96667", "Nakhon Si Thammarat", "TH", "Asia/Bangkok"),
("13.51825", "99.95469", "Damnoen Saduak", "TH", "Asia/Bangkok"),
("15.79408", "104.1451", "Yasothon", "TH", "Asia/Bangkok"),
("6.25947", "102.05461", "Tak Bai", "TH", "Asia/Bangkok"),
("16.0567", "103.65309", "Roi Et", "TH", "Asia/Bangkok"),
("13.44581", "101.18445", "Phanat Nikhom", "TH", "Asia/Bangkok"),
("13.8196", "100.04427", "Nakhon Pathom", "TH", "Asia/Bangkok"),
("14.64056", "104.64992", "Kantharalak", "TH", "Asia/Bangkok"),
("15.58552", "102.42587", "Bua Yai", "TH", "Asia/Bangkok"),
("14.37395", "100.48528", "Bang Ban", "TH", "Asia/Bangkok"),
("38.55632", "69.01354", "Vahdat", "TJ", "Asia/Dushanbe"),
("-8.99167", "125.21972", "Maliana", "TL", "Asia/Dili"),
("36.08497", "9.37082", "Siliana", "TN", "Africa/Tunis"),
("35.72917", "10.58082", "Msaken", "TN", "Africa/Tunis"),
("36.46917", "10.78222", "Beni Khiar", "TN", "Africa/Tunis"),
("37.16911", "10.03478", "El Alia", "TN", "Africa/Tunis"),
("38.13708", "41.00817", "Silvan", "TR", "Europe/Istanbul"),
("39.22493", "42.85693", "Patnos", "TR", "Europe/Istanbul"),
("37.31309", "40.74357", "Mardin", "TR", "Europe/Istanbul"),
("37.58105", "29.26639", "Serinhisar", "TR", "Europe/Istanbul"),
("37.05944", "37.3825", "Gaziantep", "TR", "Europe/Istanbul"),
("39.59611", "27.02444", "Edremit", "TR", "Europe/Istanbul"),
("39.12074", "27.18052", "Bergama", "TR", "Europe/Istanbul"),
("38.37255", "34.02537", "Aksaray", "TR", "Europe/Istanbul"),
("40.98894", "28.67582", "Yakuplu", "TR", "Europe/Istanbul"),
("40.1675", "34.37389", "Sungurlu", "TR", "Europe/Istanbul"),
("40.37528", "28.88222", "Mudanya", "TR", "Europe/Istanbul"),
("10.66668", "-61.51889", "Port of Spain", "TT", "America/Port_of_Spain"),
("23.5654", "119.58627", "Magong", "TW", "Asia/Taipei"),
("-2.68333", "33", "Usagara", "TZ", "Africa/Dar_es_Salaam"),
("-4.06667", "37.73333", "Same", "TZ", "Africa/Dar_es_Salaam"),
("-6.25", "38.66667", "Mvomero", "TZ", "Africa/Dar_es_Salaam"),
("-4.83", "29.65806", "Mwandiga", "TZ", "Africa/Dar_es_Salaam"),
("-6.8", "39.25", "Magomeni", "TZ", "Africa/Dar_es_Salaam"),
("-7.60361", "37.00438", "Kidodi", "TZ", "Africa/Dar_es_Salaam"),
("-7.76667", "35.7", "Iringa", "TZ", "Africa/Dar_es_Salaam"),
("-5.41667", "38.01667", "Chanika", "TZ", "Africa/Dar_es_Salaam"),
("-10.33333", "39.28333", "Nyangao", "TZ", "Africa/Dar_es_Salaam"),
("49.07866", "30.96755", "Zvenihorodka", "UA", "Europe/Kiev"),
("47.56494", "31.33078", "Voznesensk", "UA", "Europe/Kiev"),
("49.41029", "38.15035", "Svatove", "UA", "Europe/Zaporozhye"),
("50.18545", "27.06365", "Shepetivka", "UA", "Europe/Kiev"),
("47.48444", "36.25361", "Polohy", "UA", "Europe/Zaporozhye"),
("46.75451", "33.34864", "Nova Kakhovka", "UA", "Europe/Kiev"),
("50.75932", "25.34244", "Lutsk", "UA", "Europe/Kiev"),
("49.65186", "26.97253", "Krasyliv", "UA", "Europe/Kiev"),
("46.65581", "32.6178", "Kherson", "UA", "Europe/Kiev"),
("51.67822", "33.9162", "Hlukhiv", "UA", "Europe/Kiev"),
("45.99194", "29.41824", "Artsyz", "UA", "Europe/Kiev"),
("2.41669", "30.98551", "Paidha", "UG", "Africa/Kampala"),
("3.27833", "32.88667", "Kitgum", "UG", "Africa/Kampala"),
("3.02013", "30.91105", "Arua", "UG", "Africa/Kampala"),
("33.45122", "-86.99666", "Hueytown", "US", "America/Chicago"),
("33.44872", "-86.78777", "Vestavia Hills", "US", "America/Chicago"),
("35.25064", "-91.73625", "Searcy", "US", "America/Chicago"),
("26.68451", "-80.66756", "Belle Glade", "US", "America/New_York"),
("28.54944", "-81.77285", "Clermont", "US", "America/New_York"),
("28.90054", "-81.26367", "Deltona", "US", "America/New_York"),
("29.65163", "-82.32483", "Gainesville", "US", "America/New_York"),
("25.67927", "-80.31727", "Kendall", "US", "America/New_York"),
("28.15112", "-82.46148", "Lutz", "US", "America/New_York"),
("26.2173", "-80.22588", "North Lauderdale", "US", "America/New_York"),
("30.17746", "-81.38758", "Palm Valley", "US", "America/New_York"),
("26.91756", "-82.07842", "Punta Gorda Isles", "US", "America/New_York"),
("27.71809", "-82.35176", "Sun City Center", "US", "America/New_York"),
("27.09978", "-82.45426", "Venice", "US", "America/New_York"),
("34.06635", "-84.67837", "Acworth", "US", "America/New_York"),
("32.54044", "-82.90375", "Dublin", "US", "America/New_York"),
("33.08014", "-83.2321", "Milledgeville", "US", "America/New_York"),
("33.54428", "-84.23381", "Stockbridge", "US", "America/New_York"),
("38.58894", "-89.99038", "Fairview Heights", "US", "America/Chicago"),
("39.78504", "-85.76942", "Greenfield", "US", "America/Indiana/Indianapolis"),
("38.06084", "-97.92977", "Hutchinson", "US", "America/Chicago"),
("39.08367", "-84.50855", "Covington", "US", "America/New_York"),
("36.61033", "-88.31476", "Murray", "US", "America/Chicago"),
("29.84576", "-90.10674", "Estelle", "US", "America/Chicago"),
("32.52515", "-93.75018", "Shreveport", "US", "America/Chicago"),
("38.96372", "-76.99081", "Chillum", "US", "America/New_York"),
("38.70734", "-77.02303", "Fort Washington", "US", "America/New_York"),
("39.33427", "-76.43941", "Middle River", "US", "America/New_York"),
("39.32011", "-76.51552", "Rosedale", "US", "America/New_York"),
("39.32288", "-76.72803", "Woodlawn", "US", "America/New_York"),
("39.09112", "-94.41551", "Independence", "US", "America/Chicago"),
("37.95143", "-91.77127", "Rolla", "US", "America/Chicago"),
("33.41012", "-91.06177", "Greenville", "US", "America/Chicago"),
("34.25807", "-88.70464", "Tupelo", "US", "America/Chicago"),
("35.05266", "-78.87836", "Fayetteville", "US", "America/New_York"),
("34.25628", "-78.04471", "Leland", "US", "America/New_York"),
("35.88264", "-80.08199", "Thomasville", "US", "America/New_York"),
("39.71734", "-74.96933", "Sicklerville", "US", "America/New_York"),
("39.43534", "-84.20299", "Lebanon", "US", "America/New_York"),
("34.77453", "-96.67834", "Ada", "US", "America/Chicago"),
("35.74788", "-95.36969", "Muskogee", "US", "America/Chicago"),
("39.96097", "-75.60804", "West Chester", "US", "America/New_York"),
("33.98154", "-81.23621", "Lexington", "US", "America/New_York"),
("36.02506", "-86.77917", "Brentwood Estates", "US", "America/Chicago"),
("35.61452", "-88.81395", "Jackson", "US", "America/Chicago"),
("32.44874", "-99.73314", "Abilene", "US", "America/Chicago"),
("30.16688", "-96.39774", "Brenham", "US", "America/Chicago"),
("31.12406", "-97.90308", "Copperas Cove", "US", "America/Chicago"),
("29.53885", "-95.44744", "Fresno", "US", "America/Chicago"),
("30.5427", "-97.54667", "Hutto", "US", "America/Chicago"),
("32.5007", "-94.74049", "Longview", "US", "America/Chicago"),
("31.76212", "-95.63079", "Palestine", "US", "America/Chicago"),
("26.18924", "-98.15529", "San Juan", "US", "America/Chicago"),
("32.35126", "-95.30106", "Tyler", "US", "America/Chicago"),
("37.52487", "-77.55777", "Bon Air", "US", "America/New_York"),
("38.91817", "-78.19444", "Front Royal", "US", "America/New_York"),
("37.60876", "-77.37331", "Mechanicsville", "US", "America/New_York"),
("39.00622", "-77.4286", "Sterling", "US", "America/New_York"),
("39.45621", "-77.96389", "Martinsburg", "US", "America/New_York"),
("41.27621", "-72.86843", "East Haven", "US", "America/New_York"),
("41.14676", "-73.49484", "New Canaan", "US", "America/New_York"),
("41.55815", "-73.0515", "Waterbury", "US", "America/New_York"),
("41.6764", "-91.58045", "Coralville", "US", "America/Chicago"),
("41.57721", "-93.71133", "West Des Moines", "US", "America/Chicago"),
("41.15376", "-87.88754", "Bourbonnais", "US", "America/Chicago"),
("42.24113", "-88.3162", "Crystal Lake", "US", "America/Chicago"),
("41.72059", "-87.70172", "Evergreen Park", "US", "America/Chicago"),
("42.16808", "-88.42814", "Huntley", "US", "America/Chicago"),
("41.8542", "-87.66561", "Lower West Side", "US", "America/Chicago"),
("41.80753", "-87.65644", "New City", "US", "America/Chicago"),
("40.56754", "-89.64066", "Pekin", "US", "America/Chicago"),
("41.84364", "-87.71255", "South Lawndale", "US", "America/Chicago"),
("41.85059", "-87.882", "Westchester", "US", "America/Chicago"),
("41.75338", "-86.11084", "Granger", "US", "America/Indiana/Indianapolis"),
("41.47892", "-87.45476", "Schererville", "US", "America/Chicago"),
("42.35843", "-71.05977", "Boston", "US", "America/New_York"),
("42.58342", "-71.8023", "Fitchburg", "US", "America/New_York"),
("42.4251", "-71.06616", "Malden", "US", "America/New_York"),
("42.52787", "-70.92866", "Peabody", "US", "America/New_York"),
("41.9001", "-71.08977", "Taunton", "US", "America/New_York"),
("43.91452", "-69.96533", "Brunswick", "US", "America/New_York"),
("42.30865", "-83.48216", "Canton", "US", "America/Detroit"),
("46.09273", "-88.64235", "Iron River", "US", "America/Menominee"),
("42.97086", "-82.42491", "Port Huron", "US", "America/Detroit"),
("42.7392", "-84.62081", "Waverly", "US", "America/Detroit"),
("45.0408", "-93.263", "Columbia Heights", "US", "America/Chicago"),
("45.16024", "-93.08883", "Lino Lakes", "US", "America/Chicago"),
("44.73941", "-93.12577", "Rosemount", "US", "America/Chicago"),
("47.92526", "-97.03285", "Grand Forks", "US", "America/Chicago"),
("42.93369", "-72.27814", "Keene", "US", "America/New_York"),
("40.94065", "-73.99681", "Dumont", "US", "America/New_York"),
("40.72816", "-74.07764", "Jersey City", "US", "America/New_York"),
("40.82232", "-74.15987", "Nutley", "US", "America/New_York"),
("40.65538", "-74.38987", "Scotch Plains", "US", "America/New_York"),
("40.5576", "-74.28459", "Woodbridge", "US", "America/New_York"),
("40.57788", "-73.95958", "Brighton Beach", "US", "America/New_York"),
("40.67705", "-73.89125", "Cypress Hills", "US", "America/New_York"),
("40.60538", "-73.75513", "Far Rockaway", "US", "America/New_York"),
("40.72371", "-73.95097", "Greenpoint", "US", "America/New_York"),
("40.64621", "-73.97069", "Kensington", "US", "America/New_York"),
("40.68066", "-73.47429", "Massapequa", "US", "America/New_York"),
("41.50343", "-74.01042", "Newburgh", "US", "America/New_York"),
("40.63316", "-74.13653", "Port Richmond", "US", "America/New_York"),
("41.0051", "-73.78458", "Scarsdale", "US", "America/New_York"),
("43.1009", "-75.23266", "Utica", "US", "America/New_York"),
("40.93121", "-73.89875", "Yonkers", "US", "America/New_York"),
("41.55838", "-81.56929", "Collinwood", "US", "America/New_York"),
("41.48199", "-81.79819", "Lakewood", "US", "America/New_York"),
("41.24255", "-82.61573", "Norwalk", "US", "America/New_York"),
("41.66394", "-83.55521", "Toledo", "US", "America/New_York"),
("40.2737", "-76.88442", "Harrisburg", "US", "America/New_York"),
("40.24537", "-75.64963", "Pottstown", "US", "America/New_York"),
("41.54566", "-71.29144", "Middletown", "US", "America/New_York"),
("43.61062", "-72.97261", "Rutland", "US", "America/New_York"),
("44.27804", "-88.27205", "Kaukauna", "US", "America/Chicago"),
("42.55308", "-87.93341", "Pleasant Prairie", "US", "America/Chicago"),
("41.16704", "-73.20483", "Bridgeport", "US", "America/New_York"),
("33.35283", "-111.78903", "Gilbert", "US", "America/Phoenix"),
("33.50921", "-111.89903", "Scottsdale", "US", "America/Phoenix"),
("38.17492", "-122.2608", "American Canyon", "US", "America/Los_Angeles"),
("33.92946", "-116.97725", "Beaumont", "US", "America/Los_Angeles"),
("34.21639", "-119.0376", "Camarillo", "US", "America/Los_Angeles"),
("34.09668", "-117.71978", "Claremont", "US", "America/Los_Angeles"),
("38.54491", "-121.74052", "Davis", "US", "America/Los_Angeles"),
("33.03699", "-117.29198", "Encinitas", "US", "America/Los_Angeles"),
("34.14251", "-118.25508", "Glendale", "US", "America/Los_Angeles"),
("33.7207", "-116.21677", "Indio", "US", "America/Los_Angeles"),
("33.52253", "-117.70755", "Laguna Niguel", "US", "America/Los_Angeles"),
("34.63915", "-120.45794", "Lompoc", "US", "America/Los_Angeles"),
("32.9156", "-117.14392", "Mira Mesa", "US", "America/Los_Angeles"),
("33.93113", "-117.54866", "Norco", "US", "America/Los_Angeles"),
("33.72255", "-116.37697", "Palm Desert", "US", "America/Los_Angeles"),
("36.06523", "-119.01677", "Porterville", "US", "America/Los_Angeles"),
("37.73604", "-120.93549", "Riverbank", "US", "America/Los_Angeles"),
("34.09611", "-118.10583", "San Gabriel", "US", "America/Los_Angeles"),
("34.95303", "-120.43572", "Santa Maria", "US", "America/Los_Angeles"),
("33.95015", "-118.03917", "South Whittier", "US", "America/Los_Angeles"),
("33.76446", "-117.79394", "North Tustin", "US", "America/Los_Angeles"),
("36.91023", "-121.75689", "Watsonville", "US", "America/Los_Angeles"),
("39.72943", "-104.83192", "Aurora", "US", "America/Denver"),
("39.57582", "-105.11221", "Ken Caryl", "US", "America/Denver"),
("32.42067", "-104.22884", "Carlsbad", "US", "America/Denver"),
("36.20829", "-115.98391", "Pahrump", "US", "America/Los_Angeles"),
("31.84568", "-102.36764", "Odessa", "US", "America/Chicago"),
("40.58654", "-122.39168", "Redding", "US", "America/Los_Angeles"),
("43.54072", "-116.56346", "Nampa", "US", "America/Boise"),
("45.49428", "-122.86705", "Aloha", "US", "America/Los_Angeles"),
("44.99012", "-123.02621", "Keizer", "US", "America/Los_Angeles"),
("45.53929", "-122.38731", "Troutdale", "US", "America/Los_Angeles"),
("40.65995", "-111.99633", "Kearns", "US", "America/Denver"),
("40.34912", "-111.90466", "Saratoga Springs", "US", "America/Denver"),
("47.76232", "-122.2054", "Bothell", "US", "America/Los_Angeles"),
("47.38093", "-122.23484", "Kent", "US", "America/Los_Angeles"),
("47.64995", "-117.23991", "Opportunity", "US", "America/Los_Angeles"),
("46.32374", "-120.00865", "Sunnyside", "US", "America/Los_Angeles"),
("20.88953", "-156.47432", "Kahului", "US", "Pacific/Honolulu"),
("40.81", "-73.9625", "Morningside Heights", "US", "America/New_York"),
("43.16547", "-77.70066", "Gates-North Gates", "US", "America/New_York"),
("47.4943", "-122.24092", "Bryn Mawr-Skyway", "US", "America/Los_Angeles"),
("47.80527", "-122.24064", "Bothell West", "US", "America/Los_Angeles"),
("37.71715", "-122.40433", "Visitacion Valley", "US", "America/Los_Angeles"),
("-33.38056", "-56.52361", "Durazno", "UY", "America/Montevideo"),
("41.29444", "69.67639", "Parkent", "UZ", "Asia/Tashkent"),
("40.11583", "67.84222", "Jizzax", "UZ", "Asia/Samarkand"),
("40.78206", "72.34424", "Andijon", "UZ", "Asia/Tashkent"),
("9.91861", "-68.30472", "Tinaquillo", "VE", "America/Caracas"),
("10.22677", "-67.33122", "La Victoria", "VE", "America/Caracas"),
("8.35122", "-62.64102", "Ciudad Guayana", "VE", "America/Caracas"),
("8.62261", "-70.20749", "Barinas", "VE", "America/Caracas"),
("10.29085", "105.75635", "Sa Dec", "VN", "Asia/Ho_Chi_Minh"),
("-17.73648", "168.31366", "Port-Vila", "VU", "Pacific/Efate"),
("42.62833", "20.89389", "Glogovac", "XK", "Europe/Belgrade"),
("14.53767", "46.83187", "Ataq", "YE", "Asia/Aden"),
("-27.76952", "30.79165", "Vryheid", "ZA", "Africa/Johannesburg"),
("-26.93366", "29.24152", "Standerton", "ZA", "Africa/Johannesburg"),
("-24.19436", "29.00974", "Mokopane", "ZA", "Africa/Johannesburg"),
)
def coordinate(self, center=None, radius=0.001):
"""
Optionally center the coord and pick a point within radius.
"""
if center is None:
return Decimal(str(self.generator.random.randint(-180000000, 180000000) / 1000000.0)).quantize(
Decimal(".000001"),
)
else:
center = float(center)
radius = float(radius)
geo = self.generator.random.uniform(center - radius, center + radius)
return Decimal(str(geo)).quantize(Decimal(".000001"))
def latitude(self):
# Latitude has a range of -90 to 90, so divide by two.
return self.coordinate() / 2
def longitude(self):
return self.coordinate()
def latlng(self):
return (self.latitude(), self.longitude())
def local_latlng(self, country_code='US', coords_only=False):
"""Returns a location known to exist on land in a country specified by `country_code`.
        Defaults to 'US'. See the `land_coords` list for available locations/countries.
        Returns None when no land coordinates are known for the given country code.
"""
results = [loc for loc in self.land_coords if loc[3] == country_code]
if results:
place = self.random_element(results)
return (place[0], place[1]) if coords_only else place
def location_on_land(self, coords_only=False):
"""Returns a random tuple specifying a coordinate set guaranteed to exist on land.
Format is `(latitude, longitude, place name, two-letter country code, timezone)`
Pass `coords_only` to return coordinates without metadata.
"""
place = self.random_element(self.land_coords)
return (place[0], place[1]) if coords_only else place
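# A minimal usage sketch (illustrative only; it assumes the surrounding faker
# package is installed and exposes this provider through the usual `Faker`
# entry point, which is not shown in this file):
#
#     from faker import Faker
#     fake = Faker()
#     lat, lng = fake.latlng()                       # unconstrained coordinates
#     place = fake.local_latlng(country_code='US')   # on-land tuple, or None
#     coords = fake.location_on_land(coords_only=True)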
|
deanishe/alfred-fakeum
|
src/libs/faker/providers/geo/__init__.py
|
Python
|
mit
| 70,121
|
# -*- coding: utf-8 -*-
# This script simulates a user tapping an NFC or RFID tag to enter or leave
# a location.
# Usage example:
# $ python pub-local.py -p CICI -d puerta1 -t ABCDEF -k 123456
# To verify that it works, you can subscribe to the channel:
# $ mosquitto_sub -h localhost -t 'lugares/acceso/CICI/puerta1'
import paho.mqtt.publish as publish
import json
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--place",
'-p',
required=True,
help="Name of the place",
metavar='PLACE',
default='')
parser.add_argument("--door",
'-d',
required=True,
help="Name of the door",
metavar='DOOR',
default='')
parser.add_argument("--tag",
'-t',
required=True,
help="Tag of an user",
metavar='TAG',
default='')
parser.add_argument("--key",
'-k',
required=True,
help="Key of the place",
metavar='KEY',
default='')
parser.add_argument("--token",
'-T',
required=False,
help="Auth token",
metavar='token',
default='')
parser.add_argument("--user",
'-U',
required=False,
help="Auth User",
metavar='user',
default='')
parser.add_argument("--password",
'-P',
required=False,
help="Auth Pass",
metavar='password',
default='')
args = parser.parse_args()
my_dict = {
'instruction': 'ACCESS',
'tag': str(args.tag),
'secure-key': str(args.key)
}
if args.token:
    my_dict['token'] = args.token
if args.user and args.password:
    my_dict['user'] = args.user
    my_dict['password'] = args.password
publish.single('boards/instructions/'+str(args.place)+'/'+str(args.door)+'',
json.dumps(my_dict),
hostname="localhost")
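# With the example invocation from the header comment, the JSON payload published
# to topic 'boards/instructions/CICI/puerta1' would look roughly like this
# (illustrative; extra fields appear only when --token or --user/--password are given):
#
#     {"instruction": "ACCESS", "tag": "ABCDEF", "secure-key": "123456"}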
|
smartcities-livinglab-udg/APP-SmartCheckIn
|
app/modules/MQTT/pub-local-example.py
|
Python
|
mit
| 2,306
|
from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer
"""
Setup:
* in_to_cell init weights are now Normal(1.0)
* output all appliances
* fix bug in RealApplianceSource
* use cross-entropy
* smaller network
* power targets
* trying without first two sigmoid layers.
* updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0
which fixes LSTM bug.
https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0
* Subsampling *bidirectional* LSTM
* Output every sequence in the batch
* Change W_in_to_cell from Normal(1.0) to Uniform(5)
* put back the two sigmoid layers
* use Conv1D to create a hierarchical subsampling LSTM
* Using LSTM (not BLSTM) to speed up training while testing
* Use dimshuffle not reshape
* 2 dense layers back
* back to default init
* conv between LSTMs.
* More data
* BLSTM
* Try just using a 1D convnet on input
* add second Convnet layer (not sure this is correct thing to do?)
* third conv layer
* large inits
* back to 2 conv layers
e70
* Based on e65
* Using sigmoid instead of rectify in Conv1D layers
e71
* Larger layers
* More data
e72
* Add a third conv layer
e73
* Add a dense layer after 3 conv layers
e74
* Removed dense layer after 3 conv layers (because it failed to learn anything)
* Trying standard inits for weights and biases throughout network.
e75
* Putting back large init for first layer
e76
* Removed 3rd conv layer
e77
* Try init Uniform(1)
e78
* Back to large inits for first layers
* Trying 3rd conv layer, also with large init
e79
* Trying to merge 1D conv on bottom layer with hierarchical subsampling
from e59a.
* Replace first LSTM with BLSTM
* Add second BLSTM layer
* Add conv1d between BLSTM layers.
e80
* Remove third 1d conv layer
e81
* Change num_filters in conv layer between BLSTMs from 20 to 80
e82
* Remove first conv layers
Results
"""
source = RealApplianceSource(
'/data/dk3810/ukdale.h5',
['fridge freezer', 'hair straighteners', 'television'],
max_input_power=1000, max_appliance_powers=[300, 500, 200],
window=("2013-06-01", "2014-07-01"),
output_one_appliance=False,
boolean_targets=False,
min_on_durations=[60, 60, 60],
input_padding=0,
subsample_target=5
)
net = Net(
experiment_name="e82",
source=source,
learning_rate=1e-1,
save_plot_interval=250,
loss_function=crossentropy,
layers_config=[
# {
# 'type': BLSTMLayer,
# 'num_units': 40,
# 'W_in_to_cell': Uniform(5)
# },
# {
# 'type': DimshuffleLayer,
# 'pattern': (0, 2, 1)
# },
# {
# 'type': Conv1DLayer,
# 'num_filters': 60,
# 'filter_length': 5,
# 'stride': 5,
# 'nonlinearity': sigmoid
# },
# {
# 'type': DimshuffleLayer,
# 'pattern': (0, 2, 1)
# },
{
'type': BLSTMLayer,
'num_units': 60,
'W_in_to_cell': Uniform(5)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 80,
'filter_length': 5,
'stride': 5,
'nonlinearity': sigmoid
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BLSTMLayer,
'num_units': 80,
'W_in_to_cell': Uniform(5)
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
)
net.print_net()
net.compile()
net.fit()
|
JackKelly/neuralnilm_prototype
|
scripts/e82.py
|
Python
|
mit
| 4,091
|
#!/usr/bin/env python
#
# GrovePi plant monitoring project.
# * Reads the data from moisture, light, temperature and humidity sensor
# and takes pictures from the Pi camera periodically and logs them
# * Sensor Connections on the GrovePi:
# -> Grove Moisture sensor - Port A1
# -> Grove light sensor - Port A2
# -> Grove DHT sensors - Port D4
#
# NOTE:
# * Make sure that the Pi camera is enabled and works. Directions here: https://www.raspberrypi.org/help/camera-module-setup/
#
# The GrovePi connects the Raspberry Pi and Grove sensors. You can learn more about GrovePi here: http://www.dexterindustries.com/GrovePi
#
# Have a question about this example? Ask on the forums here: http://www.dexterindustries.com/forum/?forum=grovepi
#
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import time
import grovepi
import subprocess
import math
# Analog sensor port numbers
moisture_sensor = 1
light_sensor = 2
# Digital sensor port number
temp_humidity_sensor = 4
# temp_humidity_sensor type:
# the Grove starter kit comes with the blue sensor
blue=0
white=1
#############
#test timings
time_for_sensor = 4 # 4 seconds
time_for_picture = 12 # 12 seconds
# final
# time_for_sensor = 1*60*60 #1hr
# time_for_picture = 8*60*60 #8hr
time_to_sleep = 1
log_file="plant_monitor_log.csv"
#Read the data from the sensors
def read_sensor():
    try:
        moisture = grovepi.analogRead(moisture_sensor)
        light = grovepi.analogRead(light_sensor)
        [temp, humidity] = grovepi.dht(temp_humidity_sensor, white)
        # Return -1 in case of a bad temp/humidity sensor reading
        if math.isnan(temp) or math.isnan(humidity):  # temp/humidity sensor sometimes gives NaN
            return [-1, -1, -1, -1]
        return [moisture, light, temp, humidity]
    # Return -1 in case of a sensor error
    except (IOError, TypeError):
        return [-1, -1, -1, -1]
#Take a picture with the current time using the Raspberry Pi camera. Save it in the same folder
def take_picture():
try:
cmd="raspistill -t 1 -o plant_monitor_"+str(time.strftime("%Y_%m_%d__%H_%M_%S"))+".jpg"
process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
output = process.communicate()[0]
print("Picture taken\n------------>\n")
except:
print("Camera problem,please check the camera connections and settings")
#Save the initial time, we will use this to find out when it is time to take a picture or save a reading
last_read_sensor=last_pic_time= int(time.time())
while True:
curr_time_sec=int(time.time())
# If it is time to take the sensor reading
if curr_time_sec-last_read_sensor>time_for_sensor:
[moisture,light,temp,humidity]=read_sensor()
# If any reading is a bad reading, skip the loop and try again
if moisture==-1:
print("Bad reading")
time.sleep(1)
continue
curr_time = time.strftime("%Y-%m-%d:%H-%M-%S")
print(("Time:%s\nMoisture: %d\nLight: %d\nTemp: %.2f\nHumidity:%.2f %%\n" %(curr_time,moisture,light,temp,humidity)))
# Save the sensor reading to the CSV file
f=open(log_file,'a')
f.write("%s,%d,%d,%.2f,%.2f;\n" %(curr_time,moisture,light,temp,humidity))
f.close()
#Update the last read time
last_read_sensor=curr_time_sec
# If it is time to take the picture
if curr_time_sec-last_pic_time>time_for_picture:
take_picture()
last_pic_time=curr_time_sec
#Slow down the loop
time.sleep(time_to_sleep)
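# Rough sketch of reading the CSV log back (illustrative only; it assumes the
# log file exists and follows the exact format string written above, including
# the trailing ';'):
#
#     with open(log_file) as f:
#         for line in f:
#             ts, moisture, light, temp, humidity = line.rstrip(";\n").split(",")
#             print(ts, int(moisture), int(light), float(temp), float(humidity))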
|
penoud/GrovePi
|
Projects/plant_monitor/plant_project.py
|
Python
|
mit
| 4,476
|
"""Torrenting utils, mostly for handling bencoding and torrent files."""
# Torrent decoding is a short fragment from effbot.org. Site copyright says:
# Test scripts and other short code fragments can be considered as being in the public domain.
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import binascii
import functools
import re
import logging
log = logging.getLogger('torrent')
# Magic indicator used to quickly recognize torrent files
TORRENT_RE = re.compile(br'^d\d{1,3}:')
# List of all standard keys in a metafile
# See http://packages.python.org/pyrocore/apidocs/pyrocore.util.metafile-module.html#METAFILE_STD_KEYS
METAFILE_STD_KEYS = [i.split('.') for i in (
"announce",
"announce-list", # BEP-0012
"comment",
"created by",
"creation date",
"encoding",
"info",
"info.length",
"info.name",
"info.piece length",
"info.pieces",
"info.private",
"info.files",
"info.files.length",
"info.files.path",
)]
def clean_meta(meta, including_info=False, logger=None):
""" Clean meta dict. Optionally log changes using the given logger.
See also http://packages.python.org/pyrocore/apidocs/pyrocore.util.metafile-pysrc.html#clean_meta
@param logger: If given, a callable accepting a string message.
@return: Set of keys removed from C{meta}.
"""
modified = set()
for key in list(meta.keys()):
if [key] not in METAFILE_STD_KEYS:
if logger:
logger("Removing key %r..." % (key,))
del meta[key]
modified.add(key)
if including_info:
for key in list(meta["info"].keys()):
if ["info", key] not in METAFILE_STD_KEYS:
if logger:
logger("Removing key %r..." % ("info." + key,))
del meta["info"][key]
modified.add("info." + key)
for idx, entry in enumerate(meta["info"].get("files", [])):
for key in list(entry.keys()):
if ["info", "files", key] not in METAFILE_STD_KEYS:
if logger:
logger("Removing key %r from file #%d..." % (key, idx + 1))
del entry[key]
modified.add("info.files." + key)
return modified
def is_torrent_file(metafilepath):
""" Check whether a file looks like a metafile by peeking into its content.
Note that this doesn't ensure that the file is a complete and valid torrent,
it just allows fast filtering of candidate files.
@param metafilepath: Path to the file to check, must have read permissions for it.
@return: True if there is a high probability this is a metafile.
"""
with open(metafilepath, 'rb') as f:
data = f.read(200)
magic_marker = bool(TORRENT_RE.match(data))
if not magic_marker:
log.trace('%s doesn\'t seem to be a torrent, got `%s` (hex)' % (metafilepath, binascii.hexlify(data)))
return bool(magic_marker)
def tokenize(text, match=re.compile(br'([idel])|(\d+):|(-?\d+)').match):
i = 0
while i < len(text):
m = match(text, i)
s = m.group(m.lastindex)
i = m.end()
if m.lastindex == 2:
yield b's'
yield text[i:i + int(s)]
i += int(s)
else:
yield s
def decode_item(next, token):
if token == b'i':
# integer: "i" value "e"
data = int(next())
if next() != b'e':
raise ValueError
elif token == b's':
# string: "s" value (virtual tokens)
data = next()
# Strings in torrent file are defined as utf-8 encoded
try:
data = data.decode('utf-8')
except UnicodeDecodeError:
# The pieces field is a byte string, and should be left as such.
pass
elif token == b'l' or token == b'd':
# container: "l" (or "d") values "e"
data = []
tok = next()
while tok != b'e':
data.append(decode_item(next, tok))
tok = next()
if token == b'd':
data = dict(list(zip(data[0::2], data[1::2])))
else:
raise ValueError
return data
def bdecode(text):
try:
src = tokenize(text)
data = decode_item(functools.partial(next, src), next(src)) # pylint:disable=E1101
for _ in src: # look for more tokens
raise SyntaxError("trailing junk")
except (AttributeError, ValueError, StopIteration, TypeError) as e:
raise SyntaxError("syntax error: %s" % e)
return data
# encoding implementation by d0b
def encode_string(data):
return encode_bytes(data.encode('utf-8'))
def encode_bytes(data):
return str(len(data)).encode() + b':' + data
def encode_integer(data):
return b'i' + str(data).encode() + b'e'
def encode_list(data):
encoded = b'l'
for item in data:
encoded += bencode(item)
encoded += b'e'
return encoded
def encode_dictionary(data):
encoded = b'd'
items = list(data.items())
items.sort()
for (key, value) in items:
encoded += bencode(key)
encoded += bencode(value)
encoded += b'e'
return encoded
def bencode(data):
if isinstance(data, bytes):
return encode_bytes(data)
if isinstance(data, str):
return encode_string(data)
if isinstance(data, int):
return encode_integer(data)
if isinstance(data, list):
return encode_list(data)
if isinstance(data, dict):
return encode_dictionary(data)
raise TypeError('Unknown type for bencode: ' + str(type(data)))
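# A minimal round-trip sketch (illustrative only; the payload below is a made-up
# example, not data this module ships with). bencode() accepts str/bytes/int/
# list/dict, and bdecode() reverses it, returning utf-8 decodable byte strings
# as `str`:
#
#     payload = {'announce': 'http://tracker.example/announce', 'size': 42}
#     assert bdecode(bencode(payload)) == payload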
class Torrent(object):
"""Represents a torrent"""
# string type used for keys, if this ever changes, stuff like "x in y"
# gets broken unless you coerce to this type
KEY_TYPE = str
@classmethod
def from_file(cls, filename):
"""Create torrent from file on disk."""
with open(filename, 'rb') as handle:
return cls(handle.read())
def __init__(self, content):
"""Accepts torrent file as string"""
# Make sure there is no trailing whitespace. see #1592
content = content.strip()
# decoded torrent structure
self.content = bdecode(content)
self.modified = False
def __repr__(self):
return "%s(%s, %s)" % (self.__class__.__name__,
", ".join("%s=%r" % (key, self.content["info"].get(key))
for key in ("name", "length", "private",)),
", ".join("%s=%r" % (key, self.content.get(key))
for key in ("announce", "comment",)))
def get_filelist(self):
"""Return array containing fileinfo dictionaries (name, length, path)"""
files = []
if 'length' in self.content['info']:
# single file torrent
if 'name.utf-8' in self.content['info']:
name = self.content['info']['name.utf-8']
else:
name = self.content['info']['name']
t = {'name': name,
'size': self.content['info']['length'],
'path': ''}
files.append(t)
else:
# multifile torrent
for item in self.content['info']['files']:
if 'path.utf-8' in item:
path = item['path.utf-8']
else:
path = item['path']
t = {'path': '/'.join(path[:-1]),
'name': path[-1],
'size': item['length']}
files.append(t)
# Decode strings
for item in files:
for field in ('name', 'path'):
# These should already be decoded if they were utf-8, if not we can try some other stuff
if not isinstance(item[field], str):
try:
item[field] = item[field].decode(self.content.get('encoding', 'cp1252'))
except UnicodeError:
# Broken beyond anything reasonable
fallback = item[field].decode('utf-8', 'replace').replace(u'\ufffd', '_')
log.warning('%s=%r field in torrent %r is wrongly encoded, falling back to `%s`' %
(field, item[field], self.content['info']['name'], fallback))
item[field] = fallback
return files
@property
def is_multi_file(self):
"""Return True if the torrent is a multi-file torrent"""
return 'files' in self.content['info']
@property
def name(self):
"""Return name of the torrent"""
return self.content['info'].get('name', '')
@property
def size(self):
"""Return total size of the torrent"""
size = 0
# single file torrent
if 'length' in self.content['info']:
size = int(self.content['info']['length'])
else:
# multifile torrent
for item in self.content['info']['files']:
size += int(item['length'])
return size
@property
def private(self):
return self.content['info'].get('private', False)
@property
def trackers(self):
"""
:returns: List of trackers, supports single-tracker and multi-tracker implementations
"""
trackers = []
# the spec says, if announce-list present use ONLY that
# funny iteration because of nesting, ie:
# [ [ tracker1, tracker2 ], [backup1] ]
for tl in self.content.get('announce-list', []):
for t in tl:
trackers.append(t)
if not self.content.get('announce') in trackers:
trackers.append(self.content.get('announce'))
return trackers
@property
def info_hash(self):
"""Return Torrent info hash"""
import hashlib
hash = hashlib.sha1()
info_data = encode_dictionary(self.content['info'])
hash.update(info_data)
return str(hash.hexdigest().upper())
@property
def comment(self):
return self.content['comment']
@comment.setter
def comment(self, comment):
self.content['comment'] = comment
self.modified = True
@property
def piece_size(self):
return int(self.content['info']['piece length'])
@property
def libtorrent_resume(self):
return self.content.get('libtorrent_resume', {})
def set_libtorrent_resume(self, chunks, files):
self.content['libtorrent_resume'] = {}
self.content['libtorrent_resume']['bitfield'] = chunks
self.content['libtorrent_resume']['files'] = files
self.modified = True
def remove_multitracker(self, tracker):
"""Removes passed multi-tracker from this torrent"""
for tl in self.content.get('announce-list', [])[:]:
try:
tl.remove(tracker)
self.modified = True
# if no trackers left in list, remove whole list
if not tl:
self.content['announce-list'].remove(tl)
except (AttributeError, ValueError):
pass
def add_multitracker(self, tracker):
"""Appends multi-tracker to this torrent"""
self.content.setdefault('announce-list', [])
self.content['announce-list'].append([tracker])
self.modified = True
def __str__(self):
return '<Torrent instance. Files: %s>' % self.get_filelist()
def encode(self):
return bencode(self.content)
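# Illustrative usage sketch (the path below is a placeholder, not a file that
# ships with this module):
#
#     t = Torrent.from_file('example.torrent')
#     print(t.name, t.size, t.is_multi_file)
#     print(t.trackers)    # announce-list entries plus the single announce URL
#     print(t.info_hash)   # uppercase hex SHA-1 of the bencoded info dict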
|
jawilson/Flexget
|
flexget/utils/bittorrent.py
|
Python
|
mit
| 11,767
|
"""
Copyright (c) 2016, Marcelo Leal
Description: Simple Azure Media Services Python library
License: MIT (see LICENSE.txt file for details)
"""
import os
import sys
import json
import amspy
import time
#import pytz
import logging
import datetime
###########################################################################################
##### DISCLAIMER ##### ##### DISCLAIMER ##### ##### DISCLAIMER ##### ##### DISCLAIMER #####
###########################################################################################
# ALL CODE IN THIS DIRECTOY (INCLUDING THIS FILE) ARE EXAMPLE CODES THAT WILL ACT ON YOUR
# AMS ACCOUNT. IT ASSUMES THAT THE AMS ACCOUNT IS CLEAN (e.g.: BRAND NEW), WITH NO DATA OR
# PRODUCTION CODE ON IT. DO NOT, AGAIN: DO NOT RUN ANY EXAMPLE CODE AGAINST PRODUCTION AMS
# ACCOUNT! IF YOU RUN ANY EXAMPLE CODE AGAINST YOUR PRODUCTION AMS ACCOUNT, YOU CAN LOSE
# DATA, AND/OR PUT YOUR AMS SERVICES IN A DEGRADED OR UNAVAILABLE STATE. BE WARNED!
###########################################################################################
##### DISCLAIMER ##### ##### DISCLAIMER ##### ##### DISCLAIMER ##### ##### DISCLAIMER #####
###########################################################################################
# Load Azure app defaults
try:
with open('config.json') as configFile:
configData = json.load(configFile)
except FileNotFoundError:
print("ERROR: Expecting config.json in current folder")
sys.exit()
account_name = configData['accountName']
account_key = configData['accountKey']
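# The script expects a config.json next to it shaped roughly like this
# (placeholder values, not real credentials):
#
#     {
#         "accountName": "<your-media-services-account-name>",
#         "accountKey": "<your-media-services-account-key>"
#     }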
# Get the access token...
response = amspy.get_access_token(account_name, account_key)
resjson = response.json()
access_token = resjson["access_token"]
#Initialization...
print ("\n-----------------------= AMS Py =----------------------");
print ("Simple Python Library for Azure Media Services REST API");
print ("-------------------------------------------------------\n");
### list assets
print ("\n001 >>> Listing Media Assets")
response = amspy.list_media_asset(access_token)
if (response.status_code == 200):
resjson = response.json()
print("POST Status.............................: " + str(response.status_code))
for ma in resjson['d']['results']:
print("Media Asset Name........................: " + ma['Id'])
print("Media Asset Id..........................: " + ma['Name'])
else:
print("POST Status.............................: " + str(response.status_code) + " - Media Asset Listing ERROR." + str(response.content))
|
johndeu/amspy
|
amspy/examples/list_media_asset.py
|
Python
|
mit
| 2,469
|
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, unicode_literals, print_function
import pytest
@pytest.mark.parametrize('name, src, kwargs', [
(
'Foo', '''
struct Foo {
1: required string a
}
''',
{'a': 'zzz'}
),
(
'Bar', '''
union Bar{
1: binary b
2: string s
}
''',
{'s': 'bar'},
),
(
'Baz', '''
enum Enum { A = 1, B = 2 }
struct Baz {
1: optional Enum e
}
''',
{'e': 1},
),
])
def test_hashable(loads, name, src, kwargs):
module = loads(src)
klass = getattr(module, name)
obj1 = klass(**kwargs)
obj2 = klass(**kwargs)
assert hash(obj1) == hash(obj2)
assert hash(obj1) == hash(obj1)
@pytest.mark.parametrize('name, src, kwargs1, kwargs2', [
(
'Foo', '''
struct Foo {
1: required string a
}
''',
{'a': 'zzz'},
{'a': 'aaa'},
),
(
'Bar', '''
union Bar{
1: binary b
2: string s
}
''',
{'s': 'bar'},
{'b': '0b111'},
),
(
'Baz', '''
enum Enum { A = 1, B = 2 }
struct Baz {
1: optional Enum e
}
''',
{'e': 1},
{'e': 2},
),
])
def test_hash_inequality(loads, name, src, kwargs1, kwargs2):
module = loads(src)
klass = getattr(module, name)
obj1 = klass(**kwargs1)
obj2 = klass(**kwargs2)
assert hash(obj1) != hash(obj2)
@pytest.mark.parametrize('name, src, kwargs', [
(
'Foo', '''
struct Foo {
1: required string a
2: required list<string> b
3: required map<string, string> c = {}
}
''',
{'a': 'zzz', 'b': ['bar']},
),
(
'Bar', '''
union Bar {
1: binary b
2: string s
3: list<string> ls
}
''',
{'s': 'bar'},
),
])
def test_nonhashable(loads, name, src, kwargs):
module = loads(src)
klass = getattr(module, name)
obj = klass(**kwargs)
with pytest.raises(TypeError):
hash(obj)
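# --- Hedged illustration (not part of the original suite) ---
# The expectation in test_nonhashable mirrors plain Python semantics: hashing must
# fail whenever a value contains a mutable container, because the hash could change
# after the object is used as a set member or dict key.
def test_plain_python_hashability_analogue():
    with pytest.raises(TypeError):
        hash(('a', ['bar']))  # tuple holding a list -> unhashable
    assert hash(('a', ('bar',))) == hash(('a', ('bar',)))  # all-immutable -> hashable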
|
uber/thriftrw-python
|
tests/spec/test_hashable.py
|
Python
|
mit
| 3,457
|
__version__ = "2.0.0.dev2"
|
kerstin/moviepy
|
moviepy/version.py
|
Python
|
mit
| 27
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 21 17:27:53 2016
Updated 2016 Aug 4 to include WIRC points
Updated 2016 Sep 7 to reformat
@author: stephaniekwan
IC-10 X-2 Spitzer IRAC light curves. Updated on August 4th to include July 18th
Palomar JHK band measurements.
Coordinates: 0:20:20.940 +59:17:59.00
Circle radius: 3 arcsec. Annulus radii: 5 arcsec inner, 10 arcsec outer
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from jdcal import gcal2jd
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, mark_inset
# Use LaTeX font
plt.rc('font', weight='normal', size=15)
plt.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
#plt.rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
plt.rc('text', usetex = True)
index = np.array(['4424960 2004-07-23', '33204224 2010-01-29',
'33203968 2010-02-19', '33203456 2010-03-10',
                  '33202944 2010-09-09', '33202432 2010-10-04', '33201920 2010-10-14',
'42321152 2011-09-24', '42321408 2012-04-04', '52576256 2015-03-11',
'52577280 2016-03-23', '2016-07-18'])
gdates = np.array([(2004,7,23), (2010,1,29), (2010,2,19), (2010,3,10),
(2010,9,9), (2010,10,4), (2010,10,14), (2011,9,24),
(2012,4,4), (2015,3,11), (2016,3,23)])
# Convert to MJD dates
jdates = np.zeros(len(gdates))
for i in range(len(gdates)):
jdates[i] = gcal2jd( gdates[i][0], gdates[i][1], gdates[i][2] )[1]
WIRCjdate = gcal2jd(2016,7,18)[1]
#X-ray observation dates: 2003 Mar 12, 2006 Nov 5, 2010 May 21
xdates = np.array([52710.7, 54044.2, 55337.8])
# Dates of non-observations
xnondates = np.array([55140.7, 55190.2, 55238.5, 55290.6, 55397.5, 55444.6])
# Mean counts in circle (units: counts/sr per pixel), for 4.5 and 3.6 microns
# Mean counts in annulus: in units of MJy/sr per pixel
circMean36 = np.array([2.235913,
1.9806753,
1.8627226,
1.9333704,
1.9426107,
1.9242988,
1.8619019,
1.8695578,
1.9416175,
1.8303715,
1.8961317])
annMean36 = np.array([1.502455, 1.4441012, 1.4349068, 1.4300396,
1.4522621, 1.4369512, 1.4367747,
1.4509853, 1.4649935, 1.4423924, 1.4682426])
annSD36 = np.array([0.323036, 0.33284634, 0.30873036, 0.27726872,
0.30360679, 0.29375085, 0.31357359,
0.32412101, 0.30720197, 0.28204827, 0.28241972])
circMean45 = np.array([1.6294469, 1.3514017, 1.2583814, 1.2950296,
1.3489466, 1.2898556, 1.2250279,
1.2813393, 1.343888, 1.2231404, 1.2529148])
annMean45 = np.array([1.0128354, 0.93392948, 0.94994089, 0.96776315,
0.98786045, 0.93146131,0.91232822, 0.96418034,
1.0059549, 0.93307992, 0.94233364])
annSD45 = np.array([0.18814292, 0.19965652, 0.19302296, 0.18062225,
0.18524006, 0.18025225, 0.18849567, 0.19213017,
0.18247341, 0.19707077, 0.20098456])
circMean58 = np.array([2.4857705]) #only for '4424960 2004-07-23'
circMean80 = np.array([5.6362584]) # " "
annMean58 = np.array([2.2773678])
annMean80 = np.array([5.8670916])
# Standard deviation in annulus counts (counts/sr per pixel)
annSD58 = np.array([0.34377934])
annSD80 = np.array([0.81536177])
# Number of pixels in circle
circNpx36 = np.array([54,52,54,55,52,54,55,56,53,56,55])
circNpx45 = np.array([54,52,54,55,52,54,55,56,53,56,55])
circNpx58, circNpx80 = np.array([54]), np.array([54])
# Calculate number of non-background counts in the circle (counts/sr)
circCounts36 = (circMean36 - annMean36) * circNpx36
circCounts45 = (circMean45 - annMean45) * circNpx45
circCounts58 = (circMean58 - annMean58) * circNpx58
circCounts80 = (circMean80 - annMean80) * circNpx80
# Conversion between steradians and square arcseconds: 1 steradian is 4.25 x 10^10 arcsec^2
srOverArcSec = 1/(4.25 * 10**10)
# 1 pixel covers 0.3600 arcsec^2. Convert the background-subtracted surface brightness
# (MJy/sr summed over the circle's pixels) to a flux density in mJy (10**9 converts MJy to mJy)
circFlux36 = circCounts36 * 0.3600 * srOverArcSec * 10**9
circFlux45 = circCounts45 * 0.3600 * srOverArcSec * 10**9
circFlux58 = circCounts58 * 0.3600 * srOverArcSec * 10**9
circFlux80 = circCounts80 * 0.3600 * srOverArcSec * 10**9
# Estimation of error: standard dev. in annulus counts times area of circle
fluxError36 = annSD36 * np.sqrt(circNpx36) * srOverArcSec * 10**9 * 0.3600
fluxError45 = annSD45 * np.sqrt(circNpx45) * srOverArcSec * 10**9 * 0.3600
fluxError58 = annSD58 * np.sqrt(circNpx58) * srOverArcSec * 10**9 * 0.3600
fluxError80 = annSD80 * np.sqrt(circNpx80) * srOverArcSec * 10**9 * 0.3600
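# --- Hedged worked example (illustrative only; the inputs below are hypothetical) ---
# For a background-subtracted surface brightness of 0.4 MJy/sr summed over 54 pixels,
# the conversion above gives roughly
#   0.4 * 54 * 0.3600 * (1 / 4.25e10) * 1e9 ~ 0.18 mJy
def _example_flux_mjy(counts_per_sr=0.4, npix=54):
    """Reproduce the circFlux conversion for a single hypothetical measurement."""
    return counts_per_sr * npix * 0.3600 * (1 / (4.25 * 10**10)) * 10**9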
# Palomar P200 JHK fluxes and errors (in mJy)
jFlux2, jErr2 = 0.3822, 0.05623
hFlux2, hErr2 = 0.34596, 0.02698
kFlux2, kErr2 = 0.396159, 0.0773288
circFlux58, circFlux80 = np.array([0.21036669]), np.array([0.19616618])
fluxError58, fluxError80 = np.array([0.03456009]), np.array([0.03161511])
# 2MASS fluxes upper limits (in mJy)
jFlux, hFlux, kFlux = 0.4192, 0.7084, 0.4207
jFluxErr = 0.0593
upperLimDate = gcal2jd(2000,9,16)[1]
dates2 = np.array([upperLimDate, WIRCjdate])
## Plot light curves
#fig, ax = plt.subplots()
#plt.hold(True)
#plt.scatter(dates2, jFluxes, facecolors = 'none', marker = '<', s = 30,
# edgecolors = 'navy')
#plt.scatter(dates2, hFluxes, facecolors = 'none', marker = 's', s = 30,
# edgecolors = 'royalblue')
#plt.scatter(dates2, kFluxes, facecolors = 'none', marker = '>', s = 30,
# edgecolors = 'lightskyblue')
#
#plt.scatter(jdates, circFlux36, color = 'black', marker = 'o', s = 15,
# zorder = 3)
#plt.scatter(jdates, circFlux45, color = 'grey', marker = 'v', s = 15,
# zorder = 3)
#plt.scatter(jdates[0], circFlux58, facecolors = 'none', edgecolors =
# 'darkgrey', marker = 'D', s = 22, zorder = 3)
#plt.scatter(jdates[0], circFlux80, facecolors ='none', edgecolors = 'black',
# marker = 'o', s = 25, zorder = 3)
#plt.xlim([51500,59500])
#plt.ylim([0.00,0.80])
#plt.legend(('J', 'H', 'K$_s$','[3.6]', '[4.5]', '[5.8]', '[8.0]'),
# scatterpoints = 1,
# loc = 'upper right',
# title = 'Filter/Channel',
# fontsize = 13,
# frameon = False)
## Plot time of burst and label it
#plt.axvline(x = 55337.8, color = 'k', ls = 'dashed')
##plt.text(55500, 0.45, "2010 May outburst", rotation=90, fontsize=13)
## Plot error bars
#plt.errorbar(WIRCjdate, kFlux, kErr, color = 'lightskyblue')
#plt.errorbar(WIRCjdate, hFlux, hErr, color = 'royalblue')
#plt.errorbar(WIRCjdate, jFlux, jErr, color = 'navy')
#plt.errorbar(dates2[0], j2Flux, 0.0593, color = 'navy')
#plt.errorbar(jdates, circFlux36, yerr = fluxError36, linestyle = 'None',
# color = 'black', zorder = 2)
#plt.errorbar(jdates, circFlux45, yerr = fluxError45, linestyle = 'None',
# color = 'grey', zorder = 2)
#plt.errorbar(jdates[0], circFlux58, yerr = fluxError58, linestyle = 'None',
# color = 'darkgrey', zorder = 2)
#plt.errorbar(jdates[0], circFlux80, yerr = fluxError80, linestyle = 'None',
# color = 'black', zorder = 2)
#plt.xlabel('Time (MJD)', fontsize = 14)
#plt.ylabel('Flux density (mJy)', fontsize = 14)
#ax.arrow(dates2[0], kFluxes[0], 0.0, -0.08, head_width = 150,
# head_length = 0.02, fc ='lightskyblue', ec ='lightskyblue')
#ax.arrow(dates2[0], hFluxes[0], 0.0, -0.08, head_width = 150,
# head_length = 0.02, fc = 'royalblue', ec ='royalblue')
#ax.arrow(WIRCjdate, 0.7, 0.0, -0.15, head_width = 300, head_length = 0.03,
# fc = 'k', ec = 'k', linestyle = '-')
#for i in range(len(xdates)):
# ax.arrow(xdates[i], 0.03, 0.0, 0.04, head_width = 100, head_length = 0.02,
# fc = 'darkslategrey', ec = 'darkslategrey', linestyle = '-')
#for j in range(len(xnondates)):
# ax.arrow(xnondates[j], 0.03, 0.0, 0.02, head_width = 30, head_length = 0.015,
# fc = 'lightgrey', ec = 'lightgrey', linestyle = '-')
#x1, x2, y1, y2 = 55100, 56100, 0.01, 0.27
## Plot quiescent levels
#quies36 = 0.19897806
#quies45 = 0.146110673
#plt.axhline(y = quies36, color = 'black', ls = 'dashed')
#plt.axhline(y = quies45, color = 'grey', ls = 'dashed')
#plt.text(51650, 0.11, 'Quiescent levels', fontsize = 10)
#
## Shaded area to denote uncertainty of median (average of mag1sig)
#ax.add_patch(
# patches.Rectangle(
# (51500, quies36 - np.average(fluxError36)), # (x, y)
# 59500 - 51500, # width
# 2 * np.average(fluxError36), # height
# 0.0, # angle
# facecolor = 'gainsboro',
# edgecolor = 'none',
# zorder = 1
# ))
#ax.add_patch(
# patches.Rectangle(
# (51500, quies45 - np.average(fluxError45)), # (x, y)
# 59500 - 51500, # width
# 2 * np.average(fluxError45), # height
# 0.0, # angle
# facecolor = 'gainsboro',
# edgecolor = 'none',
# zorder = 1
# ))
#
######
### Zoomed inset
#####
#axins = zoomed_inset_axes(ax,1.8,loc=9)
#axins.set_xlim(x1,x2)
#axins.set_ylim(y1,y2)
#plt.scatter(jdates[1:9], circFlux36[1:9], color = 'black', marker = 'o',
# s = 15, zorder = 2)
#plt.errorbar(jdates[1:9], circFlux36[1:9], yerr = fluxError36[1:9],
# linestyle = 'None', color = 'black', zorder = 2)
#plt.scatter(jdates[1:9], circFlux45[1:9], color = 'dimgrey', marker = 'v', s = 15,
# zorder = 3)
#plt.errorbar(jdates[1:9], circFlux45[1:9], yerr = fluxError45[1:9],
# linestyle = 'None', color = 'dimgrey', zorder = 3)
#plt.axvline(x = 55337.8, color = 'k', ls = 'dashed')
## Plot quiescent levels
#plt.axhline(y = quies36, color = 'k', ls = 'dashed')
#plt.axhline(y = quies45, color = 'grey', ls = 'dashed')
#
#plt.xticks(np.arange(x1, x2, 400))
#mark_inset(ax, axins, loc1 = 3, loc2 = 4, fc = "none", ec = "0.6")
#for i in range(len(xdates)):
# axins.arrow(xdates[i], 0.03, 0.0, 0.04, head_width = 100, head_length = 0.02,
# fc = 'darkslategrey', ec = 'darkslategrey', linestyle = '-')
#for j in range(len(xnondates)):
# axins.arrow(xnondates[j], 0.03, 0.0, 0.02, head_width = 30, head_length = 0.015,
# fc = 'lightgrey', ec = 'lightgrey', linestyle = '-')
#
## Shaded area to denote uncertainty of median (average of mag1sig)
#axins.add_patch(
# patches.Rectangle(
# (x1, quies36 - np.average(fluxError36)), # (x, y)
# x2 - x1, # width
# 2 * np.average(fluxError36), # height
# 0.0, # angle
# facecolor = 'gainsboro',
# edgecolor = 'none',
# zorder = 1
# ))
#axins.add_patch(
# patches.Rectangle(
# (x1, quies45 - np.average(fluxError45)), # (x, y)
# x2 - x1, # width
# 2 * np.average(fluxError45), # height
# 0.0, # angle
# facecolor = 'gainsboro',
# edgecolor = 'none',
# zorder = 1
# ))
#fig.savefig("170501_IC10X2_light_curve.pdf")
##################
##### Convert fluxes to magnitudes
##################
m36, m36sig, m45, m45sig = np.array([]), np.array([]), np.array([]), np.array([])
assert(len(circFlux36) == len(circFlux45))
for i in range(0, len(circFlux36)):
m36 = np.append(m36, -2.5 * np.log10(circFlux36[i] / (280.9 * 10**3)) )
m45 = np.append(m45, -2.5 * np.log10(circFlux45[i] / (179.7 * 10**3)) )
m36sig = np.append(m36sig, m36[i] + float(2.5 *
np.log10((circFlux36[i] + fluxError36[i]) / (280.9 * 10**3)) ))
m45sig = np.append(m45sig, m45[i] + float(2.5 *
np.log10((circFlux45[i] + fluxError45[i]) / (179.7 * 10**3)) ))
m58 = -2.5 * np.log10(circFlux58 / (115.0 * 10**3))
m58sig = m58 + float(2.5 * np.log10((circFlux58 + fluxError58) /
(115.0 * 10**3)) )
m80 = -2.5 * np.log10(circFlux80 / (64.9 * 10**3))
m80sig = m80 + float( 2.5 * np.log10((circFlux80 + fluxError80) /
(64.9 * 10**3)) )
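# --- Hedged worked check (illustrative only) ---
# The magnitudes above follow m = -2.5 * log10(F / F_zero), with IRAC zero-magnitude
# fluxes of 280.9 and 179.7 Jy for channels 1 and 2. For a hypothetical 0.2 mJy
# source in channel 1: m = -2.5 * log10(0.2 / 280900) ~ 15.4 mag.
def _example_irac1_mag(flux_mjy=0.2):
    return -2.5 * np.log10(flux_mjy / (280.9 * 10**3))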
# Zero-magnitude fluxes in Jy for 2MASS (as posted on website)
jzeroMagFlux, jzeroMagFluxsig = 1594, 27.8
hzeroMagFlux, hzeroMagFluxsig = 1024, 20.0
kzeroMagFlux, kzeroMagFluxsig = 666.7, 12.6
# Calculate absolute magnitude of 2MASS values
jmag1 = -2.5 * np.log10(jFlux * 10**-3/jzeroMagFlux)
jmag1sig = np.sqrt((jzeroMagFluxsig * (2.5/np.log(10)) / jzeroMagFlux )**2
+ (jFluxErr * (2.5/np.log(10)) / jFlux )**2)
# Upper limits on H and K bands (no sig)
hmag1 = -2.5 * np.log10(hFlux * 10**-3 / hzeroMagFlux)
kmag1 = -2.5 * np.log10(kFlux * 10**-3 / kzeroMagFlux)
# P200 magnitudes: see 160802 WIRC zero points.py file
jmag2, jmag2sig = 16.47779136, 0.0742855400832
hmag2, hmag2sig = 15.8452643514, 0.0738319761354
kmag2, kmag2sig = 15.5651506508, 0.205591601092
fig, ax = plt.subplots()
jMags = np.array([jmag1, jmag2]) # Concatenate data
hMags = np.array([hmag1, hmag2])
kMags = np.array([kmag1, kmag2])
# Plot MAGNITUDE light curves
#plt.scatter(dates2, jMags, facecolors = 'navy', marker = '<', s = 25,
# edgecolors = 'navy', zorder = 3)
#plt.scatter(dates2, hMags, facecolors = 'none', marker = 's', s = 25,
# edgecolors = 'royalblue', zorder = 3)
#plt.scatter(dates2, kMags, facecolors = 'none', marker = '>', s = 25,
# edgecolors = 'lightskyblue', zorder = 3)
## Add error arrows to the H and K 2MASS images
#ax.arrow(dates2[0], kMags[0], 0.0, 0.3, head_width = 100,
# head_length = 0.15, fc ='lightskyblue', ec ='lightskyblue', zorder = 2)
#ax.arrow(dates2[0], hMags[0], 0.0, 0.3, head_width = 100,
# head_length = 0.15, fc = 'royalblue', ec ='royalblue', zorder = 1)
#plt.errorbar(dates2[0], jMags[0], yerr = jmag1sig, linestyle = 'None',
# color = 'navy', zorder = 3)
## Plot errorbars for WIRC datapoints
#plt.errorbar(dates2[1], jMags[1], yerr = jmag2sig, linestyle = 'None',
# color = 'navy', zorder = 3)
#plt.errorbar(dates2[1], hMags[1], yerr = hmag2sig, linestyle = 'None',
# color = 'royalblue', zorder = 3)
#plt.errorbar(dates2[1], kMags[1], yerr = kmag2sig, linestyle = 'None',
# color = 'lightskyblue', zorder = 3)
## Plot errorbars for Spitzer datapoints
#plt.errorbar(jdates, m36, yerr = m36sig, linestyle = 'None',
# color = 'k', zorder = 2)
#plt.errorbar(jdates, m45, yerr = m45sig, linestyle = 'None',
# color = 'grey', zorder = 2)
#plt.errorbar(jdates[0], m58, yerr = m58sig, linestyle = 'None',
# color = 'grey', zorder = 1)
#plt.errorbar(jdates[0], m80, yerr = m80sig, linestyle = 'None',
# color = 'grey', zorder = 1)
## Plot the Spitzer datapoints
#plt.scatter(jdates, m36, color = 'black', marker = 'o', s = 15,
# zorder = 3)
#plt.scatter(jdates, m45, color = 'grey', marker = 'v', s = 15,
# zorder = 3)
#plt.scatter(jdates[0], m58, facecolors = 'none', edgecolors =
# 'grey', marker = 'D', s = 22, zorder = 3)
#plt.scatter(jdates[0], m80, facecolors ='none', edgecolors = 'grey',
# marker = 'o', s = 25, zorder = 3)
## Set magnitude plot limits
#plt.xlim([51500, 58100])
#plt.ylim([13.5, 17.0])
### Plot legend
#lgd = plt.legend(('J', 'H', 'K$_s$','[3.6]', '[4.5]', '[5.8]', '[8.0]'),
# scatterpoints = 1,
# loc = 'upper right',
# title = 'Filter/Channel',
# fontsize = 13,
# frameon = True,
# bbox_to_anchor = (1.26, 1.03))
## Plot non-detections
#for i in range(len(xdates)):
# ax.arrow(xdates[i], 17.1, 0.0, -0.2, head_width = 100, head_length = 0.1,
# fc = 'darkslategrey', ec = 'darkslategrey', linestyle = '-')
#for j in range(len(xnondates)):
# ax.arrow(xnondates[j], 17.1, 0.0, -0.2, head_width = 30, head_length = 0.1,
# fc = 'lightgrey', ec = 'lightgrey', linestyle = '-')
### Plot quiescent magnitudes
#quiesmag36 = np.mean(m36)
#quiesmag45 = np.mean(m45)
#plt.axhline(y = quiesmag36, color = 'black', ls = 'dashed')
#plt.axhline(y = quiesmag45, color = 'grey', ls = 'dashed')
#plt.text(52650, 15.5, 'Quiescent levels', fontsize = 10)
### Shade rectangles around it
#ax.add_patch(
# patches.Rectangle(
# (51500, quiesmag36 - np.average(m36sig)), # (x, y)
# 60000 - 51500, # width
# 2 * np.average(m36sig), # height
# 0.0, # angle
# facecolor = 'gainsboro',
# edgecolor = 'none',
# zorder = 1,
# alpha = 0.95
# ))
#ax.add_patch(
# patches.Rectangle(
# (51500, quiesmag45 - np.average(m45sig)), # (x, y)
# 60000 - 51500, # width
# 2 * np.average(m45sig), # height
# 0.0, # angle
# facecolor = 'lightgrey',
# edgecolor = 'none',
# zorder = 2,
# alpha = 0.4
# ))
#
## Plot time of burst and label it
#plt.axvline(x = 55337.8, color = 'k', ls = 'dashed')
#plt.text(55500, 15.6, "2010 May outburst", rotation=90, fontsize=11)
#
## Reverse y axis
#plt.gca().invert_yaxis()
#
#plt.xlabel('Date (MJD)', fontsize = 14)
#plt.ylabel('Magnitude', fontsize = 14)
#fig.savefig("170522_IRmags_lc.pdf", bbox_extra_artists=(lgd,), bbox_inches='tight')
#
#np.savetxt('170502_Spitzer_mags_TENTATIVE.txt',
# np.column_stack((jdates, m36, m36sig, m45, m45sig)),
# header = 'Spitzer IC 10 X-2. Col 1: Date (MJD),' +
# ' Cols 2-3: [3.6] mags and uncertainties, Cols 3-4: [4.5] mags and uncertainties. For the first epoch, '+
# '[5.8] mag: %f +/- %f and [8.0] mag: %f +/- %f' % (m58, m58sig, m80, m80sig),
# fmt = '%10.5f')
#
#print '\n 2MASS magnitudes (date:', dates2[0], ') \n J band =', jmag1, '+/-', jmag1sig,\
# '. Upper limits on H and K bands =', hmag1, \
# 'and', kmag1, '.', \
# '\n Palomar magnitudes (Date:', dates2[1], ') \n J =', jmag2, '+/-', jmag2sig,\
# '\n H =', hmag2, '+/-', hmag2sig, \
# '\n K =', kmag2, '+/-', kmag2sig
#plt.tight_layout()
#plt.show()
|
skkwan/IC10X2
|
infrared_and_lightcurve/fluxes_with_xdates_170314.py
|
Python
|
mit
| 18,310
|
# Generated by Django 2.2.13 on 2020-11-13 11:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('openstack', '0013_securitygrouprule_direction'),
]
operations = [
migrations.AddField(
model_name='securitygrouprule',
name='ethertype',
field=models.CharField(
choices=[('IPv4', 'IPv4'), ('IPv6', 'IPv6')],
default='IPv4',
max_length=8,
),
),
]
|
opennode/nodeconductor-assembly-waldur
|
src/waldur_openstack/openstack/migrations/0014_securitygrouprule_ethertype.py
|
Python
|
mit
| 534
|
# -*- coding: utf-8 -*-
"""
babel.messages
~~~~~~~~~~~~~~
Support for ``gettext`` message catalogs.
:copyright: (c) 2013-2018 by the Babel Team.
:license: BSD, see LICENSE for more details.
"""
from babel.messages.catalog import *
|
lmregus/Portfolio
|
python/design_patterns/env/lib/python3.7/site-packages/babel/messages/__init__.py
|
Python
|
mit
| 254
|
#!/usr/bin/env python
# - coding: utf-8 -
# Copyright (C) 2014 Toms Baugis <toms.baugis@gmail.com>
"""Exploring symmetry. Feel free to add more handles!"""
import math
import random
from collections import defaultdict
from gi.repository import Gtk as gtk
from gi.repository import GObject as gobject
from gi.repository import Pango as pango
from .lib import graphics
from .lib.pytweener import Easing
from . import sprites
class Point(gobject.GObject):
__gsignals__ = {
"on-point-changed": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
}
def __init__(self, x=0, y=0):
gobject.GObject.__init__(self)
self.x = x
self.y = y
def __setattr__(self, name, val):
if hasattr(self, name) and getattr(self, name) == val:
return
gobject.GObject.__setattr__(self, name, val)
self.emit("on-point-changed")
def __repr__(self):
return "<%s x=%d, y=%d>" % (self.__class__.__name__, self.x, self.y)
def __iter__(self):
yield self.x
yield self.y
class Line(object):
def __init__(self, a, b, anchor=None):
self.a, self.b = a, b
# anchor can be either dot A or dot B
self.anchor = anchor or self.a
@property
def length(self):
return math.sqrt((self.a.x - self.b.x) ** 2 + (self.a.y - self.b.y) ** 2)
@property
def rotation(self):
a = self.anchor
b = self.b if a != self.b else self.a
return math.atan2(b.x - a.x, b.y - a.y)
def __setattr__(self, name, val):
if name == "rotation":
self.set_rotation(val)
else:
object.__setattr__(self, name, val)
def set_rotation(self, radians):
a = self.anchor
b = self.b if a != self.b else self.a
length = self.length
b.x = a.x + math.cos(radians - math.radians(90)) * length
b.y = a.y + math.sin(radians - math.radians(90)) * length
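# --- Hedged sketch (illustrative only; skips the GObject machinery above) ---
# set_rotation keeps the anchor fixed and places the other endpoint at distance
# `length` in the direction given by the angle, with a -90 degree offset. The same
# math with plain tuples:
def _example_rotate_segment(anchor, length, radians):
    bx = anchor[0] + math.cos(radians - math.radians(90)) * length
    by = anchor[1] + math.sin(radians - math.radians(90)) * length
    return (bx, by)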
class SymmetricalRepeater(graphics.Sprite):
def __init__(self, sides, poly=None, **kwargs):
graphics.Sprite.__init__(self, **kwargs)
poly = poly or [(0, 0), (0, 0)]
self.master_poly = [Point(*coords) for coords in poly]
for point in self.master_poly:
point.connect("on-point-changed", self.on_master_changed)
self.sides = []
for i in range(sides):
side = [Point(*coords) for coords in poly]
self.sides.append(side)
for point in side:
point.connect("on-point-changed", self.on_side_changed)
self.connect("on-render", self.on_render)
def on_master_changed(self, point):
"""propagate to the kids"""
idx = self.master_poly.index(point)
for side in self.sides:
side[idx].x, side[idx].y = point.x, point.y
def on_side_changed(self, point):
self._sprite_dirty = True
def on_render(self, sprite):
angle = 360.0 / len(self.sides)
# debug
"""
self.graphics.save_context()
for i in range(len(self.sides)):
self.graphics.move_to(0, 0)
self.graphics.line_to(1000, 0)
self.graphics.rotate(math.radians(angle))
self.graphics.stroke("#3d3d3d")
self.graphics.restore_context()
"""
self.graphics.set_line_style(3)
for side in self.sides:
self.graphics.move_to(*side[0])
for point in side[1:]:
self.graphics.line_to(*point)
self.graphics.rotate(math.radians(angle))
self.graphics.stroke("#fafafa")
class Scene(graphics.Scene):
def __init__(self):
graphics.Scene.__init__(self, background_color="#333")
self.repeater2 = None
self.container = graphics.Sprite()
self.add_child(self.container)
self.connect("on-first-frame", self.on_first_frame)
self.connect("on-resize", self.on_resize)
def appear1(self, parent, callback):
def clone_grow(repeater, on_complete=None):
repeater2 = SymmetricalRepeater(len(repeater.sides),
repeater.master_poly)
parent.add_child(repeater2)
a, b = repeater2.master_poly
self.animate(a, x=diagonal, delay=0.3, duration=1.3)
self.animate(b, y=-diagonal, delay=0.3, duration=1.3)
if on_complete:
on_complete()
repeater = SymmetricalRepeater(4)
parent.add_child(repeater)
a, b = repeater.master_poly
# push the dots away at the beginning
a.x, b.x = 1000, 1000
size = 100
diagonal = math.sqrt(100**2 + 100**2)
graphics.chain(
# fly in
self.animate, {"sprite": a, "x": size, "duration": 1, "easing": Easing.Expo.ease_in_out},
self.animate, {"sprite": Line(a, b), "rotation": math.radians(-45), "duration": 0.8},
#clone_grow, {"repeater": repeater},
#repeater, {"rotation": math.radians(-45), "duration": 1.3, "delay": 0.3},
callback, {},
)
# parallel chains
graphics.chain(
self.animate, {"sprite": b, "x": size + diagonal, "duration": 1, "easing": Easing.Expo.ease_in_out},
)
def appear2(self, parent, callback):
size = 100
diagonal = math.sqrt((2 * size) ** 2)
repeater = SymmetricalRepeater(4)
parent.add_child(repeater)
poly = [(1000, 0), (size, 0), (1000, 0)]
repeater2 = SymmetricalRepeater(4, poly)
def appear21(on_complete=None):
parent.add_child(repeater2)
a, b, c = repeater2.master_poly
self.animate(Line(b, a), rotation=math.radians(-45), duration=0.7, easing=Easing.Expo.ease_in_out)
self.animate(Line(b, c), rotation=math.radians(225), duration=0.7, easing=Easing.Expo.ease_in_out)
repeater2.animate(rotation=math.radians(-90), duration=0.7, easing=Easing.Expo.ease_in_out,
on_complete=on_complete)
def disappear21(on_complete):
a, b = repeater.master_poly
c, d, e = repeater2.master_poly
graphics.chain(
self.animate, {"sprite": b, "x": 0, "duration": 0.6, "easing": Easing.Expo.ease_out},
on_complete, {}
)
self.animate(d, x=d.x + 3000, duration=2.3, easing=Easing.Expo.ease_out)
self.animate(c, x=c.x + 3000, duration=2.3, easing=Easing.Expo.ease_out)
self.animate(e, x=e.x + 3000, duration=2.3, easing=Easing.Expo.ease_out)
a, b = repeater.master_poly
# push the dots away at the beginning
a.x, b.x = 1000, 1000
def add_outline(on_complete=None):
self._add_outline(parent, on_complete)
graphics.chain(
# fly in
self.animate, {"sprite": a, "x": 0, "duration": 1.3},
appear21, {},
add_outline, {},
disappear21, {},
callback, {},
)
def _add_outline(self, parent, on_complete=None):
cube2 = graphics.Polygon([(100, 0), (0, -100), (-100, 0), (0, 100), (100, 0)],
stroke="#fafafa", line_width=3)
parent.add_child(cube2)
if on_complete:
on_complete()
def appear3(self, parent, callback):
repeater = SymmetricalRepeater(4)
parent.add_child(repeater)
size = 100
diagonal = math.sqrt(100**2 + 100**2)
def appear31(on_complete=None):
poly = [(size, 0), (size, 0), (size, 0)]
repeater2 = SymmetricalRepeater(4, poly)
parent.add_child(repeater2)
a, b, c = repeater2.master_poly
self.animate(a, x=0, y=size, duration=1)
self.animate(c, x=0, y=-size, duration=1, on_complete=on_complete)
a, b = repeater.master_poly
# push the dots away at the beginning
a.x, b.x = 1000, 1000
graphics.chain(
# fly in
self.animate, {"sprite": a, "x": 0, "duration": 1.3},
appear31, {},
callback, {},
)
graphics.chain(
# fly in
self.animate, {"sprite": b, "x": size, "duration": 1.3},
)
def on_first_frame(self, scene, context):
func = random.choice([self.appear1, self.appear2, self.appear3])
func(self.container, lambda: self.on_intro_done())
def on_resize(self, scene, event):
self.container.x = self.width / 2
self.container.y = self.height / 2
def on_intro_done(self):
container = self[0]
cube = graphics.Polygon([(100, 0), (0, -100), (-100, 0), (0, 100)],
fill="#fafafa", opacity=0)
title = sprites.Label("APX", size=200, y=150, opacity=0)
title.x = -title.width / 2
description = sprites.Label("A cousine of QIX\nPress <Space>!",
y=350, opacity=0, alignment=pango.Alignment.CENTER)
description.x = -description.width / 2
container.add_child(cube, title, description)
def announce_ready():
pass
graphics.chain(
cube, {"opacity": 1,
"duration": 0.7, "delay": 0.3, "easing": Easing.Sine.ease_in_out},
announce_ready, {}
)
container.animate(y=150, duration=0.7, delay=0.3, easing= Easing.Sine.ease_out)
title.animate(opacity=1, y=110, duration=0.7, delay=0.5, easing= Easing.Expo.ease_out)
description.animate(opacity=1, y=300, duration=0.5, delay=0.5, easing= Easing.Expo.ease_out)
class BasicWindow:
def __init__(self):
window = gtk.Window()
window.set_default_size(600, 600)
window.connect("delete_event", lambda *args: gtk.main_quit())
self.scene = Scene()
window.add(self.scene)
window.show_all()
if __name__ == '__main__':
window = BasicWindow()
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL) # gtk3 screws up ctrl+c
gtk.main()
|
projecthamster/apx
|
apx/splash.py
|
Python
|
mit
| 10,155
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .copy_source import CopySource
class DynamicsSource(CopySource):
"""A copy activity Dynamics source.
:param additional_properties: Unmatched properties from the message are
     deserialized to this collection
:type additional_properties: dict[str, object]
:param source_retry_count: Source retry count. Type: integer (or
Expression with resultType integer).
:type source_retry_count: object
:param source_retry_wait: Source retry wait. Type: string (or Expression
with resultType string), pattern:
((\\d+)\\.)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:type source_retry_wait: object
:param type: Constant filled by server.
:type type: str
:param query: FetchXML is a proprietary query language that is used in
Microsoft Dynamics (online & on-premises). Type: string (or Expression
with resultType string).
:type query: object
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'source_retry_count': {'key': 'sourceRetryCount', 'type': 'object'},
'source_retry_wait': {'key': 'sourceRetryWait', 'type': 'object'},
'type': {'key': 'type', 'type': 'str'},
'query': {'key': 'query', 'type': 'object'},
}
def __init__(self, additional_properties=None, source_retry_count=None, source_retry_wait=None, query=None):
super(DynamicsSource, self).__init__(additional_properties=additional_properties, source_retry_count=source_retry_count, source_retry_wait=source_retry_wait)
self.query = query
self.type = 'DynamicsSource'
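# --- Hedged usage sketch (illustrative only) ---
# The query parameter documented above takes a FetchXML string (or an Expression that
# resolves to one). The XML below is a made-up example query.
def _example_dynamics_source():
    fetch_xml = '<fetch top="10"><entity name="account"><attribute name="name"/></entity></fetch>'
    return DynamicsSource(query=fetch_xml, source_retry_count=3)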
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-datafactory/azure/mgmt/datafactory/models/dynamics_source.py
|
Python
|
mit
| 2,144
|
# -*- coding: utf-8 -*-
class DoubanBaseError(Exception):
def __str__(self):
return "***%s (%s)*** %s" % (self.status, self.reason, self.msg)
class DoubanOAuthError(DoubanBaseError):
def __init__(self, status, reason, msg={}):
self.status = status
self.reason = reason
        self.msg = msg
class DoubanAPIError(DoubanBaseError):
def __init__(self, resp):
self.status = resp.status
self.reason = resp.reason
self.msg = resp.parsed
|
zangzhe/douban-review-api-testing
|
douban_client/api/error.py
|
Python
|
mit
| 498
|
#!/usr/bin/env python
""" lk_tracker.py - Version 1.1 2013-12-20
Based on the OpenCV lk_track.py demo code
Created for the Pi Robot Project: http://www.pirobot.org
Copyright (c) 2011 Patrick Goebel. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details at:
http://www.gnu.org/licenses/gpl.html
"""
import rospy
import cv2
import cv2.cv as cv
import numpy as np
from rbx1_vision.good_features import GoodFeatures
class LKTracker(GoodFeatures):
def __init__(self, node_name):
super(LKTracker, self).__init__(node_name)
self.show_text = rospy.get_param("~show_text", True)
self.feature_size = rospy.get_param("~feature_size", 1)
# LK parameters
self.lk_winSize = rospy.get_param("~lk_winSize", (10, 10))
self.lk_maxLevel = rospy.get_param("~lk_maxLevel", 2)
self.lk_criteria = rospy.get_param("~lk_criteria", (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 20, 0.01))
self.lk_params = dict( winSize = self.lk_winSize,
maxLevel = self.lk_maxLevel,
criteria = self.lk_criteria)
self.detect_interval = 1
self.keypoints = None
self.detect_box = None
self.track_box = None
self.mask = None
self.grey = None
self.prev_grey = None
def process_image(self, cv_image):
# If we don't yet have a detection box (drawn by the user
# with the mouse), keep waiting
if self.detect_box is None:
return cv_image
# Create a greyscale version of the image
self.grey = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
# Equalize the grey histogram to minimize lighting effects
self.grey = cv2.equalizeHist(self.grey)
# If we haven't yet started tracking, set the track box to the
# detect box and extract the keypoints within it
if self.track_box is None or not self.is_rect_nonzero(self.track_box):
self.track_box = self.detect_box
self.keypoints = self.get_keypoints(self.grey, self.track_box)
else:
if self.prev_grey is None:
self.prev_grey = self.grey
            # Now that we have keypoints, track them to the next frame
# using optical flow
self.track_box = self.track_keypoints(self.grey, self.prev_grey)
# Process any special keyboard commands for this module
if 32 <= self.keystroke and self.keystroke < 128:
cc = chr(self.keystroke).lower()
if cc == 'c':
# Clear the current keypoints
self.keypoints = None
self.track_box = None
self.detect_box = None
self.prev_grey = self.grey
return cv_image
def track_keypoints(self, grey, prev_grey):
# We are tracking points between the previous frame and the
# current frame
img0, img1 = prev_grey, grey
# Reshape the current keypoints into a numpy array required
# by calcOpticalFlowPyrLK()
p0 = np.float32([p for p in self.keypoints]).reshape(-1, 1, 2)
# Calculate the optical flow from the previous frame to the current frame
p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **self.lk_params)
# Do the reverse calculation: from the current frame to the previous frame
try:
p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **self.lk_params)
# Compute the distance between corresponding points in the two flows
d = abs(p0-p0r).reshape(-1, 2).max(-1)
# If the distance between pairs of points is < 1 pixel, set
# a value in the "good" array to True, otherwise False
good = d < 1
# Initialize a list to hold new keypoints
new_keypoints = list()
# Cycle through all current and new keypoints and only keep
# those that satisfy the "good" condition above
for (x, y), good_flag in zip(p1.reshape(-1, 2), good):
if not good_flag:
continue
new_keypoints.append((x, y))
# Draw the keypoint on the image
cv2.circle(self.marker_image, (x, y), self.feature_size, (0, 255, 0, 0), cv.CV_FILLED, 8, 0)
# Set the global keypoint list to the new list
self.keypoints = new_keypoints
# Convert the keypoints list to a numpy array
keypoints_array = np.float32([p for p in self.keypoints]).reshape(-1, 1, 2)
# If we have enough points, find the best fit ellipse around them
if len(self.keypoints) > 6:
track_box = cv2.fitEllipse(keypoints_array)
else:
# Otherwise, find the best fitting rectangle
track_box = cv2.boundingRect(keypoints_array)
except:
track_box = None
return track_box
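# --- Hedged sketch of the forward-backward consistency test used above ---
# (illustrative only; operates on plain numpy arrays rather than real optical flow
# output). A keypoint is kept only when tracking it forward and then backward
# returns it to within one pixel of its original position.
def _example_filter_keypoints(p0, p0r):
    """p0: original points, p0r: points re-tracked back from the next frame."""
    d = abs(p0 - p0r).reshape(-1, 2).max(-1)
    good = d < 1
    return [tuple(pt) for pt, ok in zip(p0.reshape(-1, 2), good) if ok]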
if __name__ == '__main__':
try:
node_name = "lk_tracker"
LKTracker(node_name)
rospy.spin()
except KeyboardInterrupt:
print "Shutting down LK Tracking node."
cv.DestroyAllWindows()
|
fujy/ROS-Project
|
src/rbx1/rbx1_vision/src/rbx1_vision/lk_tracker.py
|
Python
|
mit
| 6,007
|
#!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import argparse
from collections import deque
import configparser
import datetime
import os
import time
import shutil
import subprocess
import sys
import tempfile
import re
import logging
import unittest
# Formatting. Default colors to empty strings.
BOLD, GREEN, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
# Make sure python thinks it can write unicode to its stdout
"\u2713".encode("utf_8").decode(sys.stdout.encoding)
TICK = "✓ "
CROSS = "✖ "
CIRCLE = "○ "
except UnicodeDecodeError:
TICK = "P "
CROSS = "x "
CIRCLE = "o "
if os.name != 'nt' or sys.getwindowsversion() >= (10, 0, 14393):
if os.name == 'nt':
import ctypes
kernel32 = ctypes.windll.kernel32 # type: ignore
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
# Enable ascii color control to stdout
stdout = kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
stdout_mode = ctypes.c_int32()
kernel32.GetConsoleMode(stdout, ctypes.byref(stdout_mode))
kernel32.SetConsoleMode(stdout, stdout_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
# Enable ascii color control to stderr
stderr = kernel32.GetStdHandle(STD_ERROR_HANDLE)
stderr_mode = ctypes.c_int32()
kernel32.GetConsoleMode(stderr, ctypes.byref(stderr_mode))
kernel32.SetConsoleMode(stderr, stderr_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
GREEN = ('\033[0m', '\033[0;32m')
RED = ('\033[0m', '\033[0;31m')
GREY = ('\033[0m', '\033[1;30m')
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
TEST_FRAMEWORK_MODULES = [
"address",
"blocktools",
"muhash",
"key",
"script",
"segwit_addr",
"util",
]
EXTENDED_SCRIPTS = [
# These tests are not run by default.
# Longest test should go first, to favor running tests in parallel
'feature_pruning.py',
'feature_dbcrash.py',
]
BASE_SCRIPTS = [
# Scripts that are run by default.
# Longest test should go first, to favor running tests in parallel
'wallet_hd.py',
'wallet_hd.py --descriptors',
'wallet_backup.py',
'wallet_backup.py --descriptors',
# vv Tests less than 5m vv
'mining_getblocktemplate_longpoll.py',
'feature_maxuploadtarget.py',
'feature_block.py',
'rpc_fundrawtransaction.py',
'rpc_fundrawtransaction.py --descriptors',
'p2p_compactblocks.py',
'feature_segwit.py --legacy-wallet',
# vv Tests less than 2m vv
'wallet_basic.py',
'wallet_basic.py --descriptors',
'wallet_labels.py',
'wallet_labels.py --descriptors',
'p2p_segwit.py',
'p2p_timeouts.py',
'p2p_tx_download.py',
'mempool_updatefromblock.py',
'wallet_dump.py --legacy-wallet',
'wallet_listtransactions.py',
'wallet_listtransactions.py --descriptors',
'feature_taproot.py',
# vv Tests less than 60s vv
'p2p_sendheaders.py',
'wallet_importmulti.py --legacy-wallet',
'mempool_limit.py',
'rpc_txoutproof.py',
'wallet_listreceivedby.py',
'wallet_listreceivedby.py --descriptors',
'wallet_abandonconflict.py',
'wallet_abandonconflict.py --descriptors',
'feature_csv_activation.py',
'rpc_rawtransaction.py',
'rpc_rawtransaction.py --descriptors',
'wallet_address_types.py',
'wallet_address_types.py --descriptors',
'feature_bip68_sequence.py',
'p2p_feefilter.py',
'feature_reindex.py',
'feature_abortnode.py',
# vv Tests less than 30s vv
'wallet_keypool_topup.py',
'wallet_keypool_topup.py --descriptors',
'feature_fee_estimation.py',
'interface_zmq.py',
'interface_bitcoin_cli.py',
'mempool_resurrect.py',
'wallet_txn_doublespend.py --mineblock',
'tool_wallet.py',
'tool_wallet.py --descriptors',
'wallet_txn_clone.py',
'wallet_txn_clone.py --segwit',
'rpc_getchaintips.py',
'rpc_misc.py',
'interface_rest.py',
'mempool_spend_coinbase.py',
'wallet_avoidreuse.py',
'wallet_avoidreuse.py --descriptors',
'mempool_reorg.py',
'mempool_persist.py',
'wallet_multiwallet.py',
'wallet_multiwallet.py --descriptors',
'wallet_multiwallet.py --usecli',
'wallet_createwallet.py',
'wallet_createwallet.py --usecli',
'wallet_createwallet.py --descriptors',
'wallet_watchonly.py --legacy-wallet',
'wallet_watchonly.py --usecli --legacy-wallet',
'wallet_reorgsrestore.py',
'interface_http.py',
'interface_rpc.py',
'rpc_psbt.py',
'rpc_psbt.py --descriptors',
'rpc_users.py',
'rpc_whitelist.py',
'feature_proxy.py',
'rpc_signrawtransaction.py',
'rpc_signrawtransaction.py --descriptors',
'wallet_groups.py',
'p2p_addrv2_relay.py',
'wallet_groups.py --descriptors',
'p2p_disconnect_ban.py',
'rpc_decodescript.py',
'rpc_blockchain.py',
'rpc_deprecated.py',
'wallet_disable.py',
'wallet_disable.py --descriptors',
'p2p_addr_relay.py',
'p2p_getaddr_caching.py',
'p2p_getdata.py',
'rpc_net.py',
'wallet_keypool.py',
'wallet_keypool.py --descriptors',
'wallet_descriptor.py --descriptors',
'p2p_nobloomfilter_messages.py',
'p2p_filter.py',
'rpc_setban.py',
'p2p_blocksonly.py',
'mining_prioritisetransaction.py',
'p2p_invalid_locator.py',
'p2p_invalid_block.py',
'p2p_invalid_messages.py',
'p2p_invalid_tx.py',
'feature_assumevalid.py',
'example_test.py',
'wallet_txn_doublespend.py',
'wallet_txn_doublespend.py --descriptors',
'feature_backwards_compatibility.py',
'feature_backwards_compatibility.py --descriptors',
'wallet_txn_clone.py --mineblock',
'feature_notifications.py',
'rpc_getblockfilter.py',
'rpc_invalidateblock.py',
'feature_rbf.py',
'mempool_packages.py',
'mempool_package_onemore.py',
'rpc_createmultisig.py',
'rpc_createmultisig.py --descriptors',
'feature_versionbits_warning.py',
'rpc_preciousblock.py',
'wallet_importprunedfunds.py',
'wallet_importprunedfunds.py --descriptors',
'p2p_leak_tx.py',
'p2p_eviction.py',
'rpc_signmessage.py',
'rpc_generateblock.py',
'rpc_generate.py',
'wallet_balance.py',
'wallet_balance.py --descriptors',
'feature_nulldummy.py',
'feature_nulldummy.py --descriptors',
'mempool_accept.py',
'mempool_expiry.py',
'wallet_import_rescan.py --legacy-wallet',
'wallet_import_with_label.py --legacy-wallet',
'wallet_importdescriptors.py --descriptors',
'wallet_upgradewallet.py',
'rpc_bind.py --ipv4',
'rpc_bind.py --ipv6',
'rpc_bind.py --nonloopback',
'mining_basic.py',
'feature_signet.py',
'wallet_bumpfee.py',
'wallet_bumpfee.py --descriptors',
'wallet_implicitsegwit.py --legacy-wallet',
'rpc_named_arguments.py',
'wallet_listsinceblock.py',
'wallet_listsinceblock.py --descriptors',
'p2p_leak.py',
'wallet_encryption.py',
'wallet_encryption.py --descriptors',
'feature_dersig.py',
'feature_cltv.py',
'rpc_uptime.py',
'wallet_resendwallettransactions.py',
'wallet_resendwallettransactions.py --descriptors',
'wallet_fallbackfee.py',
'wallet_fallbackfee.py --descriptors',
'rpc_dumptxoutset.py',
'feature_minchainwork.py',
'rpc_estimatefee.py',
'rpc_getblockstats.py',
'wallet_create_tx.py',
'wallet_send.py',
'wallet_create_tx.py --descriptors',
'p2p_fingerprint.py',
'feature_uacomment.py',
'wallet_coinbase_category.py',
'wallet_coinbase_category.py --descriptors',
'feature_filelock.py',
'feature_loadblock.py',
'p2p_dos_header_tree.py',
'p2p_unrequested_blocks.py',
'p2p_blockfilters.py',
'feature_includeconf.py',
'feature_asmap.py',
'mempool_unbroadcast.py',
'mempool_compatibility.py',
'rpc_deriveaddresses.py',
'rpc_deriveaddresses.py --usecli',
'p2p_ping.py',
'rpc_scantxoutset.py',
'feature_logging.py',
'p2p_node_network_limited.py',
'p2p_permissions.py',
'feature_blocksdir.py',
'wallet_startup.py',
'feature_config_args.py',
'feature_settings.py',
'rpc_getdescriptorinfo.py',
'rpc_getpeerinfo_deprecation.py',
'rpc_help.py',
'feature_help.py',
'feature_shutdown.py',
'p2p_ibd_txrelay.py',
# Don't append tests at the end to avoid merge conflicts
# Put them in a random line within the section that fits their approximate run-time
]
# Place EXTENDED_SCRIPTS first since it has the 3 longest running tests
ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS
NON_SCRIPTS = [
# These are python files that live in the functional tests directory, but are not test scripts.
"combine_logs.py",
"create_cache.py",
"test_runner.py",
]
def main():
# Parse arguments and pass through unrecognised args
parser = argparse.ArgumentParser(add_help=False,
usage='%(prog)s [test_runner.py options] [script options] [scripts]',
description=__doc__,
epilog='''
Help text and arguments for individual test script:''',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--ansi', action='store_true', default=sys.stdout.isatty(), help="Use ANSI colors and dots in output (enabled by default when standard output is a TTY)")
parser.add_argument('--combinedlogslen', '-c', type=int, default=0, metavar='n', help='On failure, print a log (of length n lines) to the console, combined from the test framework and all test nodes.')
parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
parser.add_argument('--ci', action='store_true', help='Run checks and code that are usually only enabled in a continuous integration environment')
parser.add_argument('--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
parser.add_argument('--quiet', '-q', action='store_true', help='only print dots, results summary and failure logs')
parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs")
parser.add_argument('--failfast', action='store_true', help='stop execution after the first test failure')
parser.add_argument('--filter', help='filter scripts to run by regular expression')
args, unknown_args = parser.parse_known_args()
if not args.ansi:
global BOLD, GREEN, RED, GREY
BOLD = ("", "")
GREEN = ("", "")
RED = ("", "")
GREY = ("", "")
# args to be passed on always start with two dashes; tests are the remaining unknown args
tests = [arg for arg in unknown_args if arg[:2] != "--"]
passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
# Read config generated by configure.
config = configparser.ConfigParser()
configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
config.read_file(open(configfile, encoding="utf8"))
passon_args.append("--configfile=%s" % configfile)
# Set up logging
logging_level = logging.INFO if args.quiet else logging.DEBUG
logging.basicConfig(format='%(message)s', level=logging_level)
# Create base test directory
tmpdir = "%s/test_runner_₿_🏃_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
os.makedirs(tmpdir)
logging.debug("Temporary test directory at %s" % tmpdir)
enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
if not enable_bitcoind:
print("No functional tests to run.")
print("Rerun ./configure with --with-daemon and then make")
sys.exit(0)
# Build list of tests
test_list = []
if tests:
# Individual tests have been specified. Run specified tests that exist
# in the ALL_SCRIPTS list. Accept names with or without a .py extension.
# Specified tests can contain wildcards, but in that case the supplied
# paths should be coherent, e.g. the same path as that provided to call
# test_runner.py. Examples:
# `test/functional/test_runner.py test/functional/wallet*`
# `test/functional/test_runner.py ./test/functional/wallet*`
# `test_runner.py wallet*`
# but not:
# `test/functional/test_runner.py wallet*`
# Multiple wildcards can be passed:
# `test_runner.py tool* mempool*`
for test in tests:
script = test.split("/")[-1]
script = script + ".py" if ".py" not in script else script
if script in ALL_SCRIPTS:
test_list.append(script)
else:
print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], test))
elif args.extended:
# Include extended tests
test_list += ALL_SCRIPTS
else:
# Run base tests only
test_list += BASE_SCRIPTS
# Remove the test cases that the user has explicitly asked to exclude.
if args.exclude:
exclude_tests = [test.split('.py')[0] for test in args.exclude.split(',')]
for exclude_test in exclude_tests:
# Remove <test_name>.py and <test_name>.py --arg from the test list
exclude_list = [test for test in test_list if test.split('.py')[0] == exclude_test]
for exclude_item in exclude_list:
test_list.remove(exclude_item)
if not exclude_list:
print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test))
if args.filter:
test_list = list(filter(re.compile(args.filter).search, test_list))
if not test_list:
print("No valid test scripts specified. Check that your test is in one "
"of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
sys.exit(0)
if args.help:
# Print help for test_runner.py, then print help of the first script (with args removed) and exit.
parser.print_help()
subprocess.check_call([sys.executable, os.path.join(config["environment"]["SRCDIR"], 'test', 'functional', test_list[0].split()[0]), '-h'])
sys.exit(0)
check_script_list(src_dir=config["environment"]["SRCDIR"], fail_on_warn=args.ci)
check_script_prefixes()
if not args.keepcache:
shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)
run_tests(
test_list=test_list,
src_dir=config["environment"]["SRCDIR"],
build_dir=config["environment"]["BUILDDIR"],
tmpdir=tmpdir,
jobs=args.jobs,
enable_coverage=args.coverage,
args=passon_args,
combined_logs_len=args.combinedlogslen,
failfast=args.failfast,
use_term_control=args.ansi,
)
def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=False, args=None, combined_logs_len=0, failfast=False, use_term_control):
args = args or []
# Warn if bitcoind is already running
try:
# pgrep exits with code zero when one or more matching processes found
if subprocess.run(["pgrep", "-x", "bitcoind"], stdout=subprocess.DEVNULL).returncode == 0:
print("%sWARNING!%s There is already a bitcoind process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0]))
except OSError:
# pgrep not supported
pass
# Warn if there is a cache directory
cache_dir = "%s/test/cache" % build_dir
if os.path.isdir(cache_dir):
print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))
# Test Framework Tests
print("Running Unit Tests for Test Framework Modules")
test_framework_tests = unittest.TestSuite()
for module in TEST_FRAMEWORK_MODULES:
test_framework_tests.addTest(unittest.TestLoader().loadTestsFromName("test_framework.{}".format(module)))
result = unittest.TextTestRunner(verbosity=1, failfast=True).run(test_framework_tests)
if not result.wasSuccessful():
logging.debug("Early exiting after failure in TestFramework unit tests")
sys.exit(False)
tests_dir = src_dir + '/test/functional/'
flags = ['--cachedir={}'.format(cache_dir)] + args
if enable_coverage:
coverage = RPCCoverage()
flags.append(coverage.flag)
logging.debug("Initializing coverage directory at %s" % coverage.dir)
else:
coverage = None
if len(test_list) > 1 and jobs > 1:
# Populate cache
try:
subprocess.check_output([sys.executable, tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir])
except subprocess.CalledProcessError as e:
sys.stdout.buffer.write(e.output)
raise
#Run Tests
job_queue = TestHandler(
num_tests_parallel=jobs,
tests_dir=tests_dir,
tmpdir=tmpdir,
test_list=test_list,
flags=flags,
use_term_control=use_term_control,
)
start_time = time.time()
test_results = []
max_len_name = len(max(test_list, key=len))
test_count = len(test_list)
for i in range(test_count):
test_result, testdir, stdout, stderr = job_queue.get_next()
test_results.append(test_result)
done_str = "{}/{} - {}{}{}".format(i + 1, test_count, BOLD[1], test_result.name, BOLD[0])
if test_result.status == "Passed":
logging.debug("%s passed, Duration: %s s" % (done_str, test_result.time))
elif test_result.status == "Skipped":
logging.debug("%s skipped" % (done_str))
else:
print("%s failed, Duration: %s s\n" % (done_str, test_result.time))
print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
if combined_logs_len and os.path.isdir(testdir):
# Print the final `combinedlogslen` lines of the combined logs
print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0]))
print('\n============')
print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
print('============\n')
combined_logs_args = [sys.executable, os.path.join(tests_dir, 'combine_logs.py'), testdir]
if BOLD[0]:
combined_logs_args += ['--color']
combined_logs, _ = subprocess.Popen(combined_logs_args, universal_newlines=True, stdout=subprocess.PIPE).communicate()
print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))
if failfast:
logging.debug("Early exiting after test failure")
break
print_results(test_results, max_len_name, (int(time.time() - start_time)))
if coverage:
coverage_passed = coverage.report_rpc_coverage()
logging.debug("Cleaning up coverage data")
coverage.cleanup()
else:
coverage_passed = True
# Clear up the temp directory if all subdirectories are gone
if not os.listdir(tmpdir):
os.rmdir(tmpdir)
all_passed = all(map(lambda test_result: test_result.was_successful, test_results)) and coverage_passed
# This will be a no-op unless failfast is True in which case there may be dangling
# processes which need to be killed.
job_queue.kill_and_join()
sys.exit(not all_passed)
def print_results(test_results, max_len_name, runtime):
results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
test_results.sort(key=TestResult.sort_key)
all_passed = True
time_sum = 0
for test_result in test_results:
all_passed = all_passed and test_result.was_successful
time_sum += test_result.time
test_result.padding = max_len_name
results += str(test_result)
status = TICK + "Passed" if all_passed else CROSS + "Failed"
if not all_passed:
results += RED[1]
results += BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0]
if not all_passed:
results += RED[0]
results += "Runtime: %s s\n" % (runtime)
print(results)
class TestHandler:
"""
Trigger the test scripts passed in via the list.
"""
def __init__(self, *, num_tests_parallel, tests_dir, tmpdir, test_list, flags, use_term_control):
assert num_tests_parallel >= 1
self.num_jobs = num_tests_parallel
self.tests_dir = tests_dir
self.tmpdir = tmpdir
self.test_list = test_list
self.flags = flags
self.num_running = 0
self.jobs = []
self.use_term_control = use_term_control
def get_next(self):
while self.num_running < self.num_jobs and self.test_list:
# Add tests
self.num_running += 1
test = self.test_list.pop(0)
portseed = len(self.test_list)
portseed_arg = ["--portseed={}".format(portseed)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
test_argv = test.split()
testdir = "{}/{}_{}".format(self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)
tmpdir_arg = ["--tmpdir={}".format(testdir)]
self.jobs.append((test,
time.time(),
subprocess.Popen([sys.executable, self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr),
testdir,
log_stdout,
log_stderr))
if not self.jobs:
raise IndexError('pop from empty list')
# Print remaining running jobs when all jobs have been started.
if not self.test_list:
print("Remaining jobs: [{}]".format(", ".join(j[0] for j in self.jobs)))
dot_count = 0
while True:
# Return first proc that finishes
time.sleep(.5)
for job in self.jobs:
(name, start_time, proc, testdir, log_out, log_err) = job
if proc.poll() is not None:
log_out.seek(0), log_err.seek(0)
[stdout, stderr] = [log_file.read().decode('utf-8') for log_file in (log_out, log_err)]
log_out.close(), log_err.close()
if proc.returncode == TEST_EXIT_PASSED and stderr == "":
status = "Passed"
elif proc.returncode == TEST_EXIT_SKIPPED:
status = "Skipped"
else:
status = "Failed"
self.num_running -= 1
self.jobs.remove(job)
if self.use_term_control:
clearline = '\r' + (' ' * dot_count) + '\r'
print(clearline, end='', flush=True)
dot_count = 0
return TestResult(name, status, int(time.time() - start_time)), testdir, stdout, stderr
if self.use_term_control:
print('.', end='', flush=True)
dot_count += 1
def kill_and_join(self):
"""Send SIGKILL to all jobs and block until all have ended."""
procs = [i[2] for i in self.jobs]
for proc in procs:
proc.kill()
for proc in procs:
proc.wait()
class TestResult():
def __init__(self, name, status, time):
self.name = name
self.status = status
self.time = time
self.padding = 0
def sort_key(self):
if self.status == "Passed":
return 0, self.name.lower()
elif self.status == "Failed":
return 2, self.name.lower()
elif self.status == "Skipped":
return 1, self.name.lower()
def __repr__(self):
if self.status == "Passed":
color = GREEN
glyph = TICK
elif self.status == "Failed":
color = RED
glyph = CROSS
elif self.status == "Skipped":
color = GREY
glyph = CIRCLE
return color[1] + "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0]
@property
def was_successful(self):
return self.status != "Failed"
def check_script_prefixes():
"""Check that test scripts start with one of the allowed name prefixes."""
good_prefixes_re = re.compile("^(example|feature|interface|mempool|mining|p2p|rpc|wallet|tool)_")
bad_script_names = [script for script in ALL_SCRIPTS if good_prefixes_re.match(script) is None]
if bad_script_names:
print("%sERROR:%s %d tests not meeting naming conventions:" % (BOLD[1], BOLD[0], len(bad_script_names)))
print(" %s" % ("\n ".join(sorted(bad_script_names))))
raise AssertionError("Some tests are not following naming convention!")
def check_script_list(*, src_dir, fail_on_warn):
"""Check scripts directory.
Check that there are no scripts in the functional tests directory which are
    not being run by test_runner.py."""
script_dir = src_dir + '/test/functional/'
python_files = set([test_file for test_file in os.listdir(script_dir) if test_file.endswith(".py")])
missed_tests = list(python_files - set(map(lambda x: x.split()[0], ALL_SCRIPTS + NON_SCRIPTS)))
if len(missed_tests) != 0:
print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
if fail_on_warn:
# On CI this warning is an error to prevent merging incomplete commits into master
sys.exit(1)
class RPCCoverage():
"""
Coverage reporting utilities for test_runner.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
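    A rough sketch of the expected layout (names other than rpc_interface.txt and
    the coverage.* prefix are illustrative):
      <coverage dir>/rpc_interface.txt  - one RPC command per line, per `bitcoin-cli help`
      <coverage dir>/coverage.<suffix>  - RPC commands actually invoked by a test node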
See also: test/functional/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir=%s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % command) for command in sorted(uncovered)))
return False
else:
print("All RPC commands covered.")
return True
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `test/functional/test_framework/coverage.py`
reference_filename = 'rpc_interface.txt'
coverage_file_prefix = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, reference_filename)
coverage_filenames = set()
all_cmds = set()
# Consider RPC generate covered, because it is overloaded in
# test_framework/test_node.py and not seen by the coverage check.
covered_cmds = set({'generate'})
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r', encoding="utf8") as coverage_ref_file:
all_cmds.update([line.strip() for line in coverage_ref_file.readlines()])
for root, _, files in os.walk(self.dir):
for filename in files:
if filename.startswith(coverage_file_prefix):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r', encoding="utf8") as coverage_file:
covered_cmds.update([line.strip() for line in coverage_file.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
main()
|
alecalve/bitcoin
|
test/functional/test_runner.py
|
Python
|
mit
| 30,190
|
"""
Contains class for gaussian process hyperparameter optimizations.
"""
import os
import logging
import tempfile
from typing import Dict, List, Optional, Tuple, Union, Any
from deepchem.data import Dataset
from deepchem.trans import Transformer
from deepchem.metrics import Metric
from deepchem.hyper.base_classes import HyperparamOpt
from deepchem.hyper.base_classes import _convert_hyperparam_dict_to_filename
logger = logging.getLogger(__name__)
def compute_parameter_range(params_dict: Dict,
search_range: Union[int, float, Dict]
) -> Dict[str, Tuple[str, List[float]]]:
"""Convenience Function to compute parameter search space.
Parameters
----------
params_dict: Dict
Dictionary mapping strings to Ints/Floats. An explicit list of
parameters is computed with `search_range`. The optimization range
computed is specified in the documentation for `search_range`
below.
search_range: int/float/Dict (default 4)
The `search_range` specifies the range of parameter values to
search for. If `search_range` is an int/float, it is used as the
global search range for parameters. This creates a search
problem on the following space:
optimization on [initial value / search_range,
initial value * search_range]
If `search_range` is a dict, it must contain the same keys as
for `params_dict`. In this case, `search_range` specifies a
per-parameter search range. This is useful in case some
parameters have a larger natural range than others. For a given
hyperparameter `hp` this would create the following search
range:
optimization on hp on [initial value[hp] / search_range[hp],
initial value[hp] * search_range[hp]]
Returns
-------
param_range: Dict
Dictionary mapping hyperparameter names to tuples. Each tuple is
of form `(value_type, value_range)` where `value_type` is a string
that is either "int" or "cont" and `value_range` is a list of two
elements of the form `[low, hi]`. This format is expected by
pyGPGO which `GaussianProcessHyperparamOpt` uses to perform
optimization.
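  Examples
  --------
  A minimal sketch with hypothetical default values:
  >>> compute_parameter_range({'n_estimators': 100, 'learning_rate': 0.001}, 4)
  {'n_estimators': ('int', [25, 400]), 'learning_rate': ('cont', [0.00025, 0.004])}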
"""
# Range of optimization
param_range = {}
if isinstance(search_range, dict):
if sorted(params_dict.keys()) != sorted(search_range.keys()):
raise ValueError(
"If search_range is provided as a dictionary, it must have the same keys as params_dict."
)
elif (not isinstance(search_range, int)) and (not isinstance(
search_range, float)):
raise ValueError("search_range must be a dict or int or float.")
for hp, value in params_dict.items():
if isinstance(search_range, dict):
hp_search_range = search_range[hp]
else:
# We know from guard above that this is an int/float
hp_search_range = search_range
if isinstance(value, int):
value_range = [value // hp_search_range, value * hp_search_range]
param_range[hp] = ("int", value_range)
elif isinstance(value, float):
value_range = [value / hp_search_range, value * hp_search_range]
param_range[hp] = ("cont", value_range)
return param_range
class GaussianProcessHyperparamOpt(HyperparamOpt):
"""
Gaussian Process Global Optimization(GPGO)
This class uses Gaussian Process optimization to select
hyperparameters. Underneath the hood it uses pyGPGO to optimize
models. If you don't have pyGPGO installed, you won't be able to use
this class.
  Note that `params_dict` has different semantics than for
  `GridHyperparamOpt`. `params_dict[hp]` must be an int/float and is
  used as the center of a search range.
Examples
--------
This example shows the type of constructor function expected.
>>> import deepchem as dc
>>> optimizer = dc.hyper.GaussianProcessHyperparamOpt(lambda **p: dc.models.GraphConvModel(n_tasks=1, **p))
Here's a more sophisticated example that shows how to optimize only
some parameters of a model. In this case, we have some parameters we
want to optimize, and others which we don't. To handle this type of
search, we create a `model_builder` which hard codes some arguments
(in this case, `n_tasks` and `n_features` which are properties of a
dataset and not hyperparameters to search over.)
>>> import numpy as np
>>> from sklearn.ensemble import RandomForestRegressor as RF
>>> def model_builder(**model_params):
... n_estimators = model_params['n_estimators']
... min_samples_split = model_params['min_samples_split']
... rf_model = RF(n_estimators=n_estimators, min_samples_split=min_samples_split)
... return dc.models.SklearnModel(rf_model)
>>> optimizer = dc.hyper.GaussianProcessHyperparamOpt(model_builder)
>>> params_dict = {"n_estimators":100, "min_samples_split":2}
>>> train_dataset = dc.data.NumpyDataset(X=np.random.rand(50, 5),
... y=np.random.rand(50, 1))
>>> valid_dataset = dc.data.NumpyDataset(X=np.random.rand(20, 5),
... y=np.random.rand(20, 1))
>>> metric = dc.metrics.Metric(dc.metrics.pearson_r2_score)
>> best_model, best_hyperparams, all_results =\
optimizer.hyperparam_search(params_dict, train_dataset, valid_dataset, metric, max_iter=2)
>> type(best_hyperparams)
<class 'dict'>
Notes
-----
This class requires pyGPGO to be installed.
"""
def hyperparam_search(self,
params_dict: Dict,
train_dataset: Dataset,
valid_dataset: Dataset,
metric: Metric,
output_transformers: List[Transformer] = [],
nb_epoch: int = 10,
use_max: bool = True,
logdir: Optional[str] = None,
max_iter: int = 20,
search_range: Union[int, float, Dict] = 4,
logfile: Optional[str] = None,
**kwargs):
"""Perform hyperparameter search using a gaussian process.
Parameters
----------
params_dict: Dict
Maps hyperparameter names (strings) to possible parameter
values. The semantics of this list are different than for
`GridHyperparamOpt`. `params_dict[hp]` must map to an int/float,
which is used as the center of a search with radius
`search_range` since pyGPGO can only optimize numerical
hyperparameters.
train_dataset: Dataset
dataset used for training
valid_dataset: Dataset
dataset used for validation(optimization on valid scores)
metric: Metric
metric used for evaluation
output_transformers: list[Transformer]
Transformers for evaluation. This argument is needed since
`train_dataset` and `valid_dataset` may have been transformed
for learning and need the transform to be inverted before
the metric can be evaluated on a model.
nb_epoch: int, (default 10)
Specifies the number of training epochs during each iteration of optimization.
Not used by all model types.
use_max: bool, (default True)
      Specifies whether to maximize (True) or minimize (False) `metric`.
logdir: str, optional, (default None)
The directory in which to store created models. If not set, will
use a temporary directory.
max_iter: int, (default 20)
number of optimization trials
search_range: int/float/Dict (default 4)
The `search_range` specifies the range of parameter values to
search for. If `search_range` is an int/float, it is used as the
global search range for parameters. This creates a search
problem on the following space:
optimization on [initial value / search_range,
initial value * search_range]
If `search_range` is a dict, it must contain the same keys as
for `params_dict`. In this case, `search_range` specifies a
per-parameter search range. This is useful in case some
parameters have a larger natural range than others. For a given
hyperparameter `hp` this would create the following search
range:
optimization on hp on [initial value[hp] / search_range[hp],
initial value[hp] * search_range[hp]]
logfile: str, optional (default None)
      Name of logfile to write results to. If specified, this must
      be a valid file name. If not specified, results of hyperparameter
      search will be written to `logdir/results.txt`.
Returns
-------
Tuple[`best_model`, `best_hyperparams`, `all_scores`]
`(best_model, best_hyperparams, all_scores)` where `best_model` is
an instance of `dc.model.Model`, `best_hyperparams` is a
dictionary of parameters, and `all_scores` is a dictionary mapping
string representations of hyperparameter sets to validation
scores.
"""
try:
from pyGPGO.covfunc import matern32
from pyGPGO.acquisition import Acquisition
from pyGPGO.surrogates.GaussianProcess import GaussianProcess
from pyGPGO.GPGO import GPGO
except ModuleNotFoundError:
raise ImportError("This class requires pyGPGO to be installed.")
# Specify logfile
log_file = None
if logfile:
log_file = logfile
elif logdir is not None:
# Make logdir if it doesn't exist.
if not os.path.exists(logdir):
os.makedirs(logdir, exist_ok=True)
log_file = os.path.join(logdir, "results.txt")
# setup range
param_range = compute_parameter_range(params_dict, search_range)
param_keys = list(param_range.keys())
# Stores all results
all_results: Dict[Any, Any] = {}
# Store all model references so we don't have to reload
all_models = {}
# Stores all model locations
model_locations = {}
def _optimize(nb_epoch, **placeholders):
"""Private Optimizing function
Take in hyper parameter values and number of training epochs.
Return valid set performances.
Parameters
----------
nb_epoch: int
Number of epochs to train model being optimized during each iteration.
Not used by all model types.
placeholders: keyword arguments
Should be various hyperparameters as specified in `param_keys` above.
Returns:
--------
valid_scores: float
valid set performances
"""
hyper_parameters = {}
for hp in param_keys:
if param_range[hp][0] == "int":
# param values are always float in BO, so this line converts float to int
# see : https://github.com/josejimenezluna/pyGPGO/issues/10
hyper_parameters[hp] = int(placeholders[hp])
else:
hyper_parameters[hp] = float(placeholders[hp])
logger.info("Running hyperparameter set: %s" % str(hyper_parameters))
if log_file:
with open(log_file, 'w+') as f:
# Record hyperparameters
f.write("Parameters: %s" % str(hyper_parameters))
f.write('\n')
hp_str = _convert_hyperparam_dict_to_filename(hyper_parameters)
if hp_str in all_results:
# We have already evaluated the model for these hyperparameters.
if use_max:
return all_results[hp_str]
else:
return -all_results[hp_str]
if logdir is not None:
filename = "model%s" % hp_str
model_dir = os.path.join(logdir, filename)
logger.info("model_dir is %s" % model_dir)
try:
os.makedirs(model_dir)
except OSError:
if not os.path.isdir(model_dir):
logger.info("Error creating model_dir, using tempfile directory")
model_dir = tempfile.mkdtemp()
else:
model_dir = tempfile.mkdtemp()
# Add it on to the information needed for the constructor
hyper_parameters["model_dir"] = model_dir
model = self.model_builder(**hyper_parameters)
try:
model.fit(train_dataset, nb_epoch=nb_epoch)
# Not all models have nb_epoch
except TypeError:
model.fit(train_dataset)
try:
model.save()
# Some models autosave
except NotImplementedError:
pass
multitask_scores = model.evaluate(valid_dataset, [metric],
output_transformers)
score = multitask_scores[metric.name]
if log_file:
with open(log_file, 'a') as f:
# Record performances
f.write("Score: %s" % str(score))
f.write('\n')
# Store all results
all_results[hp_str] = score
# Store reference to model
all_models[hp_str] = model
model_locations[hp_str] = model_dir
# GPGO maximize performance by default
# set performance to its negative value for minimization
if use_max:
return score
else:
return -score
# Demarcating internal function for readability
def optimizing_function(**placeholders):
"""Wrapper function
Take in hyper parameter values.
Calls a private optimize function (_optimize) with number of epochs.
Returns valid set performances.
Parameters
----------
placeholders: keyword arguments
Should be various hyperparameters as specified in `param_keys` above.
Returns:
--------
valid_scores: float
valid set performances
"""
return _optimize(nb_epoch=nb_epoch, **placeholders)
# execute GPGO
cov = matern32()
gp = GaussianProcess(cov)
acq = Acquisition(mode='ExpectedImprovement')
gpgo = GPGO(gp, acq, optimizing_function, param_range)
logger.info("Max number of iteration: %i" % max_iter)
gpgo.run(max_iter=max_iter)
hp_opt, valid_performance_opt = gpgo.getResult()
hyper_parameters = {}
for hp in param_keys:
if param_range[hp][0] == "int":
hyper_parameters[hp] = int(hp_opt[hp])
else:
# FIXME: Incompatible types in assignment
hyper_parameters[hp] = float(hp_opt[hp]) # type: ignore
hp_str = _convert_hyperparam_dict_to_filename(hyper_parameters)
# Let's fetch the model with the best parameters
best_model = all_models[hp_str]
# Compare best model to default hyperparameters
if log_file:
with open(log_file, 'a') as f:
# Record hyperparameters
f.write("params_dict:")
f.write(str(params_dict))
f.write('\n')
    # Return the best model, its hyperparameters, and all recorded scores
return best_model, hyper_parameters, all_results
|
deepchem/deepchem
|
deepchem/hyper/gaussian_process.py
|
Python
|
mit
| 14,955
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_giant_sand_beetle.iff"
result.attribute_template_id = 9
result.stfName("monster_name","rock_mite")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
obi-two/Rebelion
|
data/scripts/templates/object/mobile/shared_giant_sand_beetle.py
|
Python
|
mit
| 440
|
# -*- coding: utf-8 -*-
from flask import json
from nose import tools as nose
from tests.integration.resource import ResourceTestCase
class PlaylistsResourceTestCase(ResourceTestCase):
"""
GET /playlists/ [user=<int>] [track=<int>]
200 OK
401 Unauthorized
POST /playlists/ name=<str>
201 Created
400 Bad Request
401 Unauthorized
GET /playlists/<id>/
200 OK
401 Unauthorized
404 Not Found
PUT /playlists/<id>/ name=<str>
204 No Content
401 Unauthorized
404 Not Found
POST /playlists/<id>/add/ track=<int> [index=<int>]
204 No Content
400 Bad Request
401 Unauthorized
404 Not Found
POST /playlists/<id>/remove/ index=<int>
204 No Content
400 Bad Request
401 Unauthorized
404 Not Found
DELETE /playlists/
204 No Content
401 Unauthorized
404 Not Found
"""
def get_payload(self):
return {
'name': 'Playtest',
}
# Unauthorized
def test_unauthorized_access(self):
resp = self.get('/playlists/', authenticate=False)
nose.eq_(resp.status_code, 401)
resp = self.get('/playlists/1/', authenticate=False)
nose.eq_(resp.status_code, 401)
# POST
payload = self.get_payload()
resp = self.post('/playlists/', data=payload, authenticate=False)
nose.eq_(resp.status_code, 401)
# PUT
resp = self.put('/playlists/', data=payload, authenticate=False)
nose.eq_(resp.status_code, 401)
# DELETE
resp = self.delete('/playlists/', authenticate=False)
nose.eq_(resp.status_code, 401)
# POST to /add/
payload = dict(track=1)
resp = self.post('/playlists/1/add/', data=payload, authenticate=False)
nose.eq_(resp.status_code, 401)
# POST to /remove/
resp = self.post('/playlists/1/remove/', data=payload,
authenticate=False)
nose.eq_(resp.status_code, 401)
# Authorized
def test_playlist_base_resource(self):
resp = self.get('/playlists/')
nose.eq_(resp.status_code, 200)
def test_nonexistent_playlist(self):
resp = self.get('/playlists/123/')
nose.eq_(resp.status_code, 404)
def test_playlist_creation(self):
resp = self.post('/playlists/', data=self.get_payload())
nose.eq_(resp.status_code, 201)
def test_playlist(self):
resp = self.post('/playlists/', data=self.get_payload())
playlist_url = '/playlists/%s/' % resp.json['id']
_resp = self.get(playlist_url)
nose.eq_(_resp.status_code, 200)
nose.ok_(_resp.json.has_key('id'))
nose.ok_(_resp.json.has_key('name'))
nose.ok_(_resp.json.has_key('user'))
nose.ok_(_resp.json.has_key('length'))
nose.ok_(_resp.json.has_key('tracks'))
nose.ok_(_resp.json.has_key('read_only'))
nose.ok_(_resp.json.has_key('creation_date'))
def test_playlist_creation_error(self):
resp = self.post('/playlists/', data={'name': ''})
nose.eq_(resp.status_code, 400)
self._app.config['ALLOW_ANONYMOUS_ACCESS'] = True
resp = self.post('/playlists/', data={'name': 'Mane'})
# A logged in user is required for playlist creation.
nose.eq_(resp.status_code, 400)
def test_playlist_update(self):
resp = self.post('/playlists/', data=self.get_payload())
playlist_url = '/playlists/%s/' % resp.json['id']
old_name = resp.json['name']
payload = {'name': '%s-2' % old_name}
_resp = self.put(playlist_url, data=payload)
nose.eq_(_resp.status_code, 204)
_resp = self.get(playlist_url)
nose.eq_(_resp.json['name'], '%s-2' % old_name)
        # Playlist updates raise no error. If no attribute is set, nothing
        # happens.
def test_track_addition(self):
resp = self.post('/playlists/', data=self.get_payload())
playlist_url = '/playlists/%s/' % resp.json['id']
add_url = '%sadd/' % playlist_url
resp = self.post(add_url, data={'track': self.track.pk, 'index': 0})
nose.eq_(resp.status_code, 204)
resp = self.post(add_url, data={'track': self.track.pk, 'index': 1})
nose.eq_(resp.status_code, 204)
resp = self.post(add_url, data={'track': self.track.pk, 'index': 0})
nose.eq_(resp.status_code, 204)
resp = self.post(add_url, data={'track': self.track.pk, 'index': 2})
nose.eq_(resp.status_code, 204)
resp = self.post(add_url, data={'track': self.track.pk, 'index': 0})
nose.eq_(resp.status_code, 204)
resp = self.get(playlist_url)
track = resp.json['tracks'][0]
nose.eq_(len(resp.json['tracks']), 5)
nose.ok_(track.has_key('id'))
nose.ok_(track.has_key('uri'))
nose.ok_(track.has_key('index'))
def test_track_addition_error(self):
resp = self.post('/playlists/', data=self.get_payload())
add_url = '/playlists/%s/add/' % resp.json['id']
_resp = self.post(add_url, data={})
nose.eq_(_resp.status_code, 400)
_resp = self.post(add_url, data={'track': self.track.pk, 'index': 123})
nose.eq_(_resp.status_code, 400)
def test_track_addition_to_nonexistent_playlist(self):
url = '/playlists/123/add/'
resp = self.post(url, data=self.get_payload())
nose.eq_(resp.status_code, 404)
def test_track_removal(self):
resp = self.post('/playlists/', data=self.get_payload())
add_url = '/playlists/%s/add/' % resp.json['id']
remove_url = '/playlists/%s/remove/' % resp.json['id']
resp = self.post(add_url, data={'track': self.track.pk, 'index': 0})
resp = self.post(remove_url, data={'index': 0})
nose.eq_(resp.status_code, 204)
def test_track_removal_error(self):
resp = self.post('/playlists/', data=self.get_payload())
remove_url = '/playlists/%s/remove/' % resp.json['id']
resp = self.post(remove_url, data={})
nose.eq_(resp.status_code, 400)
resp = self.post(remove_url, data={'index': 0})
nose.eq_(resp.status_code, 400)
def test_track_removal_from_nonexistent_playlist(self):
url = '/playlists/123/remove/'
resp = self.post(url, data={'index': 0})
nose.eq_(resp.status_code, 404)
def test_playlist_delete(self):
resp = self.post('/playlists/', data=self.get_payload())
playlist_url = '/playlists/%s/' % resp.json['id']
resp = self.delete(playlist_url)
nose.eq_(resp.status_code, 204)
resp = self.get(playlist_url)
nose.eq_(resp.status_code, 404)
def test_delete_nonexistent_playlist(self):
resp = self.delete('/playlists/123/')
nose.eq_(resp.status_code, 404)
    def test_track_removal_after_track_delete(self):
"""
1. Create playlist
2. Add existing track
3. Delete track from DB
4. Check that the playlists get updated and track is no longer present
"""
resp = self.post('/playlists/', data=self.get_payload())
nose.eq_(resp.status_code, 201)
playlist_url = '/playlists/%s/' % resp.json['id']
add_url = '/playlists/%s/add/' % resp.json['id']
track = self.mk_track()
resp = self.post(add_url, data={'track': track.pk, 'index': 0})
nose.eq_(resp.status_code, 204)
resp = self.delete('/tracks/%s/' % track.pk)
nose.eq_(resp.status_code, 204)
resp = self.get(playlist_url)
nose.eq_(resp.json['length'], 0)
|
maurodelazeri/shiva-server
|
tests/integration/resource-playlists-test.py
|
Python
|
mit
| 7,712
|
#
# pyrope module
#
|
rgravina/Pyrope
|
pyrope/tests/__init__.py
|
Python
|
mit
| 19
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/deed/pet_deed/shared_snorbal_deed.iff"
result.attribute_template_id = 2
result.stfName("pet_deed","snorbal")
#### BEGIN MODIFICATIONS ####
result.setStringAttribute("radial_filename", "radials/deed_datapad.py")
result.setStringAttribute("deed_pcd", "object/intangible/pet/shared_snorbal_hue.iff")
result.setStringAttribute("deed_mobile", "object/mobile/shared_snorbal_hue.iff")
#### END MODIFICATIONS ####
return result
|
anhstudios/swganh
|
data/scripts/templates/object/tangible/deed/pet_deed/shared_snorbal_deed.py
|
Python
|
mit
| 687
|
import platform
import icontroller
from constants import *
system = platform.system()
if system == "Windows":
from windows.control import *
def controllers():
devices = []
for i in range(4):
try:
devices.append(Controller(i))
except:
pass
return devices
|
michaelneu/threesixty
|
threesixty/__init__.py
|
Python
|
mit
| 319
|
import test_runner
import time
import math
import os
import numpy as np
from odrive.enums import *
from test_runner import *
teensy_code_template = """
void setup() {
analogWriteResolution(10);
// base clock of the PWM timer is 150MHz (on Teensy 4.0)
int freq = 150000000/1024; // ~146.5kHz PWM frequency
analogWriteFrequency({analog_out}, freq);
// for filtering, assuming we have a 150 Ohm resistor, we need a capacitor of
// 1/(150000000/1024)*2*pi/150 = 2.85954744646751e-07 F, that's ~0.33uF
//pinMode({lpf_enable}, OUTPUT);
}
int i = 0;
void loop() {
i++;
i = i & 0x3ff;
if (digitalRead({analog_reset}))
i = 0;
analogWrite({analog_out}, i);
delay(1);
}
"""
class TestAnalogInput():
"""
Verifies the Analog input.
The Teensy generates a PWM signal with a duty cycle that follows a sawtooth signal
with a period of 1 second. The signal should be connected to the ODrive's
analog input through a low-pass-filter.
___ ___
Teensy PWM ----|___|-------o---------|___|----- ODrive Analog Input
150 Ohm | 150 Ohm
===
| 330nF
|
GND
"""
def get_test_cases(self, testrig: TestRig):
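        # For each analog-capable ODrive GPIO (gpio3/gpio4), enumerate the wiring
        # alternatives: the host GPIO driving the enable pin of a low-pass filter on
        # that net, a Teensy pin on the same net to generate the PWM, and a host GPIO
        # wired to another Teensy pin acting as the "analog reset" line. All
        # combinations are handed to the runner as a single AnyTestCase.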
for odrive in testrig.get_components(ODriveComponent):
for odrive_gpio_num, odrive_gpio in [(2, odrive.gpio3), (3, odrive.gpio4)]:
alternatives = []
lpfs = [(gpio, TestFixture.all_of(tf1, tf2)) for lpf, tf1 in testrig.get_connected_components(odrive_gpio, LowPassFilterComponent)
for gpio, tf2 in testrig.get_connected_components(lpf.en, LinuxGpioComponent)]
for lpf, tf1 in lpfs:
for teensy_gpio, tf2 in testrig.get_connected_components(odrive_gpio, TeensyGpio):
teensy = teensy_gpio.parent
for gpio in teensy.gpios:
for local_gpio, tf3 in testrig.get_connected_components(gpio, LinuxGpioComponent):
alternatives.append([odrive, lpf, odrive_gpio_num, teensy, teensy_gpio, gpio, local_gpio, TestFixture.all_of(tf1, tf2, tf3)])
yield AnyTestCase(*alternatives)
def run_test(self, odrive: ODriveComponent, lpf_enable: LinuxGpioComponent, analog_in_num: int, teensy: TeensyComponent, teensy_analog_out: Component, teensy_analog_reset: Component, analog_reset_gpio: LinuxGpioComponent, logger: Logger):
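        # The {analog_out}/{analog_reset} placeholders in teensy_code_template are
        # substituted with str.replace() rather than str.format(), since the C code
        # itself contains curly braces.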
code = teensy_code_template.replace("{analog_out}", str(teensy_analog_out.num)).replace("{analog_reset}", str(teensy_analog_reset.num)) #.replace("lpf_enable", str(lpf_enable.num))
teensy.compile_and_program(code)
analog_reset_gpio.config(output=True)
analog_reset_gpio.write(True)
lpf_enable.config(output=True)
lpf_enable.write(False)
logger.debug("Set up analog input...")
min_val = -20000
max_val = 20000
period = 1.025 # period in teensy code is 1s, but due to tiny overhead it's a bit longer
analog_mapping = [
None, #odrive.handle.config.gpio1_analog_mapping,
None, #odrive.handle.config.gpio2_analog_mapping,
odrive.handle.config.gpio3_analog_mapping,
odrive.handle.config.gpio4_analog_mapping,
None, #odrive.handle.config.gpio5_analog_mapping,
][analog_in_num]
odrive.disable_mappings()
setattr(odrive.handle.config, 'gpio' + str(analog_in_num+1) + '_mode', GPIO_MODE_ANALOG_IN)
analog_mapping.endpoint = odrive.handle.axis0.controller._input_pos_property
analog_mapping.min = min_val
analog_mapping.max = max_val
odrive.save_config_and_reboot()
analog_reset_gpio.write(False)
data = record_log(lambda: [odrive.handle.axis0.controller.input_pos], duration=5.0)
        # Expect the mean error to be at most 3% (of the full scale).
# Expect there to be less than 2% outliers, where an outlier is anything that is more than 5% (of full scale) away from the expected value.
full_range = abs(max_val - min_val)
slope, offset, fitted_curve = fit_sawtooth(data, min_val, max_val, sigma=30)
test_assert_eq(slope, (max_val - min_val) / period, accuracy=0.005)
test_curve_fit(data, fitted_curve, max_mean_err = full_range * 0.03, inlier_range = full_range * 0.05, max_outliers = len(data[:,0]) * 0.02)
tests = [TestAnalogInput()]
if __name__ == '__main__':
test_runner.run(tests)
|
madcowswe/ODriveFirmware
|
tools/odrive/tests/analog_input_test.py
|
Python
|
mit
| 4,676
|
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.template import TemplateDoesNotExist
from django.template.backends.base import BaseEngine
from django.template.backends.django import Template as DjangoTemplate, reraise, get_installed_libraries
from django.template.engine import Engine
class Template(DjangoTemplate):
def __init__(self, template, backend):
template._attached_images = []
super().__init__(template, backend)
def attach_related(self, email_message):
assert isinstance(email_message, EmailMultiAlternatives), "Parameter must be of type EmailMultiAlternatives"
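        # Switching the container to multipart/related lets cid-referenced inline
        # images render inside the HTML body instead of appearing as attachments.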
email_message.mixed_subtype = 'related'
for attachment in self.template._attached_images:
email_message.attach(attachment)
class PostOfficeTemplates(BaseEngine):
"""
Customized Template Engine which keeps track on referenced images and stores them as attachments
to be used in multipart email messages.
"""
app_dirname = 'templates'
def __init__(self, params):
params = params.copy()
options = params.pop('OPTIONS').copy()
options.setdefault('autoescape', True)
options.setdefault('debug', settings.DEBUG)
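        # FILE_CHARSET is deprecated in recent Django versions; only honour it when
        # the project explicitly overrides it, otherwise fall back to utf-8.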
options.setdefault(
'file_charset',
settings.FILE_CHARSET
if settings.is_overridden('FILE_CHARSET')
else 'utf-8',
)
libraries = options.get('libraries', {})
options['libraries'] = self.get_templatetag_libraries(libraries)
super().__init__(params)
self.engine = Engine(self.dirs, self.app_dirs, **options)
def from_string(self, template_code):
return Template(self.engine.from_string(template_code), self)
def get_template(self, template_name):
try:
template = self.engine.get_template(template_name)
return Template(template, self)
except TemplateDoesNotExist as exc:
reraise(exc, self)
def get_templatetag_libraries(self, custom_libraries):
libraries = get_installed_libraries()
libraries.update(custom_libraries)
return libraries
|
ui/django-post_office
|
post_office/template/backends/post_office.py
|
Python
|
mit
| 2,177
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the pruning code.
WARNING:
This test uses 4GB of disk space.
This test takes 30 mins or more (up to 2 hours)
"""
from test_framework.test_framework import IoPTestFramework
from test_framework.util import *
import time
import os
MIN_BLOCKS_TO_KEEP = 288
# Rescans start at the earliest block up to 2 hours before a key timestamp, so
# the manual prune RPC avoids pruning blocks in the same window to be
# compatible with pruning based on key creation time.
TIMESTAMP_WINDOW = 2 * 60 * 60
def calc_usage(blockdir):
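    # Total size of the files directly inside blockdir (block and undo files), in MiB.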
return sum(os.path.getsize(blockdir+f) for f in os.listdir(blockdir) if os.path.isfile(blockdir+f)) / (1024. * 1024.)
class PruneTest(IoPTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 6
# Create nodes 0 and 1 to mine.
# Create node 2 to test pruning.
self.full_node_default_args = ["-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5", "-limitdescendantcount=100", "-limitdescendantsize=5000", "-limitancestorcount=100", "-limitancestorsize=5000" ]
# Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later)
# Create nodes 5 to test wallet in prune mode, but do not connect
self.extra_args = [self.full_node_default_args,
self.full_node_default_args,
["-maxreceivebuffer=20000", "-prune=550"],
["-maxreceivebuffer=20000", "-blockmaxsize=999000"],
["-maxreceivebuffer=20000", "-blockmaxsize=999000"],
["-prune=550"]]
def setup_network(self):
self.setup_nodes()
self.prunedir = self.options.tmpdir + "/node2/regtest/blocks/"
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[1], 2)
connect_nodes(self.nodes[2], 0)
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[0], 4)
sync_blocks(self.nodes[0:5])
def setup_nodes(self):
self.add_nodes(self.num_nodes, self.extra_args, timewait=900)
self.start_nodes()
def create_big_chain(self):
# Start by creating some coinbases we can spend later
self.nodes[1].generate(200)
sync_blocks(self.nodes[0:2])
self.nodes[0].generate(150)
# Then mine enough full blocks to create more than 550MiB of data
for i in range(645):
mine_large_block(self.nodes[0], self.utxo_cache_0)
sync_blocks(self.nodes[0:5])
def test_height_min(self):
if not os.path.isfile(self.prunedir+"blk00000.dat"):
raise AssertionError("blk00000.dat is missing, pruning too early")
self.log.info("Success")
self.log.info("Though we're already using more than 550MiB, current usage: %d" % calc_usage(self.prunedir))
self.log.info("Mining 25 more blocks should cause the first block file to be pruned")
# Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this
for i in range(25):
mine_large_block(self.nodes[0], self.utxo_cache_0)
waitstart = time.time()
while os.path.isfile(self.prunedir+"blk00000.dat"):
time.sleep(0.1)
if time.time() - waitstart > 30:
raise AssertionError("blk00000.dat not pruned when it should be")
self.log.info("Success")
usage = calc_usage(self.prunedir)
self.log.info("Usage should be below target: %d" % usage)
if (usage > 550):
raise AssertionError("Pruning target not being met")
def create_chain_with_staleblocks(self):
# Create stale blocks in manageable sized chunks
self.log.info("Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds")
for j in range(12):
# Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
            # Node 2 stays connected, so it hears about the stale blocks and then reorgs when node 0 reconnects
# Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine
self.stop_node(0)
self.start_node(0, extra_args=self.full_node_default_args)
# Mine 24 blocks in node 1
for i in range(24):
if j == 0:
mine_large_block(self.nodes[1], self.utxo_cache_1)
else:
# Add node1's wallet transactions back to the mempool, to
# avoid the mined blocks from being too small.
self.nodes[1].resendwallettransactions()
self.nodes[1].generate(1) #tx's already in mempool from previous disconnects
# Reorg back with 25 block chain from node 0
for i in range(25):
mine_large_block(self.nodes[0], self.utxo_cache_0)
# Create connections in the order so both nodes can see the reorg at the same time
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[2], 0)
sync_blocks(self.nodes[0:3])
self.log.info("Usage can be over target because of high stale rate: %d" % calc_usage(self.prunedir))
def reorg_test(self):
# Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
# This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
# Reboot node 1 to clear its mempool (hopefully make the invalidate faster)
# Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks)
self.stop_node(1)
self.start_node(1, extra_args=["-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"])
height = self.nodes[1].getblockcount()
self.log.info("Current block height: %d" % height)
invalidheight = height-287
badhash = self.nodes[1].getblockhash(invalidheight)
self.log.info("Invalidating block %s at height %d" % (badhash,invalidheight))
self.nodes[1].invalidateblock(badhash)
        # We've now switched to our previously mined 24-block fork on node 1, but that's not what we want
# So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago)
mainchainhash = self.nodes[0].getblockhash(invalidheight - 1)
curhash = self.nodes[1].getblockhash(invalidheight - 1)
while curhash != mainchainhash:
self.nodes[1].invalidateblock(curhash)
curhash = self.nodes[1].getblockhash(invalidheight - 1)
assert(self.nodes[1].getblockcount() == invalidheight - 1)
self.log.info("New best height: %d" % self.nodes[1].getblockcount())
# Reboot node1 to clear those giant tx's from mempool
self.stop_node(1)
self.start_node(1, extra_args=["-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"])
self.log.info("Generating new longer chain of 300 more blocks")
self.nodes[1].generate(300)
self.log.info("Reconnect nodes")
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[2], 1)
sync_blocks(self.nodes[0:3], timeout=120)
self.log.info("Verify height on node 2: %d" % self.nodes[2].getblockcount())
self.log.info("Usage possibly still high bc of stale blocks in block files: %d" % calc_usage(self.prunedir))
self.log.info("Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)")
# Get node0's wallet transactions back in its mempool, to avoid the
# mined blocks from being too small.
self.nodes[0].resendwallettransactions()
for i in range(22):
# This can be slow, so do this in multiple RPC calls to avoid
# RPC timeouts.
self.nodes[0].generate(10) #node 0 has many large tx's in its mempool from the disconnects
sync_blocks(self.nodes[0:3], timeout=300)
usage = calc_usage(self.prunedir)
self.log.info("Usage should be below target: %d" % usage)
if (usage > 550):
raise AssertionError("Pruning target not being met")
return invalidheight,badhash
def reorg_back(self):
# Verify that a block on the old main chain fork has been pruned away
assert_raises_rpc_error(-1, "Block not available (pruned data)", self.nodes[2].getblock, self.forkhash)
self.log.info("Will need to redownload block %d" % self.forkheight)
# Verify that we have enough history to reorg back to the fork point
# Although this is more than 288 blocks, because this chain was written more recently
        # and only its other 299 small and 220 large blocks are in the block files after it,
        # it's expected to still be retained
self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight))
first_reorg_height = self.nodes[2].getblockcount()
curchainhash = self.nodes[2].getblockhash(self.mainchainheight)
self.nodes[2].invalidateblock(curchainhash)
goalbestheight = self.mainchainheight
goalbesthash = self.mainchainhash2
# As of 0.10 the current block download logic is not able to reorg to the original chain created in
        # create_chain_with_staleblocks because it doesn't know of any peer that's on that chain from which to
# redownload its missing blocks.
# Invalidate the reorg_test chain in node 0 as well, it can successfully switch to the original chain
# because it has all the block data.
# However it must mine enough blocks to have a more work chain than the reorg_test chain in order
# to trigger node 2's block download logic.
# At this point node 2 is within 288 blocks of the fork point so it will preserve its ability to reorg
if self.nodes[2].getblockcount() < self.mainchainheight:
blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
self.log.info("Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed: %d" % blocks_to_mine)
self.nodes[0].invalidateblock(curchainhash)
assert(self.nodes[0].getblockcount() == self.mainchainheight)
assert(self.nodes[0].getbestblockhash() == self.mainchainhash2)
goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1]
goalbestheight = first_reorg_height + 1
self.log.info("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload")
waitstart = time.time()
while self.nodes[2].getblockcount() < goalbestheight:
time.sleep(0.1)
if time.time() - waitstart > 900:
raise AssertionError("Node 2 didn't reorg to proper height")
assert(self.nodes[2].getbestblockhash() == goalbesthash)
# Verify we can now have the data for a block previously pruned
assert(self.nodes[2].getblock(self.forkhash)["height"] == self.forkheight)
def manual_test(self, node_number, use_timestamp):
# at this point, node has 995 blocks and has not yet run in prune mode
self.start_node(node_number)
node = self.nodes[node_number]
assert_equal(node.getblockcount(), 995)
assert_raises_rpc_error(-1, "not in prune mode", node.pruneblockchain, 500)
# now re-start in manual pruning mode
self.stop_node(node_number)
self.start_node(node_number, extra_args=["-prune=1"])
node = self.nodes[node_number]
assert_equal(node.getblockcount(), 995)
def height(index):
if use_timestamp:
return node.getblockheader(node.getblockhash(index))["time"] + TIMESTAMP_WINDOW
else:
return index
def prune(index, expected_ret=None):
ret = node.pruneblockchain(height(index))
# Check the return value. When use_timestamp is True, just check
# that the return value is less than or equal to the expected
# value, because when more than one block is generated per second,
# a timestamp will not be granular enough to uniquely identify an
# individual block.
if expected_ret is None:
expected_ret = index
if use_timestamp:
assert_greater_than(ret, 0)
assert_greater_than(expected_ret + 1, ret)
else:
assert_equal(ret, expected_ret)
def has_block(index):
return os.path.isfile(self.options.tmpdir + "/node{}/regtest/blocks/blk{:05}.dat".format(node_number, index))
# should not prune because chain tip of node 3 (995) < PruneAfterHeight (1000)
assert_raises_rpc_error(-1, "Blockchain is too short for pruning", node.pruneblockchain, height(500))
# mine 6 blocks so we are at height 1001 (i.e., above PruneAfterHeight)
node.generate(6)
assert_equal(node.getblockchaininfo()["blocks"], 1001)
# negative heights should raise an exception
assert_raises_rpc_error(-8, "Negative", node.pruneblockchain, -10)
# height=100 too low to prune first block file so this is a no-op
prune(100)
if not has_block(0):
raise AssertionError("blk00000.dat is missing when should still be there")
# Does nothing
node.pruneblockchain(height(0))
if not has_block(0):
raise AssertionError("blk00000.dat is missing when should still be there")
# height=500 should prune first file
prune(500)
if has_block(0):
raise AssertionError("blk00000.dat is still there, should be pruned by now")
if not has_block(1):
raise AssertionError("blk00001.dat is missing when should still be there")
# height=650 should prune second file
prune(650)
if has_block(1):
raise AssertionError("blk00001.dat is still there, should be pruned by now")
# height=1000 should not prune anything more, because tip-288 is in blk00002.dat.
prune(1000, 1001 - MIN_BLOCKS_TO_KEEP)
if not has_block(2):
            raise AssertionError("blk00002.dat is missing when should still be there")
# advance the tip so blk00002.dat and blk00003.dat can be pruned (the last 288 blocks should now be in blk00004.dat)
node.generate(288)
prune(1000)
if has_block(2):
raise AssertionError("blk00002.dat is still there, should be pruned by now")
if has_block(3):
raise AssertionError("blk00003.dat is still there, should be pruned by now")
# stop node, start back up with auto-prune at 550MB, make sure still runs
self.stop_node(node_number)
self.start_node(node_number, extra_args=["-prune=550"])
self.log.info("Success")
def wallet_test(self):
# check that the pruning node's wallet is still in good shape
self.log.info("Stop and start pruning node to trigger wallet rescan")
self.stop_node(2)
self.start_node(2, extra_args=["-prune=550"])
self.log.info("Success")
# check that wallet loads successfully when restarting a pruned node after IBD.
# this was reported to fail in #7494.
self.log.info("Syncing node 5 to test wallet")
connect_nodes(self.nodes[0], 5)
nds = [self.nodes[0], self.nodes[5]]
sync_blocks(nds, wait=5, timeout=300)
self.stop_node(5) #stop and start to trigger rescan
self.start_node(5, extra_args=["-prune=550"])
self.log.info("Success")
def run_test(self):
self.log.info("Warning! This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)")
self.log.info("Mining a big blockchain of 995 blocks")
# Determine default relay fee
self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
# Cache for utxos, as the listunspent may take a long time later in the test
self.utxo_cache_0 = []
self.utxo_cache_1 = []
self.create_big_chain()
# Chain diagram key:
# * blocks on main chain
# +,&,$,@ blocks on other forks
# X invalidated block
# N1 Node 1
#
# Start by mining a simple chain that all nodes have
# N0=N1=N2 **...*(995)
# stop manual-pruning node with 995 blocks
self.stop_node(3)
self.stop_node(4)
self.log.info("Check that we haven't started pruning yet because we're below PruneAfterHeight")
self.test_height_min()
# Extend this chain past the PruneAfterHeight
# N0=N1=N2 **...*(1020)
self.log.info("Check that we'll exceed disk space target if we have a very high stale block rate")
self.create_chain_with_staleblocks()
# Disconnect N0
# And mine a 24 block chain on N1 and a separate 25 block chain on N0
# N1=N2 **...*+...+(1044)
# N0 **...**...**(1045)
#
# reconnect nodes causing reorg on N1 and N2
# N1=N2 **...*(1020) *...**(1045)
# \
# +...+(1044)
#
# repeat this process until you have 12 stale forks hanging off the
# main chain on N1 and N2
# N0 *************************...***************************(1320)
#
# N1=N2 **...*(1020) *...**(1045) *.. ..**(1295) *...**(1320)
# \ \ \
# +...+(1044) &.. $...$(1319)
# Save some current chain state for later use
self.mainchainheight = self.nodes[2].getblockcount() #1320
self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight)
self.log.info("Check that we can survive a 288 block reorg still")
(self.forkheight,self.forkhash) = self.reorg_test() #(1033, )
# Now create a 288 block reorg by mining a longer chain on N1
# First disconnect N1
# Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain
# N1 **...*(1020) **...**(1032)X..
# \
# ++...+(1031)X..
#
# Now mine 300 more blocks on N1
# N1 **...*(1020) **...**(1032) @@...@(1332)
# \ \
# \ X...
# \ \
# ++...+(1031)X.. ..
#
# Reconnect nodes and mine 220 more blocks on N1
# N1 **...*(1020) **...**(1032) @@...@@@(1552)
# \ \
# \ X...
# \ \
# ++...+(1031)X.. ..
#
# N2 **...*(1020) **...**(1032) @@...@@@(1552)
# \ \
# \ *...**(1320)
# \ \
# ++...++(1044) ..
#
# N0 ********************(1032) @@...@@@(1552)
# \
# *...**(1320)
self.log.info("Test that we can rerequest a block we previously pruned if needed for a reorg")
self.reorg_back()
# Verify that N2 still has block 1033 on current chain (@), but not on main chain (*)
# Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to
# original main chain (*), but will require redownload of some blocks
# In order to have a peer we think we can download from, must also perform this invalidation
# on N0 and mine a new longest chain to trigger.
# Final result:
# N0 ********************(1032) **...****(1553)
# \
# X@...@@@(1552)
#
# N2 **...*(1020) **...**(1032) **...****(1553)
# \ \
# \ X@...@@@(1552)
# \
# +..
#
# N1 doesn't change because 1033 on main chain (*) is invalid
self.log.info("Test manual pruning with block indices")
self.manual_test(3, use_timestamp=False)
self.log.info("Test manual pruning with timestamps")
self.manual_test(4, use_timestamp=True)
self.log.info("Test wallet re-scan")
self.wallet_test()
self.log.info("Done")
if __name__ == '__main__':
PruneTest().main()
|
Anfauglith/iop-hd
|
test/functional/pruning.py
|
Python
|
mit
| 21,272
|
'''
PIL: PIL image loader
'''
__all__ = ('ImageLoaderPIL', )
try:
from PIL import Image as PILImage
except:
import Image as PILImage
from kivy.logger import Logger
from kivy.core.image import ImageLoaderBase, ImageData, ImageLoader
class ImageLoaderPIL(ImageLoaderBase):
'''Image loader based on the PIL library.
.. versionadded:: 1.0.8
Support for GIF animation added.
    Gif animation has a lot of issues (transparency/color depths, etc.).
In order to keep it simple, what is implemented here is what is
natively supported by the PIL library.
As a general rule, try to use gifs that have no transparency.
    Gifs with transparency will work, but be prepared for some
artifacts until transparency support is improved.
'''
@staticmethod
def can_save():
return True
@staticmethod
def can_load_memory():
return True
@staticmethod
def extensions():
'''Return accepted extensions for this loader'''
# See http://www.pythonware.com/library/pil/handbook/index.htm
return ('bmp', 'bufr', 'cur', 'dcx', 'fits', 'fl', 'fpx', 'gbr',
'gd', 'gif', 'grib', 'hdf5', 'ico', 'im', 'imt', 'iptc',
'jpeg', 'jpg', 'jpe', 'mcidas', 'mic', 'mpeg', 'msp',
'pcd', 'pcx', 'pixar', 'png', 'ppm', 'psd', 'sgi',
'spider', 'tga', 'tiff', 'wal', 'wmf', 'xbm', 'xpm',
'xv')
def _img_correct(self, _img_tmp):
'''Convert image to the correct format and orientation.
'''
# image loader work only with rgb/rgba image
if _img_tmp.mode.lower() not in ('rgb', 'rgba'):
try:
imc = _img_tmp.convert('RGBA')
except:
Logger.warning(
'Image: Unable to convert image to rgba (was %s)' %
(_img_tmp.mode.lower()))
raise
_img_tmp = imc
return _img_tmp
def _img_read(self, im):
'''Read images from an animated file.
'''
im.seek(0)
# Read all images inside
try:
img_ol = None
while True:
img_tmp = im
img_tmp = self._img_correct(img_tmp)
if img_ol and (hasattr(im, 'dispose') and not im.dispose):
# paste new frame over old so as to handle
# transparency properly
img_ol.paste(img_tmp, (0, 0), img_tmp)
img_tmp = img_ol
img_ol = img_tmp
yield ImageData(img_tmp.size[0], img_tmp.size[1],
img_tmp.mode.lower(), img_tmp.tobytes())
im.seek(im.tell() + 1)
except EOFError:
pass
def load(self, filename):
try:
im = PILImage.open(filename)
except:
Logger.warning('Image: Unable to load image <%s>' % filename)
raise
# update internals
if not self._inline:
self.filename = filename
# returns an array of type ImageData len 1 if not a sequence image
return list(self._img_read(im))
@staticmethod
def save(filename, width, height, fmt, pixels, flipped=False):
image = PILImage.fromstring(fmt.upper(), (width, height), pixels)
if flipped:
image = image.transpose(PILImage.FLIP_TOP_BOTTOM)
image.save(filename)
return True
# register
ImageLoader.register(ImageLoaderPIL)
|
darkopevec/kivy
|
kivy/core/image/img_pil.py
|
Python
|
mit
| 3,534
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/component/shared_barrel_blaster.iff"
result.attribute_template_id = -1
result.stfName("component_name","barrel_blaster")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
obi-two/Rebelion
|
data/scripts/templates/object/tangible/component/shared_barrel_blaster.py
|
Python
|
mit
| 457
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-06-02 18:38
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('polls', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Vote',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Question')),
('selected_choice', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Choice')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
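        # Enforce at the database level that each user can vote at most once per question.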
migrations.AlterUniqueTogether(
name='vote',
unique_together=set([('question', 'user')]),
),
]
|
ncrmro/reango
|
server/polls/migrations/0002_auto_20170602_1838.py
|
Python
|
mit
| 1,124
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
__all__ = [
"__title__",
"__summary__",
"__uri__",
"__version__",
"__author__",
"__email__",
"__license__",
"__copyright__",
]
__title__ = "packaging"
__summary__ = "Core utilities for Python packages"
__uri__ = "https://github.com/pypa/packaging"
__version__ = "20.3"
__author__ = "Donald Stufft and individual contributors"
__email__ = "donald@stufft.io"
__license__ = "BSD or Apache License, Version 2.0"
__copyright__ = "Copyright 2014-2019 %s" % __author__
|
davidharvey1986/pyRRG
|
unittests/bugFixPyRRG/lib/python3.7/site-packages/pip/_vendor/packaging/__about__.py
|
Python
|
mit
| 744
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from django.db.models import Count
import json
def maintainers_from_property(apps, schema_editor):
# We can't import the models directly as they may be a newer
# version than this migration expects. We use the historical version.
Project = apps.get_model("api", "Project")
ProjectProperty = apps.get_model("api", "ProjectProperty")
User = apps.get_model("auth", "User")
projects = Project.objects.filter(projectproperty__name="maintainers")
for p in projects:
p.maintainers.clear()
pp = p.projectproperty_set.filter(name="maintainers")[0]
# NOTE: this will fail if the property is a blob
maintainers = json.loads(pp.value)
users = User.objects.filter(username__in=maintainers)
p.maintainers.add(*users)
ProjectProperty.objects.filter(name="maintainers").delete()
def maintainers_to_property(apps, schema_editor):
# We can't import the models directly as they may be a newer
# version than this migration expects. We use the historical version.
Project = apps.get_model("api", "Project")
ProjectProperty = apps.get_model("api", "ProjectProperty")
projects = Project.objects.annotate(maintainer_count=Count("maintainers")).filter(
maintainer_count__gt=0
)
for p in projects:
maintainers = [u.username for u in p.maintainers.all()]
pp = ProjectProperty(
project=p, name="maintainers", value=json.dumps(maintainers), blob=False
)
pp.save()
p.maintainers.clear()
class Migration(migrations.Migration):
dependencies = [("api", "0024_project_maintainers")]
operations = [
migrations.RunPython(
maintainers_from_property, reverse_code=maintainers_to_property
)
]
|
patchew-project/patchew
|
api/migrations/0025_populate_project_maintainers.py
|
Python
|
mit
| 1,869
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Report generator module based on **Jinja2**
"""
import codecs
import datetime as dt
from os import path
import six
import tqdm
import jinja2
from t4mon import gen_plot, arguments
from matplotlib import pyplot as plt
from t4mon.logger import init_logger
# from ast import literal_eval # TODO: is literal_eval working in Linux?
class Report(object):
"""Generate an HTML report, drawing all the items defined in a
``graphs_definition_file``.
Arguments:
        container (t4mon.Orchestrator):
            Object whose collected data, logs and settings are passed
            transparently to the report.
system (str):
Identifier of the system for which the report will be
generated.
It must be a valid identifier present in ``container.data``,
more specifically matching one of
``container.data.index.levels[-1]``.
logger (Optional[logging.Logger]):
logging object optionally passed directly
Note:
Attributes in container are passed transparently to Report
Attributes:
data (pandas.DataFrame):
MultiIndex dataframe that will be used as data source
settings_file (str):
Settings filename where ``graphs_definition_file`` and
``html_template`` are defined
logs (dict):
log output (value) corresponding for each system (key)
date_time (str):
collection timestamp in the format ``%d/%m/%Y %H:%M:%S``
Note:
**Graphs definition file format** ::
###################################################################
# #
# Syntax (all lines starting with # will be treated as comments): #
# var_names;title;plot_options #
# #
# Where: #
# var_names: list of regular expressions matching column names #
# separated with commas #
# title: string containing graph's title #
# plot_option: [optional] comma-separated options passed #
# transparently to matplotlib #
###################################################################
# This is just a comment. No inline comments allowed.
message_buffered;Test 1
successful_FDA;Test 2 (percentage);ylim=(0.0,100.0),linewidth=2
"""
def __init__(self, container, system, logger=None):
self.system = system
# Transparently pass all container items
for item in container.__dict__:
setattr(self, item, getattr(container, item))
if 'loglevel' not in self.__dict__:
self.loglevel = logger.DEFAULT_LOGLEVEL
self.logger = logger or init_logger(self.loglevel)
current_date = dt.datetime.strptime(self.date_time,
"%d/%m/%Y %H:%M:%S")
self.year = current_date.year
# populate self.html_template and self.graphs_definition_file
conf = arguments.read_config(self.settings_file)
for item in ['html_template', 'graphs_definition_file']:
setattr(self,
item,
arguments.get_absolute_path(conf.get('MISC', item),
self.settings_file))
def render(self):
"""
Create the Jinja2 environment.
Notice the use of `trim_blocks`, which greatly helps control
whitespace.
"""
try:
assert not self.data.empty # Check that data isn't empty
assert self.system # Check a system was specified
env_dir = path.dirname(path.abspath(self.html_template))
j2_env = jinja2.Environment(
loader=jinja2.FileSystemLoader(env_dir),
trim_blocks=True
)
j2_tpl = j2_env.get_template(path.basename(self.html_template))
j2_tpl.globals['render_graphs'] = self.render_graphs
self.logger.info('{0} | Generating graphics and rendering report '
'(may take a while)'.format(self.system))
return j2_tpl.generate(data=self)
except AssertionError:
self.logger.error('{0} | No data, no report'.format(self.system)
if self.system
else 'Not a valid system, report skipped')
except IOError:
self.logger.error('Template file ({0}) not found.'
.format(self.html_template))
except jinja2.TemplateError as msg:
self.logger.error('{0} | Error in html template ({1}): {2}'
.format(self.system,
self.html_template,
repr(msg)))
# Stop the generator in case of exception
raise StopIteration
def render_graphs(self):
""" Produce base64 encoded graphs for the selected system
(``self.system``).
Yield:
tuple: (``graph_title``, ``graph_encoded_in_b64``)
"""
try:
progressbar_prefix = 'Rendering report for {}'.format(self.system)
with open(self.graphs_definition_file, 'r') as graphs_txt:
graphs_txt_contents = graphs_txt.readlines()
for line in tqdm.tqdm(graphs_txt_contents,
leave=True,
desc=progressbar_prefix,
unit='Graphs'):
line = line.strip()
if not len(line) or line[0] == '#':
continue
info = line.split(';')
# info[0] contains a comma-separated list of parameters to
# be drawn
# info[1] contains the title
# info[2] contains the plot options
if len(info) == 1:
self.logger.warning('Bad format in current line: '
'"{0}"...'.format(line[1:20]))
continue
try:
optional_kwargs = eval(
"dict({0})".format(info[2])
) if len(info) == 3 else {'ylim': 0.0}
except ValueError:
optional_kwargs = {'ylim': 0.0}
self.logger.debug('{0} | Plotting {1}'.format(self.system,
info[0]))
# Generate figure and encode to base64
plot_axis = gen_plot.plot_var(
self.data,
*[x.strip() for x in info[0].split(',')],
system=self.system,
logger=self.logger,
**optional_kwargs
)
_b64figure = gen_plot.to_base64(plot_axis)
plt.close(plot_axis.get_figure()) # close figure
if _b64figure:
yield (six.u(info[1].strip()),
codecs.decode(_b64figure, 'utf-8'))
except IOError:
self.logger.error('Graphs definition file not found: {0}'
.format(self.graphs_definition_file))
except Exception as unexpected:
self.logger.error('{0} | Unexpected exception found while '
'creating graphs: {1}'.format(self.system,
repr(unexpected)))
yield None
def gen_report(container, system):
"""
Convenience function for calling :meth:`.Report.render()` method
Arguments:
container (t4mon.Orchestrator):
object containing all the information required to render the report
system (str):
system for which the report will be generated
    Return:
        generator: HTML fragments produced by :meth:`.Report.render()`
"""
_report = Report(container, system)
return _report.render()
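# --- Illustrative usage sketch (added; not part of the original module) ---
# ``orchestrator`` is a hypothetical, already-populated t4mon.Orchestrator
# instance and 'SYSTEM1' a system id present in its data. ``Report.render``
# returns the output of ``Template.generate``, so the HTML report can be
# streamed to a file chunk by chunk:
#
#     with codecs.open('report_SYSTEM1.html', 'w', encoding='utf-8') as out:
#         for fragment in gen_report(orchestrator, 'SYSTEM1'):
#             out.write(fragment)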
|
fernandezcuesta/pySMSCMon
|
t4mon/gen_report.py
|
Python
|
mit
| 8,298
|
# -*- coding: utf-8 -*-
from collections import Counter
from threading import Lock, Thread
from time import sleep
import sys
if sys.version[0] == '2':
from Queue import Queue
else:
from queue import Queue
total_workers = 3 # Maximum number of threads chosen arbitrarily
class LetterCounter:
def __init__(self):
self.lock = Lock()
self.value = Counter()
def add_counter(self, counter_to_add):
self.lock.acquire()
try:
self.value = self.value + counter_to_add
finally:
self.lock.release()
def count_letters(queue_of_texts, letter_to_frequency, worker_id):
while not queue_of_texts.empty():
sleep(worker_id + 1)
line_input = queue_of_texts.get()
if line_input is not None:
letters_in_line = Counter([x for x in line_input.lower() if
x.isalpha()])
letter_to_frequency.add_counter(letters_in_line)
queue_of_texts.task_done()
if line_input is None:
break
def calculate(list_of_texts):
queue_of_texts = Queue()
[queue_of_texts.put(line) for line in list_of_texts]
letter_to_frequency = LetterCounter()
threads = []
for i in range(total_workers):
worker = Thread(target=count_letters, args=(queue_of_texts,
letter_to_frequency, i))
worker.start()
threads.append(worker)
queue_of_texts.join()
for i in range(total_workers):
queue_of_texts.put(None)
for t in threads:
t.join()
return letter_to_frequency.value
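# Illustrative usage (added; not part of the original exercise solution).
# ``calculate`` accepts any iterable of text lines and returns a
# collections.Counter with the combined letter frequencies.
if __name__ == '__main__':
    sample = ["The quick brown fox", "Jumps over the lazy dog"]
    print(calculate(sample))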
|
behrtam/xpython
|
exercises/parallel-letter-frequency/example.py
|
Python
|
mit
| 1,637
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
NumberInputPanel.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt import uic
from qgis.PyQt.QtCore import pyqtSignal
from qgis.PyQt.QtWidgets import QDialog
from math import log10, floor
from qgis.core import (QgsDataSourceURI,
QgsCredentials,
QgsExpressionContext,
QgsExpressionContextUtils,
QgsExpression,
QgsRasterLayer,
QgsExpressionContextScope)
from qgis.gui import QgsEncodingFileDialog, QgsExpressionBuilderDialog
from qgis.utils import iface
from processing.tools import dataobjects
pluginPath = os.path.split(os.path.dirname(__file__))[0]
WIDGET, BASE = uic.loadUiType(
os.path.join(pluginPath, 'ui', 'widgetNumberSelector.ui'))
class NumberInputPanel(BASE, WIDGET):
hasChanged = pyqtSignal()
def __init__(self, number, minimum, maximum, isInteger):
super(NumberInputPanel, self).__init__(None)
self.setupUi(self)
self.spnValue.setExpressionsEnabled(True)
self.isInteger = isInteger
if self.isInteger:
self.spnValue.setDecimals(0)
else:
#Guess reasonable step value
if (maximum == 0 or maximum) and (minimum == 0 or minimum):
self.spnValue.setSingleStep(self.calculateStep(minimum, maximum))
if maximum == 0 or maximum:
self.spnValue.setMaximum(maximum)
else:
self.spnValue.setMaximum(99999999)
if minimum == 0 or minimum:
self.spnValue.setMinimum(minimum)
else:
self.spnValue.setMinimum(-99999999)
#Set default value
if number == 0 or number:
self.spnValue.setValue(float(number))
self.spnValue.setClearValue(float(number))
elif minimum == 0 or minimum:
self.spnValue.setValue(float(minimum))
self.spnValue.setClearValue(float(minimum))
else:
self.spnValue.setValue(0)
self.spnValue.setClearValue(0)
self.btnCalc.setFixedHeight(self.spnValue.height())
self.btnCalc.clicked.connect(self.showExpressionsBuilder)
self.spnValue.valueChanged.connect(lambda: self.hasChanged.emit())
def showExpressionsBuilder(self):
context = self.expressionContext()
dlg = QgsExpressionBuilderDialog(None, self.spnValue.text(), self, 'generic', context)
dlg.setWindowTitle(self.tr('Expression based input'))
if dlg.exec_() == QDialog.Accepted:
exp = QgsExpression(dlg.expressionText())
if not exp.hasParserError():
result = exp.evaluate(context)
if not exp.hasEvalError():
try:
self.spnValue.setValue(float(result))
except:
pass
def expressionContext(self):
context = QgsExpressionContext()
context.appendScope(QgsExpressionContextUtils.globalScope())
context.appendScope(QgsExpressionContextUtils.projectScope())
processingScope = QgsExpressionContextScope()
layers = dataobjects.getAllLayers()
for layer in layers:
name = layer.name()
processingScope.setVariable('%s_minx' % name, layer.extent().xMinimum())
processingScope.setVariable('%s_miny' % name, layer.extent().yMinimum())
processingScope.setVariable('%s_maxx' % name, layer.extent().xMaximum())
processingScope.setVariable('%s_maxy' % name, layer.extent().yMaximum())
if isinstance(layer, QgsRasterLayer):
cellsize = (layer.extent().xMaximum()
- layer.extent().xMinimum()) / layer.width()
processingScope.setVariable('%s_cellsize' % name, cellsize)
layers = dataobjects.getRasterLayers()
for layer in layers:
for i in range(layer.bandCount()):
stats = layer.dataProvider().bandStatistics(i + 1)
processingScope.setVariable('%s_band%i_avg' % (name, i + 1), stats.mean)
processingScope.setVariable('%s_band%i_stddev' % (name, i + 1), stats.stdDev)
processingScope.setVariable('%s_band%i_min' % (name, i + 1), stats.minimumValue)
processingScope.setVariable('%s_band%i_max' % (name, i + 1), stats.maximumValue)
extent = iface.mapCanvas().extent()
processingScope.setVariable('canvasextent_minx', extent.xMinimum())
processingScope.setVariable('canvasextent_miny', extent.yMinimum())
processingScope.setVariable('canvasextent_maxx', extent.xMaximum())
processingScope.setVariable('canvasextent_maxy', extent.yMaximum())
extent = iface.mapCanvas().fullExtent()
processingScope.setVariable('fullextent_minx', extent.xMinimum())
processingScope.setVariable('fullextent_miny', extent.yMinimum())
processingScope.setVariable('fullextent_maxx', extent.xMaximum())
processingScope.setVariable('fullextent_maxy', extent.yMaximum())
context.appendScope(processingScope)
return context
def getValue(self):
return self.spnValue.value()
def calculateStep(self, minimum, maximum):
valueRange = maximum - minimum
if valueRange <= 1.0:
step = valueRange / 10.0
# round to 1 significant figure
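            # Worked example (added for clarity): a 0.0-0.37 range gives a
            # raw step of 0.037; floor(log10(0.037)) is -2, so the step is
            # rounded to two decimals, i.e. 0.04.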
return round(step, -int(floor(log10(step))))
else:
return 1.0
|
AsgerPetersen/QGIS
|
python/plugins/processing/gui/NumberInputPanel.py
|
Python
|
gpl-2.0
| 6,584
|
import argparse
import asyncio
import logging
import sesamecontract.util.logging as logutil
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class SesameServerProtocol(asyncio.Protocol):
def connection_made(self, transport):
self.transport = transport
self.peer = transport.get_extra_info("peername")
logger.debug("Received connection from {}:{}".format(self.peer[0], self.peer[1]))
def data_received(self, data):
self.transport.write(data)
def connection_lost(self, exc):
logger.debug("Lost connection to {}:{}".format(self.peer[0], self.peer[1]))
def parse_args():
parser = argparse.ArgumentParser()
return parser.parse_args()
def main():
logutil.set_stream_handler(logger)
args = parse_args()
loop = asyncio.get_event_loop()
server = loop.create_server(SesameServerProtocol, port=4499)
loop.run_until_complete(server)
logger.debug("Starting server")
loop.run_forever()
if __name__ == '__main__':
main()
|
EaterOA/sesamecontract
|
sesamecontract/sesameserver.py
|
Python
|
gpl-2.0
| 1,025
|
"""
Contains unit tests for the DB models
"""
__author__ = 'Michal Kononenko'
|
MichalKononenko/OmicronAPI
|
tests/unit/test_database/test_models/__init__.py
|
Python
|
gpl-2.0
| 78
|
"""Common tests for infrastructure provider"""
import pytest
from cfme.infrastructure.provider.openstack_infra import OpenstackInfraProvider
from utils import testgen
from utils.appliance.implementations.ui import navigate_to
pytest_generate_tests = testgen.generate([OpenstackInfraProvider],
scope='module')
pytestmark = [pytest.mark.usefixtures("setup_provider_modscope")]
def test_api_port(provider):
port = provider.get_yaml_data()['endpoints']['default']['api_port']
assert provider.summary.properties.api_port.value == port, 'Invalid API Port'
def test_credentials_quads(provider):
view = navigate_to(provider, 'All')
prov_item = view.entities.get_entity(by_name=provider.name)
assert prov_item.data.get('creds') and 'checkmark' in prov_item.data['creds']
def test_delete_provider(provider):
provider.delete(cancel=False)
provider.wait_for_delete()
view = navigate_to(provider, 'All')
assert provider.name not in [item.name for item in view.entities.get_all(surf_pages=True)]
|
dajohnso/cfme_tests
|
cfme/tests/openstack/infrastructure/test_provider.py
|
Python
|
gpl-2.0
| 1,069
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" WebMessage module, messaging system"""
__revision__ = "$Id$"
import invenio.webmessage_dblayer as db
from invenio.webmessage_config import CFG_WEBMESSAGE_STATUS_CODE, \
CFG_WEBMESSAGE_RESULTS_FIELD, \
CFG_WEBMESSAGE_SEPARATOR, \
CFG_WEBMESSAGE_ROLES_WITHOUT_QUOTA, \
InvenioWebMessageError
from invenio.config import CFG_SITE_LANG, \
CFG_WEBMESSAGE_MAX_SIZE_OF_MESSAGE
from invenio.messages import gettext_set_language
from invenio.dateutils import datetext_default, get_datetext
from invenio.htmlutils import escape_html
from invenio.webuser import list_users_in_roles
try:
import invenio.template
webmessage_templates = invenio.template.load('webmessage')
except:
pass
from invenio.errorlib import register_exception
def perform_request_display_msg(uid, msgid, ln=CFG_SITE_LANG):
"""
Displays a specific message
@param uid: user id
@param msgid: message id
@return: body
"""
_ = gettext_set_language(ln)
body = ""
if (db.check_user_owns_message(uid, msgid) == 0):
# The user doesn't own this message
try:
raise InvenioWebMessageError(_('Sorry, this message is not in your mailbox.'))
except InvenioWebMessageError, exc:
register_exception()
body = webmessage_templates.tmpl_error(exc.message, ln)
return body
else:
(msg_id,
msg_from_id, msg_from_nickname,
msg_sent_to, msg_sent_to_group,
msg_subject, msg_body,
msg_sent_date, msg_received_date,
msg_status) = db.get_message(uid, msgid)
if (msg_id == ""):
# The message exists in table user_msgMESSAGE
# but not in table msgMESSAGE => table inconsistency
try:
raise InvenioWebMessageError(_('This message does not exist.'))
except InvenioWebMessageError, exc:
register_exception()
body = webmessage_templates.tmpl_error(exc.message, ln)
return body
else:
if (msg_status == CFG_WEBMESSAGE_STATUS_CODE['NEW']):
db.set_message_status(uid, msgid,
CFG_WEBMESSAGE_STATUS_CODE['READ'])
body = webmessage_templates.tmpl_display_msg(
msg_id,
msg_from_id,
msg_from_nickname,
msg_sent_to,
msg_sent_to_group,
msg_subject,
msg_body,
msg_sent_date,
msg_received_date,
ln)
return body
def perform_request_display(uid, warnings=[], infos=[], ln=CFG_SITE_LANG):
"""
Displays the user's Inbox
@param uid: user id
@return: body with warnings
"""
body = ""
rows = []
rows = db.get_all_messages_for_user(uid)
nb_messages = db.count_nb_messages(uid)
no_quota_users = list_users_in_roles(CFG_WEBMESSAGE_ROLES_WITHOUT_QUOTA)
no_quota = False
if uid in no_quota_users:
no_quota = True
body = webmessage_templates.tmpl_display_inbox(messages=rows,
infos=infos,
warnings=warnings,
nb_messages=nb_messages,
no_quota=no_quota,
ln=ln)
return body
def perform_request_delete_msg(uid, msgid, ln=CFG_SITE_LANG):
"""
Delete a given message from user inbox
@param uid: user id (int)
@param msgid: message id (int)
@param ln: language
@return: body with warnings
"""
_ = gettext_set_language(ln)
warnings = []
infos = []
body = ""
if (db.check_user_owns_message(uid, msgid) == 0):
# The user doesn't own this message
try:
raise InvenioWebMessageError(_('Sorry, this message is not in your mailbox.'))
except InvenioWebMessageError, exc:
register_exception()
body = webmessage_templates.tmpl_error(exc.message, ln)
return body
else:
if (db.delete_message_from_user_inbox(uid, msgid) == 0):
warnings.append(_("The message could not be deleted."))
else:
infos.append(_("The message was successfully deleted."))
return perform_request_display(uid, warnings, infos, ln)
def perform_request_delete_all(uid, confirmed=False, ln=CFG_SITE_LANG):
"""
Delete every message for a given user
@param uid: user id (int)
@param confirmed: 0 will produce a confirmation message
@param ln: language
@return: body with warnings
"""
infos = []
warnings = []
_ = gettext_set_language(ln)
if confirmed:
db.delete_all_messages(uid)
infos = [_("Your mailbox has been emptied.")]
return perform_request_display(uid, warnings, infos, ln)
else:
body = webmessage_templates.tmpl_confirm_delete(ln)
return body
def perform_request_write(uid,
msg_reply_id="",
msg_to="",
msg_to_group="",
msg_subject="",
msg_body="",
ln=CFG_SITE_LANG):
"""
Display a write a message page.
@param uid: user id.
@type uid: int
@param msg_reply_id: if this message is a reply to another, other's ID.
@type msg_reply_id: int
@param msg_to: comma separated usernames.
@type msg_to: string
@param msg_to_group: comma separated groupnames.
@type msg_to_group: string
@param msg_subject: message subject.
@type msg_subject: string
@param msg_body: message body.
@type msg_body: string
@param ln: language.
@type ln: string
@return: body with warnings.
"""
warnings = []
body = ""
_ = gettext_set_language(ln)
msg_from_nickname = ""
msg_id = 0
if (msg_reply_id):
if (db.check_user_owns_message(uid, msg_reply_id) == 0):
# The user doesn't own this message
try:
raise InvenioWebMessageError(_('Sorry, this message is not in your mailbox.'))
except InvenioWebMessageError, exc:
register_exception()
body = webmessage_templates.tmpl_error(exc.message, ln)
return body
else:
# dummy == variable name to make pylint and pychecker happy!
(msg_id,
msg_from_id, msg_from_nickname,
dummy, dummy,
msg_subject, msg_body,
dummy, dummy, dummy) = db.get_message(uid, msg_reply_id)
if (msg_id == ""):
# The message exists in table user_msgMESSAGE
# but not in table msgMESSAGE => table inconsistency
try:
raise InvenioWebMessageError(_('This message does not exist.'))
except InvenioWebMessageError, exc:
register_exception()
body = webmessage_templates.tmpl_error(exc.message, ln)
return body
else:
msg_to = msg_from_nickname or str(msg_from_id)
body = webmessage_templates.tmpl_write(msg_to=msg_to,
msg_to_group=msg_to_group,
msg_id=msg_id,
msg_subject=msg_subject,
msg_body=msg_body,
warnings=[],
ln=ln)
return body
def perform_request_write_with_search(
uid,
msg_to_user="",
msg_to_group="",
msg_subject="",
msg_body="",
msg_send_year=0,
msg_send_month=0,
msg_send_day=0,
names_selected=[],
search_pattern="",
results_field=CFG_WEBMESSAGE_RESULTS_FIELD['NONE'],
add_values=0,
ln=CFG_SITE_LANG):
"""
Display a write message page, with prefilled values
    @param msg_to_user: comma separated usernames (str)
    @param msg_to_group: comma separated groupnames (str)
    @param msg_subject: message subject (str)
    @param msg_body: message body (str)
    @param msg_send_year: year to send this message on (int)
    @param msg_send_month: month to send this message on (int)
    @param msg_send_day: day to send this message on (int)
    @param names_selected: list of user or group names ['str'] already
                           selected in the search results
    @param search_pattern: pattern used to search for users or groups (str)
    @param results_field: which kind of search results to display, one of
                          CFG_WEBMESSAGE_RESULTS_FIELD
    @param add_values: if 1, names_selected are added to the corresponding
                       recipients field
    @param ln: language
    @return: body with warnings
"""
warnings = []
search_results_list = []
def cat_names(name1, name2):
""" name1, name2 => 'name1, name2' """
return name1 + CFG_WEBMESSAGE_SEPARATOR + " " + name2
if results_field == CFG_WEBMESSAGE_RESULTS_FIELD['USER']:
if add_values and len(names_selected):
usernames_to_add = reduce(cat_names, names_selected)
if msg_to_user:
msg_to_user = cat_names(msg_to_user, usernames_to_add)
else:
msg_to_user = usernames_to_add
users_found = db.get_nicknames_like(search_pattern)
if users_found:
for user_name in users_found:
search_results_list.append((user_name[0],
user_name[0] in names_selected))
elif results_field == CFG_WEBMESSAGE_RESULTS_FIELD['GROUP']:
if add_values and len(names_selected):
groupnames_to_add = reduce(cat_names, names_selected)
if msg_to_group:
msg_to_group = cat_names(msg_to_group, groupnames_to_add)
else:
msg_to_group = groupnames_to_add
groups_dict = db.get_groupnames_like(uid, search_pattern)
groups_found = groups_dict.values()
if groups_found:
for group_name in groups_found:
search_results_list.append((group_name,
group_name in names_selected))
body = webmessage_templates.tmpl_write(
msg_to=msg_to_user,
msg_to_group=msg_to_group,
msg_subject=msg_subject,
msg_body=msg_body,
msg_send_year=msg_send_year,
msg_send_month=msg_send_month,
msg_send_day=msg_send_day,
warnings=warnings,
search_results_list=search_results_list,
search_pattern=search_pattern,
results_field=results_field,
ln=ln)
return body
def perform_request_send(uid,
msg_to_user="",
msg_to_group="",
msg_subject="",
msg_body="",
msg_send_year=0,
msg_send_month=0,
msg_send_day=0,
ln=CFG_SITE_LANG,
use_email_address = 0):
"""
    Send a message. If sending is not possible, return warnings to the write page.
    @param uid: id of the sending user (int)
    @param msg_to_user: comma separated usernames (recipients) (str)
    @param msg_to_group: comma separated groupnames (recipients) (str)
@param msg_subject: subject of message (str)
@param msg_body: body of message (str)
@param msg_send_year: send this message on year x (int)
@param msg_send_month: send this message on month y (int)
@param msg_send_day: send this message on day z (int)
@param ln: language
@return: (body with warnings, title, navtrail)
"""
_ = gettext_set_language(ln)
def strip_spaces(text):
"""suppress spaces before and after x (str)"""
return text.strip()
# wash user input
users_to = map(strip_spaces, msg_to_user.split(CFG_WEBMESSAGE_SEPARATOR))
groups_to = map(strip_spaces, msg_to_group.split(CFG_WEBMESSAGE_SEPARATOR))
if users_to == ['']:
users_to = []
if groups_to == ['']:
groups_to = []
warnings = []
infos = []
problem = None
users_to_str = CFG_WEBMESSAGE_SEPARATOR.join(users_to)
groups_to_str = CFG_WEBMESSAGE_SEPARATOR.join(groups_to)
send_on_date = get_datetext(msg_send_year, msg_send_month, msg_send_day)
if (msg_send_year == msg_send_month == msg_send_day == 0):
status = CFG_WEBMESSAGE_STATUS_CODE['NEW']
else:
status = CFG_WEBMESSAGE_STATUS_CODE['REMINDER']
if send_on_date == datetext_default:
warning = \
_("The chosen date (%(x_year)i/%(x_month)i/%(x_day)i) is invalid.")
warning = warning % {'x_year': msg_send_year,
'x_month': msg_send_month,
'x_day': msg_send_day}
warnings.append(warning)
problem = True
if not(users_to_str or groups_to_str):
# <=> not(users_to_str) AND not(groups_to_str)
warnings.append(_("Please enter a user name or a group name."))
problem = True
if len(msg_body) > CFG_WEBMESSAGE_MAX_SIZE_OF_MESSAGE:
warnings.append(_("Your message is too long, please shorten it. Maximum size allowed is %i characters.") % \
(CFG_WEBMESSAGE_MAX_SIZE_OF_MESSAGE,))
problem = True
if use_email_address == 0:
users_dict = db.get_uids_from_nicks(users_to)
users_to = users_dict.items() # users_to=[(nick, uid),(nick2, uid2)]
elif use_email_address == 1:
users_dict = db.get_uids_from_emails(users_to)
users_to = users_dict.items() # users_to=[(email, uid),(email2, uid2)]
groups_dict = db.get_gids_from_groupnames(groups_to)
groups_to = groups_dict.items()
gids_to = []
for (group_name, group_id) in groups_to:
if not(group_id):
warnings.append(_("Group %s does not exist.") % \
(escape_html(group_name)))
problem = 1
else:
gids_to.append(group_id)
# Get uids from gids
uids_from_group = db.get_uids_members_of_groups(gids_to)
# Add the original uids, and make sure there is no double values.
tmp_dict = {}
for uid_receiver in uids_from_group:
tmp_dict[uid_receiver] = None
for (user_nick, user_id) in users_to:
if user_id:
if user_id not in tmp_dict:
uids_from_group.append(user_id)
tmp_dict[user_id] = None
else:
if type(user_nick) == int or \
type(user_nick) == str and user_nick.isdigit():
user_nick = int(user_nick)
if db.user_exists(user_nick) and user_nick not in tmp_dict:
uids_from_group.append(user_nick)
tmp_dict[user_nick] = None
else:
warnings.append(_("User %s does not exist.")% \
(escape_html(user_nick)))
problem = True
if problem:
body = webmessage_templates.tmpl_write(msg_to=users_to_str,
msg_to_group=groups_to_str,
msg_subject=msg_subject,
msg_body=msg_body,
msg_send_year=msg_send_year,
msg_send_month=msg_send_month,
msg_send_day=msg_send_day,
warnings=warnings,
ln=ln)
title = _("Write a message")
navtrail = get_navtrail(ln, title)
return (body, title, navtrail)
else:
msg_id = db.create_message(uid,
users_to_str, groups_to_str,
msg_subject, msg_body,
send_on_date)
uid_problem = db.send_message(uids_from_group, msg_id, status)
if len(uid_problem) > 0:
usernames_problem_dict = db.get_nicks_from_uids(uid_problem)
usernames_problem = usernames_problem_dict.values()
def listing(name1, name2):
""" name1, name2 => 'name1, name2' """
return str(name1) + ", " + str(name2)
warning = _("Your message could not be sent to the following recipients as it would exceed their quotas:") + " "
warnings.append(warning + reduce(listing, usernames_problem))
if len(uids_from_group) != len(uid_problem):
infos.append(_("Your message has been sent."))
else:
db.check_if_need_to_delete_message_permanently([msg_id])
body = perform_request_display(uid, warnings,
infos, ln)
title = _("Your Messages")
return (body, title, get_navtrail(ln))
def account_new_mail(uid, ln=CFG_SITE_LANG):
"""
display new mail info for myaccount.py page.
@param uid: user id (int)
@param ln: language
@return: html body
"""
nb_new_mail = db.get_nb_new_messages_for_user(uid)
total_mail = db.get_nb_readable_messages_for_user(uid)
return webmessage_templates.tmpl_account_new_mail(nb_new_mail,
total_mail, ln)
def get_navtrail(ln=CFG_SITE_LANG, title=""):
"""
gets the navtrail for title...
@param title: title of the page
@param ln: language
@return: HTML output
"""
navtrail = webmessage_templates.tmpl_navtrail(ln, title)
return navtrail
|
mvesper/invenio
|
modules/webmessage/lib/webmessage.py
|
Python
|
gpl-2.0
| 19,737
|
#!/usr/bin/python -O
#PBS -N myjob
#PBS -l select=1:ncpus=9:mem=12GB
#PBS -l walltime=70:00:00
import subprocess
import os
import sys
def module_add(modulename):
p = subprocess.Popen("/usr/bin/modulecmd python add "+modulename, stdout=subprocess.PIPE, stderr=subprocess.PIPE,shell=True)
stdout,stderr = p.communicate()
exec stdout
module_add("bio-bwa/0.7.5a")
genomeFastaFiles = {"mm9":"/csc/rawdata/Cscbioinf/bioinfResources/mm9/mm9.fa"}
#genome = "mm9"
#fastq = "/csc/rawdata/Merkenshlager/131209_SN172_0451_BD26VHACXX_Merkenschlager/Unaligned/Sample_3InpVS/3InpVS_ACAGTG_L003_R1_001.fastq.gz"
#baseName = "/csc/rawdata/Dillon/DillonTest/test"
fastq = sys.argv[1]
baseName = sys.argv[2]
genome = sys.argv[3]
outputPath = sys.argv[4]
saiOut = os.path.join(outputPath,baseName+".sai")
trimmedFQOut = os.path.join(outputPath,baseName+"trimmed.fq.gz")
print os.environ["LOADEDMODULES"]
if not os.path.isfile(genomeFastaFiles[genome]) or not os.path.isfile(genomeFastaFiles[genome]+".bwt") or not os.path.isfile(genomeFastaFiles[genome]+".sa"):
if os.path.isfile(genomeFastaFiles[genome]):
print "Not all necessary index files found..indexing"
#p = subprocess.Popen(["/bin/bash",'-i',"-c","bwa index -a bwtsw "+genome], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#stdout,stderr = p.communicate()
else:
"Fasta file is not found"
elif os.path.isfile(genomeFastaFiles[genome]) and os.path.isfile(genomeFastaFiles[genome]+".bwt") and os.path.isfile(genomeFastaFiles[genome]+".sa"):
if not os.path.isfile(saiOut):
pairedAlignCMD1 = "zcat "+fastq+" | python /csc/rawdata/Cscbioinf/bioinfResources/trimFQ.py 50 | gzip - > "+trimmedFQOut
print pairedAlignCMD1
p = subprocess.Popen(["/bin/bash",'-i',"-c",pairedAlignCMD1], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout,stderr = p.communicate()
pairedAlignCMD2 = "bwa aln -t 8 "+genomeFastaFiles[genome]+" "+trimmedFQOut+" > "+saiOut
print pairedAlignCMD2
p = subprocess.Popen(["/bin/bash",'-i',"-c",pairedAlignCMD2], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout,stderr = p.communicate()
|
ThomasCarroll/mrcchip
|
pairedAlign.py
|
Python
|
gpl-2.0
| 2,281
|
# Clock Example by Ralph Glass
# http://ralph-glass.homepage.t-online.de/clock/readme.html
from gi.repository import PangoCairo
from gi.repository import GObject
from gi.repository import Gtk
import math
import time
from gramps.gen.constfunc import is_quartz
TEXT = 'cairo'
BORDER_WIDTH = 10
class ClockWidget(Gtk.DrawingArea):
def __init__(self):
Gtk.DrawingArea.__init__(self)
self.connect("draw", self.on_draw)
self.timer = GObject.timeout_add(1000, self.tick)
def on_draw(self, widget, cr):
layout = PangoCairo.create_layout(cr)
if is_quartz():
PangoCairo.context_set_resolution(layout.get_context(), 72)
layout.set_font_description(self.get_style().font_desc)
layout.set_markup(TEXT, -1)
fontw, fonth = layout.get_pixel_size()
xmin = fontw + BORDER_WIDTH
ymin = fonth + BORDER_WIDTH
self.set_size_request(xmin, ymin)
# time
hours = time.localtime().tm_hour
minutes = time.localtime().tm_min
secs = time.localtime().tm_sec
second_arc = (2*math.pi / 60) * secs
minute_arc = (2*math.pi / 60) * minutes
if hours > 12:
hours = hours - 12
hour_arc = (2*math.pi / 12) * hours + minute_arc / 12
# clock background
alloc = self.get_allocation()
x = alloc.x
y = alloc.y
w = alloc.width
h = alloc.height
cr.set_source_rgba(1, 0.2, 0.2, 0.6)
cr.arc(w/2, h/2, min(w,h)/2 - 8 , 0, 2 * 3.14)
cr.fill()
cr.stroke()
# center arc
cr.set_source_rgb(0, 0, 0)
cr.arc ( w/2, h/2, (min(w,h)/2 -20) / 5, 0, 2 * math.pi)
cr.fill()
cr.line_to(w/2,h/2)
cr.stroke()
# pointer hour
cr.set_source_rgba(0.5, 0.5, 0.5, 0.5)
cr.set_line_width ((min(w,h)/2 -20)/6 )
cr.move_to(w/2,h/2)
cr.line_to(w/2 + (min(w,h)/2 -20) * 0.6 * math.cos(hour_arc - math.pi/2),
h/2 + (min(w,h)/2 -20) * 0.6 * math.sin(hour_arc - math.pi/2))
cr.stroke()
# pointer minute
cr.set_source_rgba(0.5, 0.5, 0.5, 0.5)
cr.set_line_width ((min(w,h)/2 -20)/6 * 0.8)
cr.move_to(w/2,h/2)
cr.line_to(w/2 + (min(w,h)/2 -20) * 0.8 * math.cos(minute_arc - math.pi/2),
h/2 + (min(w,h)/2 -20) * 0.8 * math.sin(minute_arc - math.pi/2))
cr.stroke()
# pointer second
cr.set_source_rgba(0.5, 0.5, 0.5, 0.5)
cr.set_line_width ((min(w,h)/2 -20)/6 * 0.4)
cr.move_to(w/2,h/2)
cr.line_to(w/2 + (min(w,h)/2 -20) * math.cos(second_arc - math.pi/2),
h/2 + (min(w,h)/2 -20) * math.sin(second_arc - math.pi/2))
cr.stroke()
# pango layout
cr.move_to((w - fontw - 4), (h - fonth ))
PangoCairo.show_layout(cr, layout)
def tick(self):
self.queue_draw()
return True
# Clock Integrated with Gramplets
# (c) 2009, Doug Blank
from gramps.gen.plug import Gramplet
class ClockGramplet(Gramplet):
def init(self):
self.gui.clock = ClockWidget()
self.gui.get_container_widget().remove(self.gui.textview)
self.gui.get_container_widget().add_with_viewport(self.gui.clock)
self.gui.clock.show()
|
gramps-project/addons-source
|
ClockGramplet/ClockGramplet.py
|
Python
|
gpl-2.0
| 3,298
|
# -*- coding: utf-8 -*-
##
## This file is part of INSPIRE.
## Copyright (C) 2014 CERN.
##
## INSPIRE is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## INSPIRE is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with INSPIRE; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
DEPOSIT_INSPIRE_SUBJECTS_KB = "Subjects"
""" KB used for Subjects """
DEPOSIT_INSPIRE_JOURNALS_KB = "docextract-journals"
""" KB used for Journals """
DEPOSIT_INSPIRE_DEGREE_KB = "DEGREE"
""" KB used for Degrees """
DEPOSIT_INSPIRE_LICENSE_KB = "LICENSE"
""" KB used for Licenses """
DEPOSIT_ARXIV_TO_INSPIRE_CATEGORIES_KB = "arxiv-to-inspire-categories"
""" KB used for arXiv to INSPIRE categories """
|
ioannistsanaktsidis/inspire-next
|
inspire/modules/deposit/config.py
|
Python
|
gpl-2.0
| 1,199
|
# CTK: Cherokee Toolkit
#
# Authors:
# Alvaro Lopez Ortega <alvaro@alobbs.com>
#
# Copyright (C) 2009 Alvaro Lopez Ortega
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
import os
import tempfile
from cgi import FieldStorage
from Box import Box
from Button import Button
from RawHTML import RawHTML
from Server import publish, get_scgi
HEADERS = [
'<script type="text/javascript" src="/CTK/js/ajaxupload.3.6.js"></script>'
]
JS = """
var button = $('#%(opener_widget_id)s');
var msg = $('#%(id)s .msg');
new AjaxUpload (button, {
name: 'file',
action: '%(upload_url)s',
onSubmit: function (file, ext) {
this.disable();
msg.html('Uploading');
interval = window.setInterval(function(){
var text = msg.html();
if (text.length < 13){
msg.html(text + '.');
} else {
msg.html('Uploading');
}
}, 200);
},
onComplete: function (file, response) {
window.clearInterval (interval);
msg.html('');
this.enable();
$('#%(id)s').trigger ({'type':'upload_finished', 'filename': file});
}
});
"""
# The internal POST Receiver and Storage classes are imported from
# CTK.Uploader().
#
from Uploader import UploadRequest
class AjaxUpload_Generic (Box):
def __init__ (self, opener_widget, props={}, params=None, direct=True):
Box.__init__ (self)
self.id = 'ajax_upload_%d' %(self.uniq_id)
self._url_local = '/ajax_upload_%d' %(self.uniq_id)
self.props = props.copy()
self.opener_widget = opener_widget
handler = self.props.get('handler')
target_dir = self.props.get('target_dir')
# Widgets
msg = Box ({'class': 'msg'}, RawHTML(' '))
self += opener_widget
self += msg
# Register the uploader path
publish (self._url_local, UploadRequest,
handler=handler, target_dir=target_dir, params=params, direct=direct)
def Render (self):
props = {'id': self.id,
'upload_url': self._url_local,
'opener_widget_id': self.opener_widget.id}
render = Box.Render (self)
render.headers += HEADERS
render.js += JS %(props)
return render
class AjaxUpload (AjaxUpload_Generic):
def __init__ (self, *args, **kwargs):
button = Button(_('Upload'))
AjaxUpload_Generic.__init__ (self, button, *args, **kwargs)
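# Illustrative usage sketch (added; not part of the original widget code).
# ``save_handler`` and the target directory are hypothetical placeholders;
# the props dictionary is forwarded to the internal UploadRequest publisher.
#
#     container = Box()
#     container += AjaxUpload({'handler': save_handler,
#                              'target_dir': '/var/tmp/uploads'})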
|
chetan/cherokee
|
admin/CTK/CTK/AjaxUpload.py
|
Python
|
gpl-2.0
| 3,039
|
import json
dumpStr = json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
print(dumpStr)
f = open('test.json', "w")
pathdict = {}
pathdict["corpus"] = "/home/corpus"
pathdict["intput"] = "/home/input"
path = json.dump(pathdict, f)
f.close()
f = open('test.json', 'r')
pathhook = json.load(f)
for key in pathhook:
print(key + ":" + pathhook[key])
|
Og192/Python
|
pythonLearning/json/jsonTest.py
|
Python
|
gpl-2.0
| 352
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('QuickBooking', '0012_auto_20150704_2004'),
]
operations = [
migrations.AlterField(
model_name='bustype',
name='type',
field=models.CharField(max_length=10, serialize=False, primary_key=True),
),
]
|
noorelden/QuickBooking
|
QuickBooking/migrations/0013_auto_20150704_2007.py
|
Python
|
gpl-2.0
| 441
|
#!/usr/bin/env python
import os, sys
def main():
if len(sys.argv) != 2:
print "Usage: %s <launchpad-download-folder>" % sys.argv[0]
return
files_to_merge = []
reldir = sys.argv[1]
for name in os.listdir(reldir):
if name.endswith(".po") and os.path.exists(os.path.join(reldir, name)):
dl_file = os.path.join(reldir, name)
old_po_file = None
if os.path.exists(name): #exists in current directory
old_po_file = name
elif name.startswith("jokosher-") and os.path.exists(name[len("jokosher-"):]):
old_po_file = name[len("jokosher-"):]
if old_po_file:
files = (dl_file, old_po_file)
files_to_merge.append(files)
print "Ready to merge %d PO files." % len(files_to_merge)
for dl_file, file in files_to_merge:
merge_files(dl_file, file)
print "Done."
def merge_files(rosetta_download, bzr_version):
COMMAND = 'msgmerge "%s" "%s" -o "%s"'
outfile = bzr_version + ".tmp"
cmd = COMMAND % (rosetta_download, bzr_version, outfile)
print "=> msgmerge-ing", bzr_version
os.system(cmd)
os.rename(outfile, bzr_version)
if __name__ == "__main__":
main()
|
mjumbewu/jokosher
|
locale/src/rosetta-merge.py
|
Python
|
gpl-2.0
| 1,107
|
from django.contrib import admin
from .models import *
from main import opengain_admin
@admin.register(Ticket, site=opengain_admin)
class TicketAdmin(admin.ModelAdmin):
list_display = ('user', 'subject', 'created', 'is_closed')
list_editable = ('is_closed',)
list_filter = ('is_closed',)
search_fields = ('user__username',)
@admin.register(TicketMessage, site=opengain_admin)
class TicketMessageAdmin(admin.ModelAdmin):
list_display = ('ticket', 'created', 'user', 'message', 'is_readed')
list_editable = ('is_readed',)
search_fields = ('user__username',)
list_filter = ('is_readed',)
|
null-none/OpenGain
|
default_set/tickets/admin.py
|
Python
|
gpl-2.0
| 621
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import sys
import re
trans_table = {
# dos newlines
"\r\n" : "\n",
# polish characters
"_a" : "ą",
"_c" : "ć",
"_e" : "ę",
"_l" : "ł",
"_n" : "ń",
"_o" : "ó",
"_s" : "ś",
"_z" : "ż",
"_x" : "ż",
"_A" : "Ą",
"_C" : "Ć",
"_E" : "Ę",
"_L" : "Ł",
"_N" : "Ń",
"_O" : "Ó",
"_S" : "Ś",
"_Z" : "Ż",
# clear formatting
"_\*" : "",
"_'" : "",
"_!![ZPWAOGXSTLQ][0-9]+;" : "",
"_>" : "",
"_0" : "",
"_1" : " ",
"_2" : " ",
"_3" : " ",
"_4" : " ",
"_5" : " ",
"_6" : " ",
"_7" : " ",
"_8" : " ",
"_\." : ".",
"_\+" : "",
"_\," : ",",
"_\(" : "",
#"_\ " : " ",
"_\:" : ":",
"_\?" : "?",
"_\[" : "[",
"_\]" : "]",
"_\|" : "|",
"^ *" : "",
"\|([\S])" : "\\1",
# fix punctuation errors
"(\w)\s:\s" : "\\1: ",
"(\w)\s+,(\w)" : "\\1, \\2",
"(\w),(\w)" : "\\1, \\2",
"(\w) \." : "\\1. ",
"([\w\.\,])\ +" : "\\1 ",
"^ " : "",
# fix myself
"żró": "źró",
"Żró": "Źró",
"wskażni" : "wskaźni",
"Wskażni" : "Wskaźni",
"__" : "_",
}
def trans_line(line, table):
for rule in table:
line, count = re.subn(rule, table[rule], line)
return line
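# Illustrative example (added, not in the original script): applying the
# table to a line expands the escaped Polish letters, e.g.
#
#     trans_line("_l_aka", trans_table)   # -> "łąka"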
# ------------------------------------------------------------------------
# ---- MAIN --------------------------------------------------------------
# ------------------------------------------------------------------------
in_file = sys.argv[1]
f = open(in_file, "r")
limit = -1
for line in f:
if limit == 0:
break
#print line,
print trans_line(line, trans_table),
#print "------------------------------------------------------"
limit -= 1
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
saper/em400
|
tools/derep.py
|
Python
|
gpl-2.0
| 1,654
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 University of Dundee & Open Microscopy Environment.
# All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Simple unit tests for the "omeroweb.decorators".
"""
import pytest
import string
from django.test import RequestFactory
from django.test import override_settings
from django.utils.http import urlencode
from omeroweb.webclient.decorators import render_response
QUERY_STRING = " %s" % string.printable
def call_load_settings(request, conn):
context = {'ome': {}}
render_response().load_settings(request, context, conn)
return context
class TestRenderResponse(object):
def setup_method(self, method):
# prepare session
self.r = RequestFactory().get('/rand')
@override_settings()
def test_load_settings_defaults(self):
context = call_load_settings(self.r, None)
defaults = [
{
'link': u'/webclient/',
'attrs': {u'title': u'Browse Data via Projects, Tags etc'},
'label': u'Data'
}, {
'link': u'/webclient/history/',
'attrs': {u'title': u'History'},
'label': u'History'
}, {
'link': u'https://help.openmicroscopy.org/',
'attrs': {
u'target': u'new',
u'title': u'Open OMERO user guide in a new tab'
},
'label': u'Help'
}]
assert context['ome']['top_links'] == defaults
@pytest.mark.parametrize('top_links', [
[['Data1', 'webindex', {"title": "Some text"}], ["/webclient/"]],
[['Data2', {"viewname": 'webindex'}, {"title": "Some text"}],
["/webclient/"]],
[['Data3', {"viewname": "load_template", "args": ["userdata"]},
{}], ["/webclient/userdata/"]],
[['Data4', {"viewname": "load_template", "args": ["userdata"],
"query_string": {"experimenter": -1}}, {}],
["/webclient/userdata/?experimenter=-1"]],
[['Data5', {"viewname": "load_template", "args": ["userdata"],
"query_string": {"test": QUERY_STRING}}, {}],
["/webclient/userdata/?%s" % urlencode({'test': QUERY_STRING})]],
[['History', 'history', {"title": "History"}],
["/webclient/history/"]],
[['HELP', 'https://help.openmicroscopy.org', {"title": "Help"}],
["https://help.openmicroscopy.org"]],
[["", "", {}], [""]],
[["", None, {}], [None]],
[["Foo", "bar", {}], ["bar"]],
[['Foo', {"viewname": "foo"}, {}], [""]],
[["Foo", {"viewname": "load_template", "args": ["bar"]}, {}], [""]],
])
def test_load_settings(self, top_links):
@override_settings(TOP_LINKS=[top_links[0]])
def _test_load_settings():
return call_load_settings(self.r, None)
context = _test_load_settings()
assert context['ome']['top_links'][0]['label'] == top_links[0][0]
assert context['ome']['top_links'][0]['link'] == top_links[1][0]
assert context['ome']['top_links'][0]['attrs'] == top_links[0][2]
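# Illustrative settings sketch (added), mirroring the parametrised cases
# above; in a real deployment the entries would come from the OMERO.web
# configuration rather than being hard-coded:
#
#     TOP_LINKS = [
#         ["Data", "webindex", {"title": "Browse Data"}],
#         ["Help", "https://help.openmicroscopy.org", {"target": "new"}],
#     ]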
|
knabar/openmicroscopy
|
components/tools/OmeroWeb/test/unit/test_render_response.py
|
Python
|
gpl-2.0
| 3,836
|
import numpy as np
from flopy.mbase import Package
from flopy.utils import util_2d,util_3d
class Mt3dRct(Package):
'''
Chemical reaction package class
'''
def __init__(self, model, isothm=0, ireact=0, igetsc=1, rhob=1.8e3,
prsity2=0.1, srconc=0.0, sp1=0.0, sp2=0.0, rc1=0.0, rc2=0.0,
extension='rct'):
#Call ancestor's init to set self.parent, extension, name and
#unit number
Package.__init__(self, model, extension, 'RCT', 36)
nrow, ncol, nlay, nper = self.parent.mf.nrow_ncol_nlay_nper
self.heading1 = '# RCT for MT3DMS, generated by Flopy.'
self.isothm = isothm
self.ireact = ireact
self.irctop = 2 #All RCT vars are specified as 3D arrays
self.igetsc = igetsc
# Set values of all parameters
#self.rhob = self.assignarray((nlay, nrow, ncol), np.float, rhob,
# name='rhob')
self.rhob = util_3d(model,(nlay,nrow,ncol),np.float32,rhob,name='rhob',
locat=self.unit_number[0])
#self.prsity2 = self.assignarray((nlay, nrow, ncol), np.float, prsity2,
# name='prsity2')
self.prsity2 = util_3d(model,(nlay,nrow,ncol),np.float32,prsity2,
name='prsity2',locat=self.unit_number[0])
#self.srconc = self.assignarray((nlay, nrow, ncol), np.float, srconc,
# name='srconc')
self.srconc = util_3d(model,(nlay,nrow,ncol),np.float32,srconc,
name='srconc',locat=self.unit_number[0])
#self.sp1 = self.assignarray((nlay, nrow, ncol), np.float, sp1,
# name='sp1')
self.sp1 = util_3d(model,(nlay,nrow,ncol),np.float32,sp1,name='sp1',
locat=self.unit_number[0])
#self.sp2 = self.assignarray((nlay, nrow, ncol), np.float, sp2,
# name='sp2')
self.sp2 = util_3d(model,(nlay,nrow,ncol),np.float32,sp2,name='sp2',
locat=self.unit_number[0])
#self.rc1 = self.assignarray((nlay, nrow, ncol), np.float, rc1,
# name='rc1')
self.rc1 = util_3d(model,(nlay,nrow,ncol),np.float32,rc1,name='rc1',
locat=self.unit_number[0])
#self.rc2 = self.assignarray((nlay, nrow, ncol), np.float, rc2,
# name='rc2')
self.rc2 = util_3d(model,(nlay,nrow,ncol),np.float32,rc2,name='rc2',
locat=self.unit_number[0])
self.parent.add_package(self)
return
def __repr__( self ):
return 'Chemical reaction package class'
def write_file(self):
nrow, ncol, nlay, nper = self.parent.mf.nrow_ncol_nlay_nper
# Open file for writing
f_rct = open(self.fn_path, 'w')
f_rct.write('%10i%10i%10i%10i\n' % (self.isothm, self.ireact,
self.irctop, self.igetsc))
if (self.isothm in [1, 2, 3, 4, 6]):
#self.parent.write_array(f_rct, self.rhob, self.unit_number[0],
# True, 13, -ncol, 'Bulk density for Layer',
# ext_base='rhob')
f_rct.write(self.rhob.get_file_entry())
if (self.isothm in [5, 6]):
#self.parent.write_array(f_rct, self.prsity2, self.unit_number[0],
# True, 13, -ncol,
# 'Immobile porosity for Layer',
# ext_base='prsity2')
f_rct.write(self.prsity2.get_file_entry())
if (self.igetsc > 0):
#self.parent.write_array(f_rct, self.srconc, self.unit_number[0],
# True, 13, -ncol, 'Sorbed/immobile '
# 'starting concentration for Layer',
# ext_base='srconc')
f_rct.write(self.srconc.get_file_entry())
if (self.isothm > 0):
#self.parent.write_array(f_rct, self.sp1, self.unit_number[0],
# True, 13, -ncol,
# 'First sorption parameter for Layer',
# ext_base='sp1')
f_rct.write(self.sp1.get_file_entry())
if (self.isothm > 0):
#self.parent.write_array(f_rct, self.sp2, self.unit_number[0],
# True, 13, -ncol,
# 'Second sorption parameter for Layer',
# ext_base='sp2')
f_rct.write(self.sp2.get_file_entry())
if (self.ireact > 0):
#self.parent.write_array(f_rct, self.rc1, self.unit_number[0],
# True, 13, -ncol, 'First order reaction '
# 'rate for liquid phase for Layer',
# ext_base='rc1')
f_rct.write(self.rc1.get_file_entry())
if (self.ireact > 0):
#self.parent.write_array(f_rct, self.rc2, self.unit_number[0],
# True, 13, -ncol, 'First order reaction '
# 'rate for sorbed phase for Layer',
# ext_base='rc2')
f_rct.write(self.rc2.get_file_entry())
f_rct.close()
return
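# Illustrative usage sketch (added): ``mt`` stands for an already-constructed
# Mt3dms model whose linked Modflow model defines the grid; the keyword
# values below are arbitrary example numbers, not recommended defaults.
#
#     rct = Mt3dRct(mt, isothm=1, ireact=1, rhob=1700., sp1=0.25, rc1=1e-3)
#     rct.write_file()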
|
mjasher/gac
|
original_libraries/flopy-master/flopy/mt3d/mtrct.py
|
Python
|
gpl-2.0
| 5,665
|
# vi: ts=4 expandtab
#
# Copyright (C) 2009-2010 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import cloudinit.util as util
import cloudinit.SshUtil as sshutil
import os
import glob
import subprocess
DISABLE_ROOT_OPTS = "no-port-forwarding,no-agent-forwarding," \
"no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\" " \
"rather than the user \\\"root\\\".\';echo;sleep 10\""
def handle(_name, cfg, cloud, log, _args):
# remove the static keys from the pristine image
if cfg.get("ssh_deletekeys", True):
for f in glob.glob("/etc/ssh/ssh_host_*key*"):
try:
os.unlink(f)
except:
pass
if "ssh_keys" in cfg:
# if there are keys in cloud-config, use them
key2file = {
"rsa_private": ("/etc/ssh/ssh_host_rsa_key", 0600),
"rsa_public": ("/etc/ssh/ssh_host_rsa_key.pub", 0644),
"dsa_private": ("/etc/ssh/ssh_host_dsa_key", 0600),
"dsa_public": ("/etc/ssh/ssh_host_dsa_key.pub", 0644),
"ecdsa_private": ("/etc/ssh/ssh_host_ecdsa_key", 0600),
"ecdsa_public": ("/etc/ssh/ssh_host_ecdsa_key.pub", 0644),
}
for key, val in cfg["ssh_keys"].items():
if key in key2file:
util.write_file(key2file[key][0], val, key2file[key][1])
priv2pub = {'rsa_private': 'rsa_public', 'dsa_private': 'dsa_public',
'ecdsa_private': 'ecdsa_public', }
cmd = 'o=$(ssh-keygen -yf "%s") && echo "$o" root@localhost > "%s"'
for priv, pub in priv2pub.iteritems():
if pub in cfg['ssh_keys'] or not priv in cfg['ssh_keys']:
continue
pair = (key2file[priv][0], key2file[pub][0])
subprocess.call(('sh', '-xc', cmd % pair))
log.debug("generated %s from %s" % pair)
else:
# if not, generate them
for keytype in util.get_cfg_option_list_or_str(cfg, 'ssh_genkeytypes',
['rsa', 'dsa', 'ecdsa']):
keyfile = '/etc/ssh/ssh_host_%s_key' % keytype
if not os.path.exists(keyfile):
subprocess.call(['ssh-keygen', '-t', keytype, '-N', '',
'-f', keyfile])
util.restorecon_if_possible('/etc/ssh', recursive=True)
try:
user = util.get_cfg_option_str(cfg, 'user')
disable_root = util.get_cfg_option_bool(cfg, "disable_root", True)
disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts",
DISABLE_ROOT_OPTS)
keys = cloud.get_public_ssh_keys()
if "ssh_authorized_keys" in cfg:
cfgkeys = cfg["ssh_authorized_keys"]
keys.extend(cfgkeys)
apply_credentials(keys, user, disable_root, disable_root_opts, log)
except:
util.logexc(log)
log.warn("applying credentials failed!\n")
def apply_credentials(keys, user, disable_root,
disable_root_opts=DISABLE_ROOT_OPTS, log=None):
keys = set(keys)
if user:
sshutil.setup_user_keys(keys, user, '', log)
if disable_root:
key_prefix = disable_root_opts.replace('$USER', user)
else:
key_prefix = ''
sshutil.setup_user_keys(keys, 'root', key_prefix, log)
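# Illustrative example (added): a minimal cloud-config dictionary covering
# only the keys this handler reads above; the authorized key shown is a
# placeholder, not real key material.
#
#     cfg = {
#         'ssh_deletekeys': True,
#         'ssh_genkeytypes': ['rsa', 'ecdsa'],
#         'disable_root': True,
#         'ssh_authorized_keys': ['ssh-rsa AAAA... user@example'],
#     }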
|
pwyliu/cloud-init-0.6.3
|
cloudinit/CloudConfig/cc_ssh.py
|
Python
|
gpl-3.0
| 4,064
|
# Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import string
from optparse import make_option
from django.db import transaction
from synnefo.lib.ordereddict import OrderedDict
from synnefo.quotas import util
from synnefo.quotas import enforce
from synnefo.quotas import errors
from snf_django.management.commands import SynnefoCommand, CommandError
from snf_django.management.utils import pprint_table
from collections import defaultdict
DEFAULT_RESOURCES = ["cyclades.cpu",
"cyclades.ram",
"cyclades.floating_ip",
]
DESTROY_RESOURCES = ["cyclades.vm",
"cyclades.total_cpu",
"cyclades.total_ram",
]
class Command(SynnefoCommand):
help = """Check and fix quota violations for Cyclades resources.
"""
command_option_list = (
make_option("--max-operations",
help="Limit operations per backend."),
make_option("--users", dest="users",
help=("Enforce resources only for the specified list "
"of users, e.g uuid1,uuid2")),
make_option("--exclude-users",
help=("Exclude list of users from resource enforcement")),
make_option("--projects",
help=("Enforce resources only for the specified list "
"of projects, e.g uuid1,uuid2")),
make_option("--exclude-projects",
help=("Exclude list of projects from resource enforcement")
),
make_option("--resources",
help="Specify resources to check, default: %s" %
",".join(DEFAULT_RESOURCES)),
make_option("--fix",
default=False,
action="store_true",
help="Fix violations"),
make_option("--force",
default=False,
action="store_true",
help=("Confirm actions that may permanently "
"remove a vm")),
make_option("--shutdown-timeout",
help="Force vm shutdown after given seconds."),
make_option("--remove-system-volumes",
default=False,
action="store_true",
help=("Allow removal of system volumes. This will also "
"remove the VM.")),
make_option("--cascade-remove",
default=False,
action="store_true",
help=("Allow removal of a VM which has additional "
"(non system) volumes attached. This will also "
"remove these volumes")),
)
def confirm(self):
self.stdout.write("Confirm? [y/N] ")
try:
response = raw_input()
except EOFError:
response = "ABORT"
if string.lower(response) not in ['y', 'yes']:
self.stderr.write("Aborted.\n")
exit()
def get_handlers(self, resources):
def rem(v):
try:
resources.remove(v)
return True
except ValueError:
return False
if resources is None:
resources = list(DEFAULT_RESOURCES)
else:
resources = resources.split(",")
handlers = [h for h in enforce.RESOURCE_HANDLING if rem(h[0])]
if resources:
m = "No such resource '%s'" % resources[0]
raise CommandError(m)
return handlers
@transaction.commit_on_success
def handle(self, *args, **options):
write = self.stderr.write
fix = options["fix"]
force = options["force"]
handlers = self.get_handlers(options["resources"])
maxops = options["max_operations"]
if maxops is not None:
try:
maxops = int(maxops)
except ValueError:
m = "Expected integer max operations."
raise CommandError(m)
shutdown_timeout = options["shutdown_timeout"]
if shutdown_timeout is not None:
try:
shutdown_timeout = int(shutdown_timeout)
except ValueError:
m = "Expected integer shutdown timeout."
raise CommandError(m)
remove_system_volumes = options["remove_system_volumes"]
cascade_remove = options["cascade_remove"]
excluded_users = options['exclude_users']
excluded_users = set(excluded_users.split(',')
if excluded_users is not None else [])
users_to_check = options['users']
if users_to_check is not None:
users_to_check = list(set(users_to_check.split(',')) -
excluded_users)
try:
qh_holdings = util.get_qh_users_holdings(users_to_check)
except errors.AstakosClientException as e:
raise CommandError(e)
excluded_projects = options["exclude_projects"]
excluded_projects = set(excluded_projects.split(',')
if excluded_projects is not None else [])
projects_to_check = options["projects"]
if projects_to_check is not None:
projects_to_check = list(set(projects_to_check.split(',')) -
excluded_projects)
try:
qh_project_holdings = util.get_qh_project_holdings(
projects_to_check)
except errors.AstakosClientException as e:
raise CommandError(e)
qh_project_holdings = sorted(qh_project_holdings.items())
qh_holdings = sorted(qh_holdings.items())
resources = set(h[0] for h in handlers)
dangerous = bool(resources.difference(DEFAULT_RESOURCES))
self.stderr.write("Checking resources %s...\n" %
",".join(list(resources)))
hopts = {"cascade_remove": cascade_remove,
"remove_system_volumes": remove_system_volumes,
}
opts = {"shutdown_timeout": shutdown_timeout}
actions = {}
overlimit = []
viol_id = 0
remains = defaultdict(list)
if users_to_check is None:
for resource, handle_resource, resource_type in handlers:
if resource_type not in actions:
actions[resource_type] = OrderedDict()
actual_resources = enforce.get_actual_resources(
resource_type, projects=projects_to_check)
for project, project_quota in qh_project_holdings:
if enforce.skip_check(project, projects_to_check,
excluded_projects):
continue
try:
qh = util.transform_project_quotas(project_quota)
qh_value, qh_limit, qh_pending = qh[resource]
except KeyError:
write("Resource '%s' does not exist in Quotaholder"
" for project '%s'!\n" %
(resource, project))
continue
if qh_pending:
write("Pending commission for project '%s', "
"resource '%s'. Skipping\n" %
(project, resource))
continue
diff = qh_value - qh_limit
if diff > 0:
viol_id += 1
overlimit.append((viol_id, "project", project, "",
resource, qh_limit, qh_value))
relevant_resources = enforce.pick_project_resources(
actual_resources[project], users=users_to_check,
excluded_users=excluded_users)
handle_resource(viol_id, resource, relevant_resources,
diff, actions, remains, options=hopts)
for resource, handle_resource, resource_type in handlers:
if resource_type not in actions:
actions[resource_type] = OrderedDict()
actual_resources = enforce.get_actual_resources(resource_type,
users_to_check)
for user, user_quota in qh_holdings:
if enforce.skip_check(user, users_to_check, excluded_users):
continue
for source, source_quota in user_quota.iteritems():
if enforce.skip_check(source, projects_to_check,
excluded_projects):
continue
try:
qh = util.transform_quotas(source_quota)
qh_value, qh_limit, qh_pending = qh[resource]
except KeyError:
write("Resource '%s' does not exist in Quotaholder"
" for user '%s' and source '%s'!\n" %
(resource, user, source))
continue
if qh_pending:
write("Pending commission for user '%s', source '%s', "
"resource '%s'. Skipping\n" %
(user, source, resource))
continue
diff = qh_value - qh_limit
if diff > 0:
viol_id += 1
overlimit.append((viol_id, "user", user, source,
resource, qh_limit, qh_value))
relevant_resources = actual_resources[source][user]
handle_resource(viol_id, resource, relevant_resources,
diff, actions, remains, options=hopts)
if not overlimit:
write("No violations.\n")
return
headers = ("#", "Type", "Holder", "Source", "Resource", "Limit",
"Usage")
pprint_table(self.stdout, overlimit, headers,
options["output_format"], title="Violations")
if any(actions.values()):
self.stdout.write("\n")
if fix:
if dangerous and not force:
write("You are enforcing resources that may permanently "
"remove a vm or volume.\n")
self.confirm()
write("Applying actions. Please wait...\n")
title = "Applied Actions" if fix else "Suggested Actions"
log = enforce.perform_actions(actions, maxops=maxops, fix=fix,
options=opts)
headers = ("Type", "ID", "State", "Backend", "Action", "Violation")
if fix:
headers += ("Result",)
pprint_table(self.stdout, log, headers,
options["output_format"], title=title)
def explain(resource):
if resource == "cyclades.disk":
if not remove_system_volumes:
return (", because this would need to remove system "
"volumes; if you want to do so, use the "
"--remove-system-volumes option:")
if not cascade_remove:
return (", because this would trigger the removal of "
"attached volumes, too; if you want to do "
"so, use the --cascade-remove option:")
elif resource in DESTROY_RESOURCES:
if not cascade_remove:
return (", because this would trigger the removal of "
"attached volumes, too; if you want to do "
"so, use the --cascade-remove option:")
return ":"
if remains:
self.stderr.write("\n")
for resource, viols in remains.iteritems():
self.stderr.write(
"The following violations for resource '%s' "
"could not be resolved%s\n"
% (resource, explain(resource)))
self.stderr.write(" %s\n" % ",".join(map(str, viols)))
|
Erethon/synnefo
|
snf-cyclades-app/synnefo/quotas/management/commands/enforce-resources-cyclades.py
|
Python
|
gpl-3.0
| 13,112
|
# Copyright (c) 2018, Ansible Project
# Copyright (c) 2018, Abhijeet Kasurde (akasurde@redhat.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
# Parameters for DigitalOcean modules
DOCUMENTATION = '''
options:
oauth_token:
description:
- DigitalOcean OAuth token.
- "There are several other environment variables which can be used to provide this value."
- "i.e., - 'DO_API_TOKEN', 'DO_API_KEY', 'DO_OAUTH_TOKEN' and 'OAUTH_TOKEN'"
required: false
timeout:
description:
- The timeout in seconds used for polling DigitalOcean's API.
default: 30
validate_certs:
description:
- If set to C(no), the SSL certificates will not be validated.
- This should only set to C(no) used on personally controlled sites using self-signed certificates.
default: true
type: bool
'''
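# Illustrative sketch (not part of the doc fragment above): the documented
# fallback environment variables could be resolved with a small helper such as
# the one below; the helper name is an assumption made purely for illustration.
import os
def resolve_do_oauth_token(explicit_token=None):
    """Return the explicit token, or the first documented env var that is set."""
    if explicit_token:
        return explicit_token
    for var in ('DO_API_TOKEN', 'DO_API_KEY', 'DO_OAUTH_TOKEN', 'OAUTH_TOKEN'):
        value = os.environ.get(var)
        if value:
            return value
    return None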
|
wrouesnel/ansible
|
lib/ansible/utils/module_docs_fragments/digital_ocean.py
|
Python
|
gpl-3.0
| 919
|
#!/usr/bin/env python2
import redis
import string
import random
KEYS_COUNT = 10000
def random_string(length):
return ''.join(random.choice(string.ascii_letters) for m in xrange(length))
def test():
r = redis.StrictRedis(host='localhost', port=6379, db=0)
for x in range(KEYS_COUNT / 2):
r.set(random_string(random.randint(1, 9)), random_string(random.randint(1, 9)))
for x in range(KEYS_COUNT / 2):
ns = random_string(random.randint(1, 9)) + ':' + random_string(random.randint(1, 9))
r.set(ns, random_string(random.randint(1, 9)))
test()
|
fastogt/fastonosql
|
tests/redis_test_many_keys.py
|
Python
|
gpl-3.0
| 588
|
from cjdnsadmin.cjdnsadmin import connect
cjdns = connect("127.0.0.1", 11234, "NONE")
print(cjdns.Sign_checkSig(
'test message',
'0ytl2njc1hy86tlxtc2zc3449up47uqb0u04kcy233d7zrn2cwh1_y96duzwpvmslj8b7pnk2b32m0rhs738yujwtrtlcq81r0u114svygwn56phn9yncpyzhswpj3bd808lgd5bknlj8xwf7purl0r0hc30'))
|
cjdelisle/cjdns
|
contrib/python3/sign_example.py
|
Python
|
gpl-3.0
| 297
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019 SubDownloader Developers - See COPYING - GPLv3
import base64
import datetime
from http.client import CannotSendRequest
import io
import logging
import re
from socket import error as SocketError
import string
import time
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.request import urlopen
from xmlrpc.client import ProtocolError, ServerProxy
from xml.parsers.expat import ExpatError
import zlib
from subdownloader.languages.language import Language, NotALanguageException, UnknownLanguage
from subdownloader.identification import ImdbIdentity, ProviderIdentities, SeriesIdentity, VideoIdentity
from subdownloader.movie import RemoteMovie
from subdownloader.provider.imdb import ImdbMovieMatch
from subdownloader.provider import window_iterator
from subdownloader.provider.provider import ProviderConnectionError, ProviderNotConnectedError, \
ProviderSettings, ProviderSettingsType, SubtitleProvider, SubtitleTextQuery, UploadResult
from subdownloader.subtitle2 import LocalSubtitleFile, RemoteSubtitleFile
from subdownloader.util import unzip_bytes, unzip_stream, write_stream
log = logging.getLogger('subdownloader.provider.opensubtitles')
class OpenSubtitlesProviderConnectionError(ProviderConnectionError):
CODE_MAP = {
200: _('OK'),
206: _('Partial content; message'),
301: _('Moved (host)'),
401: _('Unauthorized'),
402: _('Subtitle(s) have invalid format'),
403: _('Subtitle hashes (content and sent subhash) are not same!'),
404: _('Subtitles have invalid language!'),
405: _('Not all mandatory parameters were specified'),
406: _('No session'),
407: _('Download limit reached'),
408: _('Invalid parameters'),
409: _('Method not found'),
410: _('Other or unknown error'),
411: _('Empty or invalid useragent'),
412: _('%s has invalid format (reason)'),
413: _('Invalid IMDb ID'),
414: _('Unknown User Agent'),
415: _('Disabled user agent'),
416: _('Internal subtitle validation failed'),
429: _('Too many requests'),
503: _('Service unavailable'),
506: _('Server under maintenance'),
}
def __init__(self, code, message, extra_data=None):
self._code = code
if self._code:
try:
msg = '{} {}'.format(self._code, self.CODE_MAP[self._code])
            except KeyError:
self._code = None
msg = '{} {}'.format(self._code, message)
else:
msg = message
ProviderConnectionError.__init__(self, msg, extra_data=extra_data)
def get_code(self):
return self._code
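# Illustrative sketch (an assumption, not part of the provider): turning the
# mapped status code back into a short description when one of the XML-RPC
# calls below raises this error.
def describe_connection_error(error):
    """Return a one-line description of an OpenSubtitlesProviderConnectionError."""
    code = error.get_code()
    if code is None:
        return 'connection problem: {}'.format(error)
    return 'server replied with status {}: {}'.format(code, error)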
class OpenSubtitles(SubtitleProvider):
URL = 'http://api.opensubtitles.org/xml-rpc'
def __init__(self, settings=None):
SubtitleProvider.__init__(self)
self._xmlrpc = None
self._token = None
self._last_time = None
if settings is None:
settings = OpenSubtitlesSettings()
self._settings = settings
def get_settings(self):
return self._settings
def set_settings(self, settings):
if self.connected():
raise RuntimeError('Cannot set settings while connected') # FIXME: change error
self._settings = settings
def connect(self):
log.debug('connect()')
if self.connected():
return
self._xmlrpc = ServerProxy(self.URL, allow_none=False)
self._last_time = time.time()
def disconnect(self):
log.debug('disconnect()')
if self.logged_in():
self.logout()
if self.connected():
self._xmlrpc = None
def connected(self):
return self.logged_in()
def login(self):
log.debug('login()')
if self.logged_in():
return
if not self.connected():
self.connect()
def login_query():
# FIXME: 'en' language ok??? or '' as in the original
return self._xmlrpc.LogIn(str(self._settings.username), str(self._settings.password),
'en', str(self._settings.get_user_agent()))
result = self._safe_exec(login_query, None)
self.check_result(result)
self._token = result['token']
def logout(self):
log.debug('logout()')
if self.logged_in():
def logout_query():
return self._xmlrpc.LogOut(self._token)
# Do no check result of this call. Assume connection closed.
self._safe_exec(logout_query, None)
self._token = None
def logged_in(self):
return self._token is not None
def reestablish(self):
log.debug('reestablish()')
connected = self.connected()
logged_in = self.logged_in()
self.disconnect()
if connected:
self.connect()
if logged_in:
self.login()
_TIMEOUT_MS = 60000
def _ensure_connection(self):
now = time.time()
        if now - self._last_time > self._TIMEOUT_MS:
self.reestablish()
self._last_time = now
SEARCH_LIMIT = 500
def search_videos(self, videos, callback, languages=None):
log.debug('search_videos(#videos={})'.format(len(videos)))
if not self.logged_in():
raise ProviderNotConnectedError()
lang_str = self._languages_to_str(languages)
window_size = 5
callback.set_range(0, (len(videos) + (window_size - 1)) // window_size)
remote_subtitles = []
for window_i, video_window in enumerate(window_iterator(videos, window_size)):
callback.update(window_i)
if callback.canceled():
break
queries = []
hash_video = {}
for video in video_window:
query = {
'sublanguageid': lang_str,
'moviehash': video.get_osdb_hash(),
'moviebytesize': str(video.get_size()),
}
if video.get_osdb_hash() is None:
log.debug('osdb hash of "{}" is empty -> skip'.format(video.get_filepath()))
self._signal_connection_failed() # FIXME: other name + general signaling
continue
queries.append(query)
hash_video[video.get_osdb_hash()] = video
def run_query():
return self._xmlrpc.SearchSubtitles(self._token, queries, {'limit': self.SEARCH_LIMIT})
result = self._safe_exec(run_query, None)
self.check_result(result)
if result is None:
continue
for rsub_raw in result['data']:
try:
remote_filename = rsub_raw['SubFileName']
remote_file_size = int(rsub_raw['SubSize'])
remote_id = rsub_raw['IDSubtitleFile']
remote_md5_hash = rsub_raw['SubHash']
remote_download_link = rsub_raw['SubDownloadLink']
remote_link = rsub_raw['SubtitlesLink']
remote_uploader = rsub_raw['UserNickName'].strip()
remote_language_raw = rsub_raw['SubLanguageID']
try:
remote_language = Language.from_unknown(remote_language_raw,
xx=True, xxx=True)
except NotALanguageException:
remote_language = UnknownLanguage(remote_language_raw)
remote_rating = float(rsub_raw['SubRating'])
remote_date = datetime.datetime.strptime(rsub_raw['SubAddDate'], '%Y-%m-%d %H:%M:%S')
remote_subtitle = OpenSubtitlesSubtitleFile(
filename=remote_filename,
file_size=remote_file_size,
md5_hash=remote_md5_hash,
id_online=remote_id,
download_link=remote_download_link,
link=remote_link,
uploader=remote_uploader,
language=remote_language,
rating=remote_rating,
date=remote_date,
)
movie_hash = '{:>016}'.format(rsub_raw['MovieHash'])
video = hash_video[movie_hash]
imdb_id = rsub_raw['IDMovieImdb']
try:
imdb_rating = float(rsub_raw['MovieImdbRating'])
except (ValueError, KeyError):
imdb_rating = None
imdb_identity = ImdbIdentity(imdb_id=imdb_id, imdb_rating=imdb_rating)
video_name = rsub_raw['MovieName']
try:
video_year = int(rsub_raw['MovieYear'])
except (ValueError, KeyError):
video_year = None
video_identity = VideoIdentity(name=video_name, year=video_year)
try:
series_season = int(rsub_raw['SeriesSeason'])
except (KeyError, ValueError):
series_season = None
try:
series_episode = int(rsub_raw['SeriesEpisode'])
except (KeyError, ValueError):
series_episode = None
series_identity = SeriesIdentity(season=series_season, episode=series_episode)
identity = ProviderIdentities(video_identity=video_identity, imdb_identity=imdb_identity,
episode_identity=series_identity, provider=self)
video.add_subtitle(remote_subtitle)
video.add_identity(identity)
remote_subtitles.append(remote_subtitle)
except (KeyError, ValueError):
log.exception('Error parsing result of SearchSubtitles(...)')
log.error('Offending query is: {queries}'.format(queries=queries))
log.error('Offending result is: {remote_sub}'.format(remote_sub=rsub_raw))
callback.finish()
return remote_subtitles
def query_text(self, query):
return OpenSubtitlesTextQuery(query=query)
def download_subtitles(self, os_rsubs):
log.debug('download_subtitles()')
if not self.logged_in():
raise ProviderNotConnectedError()
window_size = 20
map_id_data = {}
for window_i, os_rsub_window in enumerate(window_iterator(os_rsubs, window_size)):
query = [subtitle.get_id_online() for subtitle in os_rsub_window]
def run_query():
return self._xmlrpc.DownloadSubtitles(self._token, query)
result = self._safe_exec(run_query, None)
self.check_result(result)
map_id_data.update({item['idsubtitlefile']: item['data'] for item in result['data']})
subtitles = [unzip_bytes(base64.b64decode(map_id_data[os_rsub.get_id_online()])).read() for os_rsub in os_rsubs]
return subtitles
def upload_subtitles(self, local_movie):
log.debug('upload_subtitles()')
if not self.logged_in():
raise ProviderNotConnectedError()
video_subtitles = list(local_movie.iter_video_subtitles())
if not video_subtitles:
return UploadResult(type=UploadResult.Type.MISSINGDATA, reason=_('Need at least one subtitle to upload'))
query_try = dict()
for sub_i, (video, subtitle) in enumerate(video_subtitles):
if not video:
return UploadResult(type=UploadResult.Type.MISSINGDATA, reason=_('Each subtitle needs an accompanying video'))
query_try['cd{}'.format(sub_i+1)] = {
'subhash': subtitle.get_md5_hash(),
'subfilename': subtitle.get_filename(),
'moviehash': video.get_osdb_hash(),
'moviebytesize': str(video.get_size()),
'moviefps': str(video.get_fps()) if video.get_fps() else None,
'movieframes': str(video.get_framecount()) if video.get_framecount() else None,
'moviefilename': video.get_filename(),
}
def run_query_try_upload():
return self._xmlrpc.TryUploadSubtitles(self._token, query_try)
try_result = self._safe_exec(run_query_try_upload, None)
self.check_result(try_result)
if int(try_result['alreadyindb']):
return UploadResult(type=UploadResult.Type.DUPLICATE, reason=_('Subtitle is already in database'))
if local_movie.get_imdb_id() is None:
return UploadResult(type=UploadResult.Type.MISSINGDATA, reason=_('Need IMDb id'))
upload_base_info = {
'idmovieimdb': local_movie.get_imdb_id(),
}
if local_movie.get_comments() is not None:
upload_base_info['subauthorcomment'] = local_movie.get_comments()
if not local_movie.get_language().is_generic():
upload_base_info['sublanguageid'] = local_movie.get_language().xxx()
if local_movie.get_release_name() is not None:
upload_base_info['moviereleasename'] = local_movie.get_release_name()
if local_movie.get_movie_name() is not None:
upload_base_info['movieaka'] = local_movie.get_movie_name()
if local_movie.is_hearing_impaired() is not None:
upload_base_info['hearingimpaired'] = local_movie.is_hearing_impaired()
if local_movie.is_high_definition() is not None:
upload_base_info['highdefinition'] = local_movie.is_high_definition()
if local_movie.is_automatic_translation() is not None:
upload_base_info['automatictranslation'] = local_movie.is_automatic_translation()
if local_movie.get_author() is not None:
upload_base_info['subtranslator'] = local_movie.get_author()
if local_movie.is_foreign_only() is not None:
upload_base_info['foreignpartsonly'] = local_movie.is_foreign_only()
query_upload = {
'baseinfo': upload_base_info,
}
for sub_i, (video, subtitle) in enumerate(video_subtitles):
sub_bytes = subtitle.get_filepath().open(mode='rb').read()
sub_tx_data = base64.b64encode(zlib.compress(sub_bytes)).decode()
query_upload['cd{}'.format(sub_i+1)] = {
'subhash': subtitle.get_md5_hash(),
'subfilename': subtitle.get_filename(),
'moviehash': video.get_osdb_hash(),
'moviebytesize': str(video.get_size()),
'movietimems': str(video.get_time_ms()) if video.get_time_ms() else None,
'moviefps': str(video.get_fps()) if video.get_fps() else None,
'movieframes': str(video.get_framecount()) if video.get_framecount() else None,
'moviefilename': video.get_filename(),
'subcontent': sub_tx_data,
}
def run_query_upload():
return self._xmlrpc.UploadSubtitles(self._token, query_upload)
result = self._safe_exec(run_query_upload, None)
self.check_result(result)
rsubs = []
for sub_data in result['data']:
filename = sub_data['SubFileName']
file_size = sub_data['SubSize']
md5_hash = sub_data['SubHash']
            id_online = sub_data['IDSubMovieFile']
download_link = sub_data['SubDownloadLink']
link = None
uploader = sub_data['UserNickName']
language = Language.from_xxx(sub_data['SubLanguageID'])
rating = float(sub_data['SubRating'])
add_date = datetime.datetime.strptime(sub_data['SubAddDate'], '%Y-%m-%d %H:%M:%S')
sub = OpenSubtitlesSubtitleFile(
filename=filename, file_size=file_size, md5_hash=md5_hash, id_online=id_online,
download_link=download_link, link=link, uploader=uploader, language=language,
rating=rating, date=add_date)
rsubs.append(sub)
return UploadResult(type=UploadResult.Type.OK, rsubs=rsubs)
def imdb_search_title(self, title):
self._ensure_connection()
def run_query():
return self._xmlrpc.SearchMoviesOnIMDB(self._token, title.strip())
result = self._safe_exec(run_query, default=None)
self.check_result(result)
imdbs = []
re_title = re.compile(r'(?P<title>.*) \((?P<year>[0-9]+)\)')
for imdb_data in result['data']:
imdb_id = imdb_data['id']
if all(c in string.digits for c in imdb_id):
imdb_id = 'tt{}'.format(imdb_id)
m = re_title.match(imdb_data['title'])
if m:
imdb_title = m['title']
imdb_year = int(m['year'])
else:
imdb_title = imdb_data['title']
imdb_year = None
imdbs.append(ImdbMovieMatch(imdb_id=imdb_id, title=imdb_title, year=imdb_year))
return imdbs
def ping(self):
log.debug('ping()')
if not self.logged_in():
raise ProviderNotConnectedError()
def run_query():
return self._xmlrpc.NoOperation(self._token)
result = self._safe_exec(run_query, None)
self.check_result(result)
def provider_info(self):
if self.connected():
def run_query():
return self._xmlrpc.ServerInfo()
result = self._safe_exec(run_query, None)
data = [
(_('XML-RPC version'), result['xmlrpc_version']),
(_('XML-RPC url'), result['xmlrpc_url']),
(_('Application'), result['application']),
(_('Contact'), result['contact']),
(_('Website url'), result['website_url']),
(_('Users online'), result['users_online_total']),
(_('Programs online'), result['users_online_program']),
(_('Users logged in'), result['users_loggedin']),
(_('Max users online'), result['users_max_alltime']),
(_('Users registered'), result['users_registered']),
(_('Subtitles downloaded'), result['subs_downloads']),
(_('Subtitles available'), result['subs_subtitle_files']),
(_('Number movies'), result['movies_total']),
(_('Number languages'), result['total_subtitles_languages']),
(_('Client IP'), result['download_limits']['client_ip']),
(_('24h global download limit'), result['download_limits']['global_24h_download_limit']),
(_('24h client download limit'), result['download_limits']['client_24h_download_limit']),
(_('24h client download count'), result['download_limits']['client_24h_download_count']),
(_('Client download quota'), result['download_limits']['client_download_quota']),
]
else:
data = []
return data
@staticmethod
def _languages_to_str(languages):
if languages:
lang_str = ','.join([language.xxx() for language in languages])
else:
lang_str = 'all'
return lang_str
@classmethod
def get_name(cls):
return 'opensubtitles'
@classmethod
def get_short_name(cls):
return 'os'
@classmethod
def get_icon(cls):
return ':/images/sites/opensubtitles.png'
def _signal_connection_failed(self):
# FIXME: set flag/... to signal users that the connection has failed
pass
def _safe_exec(self, query, default):
self._ensure_connection()
try:
result = query()
return result
except (ProtocolError, CannotSendRequest, SocketError, ExpatError) as e:
self._signal_connection_failed()
log.debug('Query failed: {} {}'.format(type(e), e.args))
return default
STATUS_CODE_RE = re.compile(r'(\d+) (.+)')
@classmethod
def check_result(cls, data):
log.debug('check_result(<data>)')
if data is None:
log.warning('data is None ==> FAIL')
raise OpenSubtitlesProviderConnectionError(None, _('No message'))
log.debug('checking presence of "status" in result ...')
if 'status' not in data:
log.debug('... no "status" in result ==> assuming SUCCESS')
return
log.debug('... FOUND')
status = data['status']
log.debug('result["status"]="{status}"'.format(status=status))
log.debug('applying regex to status ...')
try:
code, message = cls.STATUS_CODE_RE.match(status).groups()
log.debug('... regex SUCCEEDED')
code = int(code)
except (AttributeError, ValueError):
log.debug('... regex FAILED')
log.warning('Got unexpected status="{status}" from server.'.format(status=status))
log.debug('Checking for presence of "200" ...')
if '200' not in data['status']:
log.debug('... FAIL. Raising ProviderConnectionError.')
raise OpenSubtitlesProviderConnectionError(
None,
_('Server returned status="{status}". Expected "200 OK".').format(status=data['status']),
data['status'])
log.debug('... SUCCESS')
code, message = 200, 'OK'
log.debug('Checking code={code} ...'.format(code=code))
if code != 200:
log.debug('... FAIL. Raising ProviderConnectionError.')
raise OpenSubtitlesProviderConnectionError(code, message)
log.debug('... SUCCESS.')
log.debug('check_result() finished (data is ok)')
class OpenSubtitlesTextQuery(SubtitleTextQuery):
def get_movies(self):
return self._movies
def get_nb_movies_online(self):
return self._total
def more_movies_available(self):
if self._total is None:
return True
return len(self._movies) < self._total
def __init__(self, query):
SubtitleTextQuery.__init__(self, query)
self._movies = []
self._total = None
def search_online(self):
raise NotImplementedError()
@staticmethod
def _safe_exec(query, default):
try:
result = query()
return result
except HTTPError as e:
log.debug('Query failed: {} {}'.format(type(e), e.args))
return default
def search_more_movies(self):
if not self.more_movies_available():
return []
xml_url = 'http://www.opensubtitles.org/en/search2/moviename-{text_quoted}/offset-{offset}/xml'.format(
offset=len(self._movies),
text_quoted=quote(self.query))
xml_page = self._fetch_url(xml_url)
if xml_page is None:
raise OpenSubtitlesProviderConnectionError(None, 'Failed to fetch XML page at {!r}'.format(xml_url))
movies, nb_so_far, nb_provider = self._xml_to_movies(xml_page)
if movies is None:
raise OpenSubtitlesProviderConnectionError(None, 'Failed to extract movies from data at {!r}'.format(xml_url))
self._total = nb_provider
self._movies.extend(movies)
if len(self._movies) != nb_so_far:
log.warning('Provider told us it returned {nb_so_far} movies. '
'Yet we only extracted {nb_local} movies.'.format(
nb_so_far=nb_so_far, nb_local=len(movies)))
return movies
def search_more_subtitles(self, movie):
if movie.get_nb_subs_available() == movie.get_nb_subs_total():
return None
xml_url = 'http://www.opensubtitles.org{provider_link}/offset-{offset}/xml'.format(
provider_link=movie.get_provider_link(),
offset=movie.get_nb_subs_available())
xml_contents = self._fetch_url(xml_url)
if xml_contents is None:
raise OpenSubtitlesProviderConnectionError(None, 'Failed to fetch url {url}'.format(url=xml_url))
subtitles, nb_so_far, nb_provider = self._xml_to_subtitles(xml_contents)
if subtitles is None:
raise OpenSubtitlesProviderConnectionError(None, 'Failed to load subtitles from xml at {!r}'.format(xml_url))
movie.add_subtitles(subtitles)
return subtitles
def _xml_to_movies(self, xml):
subtitle_entries, nb_so_far, nb_provider = self._extract_subtitle_entries(xml)
if subtitle_entries is None:
return None, None, None
movies = []
for subtitle_entry in subtitle_entries:
try:
ads_entries = subtitle_entry.getElementsByTagName('ads1')
if ads_entries:
continue
def try_get_firstchild_data(key, default):
try:
return subtitle_entry.getElementsByTagName(key)[0].firstChild.data
except (AttributeError, IndexError):
return default
movie_id_entries = subtitle_entry.getElementsByTagName('MovieID')
movie_id = movie_id_entries[0].firstChild.data
movie_id_link = movie_id_entries[0].getAttribute('Link')
# movie_thumb = subtitle_entry.getElementsByTagName('MovieThumb')[0].firstChild.data
# link_use_next = subtitle_entry.getElementsByTagName('LinkUseNext')[0].firstChild.data
# link_zoozle = subtitle_entry.getElementsByTagName('LinkZoozle')[0].firstChild.data
# link_boardreader = subtitle_entry.getElementsByTagName('LinkBoardreader')[0].firstChild.data
movie_name = try_get_firstchild_data('MovieName', None)
movie_year = try_get_firstchild_data('MovieYear', None)
try:
movie_imdb_rating = float(
subtitle_entry.getElementsByTagName('MovieImdbRating')[0].getAttribute('Percent')) / 10
except AttributeError:
movie_imdb_rating = None
# try:
# movie_imdb_link = movie_id_entries[0].getAttribute('LinkImdb')
# except AttributeError:
# movie_imdb_link = None
movie_imdb_id = try_get_firstchild_data('MovieImdbID', None)
subs_total = int(subtitle_entry.getElementsByTagName('TotalSubs')[0].firstChild.data)
# newest = subtitle_entry.getElementsByTagName('Newest')[0].firstChild.data
imdb_identity = ImdbIdentity(imdb_id=movie_imdb_id, imdb_rating=movie_imdb_rating)
video_identity = VideoIdentity(name=movie_name, year=movie_year)
identity = ProviderIdentities(video_identity=video_identity, imdb_identity=imdb_identity, provider=self)
movie = RemoteMovie(
subtitles_nb_total=subs_total, provider_link=movie_id_link, provider_id=movie_id,
identities=identity
)
movies.append(movie)
except (AttributeError, IndexError, ValueError) as e:
log.warning('subtitle_entry={}'.format(subtitle_entry.toxml()))
log.warning('XML entry has invalid format: {} {}'.format(type(e), e.args))
return movies, nb_so_far, nb_provider
@staticmethod
def cleanup_string(name, alt='_'):
valid = string.ascii_letters + string.digits
name = name.strip()
return ''.join(c if c in valid else alt for c in name)
def _xml_to_subtitles(self, xml):
subtitle_entries, nb_so_far, nb_provider = self._extract_subtitle_entries(xml)
if subtitle_entries is None:
return None, None, None
subtitles = []
for subtitle_entry in subtitle_entries:
try:
ads_entries = subtitle_entry.getElementsByTagName('ads1') or subtitle_entry.getElementsByTagName('ads2')
if ads_entries:
continue
def try_get_first_child_data(key, default):
try:
return subtitle_entry.getElementsByTagName(key)[0].firstChild.data
except (AttributeError, IndexError):
return default
subtitle_id_entry = subtitle_entry.getElementsByTagName('IDSubtitle')[0]
# subtitle_id = subtitle_id_entry.firstChild.data
subtitle_link = 'http://www.opensubtitles.org' + subtitle_id_entry.getAttribute('Link')
subtitle_uuid = subtitle_id_entry.getAttribute('uuid')
subtitlefile_id = subtitle_entry.getElementsByTagName('IDSubtitleFile')[0].firstChild.data
user_entry = subtitle_entry.getElementsByTagName('UserID')[0]
user_id = int(user_entry.firstChild.data)
# user_link = 'http://www.opensubtitles.org' + user_entry.getAttribute('Link')
user_nickname = try_get_first_child_data('UserNickName', None)
                # comment = try_get_first_child_data('SubAuthorComment', None)
language_entry = subtitle_entry.getElementsByTagName('ISO639')[0]
language_iso639 = language_entry.firstChild.data
# language_link_search = 'http://www.opensubtitles.org' + language_entry.getAttribute('LinkSearch')
# language_flag = 'http:' + language_entry.getAttribute('flag')
# language_name = try_get_first_child_data('LanguageName', None)
subtitle_format = try_get_first_child_data('SubFormat', 'srt')
# subtitle_nbcds = int(try_get_first_child_data('SubSumCD', -1))
subtitle_add_date_locale = subtitle_entry.getElementsByTagName('SubAddDate')[0].getAttribute('locale')
subtitle_add_date = datetime.datetime.strptime(subtitle_add_date_locale, '%d/%m/%Y %H:%M:%S')
# subtitle_bad = int(subtitle_entry.getElementsByTagName('SubBad')[0].firstChild.data)
subtitle_rating = float(subtitle_entry.getElementsByTagName('SubRating')[0].firstChild.data)
subtitle_file_size = int(subtitle_entry.getElementsByTagName('SubSize')[0].firstChild.data)
# download_count = int(try_get_first_child_data('SubDownloadsCnt', -1))
# subtitle_movie_aka = try_get_first_child_data('SubMovieAka', None)
# subtitle_comments = int(try_get_first_child_data('SubComments', -1))
# subtitle_total = int(try_get_first_child_data('TotalSubs', -1)) #PRESENT?
# subtitle_newest = try_get_first_child_data('Newest', None) #PRESENT?
language = Language.from_xx(language_iso639)
movie_release_name = try_get_first_child_data('MovieReleaseName', None)
if movie_release_name is None:
movie_release_name = try_get_first_child_data('MovieName', None)
if movie_release_name is None:
log.warning('Skipping subtitle: no movie release name or movie name')
continue
movie_release_name = self.cleanup_string(movie_release_name)
filename = '{}.{}'.format(movie_release_name, subtitle_format)
download_link = None # 'https://www.opensubtitles.org/en/subtitleserve/sub/{}'.format(subtitle_id)
if user_nickname:
uploader = user_nickname
elif user_id != 0:
uploader = str(user_id)
else:
uploader = None
subtitle = OpenSubtitlesSubtitleFile(filename=filename, file_size=subtitle_file_size,
md5_hash=subtitle_uuid, id_online=subtitlefile_id,
download_link=download_link, link=subtitle_link, uploader=uploader,
language=language, rating=subtitle_rating, date=subtitle_add_date)
subtitles.append(subtitle)
except (AttributeError, IndexError, ValueError) as e:
log.warning('subtitle_entry={}'.format(subtitle_entry.toxml()))
log.warning('XML entry has invalid format: {} {}'.format(type(e), e.args))
return subtitles, nb_so_far, nb_provider
@staticmethod
def _extract_subtitle_entries(raw_xml):
entries = []
nb_so_far = 0
nb_total = 0
from xml.dom import minidom
import xml.parsers.expat
log.debug('extract_subtitle_entries() ...')
try:
# FIXME: use xpath
dom = minidom.parseString(raw_xml)
opensubtitles_entries = dom.getElementsByTagName('opensubtitles')
for opensubtitles_entry in opensubtitles_entries:
results_entries = opensubtitles_entry.getElementsByTagName('results')
for results_entry in results_entries:
try:
nb_so_far = int(results_entry.getAttribute('items'))
nb_total = int(results_entry.getAttribute('itemsfound'))
entries = results_entry.getElementsByTagName('subtitle')
break
except ValueError:
continue
if entries is None:
log.debug('... extraction FAILED: no entries found, maybe no subtitles on page!')
else:
log.debug('... extraction SUCCESS')
except (AttributeError, ValueError, xml.parsers.expat.ExpatError) as e:
log.debug('... extraction FAILED (xml error): {} {}'.format(type(e), e.args))
nb_so_far = None
entries = None
return entries, nb_so_far, nb_total
@staticmethod
def _fetch_url(url):
try:
log.debug('Fetching data from {}...'.format(url))
page = urlopen(url).read()
log.debug('... SUCCESS')
except HTTPError as e:
log.debug('... FAILED: {} {}'.format(type(e), e.args))
return None
return page
DEFAULT_USER_AGENT = ''
def set_default_user_agent(user_agent):
global DEFAULT_USER_AGENT
DEFAULT_USER_AGENT = user_agent
class OpenSubtitlesSettings(ProviderSettings):
def __init__(self, username='', password='', user_agent=None):
ProviderSettings.__init__(self)
self._username = username
self._password = password
self._user_agent = DEFAULT_USER_AGENT if user_agent is None else user_agent
@property
def username(self):
return self._username
@property
def password(self):
return self._password
@classmethod
def load(cls, username, password):
return cls(username=str(username), password=str(password))
def as_dict(self):
return {
'username': self._username,
'password': self._password,
}
@staticmethod
def key_types():
return {
'username': ProviderSettingsType.String,
'password': ProviderSettingsType.Password,
}
def get_user_agent(self):
return self._user_agent
class OpenSubtitlesSubtitleFile(RemoteSubtitleFile):
def __init__(self, filename, file_size, md5_hash, id_online, download_link,
link, uploader, language, rating, date):
RemoteSubtitleFile.__init__(self, filename=filename, file_size=file_size, language=language, md5_hash=md5_hash)
self._id_online = id_online
self._download_link = download_link
self._link = link
self._uploader = uploader
self._rating = rating
self._date = date
def get_id_online(self):
return self._id_online
def get_uploader(self):
return self._uploader
def get_rating(self):
return self._rating
def get_link(self):
return self._link
def get_age(self):
        return self._date
def get_provider(self):
return OpenSubtitles
def download(self, target_path, provider_instance, callback):
if self._download_link is None:
stream = self._download_service(provider_instance)
else:
stream = unzip_stream(self._download_http())
write_stream(src_file=stream, destination_path=target_path)
local_sub = LocalSubtitleFile(filepath=target_path)
return local_sub
def _download_service(self, provider_instance):
subs = provider_instance.download_subtitles([self])
return io.BytesIO(subs[0])
def _download_http(self):
sub_stream = urlopen(self._download_link)
return sub_stream
providers = OpenSubtitles,
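# Illustrative usage sketch (an assumption, not part of the module): a minimal
# login/ping round trip against the provider defined above. The credentials and
# user agent are placeholders; a real user agent registered with
# opensubtitles.org would be required.
def _example_ping(username, password, user_agent):
    set_default_user_agent(user_agent)
    settings = OpenSubtitlesSettings(username=username, password=password)
    provider = OpenSubtitles(settings=settings)
    provider.login()  # connects first if necessary
    try:
        provider.ping()
    finally:
        provider.logout()
        provider.disconnect()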
|
subdownloader/subdownloader
|
subdownloader/provider/opensubtitles.py
|
Python
|
gpl-3.0
| 37,191
|
## \example modeller/imp_restraints_in_modeller.py
# This demonstrates using IMP.Restraints as additional energy terms in the
# Modeller scoring function, so that IMP scoring terms can be incorporated into
# existing comparative modeling pipelines.
#
import modeller
import IMP
import IMP.core
import IMP.modeller
import sys
IMP.setup_from_argv(sys.argv, "IMP restraints in Modeller")
# Set up Modeller and build a model from the GGCC primary sequence
e = modeller.environ()
e.edat.dynamic_sphere = False
e.libs.topology.read('${LIB}/top_heav.lib')
e.libs.parameters.read('${LIB}/par.lib')
modmodel = modeller.model(e)
modmodel.build_sequence('GGCC')
# Set up IMP and load the Modeller model in as a new Hierarchy
m = IMP.Model()
protein = IMP.modeller.ModelLoader(modmodel).load_atoms(m)
# Create a simple IMP distance restraint between the first and last atoms
atoms = IMP.atom.get_by_type(protein, IMP.atom.ATOM_TYPE)
r = IMP.core.DistanceRestraint(m, IMP.core.Harmonic(10.0, 1.0),
atoms[0].get_particle(),
atoms[-1].get_particle())
sf = IMP.core.RestraintsScoringFunction([r])
# Use the IMPRestraints class to add this IMP scoring function to the
# Modeller scoring function
t = modmodel.env.edat.energy_terms
t.append(IMP.modeller.IMPRestraints(atoms, sf))
# Calculate the Modeller energy (score) for the whole protein
sel = modeller.selection(modmodel)
sel.energy()
|
shanot/imp
|
modules/modeller/examples/imp_restraints_in_modeller.py
|
Python
|
gpl-3.0
| 1,441
|
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
# $URI:$
__version__=''' $Id$ '''
__doc__='''Gazillions of miscellaneous internal utility functions'''
import os, sys, imp, time
try:
from hashlib import md5
except:
from md5 import md5
from reportlab.lib.logger import warnOnce
from rltempfile import get_rl_tempfile, get_rl_tempdir, _rl_getuid
def isSeqType(v,_st=(tuple,list)):
return isinstance(v,_st)
if sys.hexversion<0x2030000:
True = 1
False = 0
if sys.hexversion >= 0x02000000:
def _digester(s):
return md5(s).hexdigest()
else:
# hexdigest not available in 1.5
def _digester(s):
return join(map(lambda x : "%02x" % ord(x), md5(s).digest()), '')
def _findFiles(dirList,ext='.ttf'):
from os.path import isfile, isdir, join as path_join
from os import listdir
ext = ext.lower()
R = []
A = R.append
for D in dirList:
if not isdir(D): continue
for fn in listdir(D):
fn = path_join(D,fn)
if isfile(fn) and (not ext or fn.lower().endswith(ext)): A(fn)
return R
try:
_UserDict = dict
except:
from UserDict import UserDict as _UserDict
class CIDict(_UserDict):
def __init__(self,*args,**kwds):
for a in args: self.update(a)
self.update(kwds)
def update(self,D):
for k,v in D.items(): self[k] = v
def __setitem__(self,k,v):
try:
k = k.lower()
except:
pass
_UserDict.__setitem__(self,k,v)
def __getitem__(self,k):
try:
k = k.lower()
except:
pass
return _UserDict.__getitem__(self,k)
def __delitem__(self,k):
try:
k = k.lower()
except:
pass
return _UserDict.__delitem__(self,k)
def get(self,k,dv=None):
try:
return self[k]
except KeyError:
return dv
def __contains__(self,k):
try:
self[k]
return True
except:
return False
def pop(self,k,*a):
try:
k = k.lower()
except:
pass
return _UserDict.pop(*((self,k)+a))
def setdefault(self,k,*a):
try:
k = k.lower()
except:
pass
return _UserDict.setdefault(*((self,k)+a))
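# Illustrative sketch (not part of the original module): CIDict normalises keys
# to lower case on every access, so mixed-case look-ups hit the same entry.
def _cidict_example():
    d = CIDict(ContentType='application/pdf')
    return d['contenttype'], d['CONTENTTYPE'], d.get('Content-Length', 'missing')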
if os.name == 'mac':
#with the Mac, we need to tag the file in a special
#way so the system knows it is a PDF file.
#This supplied by Joe Strout
import macfs, macostools
_KNOWN_MAC_EXT = {
'BMP' : ('ogle','BMP '),
'EPS' : ('ogle','EPSF'),
'EPSF': ('ogle','EPSF'),
'GIF' : ('ogle','GIFf'),
'JPG' : ('ogle','JPEG'),
'JPEG': ('ogle','JPEG'),
'PCT' : ('ttxt','PICT'),
'PICT': ('ttxt','PICT'),
'PNG' : ('ogle','PNGf'),
'PPM' : ('ogle','.PPM'),
'TIF' : ('ogle','TIFF'),
'TIFF': ('ogle','TIFF'),
'PDF' : ('CARO','PDF '),
'HTML': ('MSIE','TEXT'),
}
def markfilename(filename,creatorcode=None,filetype=None,ext='PDF'):
try:
if creatorcode is None or filetype is None and ext is not None:
try:
creatorcode, filetype = _KNOWN_MAC_EXT[ext.upper()]
except:
return
macfs.FSSpec(filename).SetCreatorType(creatorcode,filetype)
macostools.touched(filename)
except:
pass
else:
def markfilename(filename,creatorcode=None,filetype=None):
pass
import reportlab
__RL_DIR=os.path.dirname(reportlab.__file__) #possibly relative
_RL_DIR=os.path.isabs(__RL_DIR) and __RL_DIR or os.path.abspath(__RL_DIR)
del reportlab
#Attempt to detect if this copy of reportlab is running in a
#file system (as opposed to mostly running in a zip or McMillan
#archive or Jar file). This is used by test cases, so that
#we can write test cases that don't get activated in a compiled
try:
__file__
except:
__file__ = sys.argv[0]
import glob, fnmatch
try:
_isFSD = not __loader__
_archive = os.path.normcase(os.path.normpath(__loader__.archive))
_archivepfx = _archive + os.sep
_archivedir = os.path.dirname(_archive)
_archivedirpfx = _archivedir + os.sep
_archivepfxlen = len(_archivepfx)
_archivedirpfxlen = len(_archivedirpfx)
def __startswith_rl(fn,
_archivepfx=_archivepfx,
_archivedirpfx=_archivedirpfx,
_archive=_archive,
_archivedir=_archivedir,
os_path_normpath=os.path.normpath,
os_path_normcase=os.path.normcase,
os_getcwd=os.getcwd,
os_sep=os.sep,
os_sep_len = len(os.sep)):
'''if the name starts with a known prefix strip it off'''
fn = os_path_normpath(fn.replace('/',os_sep))
nfn = os_path_normcase(fn)
if nfn in (_archivedir,_archive): return 1,''
if nfn.startswith(_archivepfx): return 1,fn[_archivepfxlen:]
if nfn.startswith(_archivedirpfx): return 1,fn[_archivedirpfxlen:]
cwd = os_path_normcase(os_getcwd())
n = len(cwd)
if nfn.startswith(cwd):
if fn[n:].startswith(os_sep): return 1, fn[n+os_sep_len:]
if n==len(fn): return 1,''
return not os.path.isabs(fn),fn
def _startswith_rl(fn):
return __startswith_rl(fn)[1]
def rl_glob(pattern,glob=glob.glob,fnmatch=fnmatch.fnmatch, _RL_DIR=_RL_DIR,pjoin=os.path.join):
c, pfn = __startswith_rl(pattern)
r = glob(pfn)
if c or r==[]:
r += map(lambda x,D=_archivepfx,pjoin=pjoin: pjoin(_archivepfx,x),filter(lambda x,pfn=pfn,fnmatch=fnmatch: fnmatch(x,pfn),__loader__._files.keys()))
return r
except:
_isFSD = os.path.isfile(__file__) #slight risk of wrong path
__loader__ = None
def _startswith_rl(fn):
return fn
def rl_glob(pattern,glob=glob.glob):
return glob(pattern)
del glob, fnmatch
_isFSSD = _isFSD and os.path.isfile(os.path.splitext(__file__)[0] +'.py')
def isFileSystemDistro():
'''return truth if a file system distribution'''
return _isFSD
def isCompactDistro():
'''return truth if not a file system distribution'''
return not _isFSD
def isSourceDistro():
'''return truth if a source file system distribution'''
return _isFSSD
try:
#raise ImportError
### NOTE! FP_STR SHOULD PROBABLY ALWAYS DO A PYTHON STR() CONVERSION ON ARGS
### IN CASE THEY ARE "LAZY OBJECTS". ACCELLERATOR DOESN'T DO THIS (YET)
try:
from _rl_accel import fp_str # in case of builtin version
except ImportError:
from reportlab.lib._rl_accel import fp_str # specific
except ImportError:
from math import log
_log_10 = lambda x,log=log,_log_e_10=log(10.0): log(x)/_log_e_10
_fp_fmts = "%.0f", "%.1f", "%.2f", "%.3f", "%.4f", "%.5f", "%.6f"
import re
_tz_re = re.compile('0+$')
del re
def fp_str(*a):
'''convert separate arguments (or single sequence arg) into space separated numeric strings'''
if len(a)==1 and isSeqType(a[0]): a = a[0]
s = []
A = s.append
for i in a:
sa =abs(i)
if sa<=1e-7: A('0')
else:
l = sa<=1 and 6 or min(max(0,(6-int(_log_10(sa)))),6)
n = _fp_fmts[l]%i
if l:
n = _tz_re.sub('',n)
try:
if n[-1]=='.': n = n[:-1]
except:
print i, n
raise
A((n[0]!='0' or len(n)==1) and n or n[1:])
return ' '.join(s)
#hack test for comma users
if ',' in fp_str(0.25):
_FP_STR = fp_str
def fp_str(*a):
return _FP_STR(*a).replace(',','.')
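# Illustrative sketch (an assumption): fp_str renders the compact, space
# separated number strings used when emitting PDF content.
def _fp_str_example():
    return fp_str(1, 2.5, 0)  # e.g. '1 2.5 0' with either implementation above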
def recursiveImport(modulename, baseDir=None, noCWD=0, debug=0):
"""Dynamically imports possible packagized module, or raises ImportError"""
normalize = lambda x: os.path.normcase(os.path.abspath(os.path.normpath(x)))
path = map(normalize,sys.path)
if baseDir:
if not isSeqType(baseDir):
tp = [baseDir]
else:
tp = filter(None,list(baseDir))
for p in tp:
p = normalize(p)
if p not in path: path.insert(0,p)
if noCWD:
for p in ('','.',normalize('.')):
while p in path:
if debug: print 'removed "%s" from path' % p
path.remove(p)
elif '.' not in path:
path.insert(0,'.')
if debug:
import pprint
pp = pprint.pprint
print 'path=',
pp(path)
#make import errors a bit more informative
opath = sys.path
try:
sys.path = path
exec 'import %s\nm = %s\n' % (modulename,modulename) in locals()
sys.path = opath
return m
except ImportError:
sys.path = opath
msg = "Could not import '%s'" % modulename
if baseDir:
msg = msg + " under %s" % baseDir
raise ImportError, msg
except Exception, e:
msg = "Exception raised while importing '%s': %s" % (modulename, e.message)
raise ImportError, msg
def recursiveGetAttr(obj, name):
"Can call down into e.g. object1.object2[4].attr"
return eval(name, obj.__dict__)
def recursiveSetAttr(obj, name, value):
"Can call down into e.g. object1.object2[4].attr = value"
#get the thing above last.
tokens = name.split('.')
if len(tokens) == 1:
setattr(obj, name, value)
else:
most = '.'.join(tokens[:-1])
last = tokens[-1]
parent = recursiveGetAttr(obj, most)
setattr(parent, last, value)
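# Illustrative sketch (not part of the original module): drilling into a dotted
# attribute path with the two helpers above.
def _recursive_attr_example():
    class _Node:
        pass
    root = _Node()
    root.child = _Node()
    root.child.attr = 1
    recursiveSetAttr(root, 'child.attr', 2)
    return recursiveGetAttr(root, 'child.attr')  # -> 2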
def import_zlib():
try:
import zlib
except ImportError:
zlib = None
from reportlab.rl_config import ZLIB_WARNINGS
if ZLIB_WARNINGS: warnOnce('zlib not available')
return zlib
# Image Capability Detection. Set a flag haveImages
# to tell us if either PIL or Java imaging libraries present.
# define PIL_Image as either None, or an alias for the PIL.Image
# module, as there are 2 ways to import it
if sys.platform[0:4] == 'java':
try:
import javax.imageio
import java.awt.image
haveImages = 1
except:
haveImages = 0
else:
try:
from PIL import Image
except ImportError:
try:
import Image
except ImportError:
Image = None
haveImages = Image is not None
try:
from cStringIO import StringIO as __StringIO
except ImportError:
from StringIO import StringIO as __StringIO
def getStringIO(buf=None):
'''unified StringIO instance interface'''
return buf is not None and __StringIO(buf) or __StringIO()
_StringIOKlass=__StringIO().__class__
class ArgvDictValue:
'''A type to allow clients of getArgvDict to specify a conversion function'''
def __init__(self,value,func):
self.value = value
self.func = func
def getArgvDict(**kw):
''' Builds a dictionary from its keyword arguments with overrides from sys.argv.
Attempts to be smart about conversions, but the value can be an instance
of ArgDictValue to allow specifying a conversion function.
'''
def handleValue(v,av,func):
if func:
v = func(av)
else:
if isinstance(v,basestring):
if isinstance(v,unicode): v = v.encode('utf8')
v = av
elif isinstance(v,float):
v = float(av)
elif isinstance(v,int):
v = int(av)
elif isinstance(v,list):
v = list(eval(av))
elif isinstance(v,tuple):
v = tuple(eval(av))
else:
raise TypeError("Can't convert string %r to %s" % (av,type(v)))
return v
A = sys.argv[1:]
R = {}
for k, v in kw.items():
if isinstance(v,ArgvDictValue):
v, func = v.value, v.func
else:
func = None
handled = 0
ke = k+'='
for a in A:
if a.find(ke)==0:
av = a[len(ke):]
A.remove(a)
R[k] = handleValue(v,av,func)
handled = 1
break
if not handled: R[k] = handleValue(v,v,func)
return R
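# Illustrative sketch (an assumption, not part of the original module): keyword
# defaults define the expected types and matching 'name=value' command line
# arguments override them.
def _argv_dict_example():
    saved = sys.argv[:]
    sys.argv[1:] = ['pages=10', 'title=Report']
    try:
        # -> {'pages': 10, 'title': 'Report', 'debug': 0}
        return getArgvDict(pages=1, title='Untitled', debug=0)
    finally:
        sys.argv[:] = saved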
def getHyphenater(hDict=None):
try:
from reportlab.lib.pyHnj import Hyphen
if hDict is None: hDict=os.path.join(os.path.dirname(__file__),'hyphen.mashed')
return Hyphen(hDict)
except ImportError, errMsg:
if str(errMsg)!='No module named pyHnj': raise
return None
def _className(self):
'''Return a shortened class name'''
try:
name = self.__class__.__name__
i=name.rfind('.')
if i>=0: return name[i+1:]
return name
except AttributeError:
return str(self)
def open_for_read_by_name(name,mode='b'):
if 'r' not in mode: mode = 'r'+mode
try:
return open(name,mode)
except IOError:
if _isFSD or __loader__ is None: raise
#we have a __loader__, perhaps the filename starts with
#the dirname(reportlab.__file__) or is relative
name = _startswith_rl(name)
s = __loader__.get_data(name)
if 'b' not in mode and os.linesep!='\n': s = s.replace(os.linesep,'\n')
return getStringIO(s)
import urllib2
def open_for_read(name,mode='b', urlopen=urllib2.urlopen):
'''attempt to open a file or URL for reading'''
if hasattr(name,'read'): return name
try:
return open_for_read_by_name(name,mode)
except:
try:
return getStringIO(urlopen(name).read())
except:
raise IOError('Cannot open resource "%s"' % name)
del urllib2
def open_and_read(name,mode='b'):
return open_for_read(name,mode).read()
def open_and_readlines(name,mode='t'):
return open_and_read(name,mode).split('\n')
def rl_isfile(fn,os_path_isfile=os.path.isfile):
if hasattr(fn,'read'): return True
if os_path_isfile(fn): return True
if _isFSD or __loader__ is None: return False
fn = _startswith_rl(fn)
return fn in __loader__._files.keys()
def rl_isdir(pn,os_path_isdir=os.path.isdir,os_path_normpath=os.path.normpath):
if os_path_isdir(pn): return True
if _isFSD or __loader__ is None: return False
pn = _startswith_rl(os_path_normpath(pn))
if not pn.endswith(os.sep): pn += os.sep
return len(filter(lambda x,pn=pn: x.startswith(pn),__loader__._files.keys()))>0
def rl_listdir(pn,os_path_isdir=os.path.isdir,os_path_normpath=os.path.normpath,os_listdir=os.listdir):
if os_path_isdir(pn) or _isFSD or __loader__ is None: return os_listdir(pn)
pn = _startswith_rl(os_path_normpath(pn))
if not pn.endswith(os.sep): pn += os.sep
return [x[len(pn):] for x in __loader__._files.keys() if x.startswith(pn)]
def rl_getmtime(pn,os_path_isfile=os.path.isfile,os_path_normpath=os.path.normpath,os_path_getmtime=os.path.getmtime,time_mktime=time.mktime):
if os_path_isfile(pn) or _isFSD or __loader__ is None: return os_path_getmtime(pn)
p = _startswith_rl(os_path_normpath(pn))
try:
e = __loader__._files[p]
except KeyError:
return os_path_getmtime(pn)
s = e[5]
d = e[6]
return time_mktime((((d>>9)&0x7f)+1980,(d>>5)&0xf,d&0x1f,(s>>11)&0x1f,(s>>5)&0x3f,(s&0x1f)<<1,0,0,0))
def rl_get_module(name,dir):
if name in sys.modules:
om = sys.modules[name]
del sys.modules[name]
else:
om = None
try:
f = None
try:
f, p, desc= imp.find_module(name,[dir])
return imp.load_module(name,f,p,desc)
except:
if isCompactDistro():
#attempt a load from inside the zip archive
import zipimport
dir = _startswith_rl(dir)
dir = (dir=='.' or not dir) and _archive or os.path.join(_archive,dir.replace('/',os.sep))
zi = zipimport.zipimporter(dir)
return zi.load_module(name)
raise ImportError('%s[%s]' % (name,dir))
finally:
if om: sys.modules[name] = om
del om
if f: f.close()
def _isPILImage(im):
try:
return isinstance(im,Image.Image)
except AttributeError:
return 0
class ImageReader(object):
"Wraps up either PIL or Java to get data from bitmaps"
_cache={}
def __init__(self, fileName,ident=None):
if isinstance(fileName,ImageReader):
self.__dict__ = fileName.__dict__ #borgize
return
self._ident = ident
        #start with lots of null private fields, to be populated by
#the relevant engine.
self.fileName = fileName
self._image = None
self._width = None
self._height = None
self._transparent = None
self._data = None
if _isPILImage(fileName):
self._image = fileName
self.fp = getattr(fileName,'fp',None)
try:
self.fileName = self._image.fileName
except AttributeError:
self.fileName = 'PILIMAGE_%d' % id(self)
else:
try:
from reportlab.rl_config import imageReaderFlags
self.fp = open_for_read(fileName,'b')
if isinstance(self.fp,_StringIOKlass): imageReaderFlags=0 #avoid messing with already internal files
if imageReaderFlags>0: #interning
data = self.fp.read()
if imageReaderFlags&2: #autoclose
try:
self.fp.close()
except:
pass
if imageReaderFlags&4: #cache the data
if not self._cache:
from rl_config import register_reset
register_reset(self._cache.clear)
data=self._cache.setdefault(md5(data).digest(),data)
self.fp=getStringIO(data)
elif imageReaderFlags==-1 and isinstance(fileName,(str,unicode)):
#try Ralf Schmitt's re-opening technique of avoiding too many open files
self.fp.close()
del self.fp #will become a property in the next statement
self.__class__=LazyImageReader
if haveImages:
#detect which library we are using and open the image
if not self._image:
self._image = self._read_image(self.fp)
if getattr(self._image,'format',None)=='JPEG': self.jpeg_fh = self._jpeg_fh
else:
from reportlab.pdfbase.pdfutils import readJPEGInfo
try:
self._width,self._height,c=readJPEGInfo(self.fp)
except:
annotateException('\nImaging Library not available, unable to import bitmaps only jpegs\nfileName=%r identity=%s'%(fileName,self.identity()))
self.jpeg_fh = self._jpeg_fh
self._data = self.fp.read()
self._dataA=None
self.fp.seek(0)
except:
annotateException('\nfileName=%r identity=%s'%(fileName,self.identity()))
def identity(self):
'''try to return information that will identify the instance'''
fn = self.fileName
if not isinstance(fn,basestring):
fn = getattr(getattr(self,'fp',None),'name',None)
ident = self._ident
return '[%s@%s%s%s]' % (self.__class__.__name__,hex(id(self)),ident and (' ident=%r' % ident) or '',fn and (' filename=%r' % fn) or '')
def _read_image(self,fp):
if sys.platform[0:4] == 'java':
from javax.imageio import ImageIO
return ImageIO.read(fp)
else:
return Image.open(fp)
def _jpeg_fh(self):
fp = self.fp
fp.seek(0)
return fp
def jpeg_fh(self):
return None
def getSize(self):
if (self._width is None or self._height is None):
if sys.platform[0:4] == 'java':
self._width = self._image.getWidth()
self._height = self._image.getHeight()
else:
self._width, self._height = self._image.size
return (self._width, self._height)
def getRGBData(self):
"Return byte array of RGB data as string"
try:
if self._data is None:
self._dataA = None
if sys.platform[0:4] == 'java':
import jarray
from java.awt.image import PixelGrabber
width, height = self.getSize()
buffer = jarray.zeros(width*height, 'i')
pg = PixelGrabber(self._image, 0,0,width,height,buffer,0,width)
pg.grabPixels()
# there must be a way to do this with a cast not a byte-level loop,
# I just haven't found it yet...
pixels = []
a = pixels.append
for i in range(len(buffer)):
rgb = buffer[i]
a(chr((rgb>>16)&0xff))
a(chr((rgb>>8)&0xff))
a(chr(rgb&0xff))
self._data = ''.join(pixels)
self.mode = 'RGB'
else:
im = self._image
mode = self.mode = im.mode
if mode=='RGBA':
if Image.VERSION.startswith('1.1.7'): im.load()
self._dataA = ImageReader(im.split()[3])
im = im.convert('RGB')
self.mode = 'RGB'
elif mode not in ('L','RGB','CMYK'):
im = im.convert('RGB')
self.mode = 'RGB'
if hasattr(im, 'tobytes'): #make pillow and PIL both happy, for now
self._data = im.tobytes()
else:
self._data = im.tostring()
return self._data
except:
annotateException('\nidentity=%s'%self.identity())
def getImageData(self):
width, height = self.getSize()
return width, height, self.getRGBData()
def getTransparent(self):
if sys.platform[0:4] == 'java':
return None
else:
if "transparency" in self._image.info:
transparency = self._image.info["transparency"] * 3
palette = self._image.palette
try:
palette = palette.palette
except:
try:
palette = palette.data
except:
return None
return map(ord, palette[transparency:transparency+3])
else:
return None
class LazyImageReader(ImageReader):
def fp(self):
return open_for_read(self.fileName, 'b')
fp=property(fp)
def _image(self):
return self._read_image(self.fp)
_image=property(_image)
def getImageData(imageFileName):
"Get width, height and RGB pixels from image file. Wraps Java/PIL"
try:
return imageFileName.getImageData()
except AttributeError:
return ImageReader(imageFileName).getImageData()
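# Illustrative sketch (not part of the original module): reading the pixel data
# of a bitmap from disk; 'logo.png' is a placeholder path.
def _image_data_example(path='logo.png'):
    width, height, rgb = getImageData(path)
    return width, height, len(rgb)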
class DebugMemo:
'''Intended as a simple report back encapsulator
Typical usages:
1. To record error data::
dbg = DebugMemo(fn='dbgmemo.dbg',myVar=value)
dbg.add(anotherPayload='aaaa',andagain='bbb')
dbg.dump()
2. To show the recorded info::
dbg = DebugMemo(fn='dbgmemo.dbg',mode='r')
dbg.load()
dbg.show()
3. To re-use recorded information::
dbg = DebugMemo(fn='dbgmemo.dbg',mode='r')
dbg.load()
myTestFunc(dbg.payload('myVar'),dbg.payload('andagain'))
In addition to the payload variables the dump records many useful bits
of information which are also printed in the show() method.
'''
def __init__(self,fn='rl_dbgmemo.dbg',mode='w',getScript=1,modules=(),capture_traceback=1, stdout=None, **kw):
import time, socket
self.fn = fn
if not stdout:
self.stdout = sys.stdout
else:
if hasattr(stdout,'write'):
self.stdout = stdout
else:
self.stdout = open(stdout,'w')
if mode!='w': return
self.store = store = {}
if capture_traceback and sys.exc_info() != (None,None,None):
import traceback
s = getStringIO()
traceback.print_exc(None,s)
store['__traceback'] = s.getvalue()
cwd=os.getcwd()
lcwd = os.listdir(cwd)
pcwd = os.path.dirname(cwd)
lpcwd = pcwd and os.listdir(pcwd) or '???'
exed = os.path.abspath(os.path.dirname(sys.argv[0]))
project_version='???'
md=None
try:
import marshal
md=marshal.loads(__loader__.get_data('meta_data.mar'))
project_version=md['project_version']
except:
pass
env = os.environ
K=env.keys()
K.sort()
store.update({ 'gmt': time.asctime(time.gmtime(time.time())),
'platform': sys.platform,
'version': sys.version,
'hexversion': hex(sys.hexversion),
'executable': sys.executable,
'exec_prefix': sys.exec_prefix,
'prefix': sys.prefix,
'path': sys.path,
'argv': sys.argv,
'cwd': cwd,
'hostname': socket.gethostname(),
'lcwd': lcwd,
'lpcwd': lpcwd,
'byteorder': sys.byteorder,
'maxint': sys.maxint,
                        'maxunicode': getattr(sys,'maxunicode','????'),
'api_version': getattr(sys,'api_version','????'),
'version_info': getattr(sys,'version_info','????'),
'winver': getattr(sys,'winver','????'),
'environment': '\n\t\t\t'.join(['']+['%s=%r' % (k,env[k]) for k in K]),
'__loader__': repr(__loader__),
'project_meta_data': md,
'project_version': project_version,
})
for M,A in (
(sys,('getwindowsversion','getfilesystemencoding')),
(os,('uname', 'ctermid', 'getgid', 'getuid', 'getegid',
'geteuid', 'getlogin', 'getgroups', 'getpgrp', 'getpid', 'getppid',
)),
):
for a in A:
if hasattr(M,a):
try:
store[a] = getattr(M,a)()
except:
pass
if exed!=cwd:
try:
store.update({'exed': exed, 'lexed': os.listdir(exed),})
except:
pass
if getScript:
fn = os.path.abspath(sys.argv[0])
if os.path.isfile(fn):
try:
store['__script'] = (fn,open(fn,'r').read())
except:
pass
module_versions = {}
for n,m in sys.modules.items():
if n=='reportlab' or n=='rlextra' or n[:10]=='reportlab.' or n[:8]=='rlextra.':
v = [getattr(m,x,None) for x in ('__version__','__path__','__file__')]
if filter(None,v):
v = [v[0]] + filter(None,v[1:])
module_versions[n] = tuple(v)
store['__module_versions'] = module_versions
self.store['__payload'] = {}
self._add(kw)
def _add(self,D):
payload = self.store['__payload']
for k, v in D.items():
payload[k] = v
def add(self,**kw):
self._add(kw)
def _dump(self,f):
import pickle
try:
pos=f.tell()
pickle.dump(self.store,f)
except:
S=self.store.copy()
ff=getStringIO()
for k,v in S.iteritems():
try:
pickle.dump({k:v},ff)
except:
S[k] = '<unpicklable object %r>' % v
f.seek(pos,0)
pickle.dump(S,f)
def dump(self):
f = open(self.fn,'wb')
try:
self._dump(f)
finally:
f.close()
def dumps(self):
f = getStringIO()
self._dump(f)
return f.getvalue()
def _load(self,f):
import pickle
self.store = pickle.load(f)
def load(self):
f = open(self.fn,'rb')
try:
self._load(f)
finally:
f.close()
def loads(self,s):
self._load(getStringIO(s))
def _show_module_versions(self,k,v):
self._writeln(k[2:])
K = v.keys()
K.sort()
for k in K:
vk = vk0 = v[k]
if isinstance(vk,tuple): vk0 = vk[0]
try:
m = recursiveImport(k,sys.path[:],1)
d = getattr(m,'__version__',None)==vk0 and 'SAME' or 'DIFFERENT'
except:
m = None
d = '??????unknown??????'
self._writeln(' %s = %s (%s)' % (k,vk,d))
def _banner(self,k,what):
self._writeln('###################%s %s##################' % (what,k[2:]))
def _start(self,k):
self._banner(k,'Start ')
def _finish(self,k):
self._banner(k,'Finish ')
def _show_lines(self,k,v):
self._start(k)
self._writeln(v)
self._finish(k)
def _show_file(self,k,v):
k = '%s %s' % (k,os.path.basename(v[0]))
self._show_lines(k,v[1])
def _show_payload(self,k,v):
if v:
import pprint
self._start(k)
pprint.pprint(v,self.stdout)
self._finish(k)
def _show_extensions(self):
for mn in ('_rl_accel','_renderPM','sgmlop','pyRXP','pyRXPU','_imaging','Image'):
try:
A = [mn].append
m = recursiveImport(mn,sys.path[:],1)
A(m.__file__)
for vn in ('__version__','VERSION','_version','version'):
if hasattr(m,vn):
A('%s=%r' % (vn,getattr(m,vn)))
except:
A('not found')
self._writeln(' '+' '.join(A.__self__))
specials = {'__module_versions': _show_module_versions,
'__payload': _show_payload,
'__traceback': _show_lines,
'__script': _show_file,
}
def show(self):
K = self.store.keys()
K.sort()
for k in K:
if k not in self.specials.keys(): self._writeln('%-15s = %s' % (k,self.store[k]))
for k in K:
if k in self.specials.keys(): self.specials[k](self,k,self.store[k])
self._show_extensions()
def payload(self,name):
return self.store['__payload'][name]
def __setitem__(self,name,value):
self.store['__payload'][name] = value
def __getitem__(self,name):
return self.store['__payload'][name]
def _writeln(self,msg):
self.stdout.write(msg+'\n')
def _flatten(L,a):
for x in L:
if isSeqType(x): _flatten(x,a)
else: a(x)
def flatten(L):
'''recursively flatten the list or tuple L'''
R = []
_flatten(L,R.append)
return R
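# --- Illustrative usage (editor's sketch, not part of the original module) ---
# flatten() recursively walks nested lists/tuples (anything isSeqType accepts) into one flat list:
# >>> flatten([1, (2, 3), [4, [5, 6]]])
# [1, 2, 3, 4, 5, 6]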
def find_locals(func,depth=0):
'''apply func to the locals at each stack frame till func returns a non false value'''
while 1:
_ = func(sys._getframe(depth).f_locals)
if _: return _
depth += 1
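# --- Illustrative usage (editor's sketch, not part of the original module) ---
# find_locals() walks up the call stack until the callback returns something truthy:
# >>> def outer():
# ...     secret = 42
# ...     return inner()
# >>> def inner():
# ...     return find_locals(lambda frame_locals: frame_locals.get('secret'))
# >>> outer()
# 42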
class _FmtSelfDict:
def __init__(self,obj,overrideArgs):
self.obj = obj
self._overrideArgs = overrideArgs
def __getitem__(self,k):
try:
return self._overrideArgs[k]
except KeyError:
try:
return self.obj.__dict__[k]
except KeyError:
return getattr(self.obj,k)
class FmtSelfDict:
'''mixin to provide the _fmt method'''
def _fmt(self,fmt,**overrideArgs):
D = _FmtSelfDict(self, overrideArgs)
return fmt % D
def _simpleSplit(txt,mW,SW):
L = []
ws = SW(' ')
O = []
w = -ws
for t in txt.split():
lt = SW(t)
if w+ws+lt<=mW or O==[]:
O.append(t)
w = w + ws + lt
else:
L.append(' '.join(O))
O = [t]
w = lt
if O!=[]: L.append(' '.join(O))
return L
def simpleSplit(text,fontName,fontSize,maxWidth):
from reportlab.pdfbase.pdfmetrics import stringWidth
lines = text.split('\n')
SW = lambda text, fN=fontName, fS=fontSize: stringWidth(text, fN, fS)
if maxWidth:
L = []
for l in lines:
L[-1:-1] = _simpleSplit(l,maxWidth,SW)
lines = L
return lines
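# --- Illustrative usage (editor's sketch, not part of the original module) ---
# simpleSplit() wraps text to a maximum width measured with the given font; the exact
# break points depend on the font metrics, but with the standard Helvetica metrics:
# >>> simpleSplit('ReportLab wraps long lines of text', 'Helvetica', 10, 80)
# ['ReportLab wraps', 'long lines of text']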
def escapeTextOnce(text):
"Escapes once only"
from xml.sax.saxutils import escape
if text is None:
return text
text = escape(text)
    text = text.replace('&amp;amp;', '&amp;')
    text = text.replace('&amp;gt;', '&gt;')
    text = text.replace('&amp;lt;', '&lt;')
return text
def fileName2Utf8(fn):
'''attempt to convert a filename to utf8'''
from reportlab.rl_config import fsEncodings
for enc in fsEncodings:
try:
return fn.decode(enc).encode('utf8')
except:
pass
raise ValueError('cannot convert %r to utf8' % fn)
import itertools
def prev_this_next(items):
"""
Loop over a collection with look-ahead and look-back.
From Thomas Guest,
http://wordaligned.org/articles/zippy-triples-served-with-python
Seriously useful looping tool (Google "zippy triples")
lets you loop a collection and see the previous and next items,
which get set to None at the ends.
To be used in layout algorithms where one wants a peek at the
next item coming down the pipe.
"""
extend = itertools.chain([None], items, [None])
prev, this, next = itertools.tee(extend, 3)
try:
this.next()
next.next()
next.next()
except StopIteration:
pass
return itertools.izip(prev, this, next)
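# --- Illustrative usage (editor's sketch, not part of the original module) ---
# Each iteration yields (previous, current, next), padded with None at both ends:
# >>> for prv, cur, nxt in prev_this_next('abc'):
# ...     print prv, cur, nxt
# None a b
# a b c
# b c None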
def commasplit(s):
'''
Splits the string s at every unescaped comma and returns the result as a list.
To escape a comma, double it. Individual items are stripped.
    To avoid the ambiguity of three successive commas denoting a comma at the beginning
    or end of an item, add a space between the item separator and the escaped comma.
>>> commasplit('a,b,c')
['a', 'b', 'c']
>>> commasplit('a,, , b , c ')
['a,', 'b', 'c']
>>> commasplit('a, ,,b, c')
['a', ',b', 'c']
'''
n = len(s)-1
s += ' '
i = 0
r=['']
while i<=n:
if s[i]==',':
if s[i+1]==',':
r[-1]+=','
i += 1
else:
r[-1] = r[-1].strip()
if i!=n: r.append('')
else:
r[-1] += s[i]
i+=1
r[-1] = r[-1].strip()
return r
def commajoin(l):
'''
Inverse of commasplit, except that whitespace around items is not conserved.
Adds more whitespace than needed for simplicity and performance.
>>> commasplit(commajoin(['a', 'b', 'c']))
['a', 'b', 'c']
    >>> commasplit(commajoin(['a,', ' b ', 'c']))
['a,', 'b', 'c']
    >>> commasplit(commajoin(['a ', ',b', 'c']))
['a', ',b', 'c']
'''
return ','.join([ ' ' + i.replace(',', ',,') + ' ' for i in l ])
def findInPaths(fn,paths,isfile=True,fail=False):
'''search for relative files in likely places'''
exists = isfile and os.path.isfile or os.path.isdir
if exists(fn): return fn
pjoin = os.path.join
if not os.path.isabs(fn):
for p in paths:
pfn = pjoin(p,fn)
if exists(pfn):
return pfn
if fail: raise ValueError('cannot locate %r with paths=%r' % (fn,paths))
return fn
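# --- Illustrative usage (editor's sketch, not part of the original module; the paths are hypothetical) ---
# findInPaths() returns the first candidate that exists, otherwise the name unchanged
# (or raises ValueError when fail=True):
# >>> findInPaths('fonts/Vera.ttf', ['/usr/share/reportlab', '/opt/reportlab'])
# '/usr/share/reportlab/fonts/Vera.ttf'      # assuming that file exists under the first path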
def annotateException(msg,enc='utf8'):
'''add msg to the args of an existing exception'''
if not msg: raise
t,v,b=sys.exc_info()
if not hasattr(v,'args'): raise
e = -1
A = list(v.args)
for i,a in enumerate(A):
if isinstance(a,basestring):
e = i
break
if e>=0:
if isinstance(a,unicode):
if not isinstance(msg,unicode):
msg=msg.decode(enc)
else:
if isinstance(msg,unicode):
msg=msg.encode(enc)
else:
msg = str(msg)
if isinstance(v,IOError) and getattr(v,'strerror',None):
v.strerror = msg+'\n'+str(v.strerror)
else:
A[e] += msg
else:
A.append(msg)
v.args = tuple(A)
raise t,v,b
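# --- Illustrative usage (editor's sketch, not part of the original module) ---
# annotateException() re-raises the active exception with extra context appended to its message:
# >>> try:
# ...     {}['missing']
# ... except KeyError:
# ...     annotateException(' while looking up the style table')
# Traceback (most recent call last):
#     ...
# KeyError: 'missing while looking up the style table'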
def escapeOnce(data):
"""Ensure XML output is escaped just once, irrespective of input
    >>> escapeOnce('A & B')
    'A &amp; B'
    >>> escapeOnce('C &amp; D')
    'C &amp; D'
    >>> escapeOnce('E &amp;amp; F')
    'E &amp; F'
    """
    data = data.replace("&", "&amp;")
    #...but if it was already escaped, make sure it
    # is not done twice....this will turn any tags
    # back to how they were at the start.
    data = data.replace("&amp;amp;", "&amp;")
    data = data.replace("&amp;gt;", "&gt;")
    data = data.replace("&amp;lt;", "&lt;")
    data = data.replace("&amp;#", "&#")
    #..and just in case someone had double-escaped it, do it again
    data = data.replace("&amp;amp;", "&amp;")
    data = data.replace("&amp;gt;", "&gt;")
    data = data.replace("&amp;lt;", "&lt;")
return data
class IdentStr(str):
'''useful for identifying things that get split'''
def __new__(cls,value):
if isinstance(value,IdentStr):
inc = value.__inc
value = value[:-(2+len(str(inc)))]
inc += 1
else:
inc = 0
value += '[%d]' % inc
self = str.__new__(cls,value)
self.__inc = inc
return self
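# --- Illustrative usage (editor's sketch, not part of the original module) ---
# IdentStr appends a split counter to the value and bumps it each time the result is re-wrapped:
# >>> s = IdentStr('para7')
# >>> s
# 'para7[0]'
# >>> IdentStr(s)
# 'para7[1]'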
class RLString(str):
'''allows specification of extra properties of a string using a dictionary of extra attributes
eg fontName = RLString('proxima-nova-bold',
svgAttrs=dict(family='"proxima-nova"',weight='bold'))
'''
def __new__(cls,v,**kwds):
self = str.__new__(cls,v)
for k,v in kwds.iteritems():
setattr(self,k,v)
return self
def makeFileName(s):
'''force filename strings to unicode so python can handle encoding stuff'''
assert isinstance(s,basestring),"filename is %r should be str or unicode" % s
if isinstance(s,str):
s = s.decode('utf8')
return s
|
TaskEvolution/Task-Coach-Evolution
|
taskcoach/taskcoachlib/thirdparty/src/reportlab/lib/utils.py
|
Python
|
gpl-3.0
| 39,368
|
#!/usr/bin/env python3
## Copyright (C) 2021 David Miguel Susano Pinto <carandraug@gmail.com>
##
## This file is part of Microscope.
##
## Microscope is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## Microscope is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Microscope. If not, see <http://www.gnu.org/licenses/>.
"""Module kept for backwards compatibility. Look into microscope.lights."""
from microscope.lights.toptica import TopticaiBeam
|
carandraug/microscope
|
microscope/lasers/toptica.py
|
Python
|
gpl-3.0
| 907
|
import re
from core import plugins
class Plugin:
"""Triggering plugin attributes.
Get attributes of the currently running plugin.
This object is generally imported like this:
>>> from api import plugin
    The following attribute descriptions include some
    examples, based on an imaginary plugin located at
'/home/user/phpsploit/plugins/parent_dir/foobar/' path.
ATTRIBUTES:
* name (type: str)
# Plugin name.
>>> plugin.name
'foobar'
* help (type: str)
# Plugin's docstring (detailed help).
>>> print(plugin.help)
[*] foobar: An imaginary phpsploit plugin
DESCRIPTION:
An imaginary foobar plugin description.
...
* path (type: str)
# Absolute path of plugin's root directory.
>>> plugin.path
'/home/user/phpsploit/plugins/parent_dir/foobar/'
* category (type: str)
# Plugin's category name (parent directory).
>>> plugin.category
'Parent Dir'
"""
def __init__(self):
pass
def __getattr__(self, attr):
errmsg = "type object '%s' has no attribute '%s'"
if attr in dir(self):
return getattr(plugins.current_plugin, attr)
raise AttributeError(errmsg % (self.__class__.__name__, str(attr)))
def __dir__(self):
result = []
for attr in dir(plugins.current_plugin):
obj = getattr(plugins.current_plugin, attr)
if re.match("^[a-z]+$", attr) and not callable(obj):
result.append(attr)
return result
# instantiate plugin object (for use within python API)
plugin = Plugin()
|
0x0mar/phpsploit
|
src/api/plugin.py
|
Python
|
gpl-3.0
| 1,672
|
# -*- coding: utf-8 -*-
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael Gruener <michael.gruener@chaosmoon.net>, 2016
#
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import base64
import binascii
import copy
import datetime
import hashlib
import json
import os
import re
import shutil
import sys
import tempfile
import traceback
from ansible.module_utils._text import to_native, to_text, to_bytes
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.compat import ipaddress as compat_ipaddress
from ansible.module_utils.six.moves.urllib.parse import unquote
try:
import cryptography
import cryptography.hazmat.backends
import cryptography.hazmat.primitives.serialization
import cryptography.hazmat.primitives.asymmetric.rsa
import cryptography.hazmat.primitives.asymmetric.ec
import cryptography.hazmat.primitives.asymmetric.padding
import cryptography.hazmat.primitives.hashes
import cryptography.hazmat.primitives.asymmetric.utils
import cryptography.x509
import cryptography.x509.oid
from distutils.version import LooseVersion
CRYPTOGRAPHY_VERSION = cryptography.__version__
HAS_CURRENT_CRYPTOGRAPHY = (LooseVersion(CRYPTOGRAPHY_VERSION) >= LooseVersion('1.5'))
if HAS_CURRENT_CRYPTOGRAPHY:
_cryptography_backend = cryptography.hazmat.backends.default_backend()
except Exception as dummy:
HAS_CURRENT_CRYPTOGRAPHY = False
class ModuleFailException(Exception):
'''
If raised, module.fail_json() will be called with the given parameters after cleanup.
'''
def __init__(self, msg, **args):
super(ModuleFailException, self).__init__(self, msg)
self.msg = msg
self.module_fail_args = args
def do_fail(self, module, **arguments):
module.fail_json(msg=self.msg, other=self.module_fail_args, **arguments)
def nopad_b64(data):
return base64.urlsafe_b64encode(data).decode('utf8').replace("=", "")
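# --- Illustrative usage (editor's note, not part of the original module) ---
# nopad_b64() produces the unpadded, URL-safe base64 encoding used throughout JWS/ACME requests:
# >>> nopad_b64(b'hello')
# 'aGVsbG8'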
def read_file(fn, mode='b'):
try:
with open(fn, 'r' + mode) as f:
return f.read()
except Exception as e:
raise ModuleFailException('Error while reading file "{0}": {1}'.format(fn, e))
# function source: network/basics/uri.py
def write_file(module, dest, content):
'''
Write content to destination file dest, only if the content
has changed.
'''
changed = False
# create a tempfile
fd, tmpsrc = tempfile.mkstemp(text=False)
f = os.fdopen(fd, 'wb')
try:
f.write(content)
except Exception as err:
try:
f.close()
except Exception as dummy:
pass
os.remove(tmpsrc)
raise ModuleFailException("failed to create temporary content file: %s" % to_native(err), exception=traceback.format_exc())
f.close()
checksum_src = None
checksum_dest = None
# raise an error if there is no tmpsrc file
if not os.path.exists(tmpsrc):
try:
os.remove(tmpsrc)
except Exception as dummy:
pass
raise ModuleFailException("Source %s does not exist" % (tmpsrc))
if not os.access(tmpsrc, os.R_OK):
os.remove(tmpsrc)
raise ModuleFailException("Source %s not readable" % (tmpsrc))
checksum_src = module.sha1(tmpsrc)
# check if there is no dest file
if os.path.exists(dest):
# raise an error if copy has no permission on dest
if not os.access(dest, os.W_OK):
os.remove(tmpsrc)
raise ModuleFailException("Destination %s not writable" % (dest))
if not os.access(dest, os.R_OK):
os.remove(tmpsrc)
raise ModuleFailException("Destination %s not readable" % (dest))
checksum_dest = module.sha1(dest)
else:
dirname = os.path.dirname(dest) or '.'
if not os.access(dirname, os.W_OK):
os.remove(tmpsrc)
raise ModuleFailException("Destination dir %s not writable" % (dirname))
if checksum_src != checksum_dest:
try:
shutil.copyfile(tmpsrc, dest)
changed = True
except Exception as err:
os.remove(tmpsrc)
raise ModuleFailException("failed to copy %s to %s: %s" % (tmpsrc, dest, to_native(err)), exception=traceback.format_exc())
os.remove(tmpsrc)
return changed
def pem_to_der(pem_filename):
'''
Load PEM file, and convert to DER.
If PEM contains multiple entities, the first entity will be used.
'''
certificate_lines = []
try:
with open(pem_filename, "rt") as f:
header_line_count = 0
for line in f:
if line.startswith('-----'):
header_line_count += 1
if header_line_count == 2:
# If certificate file contains other certs appended
# (like intermediate certificates), ignore these.
break
continue
certificate_lines.append(line.strip())
except Exception as err:
raise ModuleFailException("cannot load PEM file {0}: {1}".format(pem_filename, to_native(err)), exception=traceback.format_exc())
return base64.b64decode(''.join(certificate_lines))
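# --- Illustrative usage (editor's note, not part of the original module; the path is hypothetical) ---
# pem_to_der() strips the PEM armour of the first entity in the file and returns the raw DER bytes:
# der_bytes = pem_to_der('/etc/ssl/certs/example.pem')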
def _parse_key_openssl(openssl_binary, module, key_file=None, key_content=None):
'''
Parses an RSA or Elliptic Curve key file in PEM format and returns a pair
(error, key_data).
'''
# If key_file isn't given, but key_content, write that to a temporary file
if key_file is None:
fd, tmpsrc = tempfile.mkstemp()
module.add_cleanup_file(tmpsrc) # Ansible will delete the file on exit
f = os.fdopen(fd, 'wb')
try:
f.write(key_content.encode('utf-8'))
key_file = tmpsrc
except Exception as err:
try:
f.close()
except Exception as dummy:
pass
raise ModuleFailException("failed to create temporary content file: %s" % to_native(err), exception=traceback.format_exc())
f.close()
# Parse key
account_key_type = None
with open(key_file, "rt") as f:
for line in f:
m = re.match(r"^\s*-{5,}BEGIN\s+(EC|RSA)\s+PRIVATE\s+KEY-{5,}\s*$", line)
if m is not None:
account_key_type = m.group(1).lower()
break
if account_key_type is None:
# This happens for example if openssl_privatekey created this key
# (as opposed to the OpenSSL binary). For now, we assume this is
# an RSA key.
# FIXME: add some kind of auto-detection
account_key_type = "rsa"
if account_key_type not in ("rsa", "ec"):
return 'unknown key type "%s"' % account_key_type, {}
openssl_keydump_cmd = [openssl_binary, account_key_type, "-in", key_file, "-noout", "-text"]
dummy, out, dummy = module.run_command(openssl_keydump_cmd, check_rc=True)
if account_key_type == 'rsa':
pub_hex, pub_exp = re.search(
r"modulus:\n\s+00:([a-f0-9\:\s]+?)\npublicExponent: ([0-9]+)",
to_text(out, errors='surrogate_or_strict'), re.MULTILINE | re.DOTALL).groups()
pub_exp = "{0:x}".format(int(pub_exp))
if len(pub_exp) % 2:
pub_exp = "0{0}".format(pub_exp)
return None, {
'key_file': key_file,
'type': 'rsa',
'alg': 'RS256',
'jwk': {
"kty": "RSA",
"e": nopad_b64(binascii.unhexlify(pub_exp.encode("utf-8"))),
"n": nopad_b64(binascii.unhexlify(re.sub(r"(\s|:)", "", pub_hex).encode("utf-8"))),
},
'hash': 'sha256',
}
elif account_key_type == 'ec':
pub_data = re.search(
r"pub:\s*\n\s+04:([a-f0-9\:\s]+?)\nASN1 OID: (\S+)(?:\nNIST CURVE: (\S+))?",
to_text(out, errors='surrogate_or_strict'), re.MULTILINE | re.DOTALL)
if pub_data is None:
return 'cannot parse elliptic curve key', {}
pub_hex = binascii.unhexlify(re.sub(r"(\s|:)", "", pub_data.group(1)).encode("utf-8"))
asn1_oid_curve = pub_data.group(2).lower()
nist_curve = pub_data.group(3).lower() if pub_data.group(3) else None
if asn1_oid_curve == 'prime256v1' or nist_curve == 'p-256':
bits = 256
alg = 'ES256'
hashalg = 'sha256'
point_size = 32
curve = 'P-256'
elif asn1_oid_curve == 'secp384r1' or nist_curve == 'p-384':
bits = 384
alg = 'ES384'
hashalg = 'sha384'
point_size = 48
curve = 'P-384'
elif asn1_oid_curve == 'secp521r1' or nist_curve == 'p-521':
# Not yet supported on Let's Encrypt side, see
# https://github.com/letsencrypt/boulder/issues/2217
bits = 521
alg = 'ES512'
hashalg = 'sha512'
point_size = 66
curve = 'P-521'
else:
return 'unknown elliptic curve: %s / %s' % (asn1_oid_curve, nist_curve), {}
num_bytes = (bits + 7) // 8
if len(pub_hex) != 2 * num_bytes:
return 'bad elliptic curve point (%s / %s)' % (asn1_oid_curve, nist_curve), {}
return None, {
'key_file': key_file,
'type': 'ec',
'alg': alg,
'jwk': {
"kty": "EC",
"crv": curve,
"x": nopad_b64(pub_hex[:num_bytes]),
"y": nopad_b64(pub_hex[num_bytes:]),
},
'hash': hashalg,
'point_size': point_size,
}
def _sign_request_openssl(openssl_binary, module, payload64, protected64, key_data):
openssl_sign_cmd = [openssl_binary, "dgst", "-{0}".format(key_data['hash']), "-sign", key_data['key_file']]
sign_payload = "{0}.{1}".format(protected64, payload64).encode('utf8')
dummy, out, dummy = module.run_command(openssl_sign_cmd, data=sign_payload, check_rc=True, binary_data=True)
if key_data['type'] == 'ec':
dummy, der_out, dummy = module.run_command(
[openssl_binary, "asn1parse", "-inform", "DER"],
data=out, binary_data=True)
expected_len = 2 * key_data['point_size']
sig = re.findall(
r"prim:\s+INTEGER\s+:([0-9A-F]{1,%s})\n" % expected_len,
to_text(der_out, errors='surrogate_or_strict'))
if len(sig) != 2:
raise ModuleFailException(
"failed to generate Elliptic Curve signature; cannot parse DER output: {0}".format(
to_text(der_out, errors='surrogate_or_strict')))
sig[0] = (expected_len - len(sig[0])) * '0' + sig[0]
sig[1] = (expected_len - len(sig[1])) * '0' + sig[1]
out = binascii.unhexlify(sig[0]) + binascii.unhexlify(sig[1])
return {
"protected": protected64,
"payload": payload64,
"signature": nopad_b64(to_bytes(out)),
}
if sys.version_info[0] >= 3:
# Python 3 (and newer)
def _count_bytes(n):
return (n.bit_length() + 7) // 8 if n > 0 else 0
def _convert_int_to_bytes(count, no):
return no.to_bytes(count, byteorder='big')
def _pad_hex(n, digits):
res = hex(n)[2:]
if len(res) < digits:
res = '0' * (digits - len(res)) + res
return res
else:
# Python 2
def _count_bytes(n):
if n <= 0:
return 0
h = '%x' % n
return (len(h) + 1) // 2
def _convert_int_to_bytes(count, n):
h = '%x' % n
if len(h) > 2 * count:
raise Exception('Number {1} needs more than {0} bytes!'.format(count, n))
return ('0' * (2 * count - len(h)) + h).decode('hex')
def _pad_hex(n, digits):
h = '%x' % n
if len(h) < digits:
h = '0' * (digits - len(h)) + h
return h
def _parse_key_cryptography(module, key_file=None, key_content=None):
'''
Parses an RSA or Elliptic Curve key file in PEM format and returns a pair
(error, key_data).
'''
# If key_content isn't given, read key_file
if key_content is None:
key_content = read_file(key_file)
else:
key_content = to_bytes(key_content)
# Parse key
try:
key = cryptography.hazmat.primitives.serialization.load_pem_private_key(key_content, password=None, backend=_cryptography_backend)
except Exception as e:
return 'error while loading key: {0}'.format(e), None
if isinstance(key, cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):
pk = key.public_key().public_numbers()
return None, {
'key_obj': key,
'type': 'rsa',
'alg': 'RS256',
'jwk': {
"kty": "RSA",
"e": nopad_b64(_convert_int_to_bytes(_count_bytes(pk.e), pk.e)),
"n": nopad_b64(_convert_int_to_bytes(_count_bytes(pk.n), pk.n)),
},
'hash': 'sha256',
}
elif isinstance(key, cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey):
pk = key.public_key().public_numbers()
if pk.curve.name == 'secp256r1':
bits = 256
alg = 'ES256'
hashalg = 'sha256'
point_size = 32
curve = 'P-256'
elif pk.curve.name == 'secp384r1':
bits = 384
alg = 'ES384'
hashalg = 'sha384'
point_size = 48
curve = 'P-384'
elif pk.curve.name == 'secp521r1':
# Not yet supported on Let's Encrypt side, see
# https://github.com/letsencrypt/boulder/issues/2217
bits = 521
alg = 'ES512'
hashalg = 'sha512'
point_size = 66
curve = 'P-521'
else:
return 'unknown elliptic curve: {0}'.format(pk.curve.name), {}
num_bytes = (bits + 7) // 8
return None, {
'key_obj': key,
'type': 'ec',
'alg': alg,
'jwk': {
"kty": "EC",
"crv": curve,
"x": nopad_b64(_convert_int_to_bytes(num_bytes, pk.x)),
"y": nopad_b64(_convert_int_to_bytes(num_bytes, pk.y)),
},
'hash': hashalg,
'point_size': point_size,
}
else:
return 'unknown key type "{0}"'.format(type(key)), {}
def _sign_request_cryptography(module, payload64, protected64, key_data):
sign_payload = "{0}.{1}".format(protected64, payload64).encode('utf8')
if isinstance(key_data['key_obj'], cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):
padding = cryptography.hazmat.primitives.asymmetric.padding.PKCS1v15()
hashalg = cryptography.hazmat.primitives.hashes.SHA256
signature = key_data['key_obj'].sign(sign_payload, padding, hashalg())
elif isinstance(key_data['key_obj'], cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey):
if key_data['hash'] == 'sha256':
hashalg = cryptography.hazmat.primitives.hashes.SHA256
elif key_data['hash'] == 'sha384':
hashalg = cryptography.hazmat.primitives.hashes.SHA384
elif key_data['hash'] == 'sha512':
hashalg = cryptography.hazmat.primitives.hashes.SHA512
ecdsa = cryptography.hazmat.primitives.asymmetric.ec.ECDSA(hashalg())
r, s = cryptography.hazmat.primitives.asymmetric.utils.decode_dss_signature(key_data['key_obj'].sign(sign_payload, ecdsa))
rr = _pad_hex(r, 2 * key_data['point_size'])
ss = _pad_hex(s, 2 * key_data['point_size'])
signature = binascii.unhexlify(rr) + binascii.unhexlify(ss)
return {
"protected": protected64,
"payload": payload64,
"signature": nopad_b64(signature),
}
def _assert_fetch_url_success(response, info, allow_redirect=False, allow_client_error=True, allow_server_error=True):
if info['status'] < 0:
raise ModuleFailException(msg="Failure downloading %s, %s" % (info['url'], info['msg']))
if (300 <= info['status'] < 400 and not allow_redirect) or \
(400 <= info['status'] < 500 and not allow_client_error) or \
(info['status'] >= 500 and not allow_server_error):
raise ModuleFailException("ACME request failed: CODE: {0} MGS: {1} RESULT: {2}".format(info['status'], info['msg'], response))
class ACMEDirectory(object):
'''
The ACME server directory. Gives access to the available resources,
    and allows one to obtain a Replay-Nonce. The acme_directory URL
needs to support unauthenticated GET requests; ACME endpoints
requiring authentication are not supported.
https://tools.ietf.org/html/rfc8555#section-7.1.1
'''
def __init__(self, module, account):
self.module = module
self.directory_root = module.params['acme_directory']
self.version = module.params['acme_version']
self.directory, dummy = account.get_request(self.directory_root, get_only=True)
# Check whether self.version matches what we expect
if self.version == 1:
for key in ('new-reg', 'new-authz', 'new-cert'):
if key not in self.directory:
raise ModuleFailException("ACME directory does not seem to follow protocol ACME v1")
if self.version == 2:
for key in ('newNonce', 'newAccount', 'newOrder'):
if key not in self.directory:
raise ModuleFailException("ACME directory does not seem to follow protocol ACME v2")
def __getitem__(self, key):
return self.directory[key]
def get_nonce(self, resource=None):
url = self.directory_root if self.version == 1 else self.directory['newNonce']
if resource is not None:
url = resource
dummy, info = fetch_url(self.module, url, method='HEAD')
if info['status'] not in (200, 204):
raise ModuleFailException("Failed to get replay-nonce, got status {0}".format(info['status']))
return info['replay-nonce']
class ACMEAccount(object):
'''
ACME account object. Handles the authorized communication with the
ACME server. Provides access to account bound information like
the currently active authorizations and valid certificates
'''
def __init__(self, module):
# Set to true to enable logging of all signed requests
self._debug = False
self.module = module
self.version = module.params['acme_version']
# account_key path and content are mutually exclusive
self.key = module.params['account_key_src']
self.key_content = module.params['account_key_content']
# Grab account URI from module parameters.
# Make sure empty string is treated as None.
self.uri = module.params.get('account_uri') or None
self._openssl_bin = module.get_bin_path('openssl', True)
if self.key is not None or self.key_content is not None:
error, self.key_data = self.parse_key(self.key, self.key_content)
if error:
raise ModuleFailException("error while parsing account key: %s" % error)
self.jwk = self.key_data['jwk']
self.jws_header = {
"alg": self.key_data['alg'],
"jwk": self.jwk,
}
if self.uri:
# Make sure self.jws_header is updated
self.set_account_uri(self.uri)
self.directory = ACMEDirectory(module, self)
def get_keyauthorization(self, token):
'''
Returns the key authorization for the given token
https://tools.ietf.org/html/rfc8555#section-8.1
'''
accountkey_json = json.dumps(self.jwk, sort_keys=True, separators=(',', ':'))
thumbprint = nopad_b64(hashlib.sha256(accountkey_json.encode('utf8')).digest())
return "{0}.{1}".format(token, thumbprint)
def parse_key(self, key_file=None, key_content=None):
'''
Parses an RSA or Elliptic Curve key file in PEM format and returns a pair
(error, key_data).
'''
if key_file is None and key_content is None:
raise AssertionError('One of key_file and key_content must be specified!')
if HAS_CURRENT_CRYPTOGRAPHY:
return _parse_key_cryptography(self.module, key_file, key_content)
else:
return _parse_key_openssl(self._openssl_bin, self.module, key_file, key_content)
def sign_request(self, protected, payload, key_data, encode_payload=True):
try:
if payload is None:
# POST-as-GET
payload64 = ''
else:
# POST
if encode_payload:
payload = self.module.jsonify(payload).encode('utf8')
payload64 = nopad_b64(to_bytes(payload))
protected64 = nopad_b64(self.module.jsonify(protected).encode('utf8'))
except Exception as e:
raise ModuleFailException("Failed to encode payload / headers as JSON: {0}".format(e))
if HAS_CURRENT_CRYPTOGRAPHY:
return _sign_request_cryptography(self.module, payload64, protected64, key_data)
else:
return _sign_request_openssl(self._openssl_bin, self.module, payload64, protected64, key_data)
def _log(self, msg, data=None):
'''
Write arguments to acme.log when logging is enabled.
'''
if self._debug:
with open('acme.log', 'ab') as f:
f.write('[{0}] {1}\n'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%s'), msg).encode('utf-8'))
if data is not None:
f.write('{0}\n\n'.format(json.dumps(data, indent=2, sort_keys=True)).encode('utf-8'))
def send_signed_request(self, url, payload, key_data=None, jws_header=None, parse_json_result=True, encode_payload=True):
'''
Sends a JWS signed HTTP POST request to the ACME server and returns
the response as dictionary
https://tools.ietf.org/html/rfc8555#section-6.2
If payload is None, a POST-as-GET is performed.
(https://tools.ietf.org/html/rfc8555#section-6.3)
'''
key_data = key_data or self.key_data
jws_header = jws_header or self.jws_header
failed_tries = 0
while True:
protected = copy.deepcopy(jws_header)
protected["nonce"] = self.directory.get_nonce()
if self.version != 1:
protected["url"] = url
self._log('URL', url)
self._log('protected', protected)
self._log('payload', payload)
data = self.sign_request(protected, payload, key_data, encode_payload=encode_payload)
if self.version == 1:
data["header"] = jws_header.copy()
for k, v in protected.items():
hv = data["header"].pop(k, None)
self._log('signed request', data)
data = self.module.jsonify(data)
headers = {
'Content-Type': 'application/jose+json',
}
resp, info = fetch_url(self.module, url, data=data, headers=headers, method='POST')
_assert_fetch_url_success(resp, info)
result = {}
try:
content = resp.read()
except AttributeError:
content = info.pop('body', None)
if content or not parse_json_result:
if (parse_json_result and info['content-type'].startswith('application/json')) or 400 <= info['status'] < 600:
try:
decoded_result = self.module.from_json(content.decode('utf8'))
self._log('parsed result', decoded_result)
# In case of badNonce error, try again (up to 5 times)
# (https://tools.ietf.org/html/rfc8555#section-6.7)
if (400 <= info['status'] < 600 and
decoded_result.get('type') == 'urn:ietf:params:acme:error:badNonce' and
failed_tries <= 5):
failed_tries += 1
continue
if parse_json_result:
result = decoded_result
else:
result = content
except ValueError:
raise ModuleFailException("Failed to parse the ACME response: {0} {1}".format(url, content))
else:
result = content
return result, info
def get_request(self, uri, parse_json_result=True, headers=None, get_only=False, fail_on_error=True):
'''
Perform a GET-like request. Will try POST-as-GET for ACMEv2, with fallback
to GET if server replies with a status code of 405.
'''
if not get_only and self.version != 1:
# Try POST-as-GET
content, info = self.send_signed_request(uri, None, parse_json_result=False)
if info['status'] == 405:
# Instead, do unauthenticated GET
get_only = True
else:
# Do unauthenticated GET
get_only = True
if get_only:
# Perform unauthenticated GET
resp, info = fetch_url(self.module, uri, method='GET', headers=headers)
_assert_fetch_url_success(resp, info)
try:
content = resp.read()
except AttributeError:
content = info.pop('body', None)
# Process result
if parse_json_result:
result = {}
if content:
if info['content-type'].startswith('application/json'):
try:
result = self.module.from_json(content.decode('utf8'))
except ValueError:
raise ModuleFailException("Failed to parse the ACME response: {0} {1}".format(uri, content))
else:
result = content
else:
result = content
if fail_on_error and (info['status'] < 200 or info['status'] >= 400):
raise ModuleFailException("ACME request failed: CODE: {0} RESULT: {1}".format(info['status'], result))
return result, info
def set_account_uri(self, uri):
'''
        Set account URI. For ACME v2, it needs to be used for sending signed
requests.
'''
self.uri = uri
if self.version != 1:
self.jws_header.pop('jwk')
self.jws_header['kid'] = self.uri
def _new_reg(self, contact=None, agreement=None, terms_agreed=False, allow_creation=True):
'''
Registers a new ACME account. Returns a pair ``(created, data)``.
Here, ``created`` is ``True`` if the account was created and
``False`` if it already existed (e.g. it was not newly created),
or does not exist. In case the account was created or exists,
``data`` contains the account data; otherwise, it is ``None``.
https://tools.ietf.org/html/rfc8555#section-7.3
'''
contact = contact or []
if self.version == 1:
new_reg = {
'resource': 'new-reg',
'contact': contact
}
if agreement:
new_reg['agreement'] = agreement
else:
new_reg['agreement'] = self.directory['meta']['terms-of-service']
url = self.directory['new-reg']
else:
new_reg = {
'contact': contact
}
if not allow_creation:
# https://tools.ietf.org/html/rfc8555#section-7.3.1
new_reg['onlyReturnExisting'] = True
if terms_agreed:
new_reg['termsOfServiceAgreed'] = True
url = self.directory['newAccount']
result, info = self.send_signed_request(url, new_reg)
if info['status'] in ([200, 201] if self.version == 1 else [201]):
# Account did not exist
if 'location' in info:
self.set_account_uri(info['location'])
return True, result
elif info['status'] == (409 if self.version == 1 else 200):
# Account did exist
if result.get('status') == 'deactivated':
# A bug in Pebble (https://github.com/letsencrypt/pebble/issues/179) and
# Boulder (https://github.com/letsencrypt/boulder/issues/3971): this should
# not return a valid account object according to
# https://tools.ietf.org/html/rfc8555#section-7.3.6:
# "Once an account is deactivated, the server MUST NOT accept further
# requests authorized by that account's key."
if not allow_creation:
return False, None
else:
raise ModuleFailException("Account is deactivated")
if 'location' in info:
self.set_account_uri(info['location'])
return False, result
elif info['status'] == 400 and result['type'] == 'urn:ietf:params:acme:error:accountDoesNotExist' and not allow_creation:
# Account does not exist (and we didn't try to create it)
return False, None
elif info['status'] == 403 and result['type'] == 'urn:ietf:params:acme:error:unauthorized' and 'deactivated' in (result.get('detail') or ''):
# Account has been deactivated; currently works for Pebble; hasn't been
# implemented for Boulder (https://github.com/letsencrypt/boulder/issues/3971),
# might need adjustment in error detection.
if not allow_creation:
return False, None
else:
raise ModuleFailException("Account is deactivated")
else:
raise ModuleFailException("Error registering: {0} {1}".format(info['status'], result))
def get_account_data(self):
'''
Retrieve account information. Can only be called when the account
URI is already known (such as after calling setup_account).
Return None if the account was deactivated, or a dict otherwise.
'''
if self.uri is None:
raise ModuleFailException("Account URI unknown")
if self.version == 1:
data = {}
data['resource'] = 'reg'
result, info = self.send_signed_request(self.uri, data)
else:
# try POST-as-GET first (draft-15 or newer)
data = None
result, info = self.send_signed_request(self.uri, data)
# check whether that failed with a malformed request error
if info['status'] >= 400 and result.get('type') == 'urn:ietf:params:acme:error:malformed':
# retry as a regular POST (with no changed data) for pre-draft-15 ACME servers
data = {}
result, info = self.send_signed_request(self.uri, data)
if info['status'] in (400, 403) and result.get('type') == 'urn:ietf:params:acme:error:unauthorized':
# Returned when account is deactivated
return None
if info['status'] in (400, 404) and result.get('type') == 'urn:ietf:params:acme:error:accountDoesNotExist':
# Returned when account does not exist
return None
if info['status'] < 200 or info['status'] >= 300:
raise ModuleFailException("Error getting account data from {2}: {0} {1}".format(info['status'], result, self.uri))
return result
def setup_account(self, contact=None, agreement=None, terms_agreed=False, allow_creation=True, remove_account_uri_if_not_exists=False):
'''
Detect or create an account on the ACME server. For ACME v1,
as the only way (without knowing an account URI) to test if an
account exists is to try and create one with the provided account
key, this method will always result in an account being present
(except on error situations). For ACME v2, a new account will
only be created if ``allow_creation`` is set to True.
For ACME v2, ``check_mode`` is fully respected. For ACME v1, the
account might be created if it does not yet exist.
Return a pair ``(created, account_data)``. Here, ``created`` will
be ``True`` in case the account was created or would be created
(check mode). ``account_data`` will be the current account data,
or ``None`` if the account does not exist.
The account URI will be stored in ``self.uri``; if it is ``None``,
the account does not exist.
https://tools.ietf.org/html/rfc8555#section-7.3
'''
if self.uri is not None:
created = False
# Verify that the account key belongs to the URI.
# (If update_contact is True, this will be done below.)
account_data = self.get_account_data()
if account_data is None:
if remove_account_uri_if_not_exists and not allow_creation:
self.uri = None
else:
raise ModuleFailException("Account is deactivated or does not exist!")
else:
created, account_data = self._new_reg(
contact,
agreement=agreement,
terms_agreed=terms_agreed,
allow_creation=allow_creation and not self.module.check_mode
)
if self.module.check_mode and self.uri is None and allow_creation:
created = True
account_data = {
'contact': contact or []
}
return created, account_data
def update_account(self, account_data, contact=None):
'''
Update an account on the ACME server. Check mode is fully respected.
The current account data must be provided as ``account_data``.
Return a pair ``(updated, account_data)``, where ``updated`` is
``True`` in case something changed (contact info updated) or
would be changed (check mode), and ``account_data`` the updated
account data.
https://tools.ietf.org/html/rfc8555#section-7.3.2
'''
# Create request
update_request = {}
if contact is not None and account_data.get('contact', []) != contact:
update_request['contact'] = list(contact)
# No change?
if not update_request:
return False, dict(account_data)
# Apply change
if self.module.check_mode:
account_data = dict(account_data)
account_data.update(update_request)
else:
if self.version == 1:
update_request['resource'] = 'reg'
account_data, dummy = self.send_signed_request(self.uri, update_request)
return True, account_data
def _normalize_ip(ip):
try:
return to_native(compat_ipaddress.ip_address(to_text(ip)).compressed)
except ValueError:
# We don't want to error out on something IPAddress() can't parse
return ip
def openssl_get_csr_identifiers(openssl_binary, module, csr_filename):
'''
Return a set of requested identifiers (CN and SANs) for the CSR.
Each identifier is a pair (type, identifier), where type is either
'dns' or 'ip'.
'''
openssl_csr_cmd = [openssl_binary, "req", "-in", csr_filename, "-noout", "-text"]
dummy, out, dummy = module.run_command(openssl_csr_cmd, check_rc=True)
identifiers = set([])
common_name = re.search(r"Subject:.* CN\s?=\s?([^\s,;/]+)", to_text(out, errors='surrogate_or_strict'))
if common_name is not None:
identifiers.add(('dns', common_name.group(1)))
subject_alt_names = re.search(
r"X509v3 Subject Alternative Name: (?:critical)?\n +([^\n]+)\n",
to_text(out, errors='surrogate_or_strict'), re.MULTILINE | re.DOTALL)
if subject_alt_names is not None:
for san in subject_alt_names.group(1).split(", "):
if san.lower().startswith("dns:"):
identifiers.add(('dns', san[4:]))
elif san.lower().startswith("ip:"):
identifiers.add(('ip', _normalize_ip(san[3:])))
elif san.lower().startswith("ip address:"):
identifiers.add(('ip', _normalize_ip(san[11:])))
else:
raise ModuleFailException('Found unsupported SAN identifier "{0}"'.format(san))
return identifiers
def cryptography_get_csr_identifiers(module, csr_filename):
'''
Return a set of requested identifiers (CN and SANs) for the CSR.
Each identifier is a pair (type, identifier), where type is either
'dns' or 'ip'.
'''
identifiers = set([])
csr = cryptography.x509.load_pem_x509_csr(read_file(csr_filename), _cryptography_backend)
for sub in csr.subject:
if sub.oid == cryptography.x509.oid.NameOID.COMMON_NAME:
identifiers.add(('dns', sub.value))
for extension in csr.extensions:
if extension.oid == cryptography.x509.oid.ExtensionOID.SUBJECT_ALTERNATIVE_NAME:
for name in extension.value:
if isinstance(name, cryptography.x509.DNSName):
identifiers.add(('dns', name.value))
elif isinstance(name, cryptography.x509.IPAddress):
identifiers.add(('ip', name.value.compressed))
else:
raise ModuleFailException('Found unsupported SAN identifier {0}'.format(name))
return identifiers
def cryptography_get_cert_days(module, cert_file, now=None):
'''
Return the days the certificate in cert_file remains valid and -1
if the file was not found. If cert_file contains more than one
certificate, only the first one will be considered.
'''
if not os.path.exists(cert_file):
return -1
try:
cert = cryptography.x509.load_pem_x509_certificate(read_file(cert_file), _cryptography_backend)
except Exception as e:
raise ModuleFailException('Cannot parse certificate {0}: {1}'.format(cert_file, e))
if now is None:
now = datetime.datetime.now()
return (cert.not_valid_after - now).days
def set_crypto_backend(module):
'''
Sets which crypto backend to use (default: auto detection).
    Does not care whether a new enough cryptography is available or not. Must
be called before any real stuff is done which might evaluate
``HAS_CURRENT_CRYPTOGRAPHY``.
'''
global HAS_CURRENT_CRYPTOGRAPHY
# Choose backend
backend = module.params['select_crypto_backend']
if backend == 'auto':
pass
elif backend == 'openssl':
HAS_CURRENT_CRYPTOGRAPHY = False
elif backend == 'cryptography':
try:
cryptography.__version__
except Exception as dummy:
module.fail_json(msg='Cannot find cryptography module!')
HAS_CURRENT_CRYPTOGRAPHY = True
else:
module.fail_json(msg='Unknown crypto backend "{0}"!'.format(backend))
# Inform about choices
if HAS_CURRENT_CRYPTOGRAPHY:
module.debug('Using cryptography backend (library version {0})'.format(CRYPTOGRAPHY_VERSION))
else:
module.debug('Using OpenSSL binary backend')
def process_links(info, callback):
'''
Process link header, calls callback for every link header with the URL and relation as options.
'''
if 'link' in info:
link = info['link']
for url, relation in re.findall(r'<([^>]+)>;\s*rel="(\w+)"', link):
callback(unquote(url), relation)
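# --- Illustrative usage (editor's note, not part of the original module; the URL and relation are made up) ---
# process_links() invokes the callback once per entry in the HTTP Link header:
# >>> def show(url, rel):
# ...     print('%s -> %s' % (rel, url))
# >>> process_links({'link': '<https://example.org/acme/directory>;rel="index"'}, show)
# index -> https://example.org/acme/directory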
|
kvar/ansible
|
lib/ansible/module_utils/acme.py
|
Python
|
gpl-3.0
| 40,296
|
../../../../share/pyshared/orca/outloud.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/outloud.py
|
Python
|
gpl-3.0
| 42
|
import unittest
from math import sqrt
import numpy as np
from geomalgo import Point2D, Line2D
class TestLine2D(unittest.TestCase):
def test_create_line(self):
A = Point2D(1,2)
B = Point2D(3,4)
# ======================
# Create line2d.
# ======================
line = Line2D(A,B)
self.assertEqual(line.B.y, 4)
# ======================
# Modify B.y
# ======================
line.B.y = 5
self.assertEqual(line.B.y, 5)
# ======================
# Modify B
# ======================
line.B = Point2D(-1, -2)
self.assertEqual(line.B.y, -2)
class TestPoint2dDistance(unittest.TestCase):
def test_horizontal_line(self):
"""
P
A-----------B
"""
A = Point2D(1, 2)
B = Point2D(3, 2)
AB = Line2D(A, B)
P = Point2D(2, 4)
self.assertEqual(AB.point_distance(P), 2)
P = Point2D(10, 4)
self.assertEqual(AB.point_distance(P), 2)
def test_vertical_line(self):
"""
B
|
        |   P
|
A
"""
A = Point2D(1, 2)
B = Point2D(1, 4)
AB = Line2D(A, B)
P = Point2D(3, 3)
self.assertEqual(AB.point_distance(P), 2)
P = Point2D(3, 10)
self.assertEqual(AB.point_distance(P), 2)
def test_on_horizontal_line(self):
"""
A-----P-----B
"""
A = Point2D(1, 2)
B = Point2D(3, 2)
AB = Line2D(A, B)
P = Point2D(2, 2)
self.assertEqual(AB.point_distance(P), 0)
P = Point2D(10, 2)
self.assertEqual(AB.point_distance(P), 0)
if __name__ == '__main__':
unittest.main()
|
dfroger/geomalgo
|
test/base2d/test_line2d.py
|
Python
|
gpl-3.0
| 1,777
|
# Copyright (C) 2016 Canonical Ltd.
# Copyright (C) 2016 VMware Inc.
#
# Author: Sankar Tanguturi <stanguturi@vmware.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
class GuestCustStateEnum(object):
"""Specifies different states of Guest Customization engine"""
GUESTCUST_STATE_RUNNING = 4
GUESTCUST_STATE_DONE = 5
# vi: ts=4 expandtab
|
larsks/cloud-init
|
cloudinit/sources/helpers/vmware/imc/guestcust_state.py
|
Python
|
gpl-3.0
| 388
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------------- #
# #
# Plugin for iSida Jabber Bot #
# Copyright (C) diSabler <dsy@dsy.name> #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
# --------------------------------------------------------------------------- #
calc_last_res = {}
def exec_ute(type, jid, nick, text):
try: text = remove_sub_space(unicode(eval(text)))
except Exception, SM:
try: SM = str(SM)
except: SM = unicode(SM)
text = L('I can\'t execute it! Error: %s','%s/%s'%(jid,nick)) % SM[:int(msg_limit/2)]
send_msg(type, jid, nick, text)
def calc(type, jid, nick, text):
global calc_last_res
if 'Ans' in text and calc_last_res.has_key(jid) and calc_last_res[jid].has_key(nick) and calc_last_res[jid][nick]:
text = text.replace('Ans', calc_last_res[jid][nick])
legal = string.digits + string.letters + '*/+-()=^!<>. '
ppc = 1
if '**' in text or 'pow' in text or 'factorial' in text: ppc = 0
else:
for tt in text:
if tt not in legal:
ppc = 0
break
if ppc:
text = re.sub('([^.0-9]\d+)(?=([^.0-9]|$))', r'\1.0', text)
try:
text = remove_sub_space(str(eval(re.sub('([^a-zA-Z]|\A)([a-zA-Z])', r'\1math.\2', text))))
if text[-2:] == '.0': text = text[:-2]
if calc_last_res.has_key(jid): calc_last_res[jid][nick] = text
else: calc_last_res[jid] = {nick: text}
except:
text = L('I can\'t calculate it','%s/%s'%(jid,nick))
if calc_last_res.has_key(jid): calc_last_res[jid][nick] = None
else: calc_last_res[jid] = {nick: None}
else:
text = L('Expression unacceptable!','%s/%s'%(jid,nick))
if calc_last_res.has_key(jid): calc_last_res[jid][nick] = None
else: calc_last_res[jid] = {nick: None}
send_msg(type, jid, nick, text)
def dpi_calc(type, jid, nick, text):
text = text.strip().replace(',','.')
if text:
tupl = re.findall('([0-9.]+)',text)[:3]
if len(tupl) == 3:
if '.' in tupl[0] or '.' in tupl[1]: msg = L('Width and height must be integer!','%s/%s'%(jid,nick))
elif not float(tupl[2]): msg = L('Incorrect diagonal value!','%s/%s'%(jid,nick))
else:
dpi_type = [0,'L'],[160,'M'],[240,'H'],[320,'XH']
dpi = int((math.sqrt(int(tupl[0])**2+int(tupl[1])**2))/float(tupl[2]))
dpi_name = 'unknown'
for t in dpi_type:
if dpi > t[0]: dpi_name = '%sDPI' % t[1]
msg = u'%s %s×%s×%s\" - %sdpi [%s]' % (L('Screen','%s/%s'%(jid,nick)),tupl[0],tupl[1],tupl[2],dpi,dpi_name)
else: msg = L('Not enough parameters!','%s/%s'%(jid,nick))
else: msg = L('What?','%s/%s'%(jid,nick))
send_msg(type, jid, nick, msg)
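# --- Worked example (editor's note, not part of the original plugin) ---
# dpi_calc computes dpi = sqrt(width**2 + height**2) / diagonal_inches and maps it to a bucket:
# e.g. "dpi 1920 1080 5" -> int(sqrt(1920**2 + 1080**2) / 5.0) = 440 -> reported as 440dpi [XHDPI]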
def calc_clear(room,jid,nick,type,arr):
if type == 'unavailable' and calc_last_res.has_key(room) and calc_last_res[room].has_key(nick): del calc_last_res[room][nick]
global execute, presence_control
presence_control = [calc_clear]
if not GT('paranoia_mode'): execute = [(3, 'calc', calc, 2, 'Calculator.'),
(9, 'exec', exec_ute, 2, 'Execution of external code.')]
else: execute = []
execute.append((3, 'dpi', dpi_calc, 2, 'DPI calculator.\ndpi <width height size>'))
|
disabler/isida3
|
plugins/execute.py
|
Python
|
gpl-3.0
| 4,289
|
"""
Tuple astroid node
This node represents the Python tuple objects.
Attributes:
- elts (List[Expr])
- The elements in this tuple, which can be any expression.
- ctx (class[expr_context])
- The context in which this list is to be used, either Load or Store.
Example 1:
- elts -> []
- ctx -> Load
Example 2:
- elts -> [Num(1), Num(2)]
- ctx -> Load
Example 3:
- elts -> [Name('x', Store()), Name('y', Store())]
- ctx -> Store
"""
# Example 1:
()
# Example 2:
(1, 2)
# Example 3:
(x, y) = [7, 8]
|
RyanDJLee/pyta
|
nodes/Tuple.py
|
Python
|
gpl-3.0
| 563
|
import os
import datetime
import lib.maglib as MSG
# This is a library for the preliminary processing of crawl results.
# It is used to separate the crawled content, the author and the posting time.
# The crawl results should be stored in the client root directory in a file named "result".
# In the test setup the crawl result file is results.txt.
# Important global variables
PATH_SUFFIX = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
PATH_SUFFIX = PATH_SUFFIX[:len(PATH_SUFFIX)-8]
PATH_RESULT_FILE = PATH_SUFFIX + "\\result.txt"
# This function returns the list of posts and performs the first separation step: splitting each post into its basic information and its reply information.
# Return format: a list of 2-element items:
# [ [[post title, author, post time] , [reply list: [reply content, author, reply time], [reply content, author, reply time], [[......]], .....]] ]
def getPostDataList(rawresult):
#rawresult = openResult()
rawpost = spiltRawPost(rawresult)
del rawresult
SPILT_TITLE_PDATA = "@#@"
SPILT_INNER_DATA = "*#*"
SPILT_INNER_REPLY = "$#$"
postdata = []
for post in rawpost:
if len(post) < 9:
continue
        spd = post.split(SPILT_TITLE_PDATA) # spd[0] = title data, spd[1] = reply data
titledata = spd[0].split(SPILT_INNER_DATA)
try:
replylist = spd[1].split(SPILT_INNER_REPLY)
replydata = []
for reply in replylist:
rep = reply.split(SPILT_INNER_DATA)
replydata.append(rep)
postdata.append([titledata,replydata])
except:
print("replydata error,no index 2")
return postdata
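# --- Illustrative note (editor's sketch, not part of the original module; assumes result.txt held at least one post with one reply) ---
# postdata = getPostDataList(openResult())
# title, author, post_time = postdata[0][0]
# reply_content, reply_author, reply_time = postdata[0][1][0]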
# This function returns the post titles together with the reply contents.
# Return format: a list of strings.
def getContentList(rawdata):
postdata = getPostDataList(rawdata)
contentlist = []
    # [ [[post title, author, post time] , [reply list: [reply content, author, reply time], [reply content, author, reply time], [[......]], .....]] ]
for post in postdata:
contentlist.append(post[0][0])
replylist = post[1]
for reply in replylist:
contentlist.append(reply[0])
del postdata
return contentlist
# This function returns the collection of all posting dates.
# Return format: a list of datetime objects parsed from "%Y-%m-%d %H:%M" timestamps.
def getDateList(rawdata):
postdata = getPostDataList(rawdata)
datelist = []
    # [ [[post title, author, post time] , [reply list: [reply content, author, reply time], [reply content, author, reply time], [[......]], .....]] ]
for post in postdata:
datelist.append(datetime.datetime.strptime(post[0][2], "%Y-%m-%d %H:%M"))
replylist = post[1]
for reply in replylist:
if len(reply) < 3:
continue
try:
datelist.append(datetime.datetime.strptime(reply[2], "%Y-%m-%d %H:%M"))
except Exception as e:
print("x",end="")
del postdata
return datelist
# This function returns the collection of all authors.
# Return format: a list of strings.
def getAuthorList():
postdata = getPostDataList()
authorlist = []
    # [ [[post title, author, post time] , [reply list: [reply content, author, reply time], [reply content, author, reply time], [[......]], .....]] ]
for post in postdata:
authorlist.append(post[0][1])
replylist = post[1]
for reply in replylist:
authorlist.append(reply[1])
del postdata
return authorlist
# This function counts the occurrences of the given word in the data list.
# Returns: a fixed tag string and the number of items matching the given word.
def satisticWord(word,datalist):
os.system('cls')
print('>>>>>开始统计【',word,'】出现次数....')
sum=1
mlist=[]
for item in datalist:
if item.find(word) != -1:
sum+=1
mlist.append(item)
print('>',end='')
print('>>>>>统计完成!\n\n')
MSG.printline2x35(2)
print('\r\n>>>>>统计结果>----->共【',sum-1,'/',len(datalist),'】条匹配数据,结果如下','\r\n')
MSG.printline2x35(2)
for item in mlist:
print('\t◆\t',item)
MSG.printline2x35(2)
print('\r\n>>>>>统计结果>----->共【',sum-1,'/',len(datalist),'】条匹配数据,结果如下','\r\n')
MSG.printline2x35(2)
return 'SW',sum-1
#======================= Helper functions in this file (mainly for file operations) ==========================
# Open the crawl result file.
# Returns: the file contents.
def openResult():
print("加载任务结果文件:",PATH_RESULT_FILE)
f = open(PATH_RESULT_FILE,'rb')
data = f.read()
f.close()
data = data.decode('gbk', 'ignore')
return data
# Split the data read by openResult() into lines, since each line is one post.
# Returns: list -> the data of each line.
def spiltRawPost(rawdata):
datalist = rawdata.split('\r\n\t\t')
return datalist
|
ankanch/tieba-zhuaqu
|
user-application/KCrawlerControal/Debug/plugins/wordstimeline/lib/result_functions_file.py
|
Python
|
gpl-3.0
| 4,862
|
"""
Dyson equation
==============
A dyson equation.
"""
import matplotlib.pyplot as plt
from feynman import Diagram
# Set up the figure and ax
fig = plt.figure(figsize=(10,1.5))
ax = fig.add_axes([.0,.0,1.,1.], frameon=False)
ax.set_xlim(0, 1.0)
ax.set_ylim(0, .15)
l = 0.15 # Length of the propagator
txt_l = 0.05 # Padding around the symbol
op_l = 0.08 # Size of the operator
G_style = dict(arrow=True, arrow_param={'width':0.02, 'length': 0.05}, style = 'double')
G0_style = dict(arrow=True, arrow_param={'width':0.02, 'length': 0.05}, style = 'simple')
text_prop = dict(y=0.02, fontsize=20)
D = Diagram(ax)
# Left hand side
v11 = D.vertex(xy=[0.05, 0.06])
v12 = D.vertex(v11.xy, dx=l)
G = D.line(v11, v12, **G_style)
G.text("$G$", **text_prop)
# Symbol
D.text(v12.x + txt_l, v12.y, "=")
# First term
v21 = D.vertex(v12.xy, dx=2*txt_l)
v22 = D.vertex(v21.xy, dx=l)
G0 = D.line(v21, v22, **G0_style)
G0.text("$G_0$", **text_prop)
# Symbol
D.text(v22.x + txt_l, v22.y, "+")
# Second term
v31 = D.vertex(v22.xy, dx=2*txt_l)
v32 = D.vertex(v31.xy, dx=l)
v33 = D.vertex(v32.xy, dx=op_l)
v34 = D.vertex(v33.xy, dx=l)
D.line(v31, v32, **G0_style)
D.line(v33, v34, **G_style)
O = D.operator([v32,v33])
O.text("$\Sigma$")
# Plot and show
D.plot()
plt.show()
|
GkAntonius/feynman
|
examples/Solid_State_Physics/plot_Dyson.py
|
Python
|
gpl-3.0
| 1,267
|
#!/usr/bin/env python
"""
CPAchecker is a tool for configurable software verification.
This file is part of CPAchecker.
Copyright (C) 2007-2014 Dirk Beyer
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
CPAchecker web page:
http://cpachecker.sosy-lab.org
"""
# prepare for Python 3
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
sys.dont_write_bytecode = True # prevent creation of .pyc files
import subprocess
import logging
import os.path
import benchexec.tools.cpachecker
cpachecker = benchexec.tools.cpachecker.Tool()
executable = cpachecker.executable()
required_files = cpachecker.program_files(executable)
# install cloud and dependencies
ant = subprocess.Popen(["ant", "resolve-benchmark-dependencies"])
ant.communicate()
ant.wait()
# assume that last parameter is the input file
argv = sys.argv
parameters = argv[1:-1]
in_file = argv[-1]
# start cloud and wait for exit
logging.debug("Starting cloud.")
logLevel = "FINER"
cpachecker_dir = os.path.normpath(os.path.join(os.path.dirname(argv[0]), os.pardir)) # directory above script directory
lib_dir = os.path.abspath(os.path.join("lib", "java-benchmark"))
cmd_line = ["java", "-jar", os.path.join(lib_dir, "vcloud.jar"), "cpachecker",
"--loglevel", logLevel,
"--input", in_file,
"--required_files", ','.join(required_files),
"--cpachecker-dir", cpachecker_dir,
"--", executable
]
cmd_line.extend(parameters)
logging.debug("CPAchecker command: ", cmd_line)
cloud = subprocess.Popen(cmd_line)
cloud.communicate()
cloud.wait()
# vim:sts=4:sw=4:expandtab:
|
nishanttotla/predator
|
cpachecker/scripts/cpa_cloud.py
|
Python
|
gpl-3.0
| 2,148
|
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import sys, os, cPickle, subprocess
from setup import Command
import __builtin__
def set_builtins(builtins):
for x in builtins:
if not hasattr(__builtin__, x):
setattr(__builtin__, x, True)
yield x
class Message:
def __init__(self, filename, lineno, msg):
self.filename, self.lineno, self.msg = filename, lineno, msg
def __str__(self):
return '%s:%s: %s'%(self.filename, self.lineno, self.msg)
def check_for_python_errors(code_string, filename):
import _ast
# First, compile into an AST and handle syntax errors.
try:
tree = compile(code_string, filename, "exec", _ast.PyCF_ONLY_AST)
except (SyntaxError, IndentationError) as value:
msg = value.args[0]
(lineno, offset, text) = value.lineno, value.offset, value.text
# If there's an encoding problem with the file, the text is None.
if text is None:
# Avoid using msg, since for the only known case, it contains a
# bogus message that claims the encoding the file declared was
# unknown.
msg = "%s: problem decoding source" % filename
return [Message(filename, lineno, msg)]
else:
checker = __import__('pyflakes.checker').checker
# Okay, it's syntactically valid. Now check it.
w = checker.Checker(tree, filename)
w.messages.sort(lambda a, b: cmp(a.lineno, b.lineno))
return [Message(x.filename, x.lineno, x.message%x.message_args) for x in
w.messages]
class Check(Command):
description = 'Check for errors in the calibre source code'
BUILTINS = ['_', '__', 'dynamic_property', 'I', 'P', 'lopen', 'icu_lower',
'icu_upper', 'icu_title', 'ngettext']
CACHE = '.check-cache.pickle'
def get_files(self, cache):
for x in os.walk(self.j(self.SRC, 'calibre')):
for f in x[-1]:
y = self.j(x[0], f)
mtime = os.stat(y).st_mtime
if cache.get(y, 0) == mtime:
continue
if (f.endswith('.py') and f not in ('feedparser.py',
'pyparsing.py', 'markdown.py') and
'prs500/driver.py' not in y):
yield y, mtime
if f.endswith('.coffee'):
yield y, mtime
for x in os.walk(self.j(self.d(self.SRC), 'recipes')):
for f in x[-1]:
f = self.j(x[0], f)
mtime = os.stat(f).st_mtime
if f.endswith('.recipe') and cache.get(f, 0) != mtime:
yield f, mtime
def run(self, opts):
cache = {}
if os.path.exists(self.CACHE):
cache = cPickle.load(open(self.CACHE, 'rb'))
builtins = list(set_builtins(self.BUILTINS))
for f, mtime in self.get_files(cache):
self.info('\tChecking', f)
errors = False
ext = os.path.splitext(f)[1]
if ext in {'.py', '.recipe'}:
w = check_for_python_errors(open(f, 'rb').read(), f)
if w:
errors = True
self.report_errors(w)
else:
try:
subprocess.check_call(['coffee', '-c', '-p', f],
stdout=open(os.devnull, 'wb'))
except:
errors = True
if errors:
cPickle.dump(cache, open(self.CACHE, 'wb'), -1)
subprocess.call(['gvim', '-f', f])
raise SystemExit(1)
cache[f] = mtime
for x in builtins:
delattr(__builtin__, x)
cPickle.dump(cache, open(self.CACHE, 'wb'), -1)
wn_path = os.path.expanduser('~/work/servers/src/calibre_servers/main')
if os.path.exists(wn_path):
sys.path.insert(0, wn_path)
self.info('\tChecking Changelog...')
os.environ['DJANGO_SETTINGS_MODULE'] = 'calibre_servers.status.settings'
import whats_new
whats_new.test()
sys.path.remove(wn_path)
def report_errors(self, errors):
for err in errors:
self.info('\t\t', str(err))
|
yeyanchao/calibre
|
setup/check.py
|
Python
|
gpl-3.0
| 4,465
|
"""
relatorio
=========
A templating library which provides a way to easily output all kinds of
different files (odt, ods, png, svg, ...). Adding support for more filetypes is
easy: you just have to create a plugin for it.
relatorio also provides a report repository allowing you to link python objects
and reports together, and to find reports by mimetype, name or python object.
"""
from relatorio.reporting import MIMETemplateLoader, ReportRepository, Report
import templates
__version__ = '0.6.1'
|
bitsworking/relatorio
|
relatorio/__init__.py
|
Python
|
gpl-3.0
| 494
|
#!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import os
NBSP = '\xa0'
def mergeable(previous, current):
if previous.tail or current.tail:
return False
if previous.get('class', None) != current.get('class', None):
return False
if current.get('id', False):
return False
for attr in ('style', 'lang', 'dir'):
if previous.get(attr) != current.get(attr):
return False
try:
return next(previous.itersiblings()) is current
except StopIteration:
return False
def append_text(parent, text):
if len(parent) > 0:
parent[-1].tail = (parent[-1].tail or '') + text
else:
parent.text = (parent.text or '') + text
def merge(parent, span):
if span.text:
append_text(parent, span.text)
for child in span:
parent.append(child)
if span.tail:
append_text(parent, span.tail)
span.getparent().remove(span)
def merge_run(run):
parent = run[0]
for span in run[1:]:
merge(parent, span)
def liftable(css):
# A <span> is liftable if all its styling would work just as well if it is
# specified on the parent element.
prefixes = {x.partition('-')[0] for x in css.iterkeys()}
return not (prefixes - {'text', 'font', 'letter', 'color', 'background'})
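# Illustrative note (not part of the original source): with the rule above,
# liftable({'font-weight': 'bold', 'color': 'red'}) is True (prefixes {'font', 'color'}),
# while liftable({'margin-top': '1em'}) is False because 'margin' is not a liftable prefix.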
def add_text(elem, attr, text):
old = getattr(elem, attr) or ''
setattr(elem, attr, old + text)
def lift(span):
# Replace an element by its content (text, children and tail)
parent = span.getparent()
idx = parent.index(span)
try:
last_child = span[-1]
except IndexError:
last_child = None
if span.text:
if idx == 0:
add_text(parent, 'text', span.text)
else:
add_text(parent[idx - 1], 'tail', span.text)
for child in reversed(span):
parent.insert(idx, child)
parent.remove(span)
if span.tail:
if last_child is None:
if idx == 0:
add_text(parent, 'text', span.tail)
else:
add_text(parent[idx - 1], 'tail', span.tail)
else:
add_text(last_child, 'tail', span.tail)
def before_count(root, tag, limit=10):
body = root.xpath('//body[1]')
if not body:
return limit
ans = 0
for elem in body[0].iterdescendants():
if elem is tag:
return ans
ans += 1
if ans > limit:
return limit
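# Illustrative note (not part of the original source): before_count() returns how many
# descendants of <body> precede `tag` in document order, capped at `limit`; cleanup_markup()
# below uses it to treat the first <img> as a possible cover only when fewer than 5
# elements come before it.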
def cleanup_markup(log, root, styles, dest_dir, detect_cover, XPath):
# Move <hr>s outside paragraphs, if possible.
pancestor = XPath('|'.join('ancestor::%s[1]' % x for x in ('p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6')))
for hr in root.xpath('//span/hr'):
p = pancestor(hr)
if p:
p = p[0]
descendants = tuple(p.iterdescendants())
if descendants[-1] is hr:
parent = p.getparent()
idx = parent.index(p)
parent.insert(idx+1, hr)
hr.tail = '\n\t'
# Merge consecutive spans that have the same styling
current_run = []
for span in root.xpath('//span'):
if not current_run:
current_run.append(span)
else:
last = current_run[-1]
if mergeable(last, span):
current_run.append(span)
else:
if len(current_run) > 1:
merge_run(current_run)
current_run = [span]
# Process dir attributes
class_map = dict(styles.classes.itervalues())
parents = ('p', 'div') + tuple('h%d' % i for i in xrange(1, 7))
for parent in root.xpath('//*[(%s)]' % ' or '.join('name()="%s"' % t for t in parents)):
# Ensure that children of rtl parents that are not rtl have an
# explicit dir set. Also, remove dir from children if it is the same as
# that of the parent.
if len(parent):
parent_dir = parent.get('dir')
for child in parent.iterchildren('span'):
child_dir = child.get('dir')
if parent_dir == 'rtl' and child_dir != 'rtl':
child_dir = 'ltr'
child.set('dir', child_dir)
if child_dir and child_dir == parent_dir:
child.attrib.pop('dir')
# Remove unnecessary span tags that are the only child of a parent block
# element
for parent in root.xpath('//*[(%s) and count(span)=1]' % ' or '.join('name()="%s"' % t for t in parents)):
if len(parent) == 1 and not parent.text and not parent[0].tail and not parent[0].get('id', None):
# We have a block whose contents are entirely enclosed in a <span>
span = parent[0]
span_class = span.get('class', None)
span_css = class_map.get(span_class, {})
span_dir = span.get('dir')
if liftable(span_css) and (not span_dir or span_dir == parent.get('dir')):
pclass = parent.get('class', None)
if span_class:
pclass = (pclass + ' ' + span_class) if pclass else span_class
parent.set('class', pclass)
parent.text = span.text
parent.remove(span)
if span.get('lang'):
parent.set('lang', span.get('lang'))
if span.get('dir'):
parent.set('dir', span.get('dir'))
for child in span:
parent.append(child)
# Make spans whose only styling is bold or italic into <b> and <i> tags
for span in root.xpath('//span[@class and not(@style)]'):
css = class_map.get(span.get('class', None), {})
if len(css) == 1:
if css == {'font-style':'italic'}:
span.tag = 'i'
del span.attrib['class']
elif css == {'font-weight':'bold'}:
span.tag = 'b'
del span.attrib['class']
# Get rid of <span>s that have no styling
for span in root.xpath('//span[not(@class or @id or @style or @lang or @dir)]'):
lift(span)
# Convert <p><br style="page-break-after:always"> </p> style page breaks
# into something the viewer will render as a page break
for p in root.xpath('//p[br[@style="page-break-after:always"]]'):
if len(p) == 1 and (not p[0].tail or not p[0].tail.strip()):
p.remove(p[0])
prefix = p.get('style', '')
if prefix:
prefix += '; '
p.set('style', prefix + 'page-break-after:always')
p.text = NBSP if not p.text else p.text
if detect_cover:
# Check if the first image in the document is possibly a cover
img = root.xpath('//img[@src][1]')
if img:
img = img[0]
path = os.path.join(dest_dir, img.get('src'))
if os.path.exists(path) and before_count(root, img, limit=10) < 5:
from calibre.utils.imghdr import identify
try:
with lopen(path, 'rb') as imf:
fmt, width, height = identify(imf)
except:
width, height, fmt = 0, 0, None # noqa
del fmt
try:
is_cover = 0.8 <= height/width <= 1.8 and height*width >= 160000
except ZeroDivisionError:
is_cover = False
if is_cover:
log.debug('Detected an image that looks like a cover')
img.getparent().remove(img)
return path
|
jelly/calibre
|
src/calibre/ebooks/docx/cleanup.py
|
Python
|
gpl-3.0
| 7,809
|
# -*- coding: utf-8 -*-
bold = '\033[1m'
white = '\033[3m'
red = '\033[31m'
green = '\033[32m'
yellow = '\033[33m'
blue = '\033[34m'
reset = '\033[0m'
def error(msg):
print("[%sERROR%s] %s" % (red, reset, msg))
def warning(msg):
print("[%sWARNING%s] %s" % (yellow, reset, msg))
def info(msg):
print("[%sINFO%s] %s" % (green, reset, msg))
def debug(msg):
print("[%sDEBUG%s] %s" % (blue, reset, msg))
|
libcrack/iker
|
iker/color.py
|
Python
|
gpl-3.0
| 424
|
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
#Author: Antoine "0x010C" Lamielle
#Date: 2 April 2016
#License: GNU GPL v3
import sys
import time
import json
from datetime import date, timedelta
import requests
import urllib
import pywiki
DAYS = 10
def json_findall(v, k):
r = []
if type(v) == type({}):
for k1 in v:
if k1 == k:
r += [int(v[k1])]
r += json_findall(v[k1], k)
return r
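# Illustrative note (not part of the original source): json_findall() collects every value
# stored under key `k` in nested dicts and casts it to int, e.g.
# json_findall({"revision": {"timestamp": "20160402120000"}}, "timestamp") == [20160402120000].
# Only dict values are recursed into; values inside lists are not visited.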
def get_last_edition_time_flow_thread(self, thread):
r = self.session.post(self.api_endpoint, data={
"action":"flow",
"submodule":"view-topic",
"page":thread,
"vtformat":"wikitext",
"format":"json"
})
response = json.loads(r.text)
return max(json_findall(response, "timestamp"))
pywiki.Pywiki.get_last_edition_time_flow_thread = get_last_edition_time_flow_thread
def update_flow_topic_summary(self, thread):
r = self.session.post(self.api_endpoint, data={
"action":"flow",
"submodule":"view-topic-summary",
"page":thread,
"vtsformat":"wikitext",
"format":"json"
})
response = json.loads(r.text)
prev_revision = ""
if "revisionId" in response["flow"]["view-topic-summary"]["result"]["topicsummary"]["revision"]:
prev_revision = response["flow"]["view-topic-summary"]["result"]["topicsummary"]["revision"]["revisionId"]
r = self.session.post(self.api_endpoint, data={
"action":"flow",
"submodule":"edit-topic-summary",
"page":thread,
"etsprev_revision":prev_revision,
"etssummary":"{{Réponse ff|obsolète}}",
"etsformat":"wikitext",
"token":self.get_csrf_token(),
"format":"json"
})
pywiki.Pywiki.update_flow_topic_summary = update_flow_topic_summary
def close_flow_topic(self, thread):
r = self.session.post(self.api_endpoint, data={
"action":"flow",
"submodule":"close-open-topic",
"page":thread,
"cotmoderationState":"close",
"cotreason":"Discussion inactive depuis plus de "+str(DAYS)+" jours",
"token":self.get_csrf_token(),
"format":"json"
})
pywiki.Pywiki.close_flow_topic = close_flow_topic
# Main
def main():
pw = pywiki.Pywiki("frwiki-NeoBOT")
pw.login()
threads_in_cat = pw.get_all_pages_in_cat("Catégorie:Requête en attente d'une réponse", "2600")
threads_in_cat += pw.get_all_pages_in_cat("Catégorie:Attenteinfo", "2600")
threads_in_cat += pw.get_all_pages_in_cat("Catégorie:Autreavis", "2600")
threads_in_cat += pw.get_all_pages_in_cat("Catégorie:Encours", "2600")
date_threshold = int((date.today() - timedelta(days=DAYS)).strftime("%Y%m%d%H%M%S"))
for thread in threads_in_cat:
if pw.get_last_edition_time_flow_thread("Sujet:"+thread) < date_threshold:
print "Archivage de Sujet:"+thread
pw.update_flow_topic_summary("Sujet:"+thread)
pw.close_flow_topic("Sujet:"+thread)
main()
|
0x010C/Pywikibot-scripts
|
archivage_forum_relecture.py
|
Python
|
gpl-3.0
| 2,697
|
# fix a bug in uuid, import it first !!
import uuid
import os
import sys
import time
import pygtk
pygtk.require('2.0')
import gtk
import gtk.glade
import datetime
import types
import cuon.DMS.documentTools
class misc:
def __init__(self):
pass
def getRandomFilename(self, sPrefix='.tmp'):
return str(uuid.uuid4())+ sPrefix
# n = random.randint(0,1000000000)
# for i in range(13):
# ok = 1
# while ok:
# r = random.randint(65,122)
# if r < 91 or r > 96:
# ok = 0
# s = s + chr(r)
#
# s = s + `n` + sPrefix
#
# return s
#
class sendAsEmail():
def __init__(self):
pass
def sendNormal(self, Type, sTo, dicUser, singleDMS, dicVars ):
sSubject = 'email'
sText = 'information below'
print 'Path for ', Type, dicUser['prefPath']['templates']+'/supply_subject.tpl'
if Type == 'Supply':
fname_text = dicUser['prefPath']['templates']+'/supply_text.tpl'
fname_subject = dicUser['prefPath']['templates']+'/supply_subject.tpl'
elif Type == 'Incomming2':
fname_text = dicUser['prefPath']['templates']+'/incomming2_text.tpl'
fname_subject = dicUser['prefPath']['templates']+'/incomming2_subject.tpl'
try:
f = open(fname_subject)
sSubject = f.read()
f.close()
except:
print 'error read subject'
try:
f = open(fname_text)
sText = f.read()
f.close()
except:
print 'error read Text'
dicVars['From'] = dicUser['Email']['From']
dicVars['To'] = sTo
dicVars['Signatur'] = '\n\n -- \n' + dicUser['Email']['Signatur']
dicVars['Body'] = sText
dicVars['email_subject'] = sSubject
print dicVars
dt = cuon.DMS.documentTools.documentTools()
dt.viewDocument(singleDMS,dicUser, dicVars,'sentAutomaticEmail', sTo)
class Treeview:
def __init__(self):
pass
def start(self,ts, sType='Text',sTitle = 'Title'):
#ts = self.getWidget(sName)
#treeview.set_model(liststore)
if sType == 'Text':
renderer = gtk.CellRendererText()
column = gtk.TreeViewColumn(sTitle, renderer, text=0)
ts.append_column(column)
def fillTree(self, ts, liGroups,liNames,sConnect):
''' ts = Widget, '''
print 'fill Tree'
try:
treestore = gtk.TreeStore(object)
treestore = gtk.TreeStore(str)
ts.set_model(treestore)
if liGroups:
lastGroup = None
#iter = treestore.append(None,[_('Schedul')])
#print 'iter = ', iter
iter2 = None
iter3 = None
#liDates.reverse()
for oneGroup in liGroups:
groupname = ''
for name in liNames:
if isinstance(oneGroup[name], types.StringType):
groupname += oneGroup[name] + ', '
else:
groupname += `oneGroup[name]` + ', '
iter = treestore.append(None,[groupname + ' ###' +`oneGroup['id']` ])
#print 'add iter', [groupname + '###' +`oneGroup['id']` ]
#iter2 = treestore.insert_after(iter,None,['TESTEN'])
#print 'End liDates'
ts.show()
#self.getWidget('scrolledwindow10').show()
#exec (sConnect)
print 'ts', ts
except Exception, params:
print Exception, params
|
BackupTheBerlios/cuon-svn
|
cuon_client/cuon/Misc/misc.py
|
Python
|
gpl-3.0
| 4,038
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: junos_facts
version_added: "2.1"
author: "Nathaniel Case (@qalthos)"
short_description: Collect facts from remote devices running Juniper Junos
description:
- Collects fact information from a remote device running the Junos
operating system. By default, the module will collect basic fact
information from the device to be included with the hostvars.
Additional fact information can be collected based on the
configured set of arguments.
extends_documentation_fragment: junos
options:
gather_subset:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all, hardware, config, and interfaces. Can specify a list of
values to include a larger subset. Values can also be used
with an initial C(M(!)) to specify that a specific subset should
        not be collected. To maintain backward compatibility, old style facts
        can be retrieved using the C(all) value; this requires junos-eznc to be installed
        as a prerequisite. Valid values of gather_subset are default, hardware,
        config, interfaces, ofacts. If C(ofacts) is present in the list it fetches
        the old style facts (fact keys without the 'ansible_' prefix); this requires the
        junos-eznc library to be installed on the control node and the device login
        credentials to be given in the C(provider) option.
required: false
default: ['!config', '!ofacts']
version_added: "2.3"
config_format:
description:
- The I(config_format) argument specifies the format of the configuration
when serializing output from the device. This argument is applicable
only when C(config) value is present in I(gather_subset).
        The I(config_format) should be supported by the junos version running on the
        device. This value is not applicable while fetching old style facts, that is,
        when C(all) or C(ofacts) is present in the I(gather_subset) value.
required: false
default: 'text'
choices: ['xml', 'text', 'set', 'json']
version_added: "2.3"
requirements:
- ncclient (>=v0.5.2)
notes:
  - Ensure the I(config_format) used to retrieve configuration from the device
    is supported by the junos version running on the device.
  - With I(config_format = json), configuration in the results will be a dictionary (and not a JSON string)
- This module requires the netconf system service be enabled on
the remote device being managed.
- Tested against vSRX JUNOS version 15.1X49-D15.4, vqfx-10000 JUNOS Version 15.1X53-D60.4.
- Recommended connection is C(netconf). See L(the Junos OS Platform Options,../network/user_guide/platform_junos.html).
- This module also works with C(local) connections for legacy playbooks.
  - Fetching old style facts requires the junos-eznc library to be installed on the control node, and the device login
    credentials must be given in the provider option.
"""
EXAMPLES = """
- name: collect default set of facts
junos_facts:
- name: collect default set of facts and configuration
junos_facts:
gather_subset: config
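# Illustrative only: the '!' prefix documented above excludes a subset.
- name: collect all facts except the current configuration
  junos_facts:
    gather_subset: "!config"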
"""
RETURN = """
ansible_facts:
description: Returns the facts collect from the device
returned: always
type: dict
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.netconf import exec_rpc
from ansible.module_utils.network.junos.junos import junos_argument_spec, get_param, tostring
from ansible.module_utils.network.junos.junos import get_configuration, get_connection
from ansible.module_utils._text import to_native
from ansible.module_utils.six import iteritems
try:
from lxml.etree import Element, SubElement
except ImportError:
from xml.etree.ElementTree import Element, SubElement
try:
from jnpr.junos import Device
from jnpr.junos.exception import ConnectError
HAS_PYEZ = True
except ImportError:
HAS_PYEZ = False
USE_PERSISTENT_CONNECTION = True
class FactsBase(object):
def __init__(self, module):
self.module = module
self.facts = dict()
def populate(self):
raise NotImplementedError
def cli(self, command):
reply = command(self.module, command)
output = reply.find('.//output')
if not output:
self.module.fail_json(msg='failed to retrieve facts for command %s' % command)
return str(output.text).strip()
def rpc(self, rpc):
return exec_rpc(self.module, tostring(Element(rpc)))
def get_text(self, ele, tag):
try:
return str(ele.find(tag).text).strip()
except AttributeError:
pass
class Default(FactsBase):
def populate(self):
reply = self.rpc('get-software-information')
data = reply.find('.//software-information')
self.facts.update({
'hostname': self.get_text(data, 'host-name'),
'version': self.get_text(data, 'junos-version'),
'model': self.get_text(data, 'product-model')
})
reply = self.rpc('get-chassis-inventory')
data = reply.find('.//chassis-inventory/chassis')
self.facts['serialnum'] = self.get_text(data, 'serial-number')
class Config(FactsBase):
def populate(self):
config_format = self.module.params['config_format']
reply = get_configuration(self.module, format=config_format)
if config_format == 'xml':
config = tostring(reply.find('configuration')).strip()
elif config_format == 'text':
config = self.get_text(reply, 'configuration-text')
elif config_format == 'json':
config = self.module.from_json(reply.text.strip())
elif config_format == 'set':
config = self.get_text(reply, 'configuration-set')
self.facts['config'] = config
class Hardware(FactsBase):
def populate(self):
reply = self.rpc('get-system-memory-information')
data = reply.find('.//system-memory-information/system-memory-summary-information')
self.facts.update({
'memfree_mb': int(self.get_text(data, 'system-memory-free')),
'memtotal_mb': int(self.get_text(data, 'system-memory-total'))
})
reply = self.rpc('get-system-storage')
data = reply.find('.//system-storage-information')
filesystems = list()
for obj in data:
filesystems.append(self.get_text(obj, 'filesystem-name'))
self.facts['filesystems'] = filesystems
reply = self.rpc('get-route-engine-information')
data = reply.find('.//route-engine-information')
routing_engines = dict()
for obj in data:
slot = self.get_text(obj, 'slot')
routing_engines.update({slot: {}})
routing_engines[slot].update({'slot': slot})
for child in obj:
if child.text != "\n":
routing_engines[slot].update({child.tag.replace("-", "_"): child.text})
self.facts['routing_engines'] = routing_engines
if len(data) > 1:
self.facts['has_2RE'] = True
else:
self.facts['has_2RE'] = False
reply = self.rpc('get-chassis-inventory')
data = reply.findall('.//chassis-module')
modules = list()
for obj in data:
mod = dict()
for child in obj:
if child.text != "\n":
mod.update({child.tag.replace("-", "_"): child.text})
modules.append(mod)
self.facts['modules'] = modules
class Interfaces(FactsBase):
def populate(self):
ele = Element('get-interface-information')
SubElement(ele, 'detail')
reply = exec_rpc(self.module, tostring(ele))
interfaces = {}
for item in reply[0]:
name = self.get_text(item, 'name')
obj = {
'oper-status': self.get_text(item, 'oper-status'),
'admin-status': self.get_text(item, 'admin-status'),
'speed': self.get_text(item, 'speed'),
'macaddress': self.get_text(item, 'hardware-physical-address'),
'mtu': self.get_text(item, 'mtu'),
'type': self.get_text(item, 'if-type'),
}
interfaces[name] = obj
self.facts['interfaces'] = interfaces
class OFacts(FactsBase):
def _connect(self, module):
host = get_param(module, 'host')
kwargs = {
'port': get_param(module, 'port') or 830,
'user': get_param(module, 'username')
}
if get_param(module, 'password'):
kwargs['passwd'] = get_param(module, 'password')
if get_param(module, 'ssh_keyfile'):
kwargs['ssh_private_key_file'] = get_param(module, 'ssh_keyfile')
kwargs['gather_facts'] = False
try:
device = Device(host, **kwargs)
device.open()
device.timeout = get_param(module, 'timeout') or 10
except ConnectError as exc:
module.fail_json('unable to connect to %s: %s' % (host, to_native(exc)))
return device
def populate(self):
device = self._connect(self.module)
facts = dict(device.facts)
if '2RE' in facts:
facts['has_2RE'] = facts['2RE']
del facts['2RE']
facts['version_info'] = dict(facts['version_info'])
if 'junos_info' in facts:
for key, value in facts['junos_info'].items():
if 'object' in value:
value['object'] = dict(value['object'])
return facts
FACT_SUBSETS = dict(
default=Default,
hardware=Hardware,
config=Config,
interfaces=Interfaces,
ofacts=OFacts
)
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
def main():
""" Main entry point for AnsibleModule
"""
argument_spec = dict(
gather_subset=dict(default=['!config', '!ofacts'], type='list'),
config_format=dict(default='text', choices=['xml', 'text', 'set', 'json']),
)
argument_spec.update(junos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
get_connection(module)
warnings = list()
gather_subset = module.params['gather_subset']
runable_subsets = set()
exclude_subsets = set()
for subset in gather_subset:
if subset == 'all':
runable_subsets.update(VALID_SUBSETS)
continue
if subset.startswith('!'):
subset = subset[1:]
if subset == 'all':
exclude_subsets.update(VALID_SUBSETS)
continue
exclude = True
else:
exclude = False
if subset not in VALID_SUBSETS:
module.fail_json(msg='Subset must be one of [%s], got %s' %
(', '.join(VALID_SUBSETS), subset))
if exclude:
exclude_subsets.add(subset)
else:
runable_subsets.add(subset)
if not runable_subsets:
runable_subsets.update(VALID_SUBSETS)
runable_subsets.difference_update(exclude_subsets)
runable_subsets.add('default')
facts = dict()
facts['gather_subset'] = list(runable_subsets)
instances = list()
ansible_facts = dict()
if 'ofacts' in runable_subsets:
if HAS_PYEZ:
ansible_facts.update(OFacts(module).populate())
else:
warnings += ['junos-eznc is required to gather old style facts but does not appear to be installed. '
'It can be installed using `pip install junos-eznc`']
runable_subsets.remove('ofacts')
for key in runable_subsets:
instances.append(FACT_SUBSETS[key](module))
for inst in instances:
inst.populate()
facts.update(inst.facts)
for key, value in iteritems(facts):
key = 'ansible_net_%s' % key
ansible_facts[key] = value
module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
if __name__ == '__main__':
main()
|
tareqalayan/ansible
|
lib/ansible/modules/network/junos/junos_facts.py
|
Python
|
gpl-3.0
| 12,532
|
# -*- coding: utf-8 -*-
# pylint: disable=bad-whitespace
from odoo.addons.account.tests.common import AccountTestInvoicingCommon
from odoo.tests.common import Form
from odoo.tests import tagged
from odoo import fields, Command
@tagged('post_install', '-at_install')
class TestAccountMoveOutRefundOnchanges(AccountTestInvoicingCommon):
@classmethod
def setUpClass(cls, chart_template_ref=None):
super().setUpClass(chart_template_ref=chart_template_ref)
cls.invoice = cls.init_invoice('out_refund', products=cls.product_a+cls.product_b)
cls.product_line_vals_1 = {
'name': cls.product_a.name,
'product_id': cls.product_a.id,
'account_id': cls.product_a.property_account_income_id.id,
'partner_id': cls.partner_a.id,
'product_uom_id': cls.product_a.uom_id.id,
'quantity': 1.0,
'discount': 0.0,
'price_unit': 1000.0,
'price_subtotal': 1000.0,
'price_total': 1150.0,
'tax_ids': cls.product_a.taxes_id.ids,
'tax_line_id': False,
'currency_id': cls.company_data['currency'].id,
'amount_currency': 1000.0,
'debit': 1000.0,
'credit': 0.0,
'date_maturity': False,
}
cls.product_line_vals_2 = {
'name': cls.product_b.name,
'product_id': cls.product_b.id,
'account_id': cls.product_b.property_account_income_id.id,
'partner_id': cls.partner_a.id,
'product_uom_id': cls.product_b.uom_id.id,
'quantity': 1.0,
'discount': 0.0,
'price_unit': 200.0,
'price_subtotal': 200.0,
'price_total': 260.0,
'tax_ids': cls.product_b.taxes_id.ids,
'tax_line_id': False,
'currency_id': cls.company_data['currency'].id,
'amount_currency': 200.0,
'debit': 200.0,
'credit': 0.0,
'date_maturity': False,
}
cls.tax_line_vals_1 = {
'name': cls.tax_sale_a.name,
'product_id': False,
'account_id': cls.company_data['default_account_tax_sale'].id,
'partner_id': cls.partner_a.id,
'product_uom_id': False,
'quantity': 1.0,
'discount': 0.0,
'price_unit': 180.0,
'price_subtotal': 180.0,
'price_total': 180.0,
'tax_ids': [],
'tax_line_id': cls.tax_sale_a.id,
'currency_id': cls.company_data['currency'].id,
'amount_currency': 180.0,
'debit': 180.0,
'credit': 0.0,
'date_maturity': False,
}
cls.tax_line_vals_2 = {
'name': cls.tax_sale_b.name,
'product_id': False,
'account_id': cls.company_data['default_account_tax_sale'].id,
'partner_id': cls.partner_a.id,
'product_uom_id': False,
'quantity': 1.0,
'discount': 0.0,
'price_unit': 30.0,
'price_subtotal': 30.0,
'price_total': 30.0,
'tax_ids': [],
'tax_line_id': cls.tax_sale_b.id,
'currency_id': cls.company_data['currency'].id,
'amount_currency': 30.0,
'debit': 30.0,
'credit': 0.0,
'date_maturity': False,
}
cls.term_line_vals_1 = {
'name': '',
'product_id': False,
'account_id': cls.company_data['default_account_receivable'].id,
'partner_id': cls.partner_a.id,
'product_uom_id': False,
'quantity': 1.0,
'discount': 0.0,
'price_unit': -1410.0,
'price_subtotal': -1410.0,
'price_total': -1410.0,
'tax_ids': [],
'tax_line_id': False,
'currency_id': cls.company_data['currency'].id,
'amount_currency': -1410.0,
'debit': 0.0,
'credit': 1410.0,
'date_maturity': fields.Date.from_string('2019-01-01'),
}
cls.move_vals = {
'partner_id': cls.partner_a.id,
'currency_id': cls.company_data['currency'].id,
'journal_id': cls.company_data['default_journal_sale'].id,
'date': fields.Date.from_string('2019-01-01'),
'fiscal_position_id': False,
'payment_reference': '',
'invoice_payment_term_id': cls.pay_terms_a.id,
'amount_untaxed': 1200.0,
'amount_tax': 210.0,
'amount_total': 1410.0,
}
def setUp(self):
super(TestAccountMoveOutRefundOnchanges, self).setUp()
self.assertInvoiceValues(self.invoice, [
self.product_line_vals_1,
self.product_line_vals_2,
self.tax_line_vals_1,
self.tax_line_vals_2,
self.term_line_vals_1,
], self.move_vals)
def test_out_refund_line_onchange_product_1(self):
move_form = Form(self.invoice)
with move_form.invoice_line_ids.edit(0) as line_form:
line_form.product_id = self.product_b
move_form.save()
self.assertInvoiceValues(self.invoice, [
{
**self.product_line_vals_1,
'name': self.product_b.name,
'product_id': self.product_b.id,
'product_uom_id': self.product_b.uom_id.id,
'account_id': self.product_b.property_account_income_id.id,
'price_unit': 200.0,
'price_subtotal': 200.0,
'price_total': 260.0,
'tax_ids': self.product_b.taxes_id.ids,
'amount_currency': 200.0,
'debit': 200.0,
},
self.product_line_vals_2,
{
**self.tax_line_vals_1,
'price_unit': 60.0,
'price_subtotal': 60.0,
'price_total': 60.0,
'amount_currency': 60.0,
'debit': 60.0,
},
{
**self.tax_line_vals_2,
'price_unit': 60.0,
'price_subtotal': 60.0,
'price_total': 60.0,
'amount_currency': 60.0,
'debit': 60.0,
},
{
**self.term_line_vals_1,
'price_unit': -520.0,
'price_subtotal': -520.0,
'price_total': -520.0,
'amount_currency': -520.0,
'credit': 520.0,
},
], {
**self.move_vals,
'amount_untaxed': 400.0,
'amount_tax': 120.0,
'amount_total': 520.0,
})
def test_out_refund_line_onchange_business_fields_1(self):
move_form = Form(self.invoice)
with move_form.invoice_line_ids.edit(0) as line_form:
# Current price_unit is 1000.
            # We set quantity = 4, discount = 50%, price_unit = 500. The debit/credit fields don't change because (4 * 500) * 0.5 = 1000.
line_form.quantity = 4
line_form.discount = 50
line_form.price_unit = 500
move_form.save()
self.assertInvoiceValues(self.invoice, [
{
**self.product_line_vals_1,
'quantity': 4,
'discount': 50.0,
'price_unit': 500.0,
},
self.product_line_vals_2,
self.tax_line_vals_1,
self.tax_line_vals_2,
self.term_line_vals_1,
], self.move_vals)
move_form = Form(self.invoice)
with move_form.line_ids.edit(2) as line_form:
            # Reset the fields, except the discount which becomes 100%.
# /!\ The modification is made on the accounting tab.
line_form.quantity = 1
line_form.discount = 100
line_form.price_unit = 1000
move_form.save()
self.assertInvoiceValues(self.invoice, [
{
**self.product_line_vals_1,
'discount': 100.0,
'price_subtotal': 0.0,
'price_total': 0.0,
'amount_currency': 0.0,
'debit': 0.0,
},
self.product_line_vals_2,
{
**self.tax_line_vals_1,
'price_unit': 30.0,
'price_subtotal': 30.0,
'price_total': 30.0,
'amount_currency': 30.0,
'debit': 30.0,
},
self.tax_line_vals_2,
{
**self.term_line_vals_1,
'price_unit': -260.0,
'price_subtotal': -260.0,
'price_total': -260.0,
'amount_currency': -260.0,
'credit': 260.0,
},
], {
**self.move_vals,
'amount_untaxed': 200.0,
'amount_tax': 60.0,
'amount_total': 260.0,
})
def test_out_refund_line_onchange_accounting_fields_1(self):
move_form = Form(self.invoice)
with move_form.line_ids.edit(2) as line_form:
# Custom debit on the first product line.
line_form.debit = 3000
with move_form.line_ids.edit(3) as line_form:
# Custom credit on the second product line. Credit should be reset by onchange.
# /!\ It's a negative line.
line_form.credit = 500
with move_form.line_ids.edit(0) as line_form:
# Custom debit on the first tax line.
line_form.debit = 800
with move_form.line_ids.edit(4) as line_form:
# Custom debit on the second tax line.
line_form.debit = 250
move_form.save()
self.assertInvoiceValues(self.invoice, [
{
**self.product_line_vals_1,
'price_unit': 3000.0,
'price_subtotal': 3000.0,
'price_total': 3450.0,
'amount_currency': 3000.0,
'debit': 3000.0,
},
{
**self.product_line_vals_2,
'price_unit': -500.0,
'price_subtotal': -500.0,
'price_total': -650.0,
'amount_currency': -500.0,
'debit': 0.0,
'credit': 500.0,
},
{
**self.tax_line_vals_1,
'price_unit': 800.0,
'price_subtotal': 800.0,
'price_total': 800.0,
'amount_currency': 800.0,
'debit': 800.0,
},
{
**self.tax_line_vals_2,
'price_unit': 250.0,
'price_subtotal': 250.0,
'price_total': 250.0,
'amount_currency': 250.0,
'debit': 250.0,
},
{
**self.term_line_vals_1,
'price_unit': -3550.0,
'price_subtotal': -3550.0,
'price_total': -3550.0,
'amount_currency': -3550.0,
'credit': 3550.0,
},
], {
**self.move_vals,
'amount_untaxed': 2500.0,
'amount_tax': 1050.0,
'amount_total': 3550.0,
})
def test_out_refund_line_onchange_partner_1(self):
move_form = Form(self.invoice)
move_form.partner_id = self.partner_b
move_form.payment_reference = 'turlututu'
move_form.save()
self.assertInvoiceValues(self.invoice, [
{
**self.product_line_vals_1,
'partner_id': self.partner_b.id,
},
{
**self.product_line_vals_2,
'partner_id': self.partner_b.id,
},
{
**self.tax_line_vals_1,
'partner_id': self.partner_b.id,
},
{
**self.tax_line_vals_2,
'partner_id': self.partner_b.id,
},
{
**self.term_line_vals_1,
'name': 'turlututu',
'partner_id': self.partner_b.id,
'account_id': self.partner_b.property_account_receivable_id.id,
'price_unit': -987.0,
'price_subtotal': -987.0,
'price_total': -987.0,
'amount_currency': -987.0,
'credit': 987.0,
'date_maturity': fields.Date.from_string('2019-02-28'),
},
{
**self.term_line_vals_1,
'name': 'turlututu',
'partner_id': self.partner_b.id,
'account_id': self.partner_b.property_account_receivable_id.id,
'price_unit': -423.0,
'price_subtotal': -423.0,
'price_total': -423.0,
'amount_currency': -423.0,
'credit': 423.0,
},
], {
**self.move_vals,
'partner_id': self.partner_b.id,
'payment_reference': 'turlututu',
'fiscal_position_id': self.fiscal_pos_a.id,
'invoice_payment_term_id': self.pay_terms_b.id,
'amount_untaxed': 1200.0,
'amount_tax': 210.0,
'amount_total': 1410.0,
})
# Remove lines and recreate them to apply the fiscal position.
move_form = Form(self.invoice)
move_form.invoice_line_ids.remove(0)
move_form.invoice_line_ids.remove(0)
with move_form.invoice_line_ids.new() as line_form:
line_form.product_id = self.product_a
with move_form.invoice_line_ids.new() as line_form:
line_form.product_id = self.product_b
move_form.save()
self.assertInvoiceValues(self.invoice, [
{
**self.product_line_vals_1,
'account_id': self.product_b.property_account_income_id.id,
'partner_id': self.partner_b.id,
'tax_ids': self.tax_sale_b.ids,
},
{
**self.product_line_vals_2,
'partner_id': self.partner_b.id,
'price_total': 230.0,
'tax_ids': self.tax_sale_b.ids,
},
{
**self.tax_line_vals_1,
'name': self.tax_sale_b.name,
'partner_id': self.partner_b.id,
'tax_line_id': self.tax_sale_b.id,
},
{
**self.term_line_vals_1,
'name': 'turlututu',
'account_id': self.partner_b.property_account_receivable_id.id,
'partner_id': self.partner_b.id,
'price_unit': -966.0,
'price_subtotal': -966.0,
'price_total': -966.0,
'amount_currency': -966.0,
'credit': 966.0,
'date_maturity': fields.Date.from_string('2019-02-28'),
},
{
**self.term_line_vals_1,
'name': 'turlututu',
'account_id': self.partner_b.property_account_receivable_id.id,
'partner_id': self.partner_b.id,
'price_unit': -414.0,
'price_subtotal': -414.0,
'price_total': -414.0,
'amount_currency': -414.0,
'credit': 414.0,
},
], {
**self.move_vals,
'partner_id': self.partner_b.id,
'payment_reference': 'turlututu',
'fiscal_position_id': self.fiscal_pos_a.id,
'invoice_payment_term_id': self.pay_terms_b.id,
'amount_untaxed': 1200.0,
'amount_tax': 180.0,
'amount_total': 1380.0,
})
def test_out_refund_line_onchange_taxes_1(self):
move_form = Form(self.invoice)
with move_form.invoice_line_ids.edit(0) as line_form:
line_form.price_unit = 1200
line_form.tax_ids.add(self.tax_armageddon)
move_form.save()
child_tax_1 = self.tax_armageddon.children_tax_ids[0]
child_tax_2 = self.tax_armageddon.children_tax_ids[1]
self.assertInvoiceValues(self.invoice, [
{
**self.product_line_vals_1,
'price_unit': 1200.0,
'price_subtotal': 1000.0,
'price_total': 1470.0,
'tax_ids': (self.tax_sale_a + self.tax_armageddon).ids,
},
self.product_line_vals_2,
self.tax_line_vals_1,
self.tax_line_vals_2,
{
'name': child_tax_1.name,
'product_id': False,
'account_id': self.company_data['default_account_tax_sale'].id,
'partner_id': self.partner_a.id,
'product_uom_id': False,
'quantity': 1.0,
'discount': 0.0,
'price_unit': 80.0,
'price_subtotal': 80.0,
'price_total': 88.0,
'tax_ids': child_tax_2.ids,
'tax_line_id': child_tax_1.id,
'currency_id': self.company_data['currency'].id,
'amount_currency': 80.0,
'debit': 80.0,
'credit': 0.0,
'date_maturity': False,
},
{
'name': child_tax_1.name,
'product_id': False,
'account_id': self.company_data['default_account_revenue'].id,
'partner_id': self.partner_a.id,
'product_uom_id': False,
'quantity': 1.0,
'discount': 0.0,
'price_unit': 120.0,
'price_subtotal': 120.0,
'price_total': 132.0,
'tax_ids': child_tax_2.ids,
'tax_line_id': child_tax_1.id,
'currency_id': self.company_data['currency'].id,
'amount_currency': 120.0,
'debit': 120.0,
'credit': 0.0,
'date_maturity': False,
},
{
'name': child_tax_2.name,
'product_id': False,
'account_id': child_tax_2.cash_basis_transition_account_id.id,
'partner_id': self.partner_a.id,
'product_uom_id': False,
'quantity': 1.0,
'discount': 0.0,
'price_unit': 120.0,
'price_subtotal': 120.0,
'price_total': 120.0,
'tax_ids': [],
'tax_line_id': child_tax_2.id,
'currency_id': self.company_data['currency'].id,
'amount_currency': 120.0,
'debit': 120.0,
'credit': 0.0,
'date_maturity': False,
},
{
**self.term_line_vals_1,
'price_unit': -1730.0,
'price_subtotal': -1730.0,
'price_total': -1730.0,
'amount_currency': -1730.0,
'credit': 1730.0,
},
], {
**self.move_vals,
'amount_untaxed': 1200.0,
'amount_tax': 530.0,
'amount_total': 1730.0,
})
def test_out_refund_line_onchange_cash_rounding_1(self):
move_form = Form(self.invoice)
# Add a cash rounding having 'add_invoice_line'.
move_form.invoice_cash_rounding_id = self.cash_rounding_a
move_form.save()
# The cash rounding does nothing as the total is already rounded.
self.assertInvoiceValues(self.invoice, [
self.product_line_vals_1,
self.product_line_vals_2,
self.tax_line_vals_1,
self.tax_line_vals_2,
self.term_line_vals_1,
], self.move_vals)
move_form = Form(self.invoice)
with move_form.invoice_line_ids.edit(0) as line_form:
line_form.price_unit = 999.99
move_form.save()
self.assertInvoiceValues(self.invoice, [
{
'name': 'add_invoice_line',
'product_id': False,
'account_id': self.cash_rounding_a.loss_account_id.id,
'partner_id': self.partner_a.id,
'product_uom_id': False,
'quantity': 1.0,
'discount': 0.0,
'price_unit': 0.01,
'price_subtotal': 0.01,
'price_total': 0.01,
'tax_ids': [],
'tax_line_id': False,
'currency_id': self.company_data['currency'].id,
'amount_currency': 0.01,
'debit': 0.01,
'credit': 0.0,
'date_maturity': False,
},
{
**self.product_line_vals_1,
'price_unit': 999.99,
'price_subtotal': 999.99,
'price_total': 1149.99,
'amount_currency': 999.99,
'debit': 999.99,
},
self.product_line_vals_2,
self.tax_line_vals_1,
self.tax_line_vals_2,
self.term_line_vals_1,
], self.move_vals)
move_form = Form(self.invoice)
# Change the cash rounding to one having 'biggest_tax'.
move_form.invoice_cash_rounding_id = self.cash_rounding_b
move_form.save()
self.assertInvoiceValues(self.invoice, [
{
**self.product_line_vals_1,
'price_unit': 999.99,
'price_subtotal': 999.99,
'price_total': 1149.99,
'amount_currency': 999.99,
'debit': 999.99,
},
self.product_line_vals_2,
self.tax_line_vals_1,
self.tax_line_vals_2,
{
'name': '%s (rounding)' % self.tax_sale_a.name,
'product_id': False,
'account_id': self.company_data['default_account_tax_sale'].id,
'partner_id': self.partner_a.id,
'product_uom_id': False,
'quantity': 1.0,
'discount': 0.0,
'price_unit': -0.04,
'price_subtotal': -0.04,
'price_total': -0.04,
'tax_ids': [],
'tax_line_id': self.tax_sale_a.id,
'currency_id': self.company_data['currency'].id,
'amount_currency': -0.04,
'debit': 0.0,
'credit': 0.04,
'date_maturity': False,
},
{
**self.term_line_vals_1,
'price_unit': -1409.95,
'price_subtotal': -1409.95,
'price_total': -1409.95,
'amount_currency': -1409.95,
'credit': 1409.95,
},
], {
**self.move_vals,
'amount_untaxed': 1199.99,
'amount_tax': 209.96,
'amount_total': 1409.95,
})
def test_out_refund_line_onchange_currency_1(self):
move_form = Form(self.invoice)
move_form.currency_id = self.currency_data['currency']
move_form.save()
self.assertInvoiceValues(self.invoice, [
{
**self.product_line_vals_1,
'currency_id': self.currency_data['currency'].id,
'amount_currency': 1000.0,
'debit': 500.0,
},
{
**self.product_line_vals_2,
'currency_id': self.currency_data['currency'].id,
'amount_currency': 200.0,
'debit': 100.0,
},
{
**self.tax_line_vals_1,
'currency_id': self.currency_data['currency'].id,
'amount_currency': 180.0,
'debit': 90.0,
},
{
**self.tax_line_vals_2,
'currency_id': self.currency_data['currency'].id,
'amount_currency': 30.0,
'debit': 15.0,
},
{
**self.term_line_vals_1,
'currency_id': self.currency_data['currency'].id,
'amount_currency': -1410.0,
'credit': 705.0,
},
], {
**self.move_vals,
'currency_id': self.currency_data['currency'].id,
})
move_form = Form(self.invoice)
# Change the date to get another rate: 1/3 instead of 1/2.
move_form.date = fields.Date.from_string('2016-01-01')
move_form.save()
self.assertInvoiceValues(self.invoice, [
{
**self.product_line_vals_1,
'currency_id': self.currency_data['currency'].id,
'amount_currency': 1000.0,
'debit': 333.33,
},
{
**self.product_line_vals_2,
'currency_id': self.currency_data['currency'].id,
'amount_currency': 200.0,
'debit': 66.67,
},
{
**self.tax_line_vals_1,
'currency_id': self.currency_data['currency'].id,
'amount_currency': 180.0,
'debit': 60.0,
},
{
**self.tax_line_vals_2,
'currency_id': self.currency_data['currency'].id,
'amount_currency': 30.0,
'debit': 10.0,
},
{
**self.term_line_vals_1,
'currency_id': self.currency_data['currency'].id,
'amount_currency': -1410.0,
'credit': 470.0,
},
], {
**self.move_vals,
'currency_id': self.currency_data['currency'].id,
'date': fields.Date.from_string('2016-01-01'),
})
move_form = Form(self.invoice)
with move_form.invoice_line_ids.edit(0) as line_form:
# 0.045 * 0.1 = 0.0045. As the foreign currency has a 0.001 rounding,
# the result should be 0.005 after rounding.
line_form.quantity = 0.1
line_form.price_unit = 0.045
move_form.save()
self.assertInvoiceValues(self.invoice, [
{
**self.product_line_vals_1,
'quantity': 0.1,
'price_unit': 0.05,
'price_subtotal': 0.005,
'price_total': 0.006,
'currency_id': self.currency_data['currency'].id,
'amount_currency': 0.005,
'debit': 0.0,
},
{
**self.product_line_vals_2,
'currency_id': self.currency_data['currency'].id,
'amount_currency': 200.0,
'debit': 66.67,
},
{
**self.tax_line_vals_1,
'price_unit': 30.0,
'price_subtotal': 30.001,
'price_total': 30.001,
'currency_id': self.currency_data['currency'].id,
'amount_currency': 30.001,
'debit': 10.0,
},
{
**self.tax_line_vals_2,
'currency_id': self.currency_data['currency'].id,
'amount_currency': 30.0,
'debit': 10.0,
},
{
**self.term_line_vals_1,
'currency_id': self.currency_data['currency'].id,
'price_unit': -260.01,
'price_subtotal': -260.006,
'price_total': -260.006,
'amount_currency': -260.006,
'credit': 86.67,
},
], {
**self.move_vals,
'currency_id': self.currency_data['currency'].id,
'date': fields.Date.from_string('2016-01-01'),
'amount_untaxed': 200.005,
'amount_tax': 60.001,
'amount_total': 260.006,
})
        # Switch back to the company currency (exit the multi-currency case).
move_form = Form(self.invoice)
move_form.currency_id = self.company_data['currency']
move_form.save()
self.assertInvoiceValues(self.invoice, [
{
**self.product_line_vals_1,
'quantity': 0.1,
'price_unit': 0.05,
'price_subtotal': 0.01,
'price_total': 0.01,
'amount_currency': 0.01,
'debit': 0.01,
},
self.product_line_vals_2,
{
**self.tax_line_vals_1,
'price_unit': 30.0,
'price_subtotal': 30.0,
'price_total': 30.0,
'amount_currency': 30.0,
'debit': 30.0,
},
self.tax_line_vals_2,
{
**self.term_line_vals_1,
'price_unit': -260.01,
'price_subtotal': -260.01,
'price_total': -260.01,
'amount_currency': -260.01,
'credit': 260.01,
},
], {
**self.move_vals,
'currency_id': self.company_data['currency'].id,
'date': fields.Date.from_string('2016-01-01'),
'amount_untaxed': 200.01,
'amount_tax': 60.0,
'amount_total': 260.01,
})
def test_out_refund_create_1(self):
# Test creating an account_move with the least information.
move = self.env['account.move'].create({
'move_type': 'out_refund',
'partner_id': self.partner_a.id,
'invoice_date': fields.Date.from_string('2019-01-01'),
'currency_id': self.currency_data['currency'].id,
'invoice_payment_term_id': self.pay_terms_a.id,
'invoice_line_ids': [
Command.create({
'product_id': self.product_line_vals_1['product_id'],
'product_uom_id': self.product_line_vals_1['product_uom_id'],
'price_unit': self.product_line_vals_1['price_unit'],
'tax_ids': [Command.set(self.product_line_vals_1['tax_ids'])],
}),
Command.create({
'product_id': self.product_line_vals_2['product_id'],
'product_uom_id': self.product_line_vals_2['product_uom_id'],
'price_unit': self.product_line_vals_2['price_unit'],
'tax_ids': [Command.set(self.product_line_vals_2['tax_ids'])],
}),
],
})
self.assertInvoiceValues(move, [
{
**self.product_line_vals_1,
'currency_id': self.currency_data['currency'].id,
'amount_currency': 1000.0,
'debit': 500.0,
},
{
**self.product_line_vals_2,
'currency_id': self.currency_data['currency'].id,
'amount_currency': 200.0,
'debit': 100.0,
},
{
**self.tax_line_vals_1,
'currency_id': self.currency_data['currency'].id,
'amount_currency': 180.0,
'debit': 90.0,
},
{
**self.tax_line_vals_2,
'currency_id': self.currency_data['currency'].id,
'amount_currency': 30.0,
'debit': 15.0,
},
{
**self.term_line_vals_1,
'currency_id': self.currency_data['currency'].id,
'amount_currency': -1410.0,
'credit': 705.0,
},
], {
**self.move_vals,
'currency_id': self.currency_data['currency'].id,
})
def test_out_refund_write_1(self):
        # Test creating an account_move with the least information, then adding a line with write().
move = self.env['account.move'].create({
'move_type': 'out_refund',
'partner_id': self.partner_a.id,
'invoice_date': fields.Date.from_string('2019-01-01'),
'currency_id': self.currency_data['currency'].id,
'invoice_payment_term_id': self.pay_terms_a.id,
'invoice_line_ids': [
Command.create({
'product_id': self.product_line_vals_1['product_id'],
'product_uom_id': self.product_line_vals_1['product_uom_id'],
'price_unit': self.product_line_vals_1['price_unit'],
'tax_ids': [Command.set(self.product_line_vals_1['tax_ids'])],
}),
],
})
move.write({
'invoice_line_ids': [
Command.create({
'product_id': self.product_line_vals_2['product_id'],
'product_uom_id': self.product_line_vals_2['product_uom_id'],
'price_unit': self.product_line_vals_2['price_unit'],
'tax_ids': [Command.set(self.product_line_vals_2['tax_ids'])],
}),
],
})
self.assertInvoiceValues(move, [
{
**self.product_line_vals_1,
'currency_id': self.currency_data['currency'].id,
'amount_currency': 1000.0,
'debit': 500.0,
},
{
**self.product_line_vals_2,
'currency_id': self.currency_data['currency'].id,
'amount_currency': 200.0,
'debit': 100.0,
},
{
**self.tax_line_vals_1,
'currency_id': self.currency_data['currency'].id,
'amount_currency': 180.0,
'debit': 90.0,
},
{
**self.tax_line_vals_2,
'currency_id': self.currency_data['currency'].id,
'amount_currency': 30.0,
'debit': 15.0,
},
{
**self.term_line_vals_1,
'currency_id': self.currency_data['currency'].id,
'amount_currency': -1410.0,
'credit': 705.0,
},
], {
**self.move_vals,
'currency_id': self.currency_data['currency'].id,
})
|
jeremiahyan/odoo
|
addons/account/tests/test_account_move_out_refund.py
|
Python
|
gpl-3.0
| 34,577
|
from odoo import models, fields
import logging
# from dateutil.relativedelta import relativedelta
_logger = logging.getLogger(__name__)
class ResPartner(models.Model):
_inherit = "res.partner"
    # TODO rename field
arba_alicuot_ids = fields.One2many(
'res.partner.arba_alicuot',
'partner_id',
'Alícuotas PERC-RET',
)
drei = fields.Selection([
('activo', 'Activo'),
('no_activo', 'No Activo'),
],
string='DREI',
)
    # TODO add it in the migration to v10, since fix dbs cannot repair it
    # because the error is raised before the repair starts
# drei_number = fields.Char(
# )
default_regimen_ganancias_id = fields.Many2one(
'afip.tabla_ganancias.alicuotasymontos',
'Regimen Ganancias por Defecto',
)
class ResPartnerArbaAlicuot(models.Model):
# TODO rename model to res.partner.tax or similar
_name = "res.partner.arba_alicuot"
_description = "res.partner.arba_alicuot"
_order = "to_date desc, from_date desc, tag_id, company_id"
partner_id = fields.Many2one(
'res.partner',
required=True,
ondelete='cascade',
)
tag_id = fields.Many2one(
'account.account.tag',
domain=[('applicability', '=', 'taxes')],
required=True,
change_default=True,
)
company_id = fields.Many2one(
'res.company',
required=True,
ondelete='cascade',
default=lambda self: self.env.company,
)
from_date = fields.Date(
)
to_date = fields.Date(
)
numero_comprobante = fields.Char(
)
codigo_hash = fields.Char(
)
alicuota_percepcion = fields.Float(
)
alicuota_retencion = fields.Float(
)
grupo_percepcion = fields.Char(
)
grupo_retencion = fields.Char(
)
withholding_amount_type = fields.Selection([
('untaxed_amount', 'Untaxed Amount'),
('total_amount', 'Total Amount'),
],
'Base para retenciones',
help='Base amount used to get withholding amount',
)
regimen_percepcion = fields.Char(
size=3,
help="Utilizado para la generación del TXT para SIRCAR.\n"
"Tipo de Régimen de Percepción (código correspondiente según "
"tabla definida por la jurisdicción)"
)
regimen_retencion = fields.Char(
size=3,
help="Utilizado para la generación del TXT para SIRCAR.\n"
"Tipo de Régimen de Retención (código correspondiente según "
"tabla definida por la jurisdicción)"
)
api_codigo_articulo_retencion = fields.Selection([
('001', '001: Art.1 - inciso A - (Res. Gral. 15/97 y Modif.)'),
('002', '002: Art.1 - inciso B - (Res. Gral. 15/97 y Modif.)'),
('003', '003: Art.1 - inciso C - (Res. Gral. 15/97 y Modif.)'),
('004', '004: Art.1 - inciso D pto.1 - (Res. Gral. 15/97 y Modif.)'),
('005', '005: Art.1 - inciso D pto.2 - (Res. Gral. 15/97 y Modif.)'),
('006', '006: Art.1 - inciso D pto.3 - (Res. Gral. 15/97 y Modif.)'),
('007', '007: Art.1 - inciso E - (Res. Gral. 15/97 y Modif.)'),
('008', '008: Art.1 - inciso F - (Res. Gral. 15/97 y Modif.)'),
('009', '009: Art.1 - inciso H - (Res. Gral. 15/97 y Modif.)'),
('010', '010: Art.1 - inciso I - (Res. Gral. 15/97 y Modif.)'),
('011', '011: Art.1 - inciso J - (Res. Gral. 15/97 y Modif.)'),
('012', '012: Art.1 - inciso K - (Res. Gral. 15/97 y Modif.)'),
('013', '013: Art.1 - inciso L - (Res. Gral. 15/97 y Modif.)'),
('014', '014: Art.1 - inciso LL pto.1 - (Res. Gral. 15/97 y Modif.)'),
('015', '015: Art.1 - inciso LL pto.2 - (Res. Gral. 15/97 y Modif.)'),
('016', '016: Art.1 - inciso LL pto.3 - (Res. Gral. 15/97 y Modif.)'),
('017', '017: Art.1 - inciso LL pto.4 - (Res. Gral. 15/97 y Modif.)'),
('018', '018: Art.1 - inciso LL pto.5 - (Res. Gral. 15/97 y Modif.)'),
('019', '019: Art.1 - inciso M - (Res. Gral. 15/97 y Modif.)'),
('020', '020: Art.2 - (Res. Gral. 15/97 y Modif.)'),
],
string='Código de Artículo/Inciso por el que retiene',
)
api_codigo_articulo_percepcion = fields.Selection([
('021', '021: Art.10 - inciso A - (Res. Gral. 15/97 y Modif.)'),
('022', '022: Art.10 - inciso B - (Res. Gral. 15/97 y Modif.)'),
('023', '023: Art.10 - inciso D - (Res. Gral. 15/97 y Modif.)'),
('024', '024: Art.10 - inciso E - (Res. Gral. 15/97 y Modif.)'),
('025', '025: Art.10 - inciso F - (Res. Gral. 15/97 y Modif.)'),
('026', '026: Art.10 - inciso G - (Res. Gral. 15/97 y Modif.)'),
('027', '027: Art.10 - inciso H - (Res. Gral. 15/97 y Modif.)'),
('028', '028: Art.10 - inciso I - (Res. Gral. 15/97 y Modif.)'),
('029', '029: Art.10 - inciso J - (Res. Gral. 15/97 y Modif.)'),
('030', '030: Art.11 - (Res. Gral. 15/97 y Modif.)'),
],
string='Código de artículo Inciso por el que percibe',
)
api_articulo_inciso_calculo_selection = [
('001', '001: Art. 5º 1er. párrafo (Res. Gral. 15/97 y Modif.)'),
('002', '002: Art. 5º inciso 1)(Res. Gral. 15/97 y Modif.)'),
('003', '003: Art. 5° inciso 2)(Res. Gral. 15/97 y Modif.)'),
('004', '004: Art. 5º inciso 4)(Res. Gral. 15/97 y Modif.)'),
('005', '005: Art. 5° inciso 5)(Res. Gral. 15/97 y Modif.)'),
('006', '006: Art. 6º inciso a)(Res. Gral. 15/97 y Modif.)'),
('007', '007: Art. 6º inciso b)(Res. Gral. 15/97 y Modif.)'),
('008', '008: Art. 6º inciso c)(Res. Gral. 15/97 y Modif.)'),
('009', '009: Art. 12º)(Res. Gral. 15/97 y Modif.)'),
('010', '010: Art. 6º inciso d)(Res. Gral. 15/97 y Modif.)'),
('011', '011: Art. 5° inciso 6)(Res. Gral. 15/97 y Modif.)'),
('012', '012: Art. 5° inciso 3)(Res. Gral. 15/97 y Modif.)'),
('013', '013: Art. 5° inciso 7)(Res. Gral. 15/97 y Modif.)'),
('014', '014: Art. 5° inciso 8)(Res. Gral. 15/97 y Modif.)'),
]
api_articulo_inciso_calculo_percepcion = fields.Selection(
api_articulo_inciso_calculo_selection,
string='Artículo/Inciso para el cálculo percepción',
)
api_articulo_inciso_calculo_retencion = fields.Selection(
api_articulo_inciso_calculo_selection,
string='Artículo/Inciso para el cálculo retención',
)
|
ingadhoc/odoo-argentina
|
l10n_ar_account_withholding/models/res_partner.py
|
Python
|
agpl-3.0
| 6,436
|
import unittest
from mock import Mock
from opaque_keys.edx.locator import CourseLocator
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from xmodule.html_module import CourseInfoModule, HtmlDescriptor, HtmlModule
from . import get_test_descriptor_system, get_test_system
def instantiate_descriptor(**field_data):
"""
Instantiate descriptor with most properties.
"""
system = get_test_descriptor_system()
course_key = CourseLocator('org', 'course', 'run')
usage_key = course_key.make_usage_key('html', 'SampleHtml')
return system.construct_xblock_from_class(
HtmlDescriptor,
scope_ids=ScopeIds(None, None, usage_key, usage_key),
field_data=DictFieldData(field_data),
)
class HtmlModuleSubstitutionTestCase(unittest.TestCase):
descriptor = Mock()
def test_substitution_works(self):
sample_xml = '''%%USER_ID%%'''
field_data = DictFieldData({'data': sample_xml})
module_system = get_test_system()
module = HtmlModule(self.descriptor, module_system, field_data, Mock())
self.assertEqual(module.get_html(), str(module_system.anonymous_student_id))
def test_substitution_without_magic_string(self):
sample_xml = '''
<html>
<p>Hi USER_ID!11!</p>
</html>
'''
field_data = DictFieldData({'data': sample_xml})
module_system = get_test_system()
module = HtmlModule(self.descriptor, module_system, field_data, Mock())
self.assertEqual(module.get_html(), sample_xml)
def test_substitution_without_anonymous_student_id(self):
sample_xml = '''%%USER_ID%%'''
field_data = DictFieldData({'data': sample_xml})
module_system = get_test_system()
module_system.anonymous_student_id = None
module = HtmlModule(self.descriptor, module_system, field_data, Mock())
self.assertEqual(module.get_html(), sample_xml)
class HtmlDescriptorIndexingTestCase(unittest.TestCase):
"""
Make sure that HtmlDescriptor can format data for indexing as expected.
"""
def test_index_dictionary_simple_html_module(self):
sample_xml = '''
<html>
<p>Hello World!</p>
</html>
'''
descriptor = instantiate_descriptor(data=sample_xml)
self.assertEqual(descriptor.index_dictionary(), {
"content": {"html_content": " Hello World! ", "display_name": "Text"},
"content_type": "Text"
})
def test_index_dictionary_cdata_html_module(self):
sample_xml_cdata = '''
<html>
<p>This has CDATA in it.</p>
<![CDATA[This is just a CDATA!]]>
</html>
'''
descriptor = instantiate_descriptor(data=sample_xml_cdata)
self.assertEqual(descriptor.index_dictionary(), {
"content": {"html_content": " This has CDATA in it. ", "display_name": "Text"},
"content_type": "Text"
})
def test_index_dictionary_multiple_spaces_html_module(self):
sample_xml_tab_spaces = '''
<html>
<p> Text has spaces :) </p>
</html>
'''
descriptor = instantiate_descriptor(data=sample_xml_tab_spaces)
self.assertEqual(descriptor.index_dictionary(), {
"content": {"html_content": " Text has spaces :) ", "display_name": "Text"},
"content_type": "Text"
})
def test_index_dictionary_html_module_with_comment(self):
sample_xml_comment = '''
<html>
<p>This has HTML comment in it.</p>
<!-- Html Comment -->
</html>
'''
descriptor = instantiate_descriptor(data=sample_xml_comment)
self.assertEqual(descriptor.index_dictionary(), {
"content": {"html_content": " This has HTML comment in it. ", "display_name": "Text"},
"content_type": "Text"
})
def test_index_dictionary_html_module_with_both_comments_and_cdata(self):
sample_xml_mix_comment_cdata = '''
<html>
<!-- Beginning of the html -->
<p>This has HTML comment in it.<!-- Commenting Content --></p>
<!-- Here comes CDATA -->
<![CDATA[This is just a CDATA!]]>
<p>HTML end.</p>
</html>
'''
descriptor = instantiate_descriptor(data=sample_xml_mix_comment_cdata)
self.assertEqual(descriptor.index_dictionary(), {
"content": {"html_content": " This has HTML comment in it. HTML end. ", "display_name": "Text"},
"content_type": "Text"
})
def test_index_dictionary_html_module_with_script_and_style_tags(self):
sample_xml_style_script_tags = '''
<html>
<style>p {color: green;}</style>
<!-- Beginning of the html -->
<p>This has HTML comment in it.<!-- Commenting Content --></p>
<!-- Here comes CDATA -->
<![CDATA[This is just a CDATA!]]>
<p>HTML end.</p>
<script>
var message = "Hello world!"
</script>
</html>
'''
descriptor = instantiate_descriptor(data=sample_xml_style_script_tags)
self.assertEqual(descriptor.index_dictionary(), {
"content": {"html_content": " This has HTML comment in it. HTML end. ", "display_name": "Text"},
"content_type": "Text"
})
class CourseInfoModuleTestCase(unittest.TestCase):
"""
Make sure that CourseInfoModule renders updates properly.
"""
def test_updates_render(self):
"""
Tests that a course info module will render its updates, even if they are malformed.
"""
sample_update_data = [
{
"id": i,
"date": data,
"content": "This is a very important update!",
"status": CourseInfoModule.STATUS_VISIBLE,
} for i, data in enumerate(
[
'January 1, 1970',
'Marchtober 45, -1963',
'Welcome!',
'Date means "title", right?'
]
)
]
info_module = CourseInfoModule(
Mock(),
get_test_system(),
DictFieldData({'items': sample_update_data, 'data': ""}),
Mock()
)
# Prior to TNL-4115, an exception would be raised when trying to parse invalid dates in this method
try:
info_module.get_html()
except ValueError:
self.fail("CourseInfoModule could not parse an invalid date!")
def test_updates_order(self):
"""
Tests that a course info module will render its updates in the correct order.
"""
sample_update_data = [
{
"id": 3,
"date": "March 18, 1982",
"content": "This is a very important update that was inserted last with an older date!",
"status": CourseInfoModule.STATUS_VISIBLE,
},
{
"id": 1,
"date": "January 1, 2012",
"content": "This is a very important update that was inserted first!",
"status": CourseInfoModule.STATUS_VISIBLE,
},
{
"id": 2,
"date": "January 1, 2012",
"content": "This is a very important update that was inserted second!",
"status": CourseInfoModule.STATUS_VISIBLE,
}
]
info_module = CourseInfoModule(
Mock(),
Mock(),
DictFieldData({'items': sample_update_data, 'data': ""}),
Mock()
)
# This is the expected context that should be used by the render function
expected_context = {
'visible_updates': [
{
"id": 2,
"date": "January 1, 2012",
"content": "This is a very important update that was inserted second!",
"status": CourseInfoModule.STATUS_VISIBLE,
},
{
"id": 1,
"date": "January 1, 2012",
"content": "This is a very important update that was inserted first!",
"status": CourseInfoModule.STATUS_VISIBLE,
},
{
"id": 3,
"date": "March 18, 1982",
"content": "This is a very important update that was inserted last with an older date!",
"status": CourseInfoModule.STATUS_VISIBLE,
}
],
'hidden_updates': [],
}
template_name = "{0}/course_updates.html".format(info_module.TEMPLATE_DIR)
info_module.get_html()
# Assertion to validate that render function is called with the expected context
info_module.system.render_template.assert_called_once_with(
template_name,
expected_context
)
|
Lektorium-LLC/edx-platform
|
common/lib/xmodule/xmodule/tests/test_html_module.py
|
Python
|
agpl-3.0
| 9,284
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
from frappe.desk.reportview import get_match_cond
import frappe
@frappe.whitelist()
def get_items(price_list, sales_or_purchase, item=None):
company = frappe.db.sql("""select company from `tabUser` where name='%s'"""%(frappe.session.user),as_list=1)
condition = ""
order_by = ""
args = {"price_list": price_list}
if sales_or_purchase == "Sales":
condition = "i.is_sales_item=1 and i.company = '%s'"%(company[0][0])
else:
condition = "i.is_purchase_item=1"
if item:
# search serial no
item_code = frappe.db.sql("""select name as serial_no, item_code
from `tabSerial No` where name=%s""", (item), as_dict=1)
if item_code:
item_code[0]["name"] = item_code[0]["item_code"]
return item_code
# search barcode
item_code = frappe.db.sql("""select name, item_code from `tabItem`
where barcode=%s""",
(item), as_dict=1)
if item_code:
item_code[0]["barcode"] = item
return item_code
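# Neither a serial number nor a barcode matched: fall back to a fuzzy search on
# item name/code, variant and item group, ranked by closest match (see order_by).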
condition += " and ((CONCAT(i.name, i.item_name) like %(name)s) or (i.variant_of like %(name)s) or (i.item_group like %(name)s))"
order_by = """if(locate(%(_name)s, i.name), locate(%(_name)s, i.name), 99999),
if(locate(%(_name)s, i.item_name), locate(%(_name)s, i.item_name), 99999),
if(locate(%(_name)s, i.variant_of), locate(%(_name)s, i.variant_of), 99999),
if(locate(%(_name)s, i.item_group), locate(%(_name)s, i.item_group), 99999),"""
args["name"] = "%%%s%%" % frappe.db.escape(item)
args["_name"] = item.replace("%", "")
# locate function is used to sort by closest match from the beginning of the value
return frappe.db.sql("""select i.name, i.item_name, i.image,
item_det.price_list_rate, item_det.currency
from `tabItem` i LEFT JOIN
(select item_code, price_list_rate, currency from
`tabItem Price` where price_list=%(price_list)s) item_det
ON
(item_det.item_code=i.name or item_det.item_code=i.variant_of)
where
ifnull(i.has_variants, 0) = 0 and
{condition}
order by
{order_by}
i.name """.format(condition=condition, order_by=order_by), args, as_dict=1)
# @frappe.whitelist()
# def get_mobile_no(doctype, txt, searchfield, start, page_len, filters):
# get_cont = frappe.db.sql("""select mobile_no, customer from `tabContact` where customer is not null""",as_list=1)
# return get_cont
@frappe.whitelist()
def get_customer(mob_no):
get_cust = frappe.db.sql("""select customer from `tabContact` where mobile_no='%s'"""%(mob_no),as_list=1)
return get_cust
@frappe.whitelist()
def get_all_employee(doctype, txt, searchfield, start, page_len, filters):
company = frappe.db.sql("""select company from `tabUser` where name = '%s'"""%(frappe.session.user),as_list=1)
employees = frappe.db.sql("""select name from `tabEmployee` where company = '%s'"""%(company[0][0]),as_list=1)
return employees
@frappe.whitelist()
def get_mobile_no(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql("""select mobile_no, customer from `tabContact` where customer is not null
and ({key} like %(txt)s
or mobile_no like %(txt)s)
{mcond}
order by
if(locate(%(_txt)s, mobile_no), locate(%(_txt)s, mobile_no), 99999),
if(locate(%(_txt)s, customer), locate(%(_txt)s, customer), 99999),
mobile_no, customer
limit %(start)s, %(page_len)s""".format(**{
'key': searchfield,
'mcond': get_match_cond(doctype)
}), {
'txt': "%%%s%%" % txt,
'_txt': txt.replace("%", ""),
'start': start,
'page_len': page_len
})
|
gangadharkadam/saloon_erp_install
|
erpnext/accounts/doctype/sales_invoice/pos.py
|
Python
|
agpl-3.0
| 3,588
|
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2021 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from base.ddd.utils import business_validator
from program_management.ddd.business_types import *
from program_management.ddd.domain.exception import InvalidTreeVersionToFillFrom
class CheckValidTreeVersionToFillFrom(business_validator.BusinessValidator):
def __init__(self, tree_version_to_fill_from: 'ProgramTreeVersion', tree_version_to_fill: 'ProgramTreeVersion'):
self.tree_version_from = tree_version_to_fill_from
self.tree_version_to = tree_version_to_fill
super().__init__()
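# A tree version may be filled from last year's tree with the same code; a
# transition version may additionally be filled from the non-transition tree
# carrying the same version name, from last year or from the current year.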
def validate(self, *args, **kwargs):
if self.tree_version_to.is_standard and self.__is_tree_from_last_year_tree():
return
if self.tree_version_to.is_specific_official and self.__is_tree_from_last_year_tree():
return
if self.tree_version_to.is_transition and self.__is_tree_from_last_year_tree():
return
if self.tree_version_to.is_transition and self.__is_tree_from_last_year_same_version_tree():
return
if self.tree_version_to.is_transition and self.__is_tree_from_same_year_same_version_tree():
return
raise InvalidTreeVersionToFillFrom(self.tree_version_from)
def __is_tree_from_last_year_tree(self) -> bool:
return self.tree_version_from.academic_year == self.tree_version_to.academic_year.past() and \
self.tree_version_from.program_tree_identity.code == self.tree_version_to.program_tree_identity.code
def __is_tree_from_last_year_same_version_tree(self):
return self.tree_version_from.academic_year == self.tree_version_to.academic_year.past() and \
self.tree_version_from.version_name == self.tree_version_to.version_name and \
not self.tree_version_from.is_transition
def __is_tree_from_same_year_same_version_tree(self):
return self.tree_version_from.academic_year == self.tree_version_to.academic_year and \
self.tree_version_from.version_name == self.tree_version_to.version_name and \
not self.tree_version_from.is_transition
|
uclouvain/OSIS-Louvain
|
program_management/ddd/validators/_fill_check_tree_from.py
|
Python
|
agpl-3.0
| 3,276
|
# This file is part of Fedora Community.
# Copyright (C) 2008-2010 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
:mod:`fedoracommunity.connectors.jsonconnector` - Simple Json Connector
=======================================================================
This Connector works with any url which returns valid simplejson data
.. moduleauthor:: Seth Vidal <skvidal@fedoraproject.org>
"""
import logging
log = logging.getLogger(__name__)
from urllib import urlopen
import simplejson
from fedoracommunity.connectors.api import IConnector, ICall, IQuery
class SimpleJsonConnector(IConnector, ICall, IQuery):
_method_paths = {}
_query_paths = {}
def __init__(self, environ=None, request=None):
super(SimpleJsonConnector, self).__init__(environ, request)
# FIXME - sanity check this url or run it past a whitelist or what not
def call(self, url):
log.info('JsonConnector.call(%s)' % url)
self._url = url
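# Decoded JSON responses are cached per URL in the beaker 'json' cache for
# 30 minutes (expiretime=1800).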
json_cache = self._request.environ['beaker.cache'].get_cache('json')
return json_cache.get_value(key=url,
createfunc=self._get_json_url,
expiretime=1800)
def _get_json_url(self):
# FIXME - LOTS OF ERROR CHECKING PLEASE
# grab the json_url
json_fp = urlopen(self._url)
# decode it into python using simplejson
json_data = simplejson.load(json_fp)
json_fp.close()
# return the object you get from it
return json_data
|
Fale/fedora-packages
|
fedoracommunity/connectors/jsonconnector.py
|
Python
|
agpl-3.0
| 2,157
|
__doc__ = """
+-------+-----------------+----------------+-------------------+-------------+-----------------+
| step | owner | R1 | R2 | R3 | R4 |
+-------+-----------------+----------------+-------------------+-------------+-----------------+
| 2 | create dir | create dir | create dir | create dir | create dir |
| | share /test | | | | |
| | -> R1 R2 | | | | |
+-------+-----------------+----------------+-------------------+-------------+-----------------+
| 3 | | | reshare /test | | |
| | | | -> R3 | | |
| | | | reshare /test/sub | | |
| | | | -> R4 | | |
+-------+-----------------+----------------+-------------------+-------------+-----------------+
| 4 | get etags | get etags | get etags | get etags | get etags |
+-------+-----------------+----------------+-------------------+-------------+-----------------+
| 5 | upload to | | | | |
| | -> /test | | | | |
+-------+-----------------+----------------+-------------------+-------------+-----------------+
| 6 | propagation | propagation | propagation | propagation | NOT propagation |
+-------+-----------------+----------------+-------------------+-------------+-----------------+
| 7 | | | upload to | | |
| | | | -> /test | | |
+-------+-----------------+----------------+-------------------+-------------+-----------------+
| 8 | propagation | propagation | propagation | propagation | NOT propagation |
+-------+-----------------+----------------+-------------------+-------------+-----------------+
| 9 | upload to | | | | |
| | -> /test/sub | | | | |
+-------+-----------------+----------------+-------------------+-------------+-----------------+
| 10 | propagation | propagation | propagation | propagation | propagation |
+-------+-----------------+----------------+-------------------+-------------+-----------------+
| 11 | | upload to | | | |
| | | -> /test/sub | | | |
+-------+-----------------+----------------+-------------------+-------------+-----------------+
| 12 | propagation | propagation | propagation | propagation | propagation |
+-------+-----------------+----------------+-------------------+-------------+-----------------+
| 13 | | | | | upload to /sub |
+-------+-----------------+----------------+-------------------+-------------+-----------------+
| 14 | propagation | propagation | propagation | propagation | propagation |
+-------+-----------------+----------------+-------------------+-------------+-----------------+
| 15 | | | unshare | | |
| | | | -> /test/sub | | |
+-------+-----------------+----------------+-------------------+-------------+-----------------+
| 16 | NOT propagation | NOT | NOT propagation | NOT | propagation |
| | | propagation | | propagation | |
+-------+-----------------+----------------+-------------------+-------------+-----------------+
"""
from smashbox.utilities import *
import itertools
import os.path
import re
@add_worker
def setup(step):
step(1, 'create test users')
num_users = 5
# Create additional accounts
if config.oc_number_test_users < num_users:
for i in range(config.oc_number_test_users + 1, num_users + 1):
username = "%s%i" % (config.oc_account_name, i)
delete_owncloud_account(username)
create_owncloud_account(username, config.oc_account_password)
login_owncloud_account(username, config.oc_account_password)
check_users(num_users)
@add_worker
def owner(step):
user = '%s%i' % (config.oc_account_name, 1)
step(2, 'Create workdir')
d = make_workdir()
mkdir(os.path.join(d, 'test', 'sub'))
run_ocsync(d, user_num=1)
client = get_oc_api()
client.login(user, config.oc_account_password)
# make sure folder is shared
user2 = '%s%i' % (config.oc_account_name, 2)
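# perms=31 grants the full set of OCS share permissions
# (read|update|create|delete|share), so recipients can upload and re-share.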
share1_data = client.share_file_with_user('/test', user2, perms=31)
fatal_check(share1_data, 'failed sharing a file with %s' % (user2,))
user3 = '%s%i' % (config.oc_account_name, 3)
share1_data = client.share_file_with_user('/test', user3, perms=31)
fatal_check(share1_data, 'failed sharing a file with %s' % (user3,))
step(4, 'get base etags to compare')
root_etag = client.file_info('/').get_etag()
test_etag = client.file_info('/test').get_etag()
step(5, 'Upload to /test')
createfile(os.path.join(d, 'test', 'test2.txt'), '2', count=1000, bs=10)
run_ocsync(d, user_num=1)
step(6, 'verify etag propagation')
root_etag2 = client.file_info('/').get_etag()
error_check(root_etag != root_etag2, 'owner uploads to /test/test2.txt '
'etag for / previous [%s] new [%s]' % (root_etag, root_etag2))
step(8, 'verify etag propagation')
root_etag3 = client.file_info('/').get_etag()
error_check(root_etag2 != root_etag3, 'recipient2 uploads to /test/test3.txt '
'etag for / previous [%s] new [%s]' % (root_etag2, root_etag3))
step(9, 'Upload to /test/sub')
createfile(os.path.join(d, 'test', 'sub', 'test4.txt'), '4', count=1000, bs=10)
run_ocsync(d, user_num=1)
step(10, 'verify etag propagation')
root_etag4 = client.file_info('/').get_etag()
test_etag2 = client.file_info('/test').get_etag()
error_check(root_etag3 != root_etag4, 'owner uploads to /test/sub/test4.txt '
'etag for / previous [%s] new [%s]' % (root_etag3, root_etag4))
error_check(test_etag != test_etag2, 'owner uploads to /test/sub/test4.txt '
'etag for /test previous [%s] new [%s]' % (test_etag, test_etag2))
step(12, 'verify etag propagation')
root_etag5 = client.file_info('/').get_etag()
test_etag3 = client.file_info('/test').get_etag()
error_check(root_etag4 != root_etag5, 'recipient 1 uploads to /test/sub/test5.txt '
'etag for / previous [%s] new [%s]' % (root_etag4, root_etag5))
error_check(test_etag2 != test_etag3, 'recipient 1 uploads to /test/sub/test5.txt '
'etag for /test previous [%s] new [%s]' % (test_etag2, test_etag3))
step(14, 'verify etag propagation')
root_etag6 = client.file_info('/').get_etag()
test_etag4 = client.file_info('/test').get_etag()
error_check(root_etag5 != root_etag6, 'recipient 4 uploads to /sub/test6.txt through reshare '
'etag for / previous [%s] new [%s]' % (root_etag5, root_etag6))
error_check(test_etag3 != test_etag4, 'recipient 4 uploads to /sub/test6.txt through reshare '
'etag for /test previous [%s] new [%s]' % (test_etag3, test_etag4))
step(16, 'verify etag is NOT propagated')
root_etag7 = client.file_info('/').get_etag()
test_etag5 = client.file_info('/test').get_etag()
error_check(root_etag6 == root_etag7, 'recipient 2 unshares reshare '
'etag for / previous [%s] new [%s]' % (root_etag6, root_etag7))
error_check(test_etag4 == test_etag5, 'recipient 2 unshares reshare '
'etag for /test previous [%s] new [%s]' % (test_etag4, test_etag5))
@add_worker
def recipient1(step):
user = '%s%i' % (config.oc_account_name, 2)
step(2, 'Create workdir')
d = make_workdir()
run_ocsync(d, user_num=2)
client = get_oc_api()
client.login(user, config.oc_account_password)
step(4, 'get base etags to compare')
root_etag = client.file_info('/').get_etag()
test_etag = client.file_info('/test').get_etag()
step(6, 'verify etag propagation')
root_etag2 = client.file_info('/').get_etag()
error_check(root_etag != root_etag2, 'owner uploads to /test/test2.txt '
'etag for / previous [%s] new [%s]' % (root_etag, root_etag2))
step(8, 'verify etag propagation')
root_etag3 = client.file_info('/').get_etag()
error_check(root_etag2 != root_etag3, 'recipient2 uploads to /test/test3.txt '
'etag for / previous [%s] new [%s]' % (root_etag2, root_etag3))
step(10, 'verify etag propagation')
root_etag4 = client.file_info('/').get_etag()
test_etag2 = client.file_info('/test').get_etag()
error_check(root_etag3 != root_etag4, 'owner uploads to /test/sub/test4.txt '
'etag for / previous [%s] new [%s]' % (root_etag3, root_etag4))
error_check(test_etag != test_etag2, 'owner uploads to /test/sub/test4.txt '
'etag for /test previous [%s] new [%s]' % (test_etag, test_etag2))
step(11, 'Upload to /test/sub')
run_ocsync(d, user_num=2)
createfile(os.path.join(d, 'test', 'sub', 'test5.txt'), '5', count=1000, bs=10)
run_ocsync(d, user_num=2)
step(12, 'verify etag propagation')
root_etag5 = client.file_info('/').get_etag()
test_etag3 = client.file_info('/test').get_etag()
error_check(root_etag4 != root_etag5, 'recipient 1 uploads to /test/sub/test5.txt '
'etag for / previous [%s] new [%s]' % (root_etag4, root_etag5))
error_check(test_etag2 != test_etag3, 'recipient 1 uploads to /test/sub/test5.txt '
'etag for /test previous [%s] new [%s]' % (test_etag2, test_etag3))
step(14, 'verify etag propagation')
root_etag6 = client.file_info('/').get_etag()
test_etag4 = client.file_info('/test').get_etag()
error_check(root_etag5 != root_etag6, 'recipient 4 uploads to /sub/test6.txt through reshare '
'etag for / previous [%s] new [%s]' % (root_etag5, root_etag6))
error_check(test_etag3 != test_etag4, 'recipient 4 uploads to /sub/test6.txt through reshare '
'etag for /test previous [%s] new [%s]' % (test_etag3, test_etag4))
step(16, 'verify etag propagation')
root_etag7 = client.file_info('/').get_etag()
test_etag5 = client.file_info('/test').get_etag()
# not affected by the unshare
error_check(root_etag6 == root_etag7, 'recipient 2 unshares reshare '
'etag for / previous [%s] new [%s]' % (root_etag6, root_etag7))
error_check(test_etag4 == test_etag5, 'recipient 2 unshares reshare '
'etag for /test previous [%s] new [%s]' % (test_etag4, test_etag5))
@add_worker
def recipient2(step):
user = '%s%i' % (config.oc_account_name, 3)
step(2, 'Create workdir')
d = make_workdir()
run_ocsync(d, user_num=3)
client = get_oc_api()
client.login(user, config.oc_account_password)
root_etag = client.file_info('/').get_etag()
user4 = '%s%i' % (config.oc_account_name, 4)
user5 = '%s%i' % (config.oc_account_name, 5)
step(3, 'Reshare /test folder with %s and /test/sub with %s' % (user4, user5))
share1_data = client.share_file_with_user('/test', user4, perms=31)
fatal_check(share1_data, 'failed sharing a file with %s' % (user4,))
share2_data = client.share_file_with_user('/test/sub', user5, perms=31)
fatal_check(share2_data, 'failed sharing a file with %s' % (user5,))
step(4, 'get base etags to compare')
root_etag = client.file_info('/').get_etag()
test_etag = client.file_info('/test').get_etag()
step(6, 'verify etag propagation')
root_etag2 = client.file_info('/').get_etag()
error_check(root_etag != root_etag2, 'owner uploads to /test/test2.txt '
'etag for / previous [%s] new [%s]' % (root_etag, root_etag2))
step(7, 'Upload to /test')
run_ocsync(d, user_num=3)
createfile(os.path.join(d, 'test', 'test3.txt'), '3', count=1000, bs=10)
run_ocsync(d, user_num=3)
step(8, 'verify etag propagation')
root_etag3 = client.file_info('/').get_etag()
error_check(root_etag2 != root_etag3, 'recipient2 uploads to /test/test3.txt '
'etag for / previous [%s] new [%s]' % (root_etag2, root_etag3))
step(10, 'verify etag propagation')
root_etag4 = client.file_info('/').get_etag()
test_etag2 = client.file_info('/test').get_etag()
error_check(root_etag3 != root_etag4, 'owner uploads to /test/sub/test4.txt '
'etag for / previous [%s] new [%s]' % (root_etag3, root_etag4))
error_check(test_etag != test_etag2, 'owner uploads to /test/sub/test4.txt '
'etag for /test previous [%s] new [%s]' % (test_etag, test_etag2))
step(12, 'verify etag propagation')
root_etag5 = client.file_info('/').get_etag()
test_etag3 = client.file_info('/test').get_etag()
error_check(root_etag4 != root_etag5, 'recipient 1 uploads to /test/sub/test5.txt '
'etag for / previous [%s] new [%s]' % (root_etag4, root_etag5))
error_check(test_etag2 != test_etag3, 'recipient 1 uploads to /test/sub/test5.txt '
'etag for /test previous [%s] new [%s]' % (test_etag2, test_etag3))
step(14, 'verify etag propagation')
root_etag6 = client.file_info('/').get_etag()
test_etag4 = client.file_info('/test').get_etag()
error_check(root_etag5 != root_etag6, 'recipient 4 uploads to /sub/test6.txt through reshare '
'etag for / previous [%s] new [%s]' % (root_etag5, root_etag6))
error_check(test_etag3 != test_etag4, 'recipient 4 uploads to /sub/test6.txt through reshare '
'etag for /test previous [%s] new [%s]' % (test_etag3, test_etag4))
step(15, 'Unshare reshared /test/sub')
client.delete_share(share2_data.share_id)
step(16, 'verify etag propagation')
root_etag7 = client.file_info('/').get_etag()
test_etag5 = client.file_info('/test').get_etag()
error_check(root_etag6 == root_etag7, 'recipient 2 unshares reshare '
'etag for / previous [%s] new [%s]' % (root_etag6, root_etag7))
error_check(test_etag4 == test_etag5, 'recipient 2 unshares reshare '
'etag for /test previous [%s] new [%s]' % (test_etag4, test_etag5))
@add_worker
def recipient3(step):
user = '%s%i' % (config.oc_account_name, 4)
step(2, 'Create workdir')
d = make_workdir()
run_ocsync(d, user_num=4)
client = get_oc_api()
client.login(user, config.oc_account_password)
step(4, 'get base etags to compare')
root_etag = client.file_info('/').get_etag()
test_etag = client.file_info('/test').get_etag()
step(6, 'verify etag propagation')
root_etag2 = client.file_info('/').get_etag()
error_check(root_etag != root_etag2, 'owner uploads to /test/test2.txt '
'etag for / previous [%s] new [%s]' % (root_etag, root_etag2))
step(8, 'verify etag propagation')
root_etag3 = client.file_info('/').get_etag()
error_check(root_etag2 != root_etag3, 'recipient2 uploads to /test/test3.txt '
'etag for / previous [%s] new [%s]' % (root_etag2, root_etag3))
step(10, 'verify etag propagation')
root_etag4 = client.file_info('/').get_etag()
test_etag2 = client.file_info('/test').get_etag()
error_check(root_etag3 != root_etag4, 'owner uploads to /test/sub/test4.txt '
'etag for / previous [%s] new [%s]' % (root_etag3, root_etag4))
error_check(test_etag != test_etag2, 'owner uploads to /test/sub/test4.txt '
'etag for /test previous [%s] new [%s]' % (test_etag, test_etag2))
step(12, 'verify etag propagation')
root_etag5 = client.file_info('/').get_etag()
test_etag3 = client.file_info('/test').get_etag()
error_check(root_etag4 != root_etag5, 'recipient 1 uploads to /test/sub/test5.txt '
'etag for / previous [%s] new [%s]' % (root_etag4, root_etag5))
error_check(test_etag2 != test_etag3, 'recipient 1 uploads to /test/sub/test5.txt '
'etag for /test previous [%s] new [%s]' % (test_etag2, test_etag3))
step(14, 'verify etag propagation')
root_etag6 = client.file_info('/').get_etag()
test_etag4 = client.file_info('/test').get_etag()
error_check(root_etag5 != root_etag6, 'recipient 4 uploads to /sub/test6.txt through reshare '
'etag for / previous [%s] new [%s]' % (root_etag5, root_etag6))
error_check(test_etag3 != test_etag4, 'recipient 4 uploads to /sub/test6.txt through reshare '
'etag for /test previous [%s] new [%s]' % (test_etag3, test_etag4))
step(16, 'verify etag propagation')
root_etag7 = client.file_info('/').get_etag()
test_etag5 = client.file_info('/test').get_etag()
error_check(root_etag6 == root_etag7, 'recipient 2 unshares reshare '
'etag for / previous [%s] new [%s]' % (root_etag6, root_etag7))
error_check(test_etag4 == test_etag5, 'recipient 2 unshares reshare '
'etag for /test previous [%s] new [%s]' % (test_etag4, test_etag5))
@add_worker
def recipient4(step):
user = '%s%i' % (config.oc_account_name, 5)
step(2, 'Create workdir')
d = make_workdir()
run_ocsync(d, user_num=5)
client = get_oc_api()
client.login(user, config.oc_account_password)
step(4, 'get base etags to compare')
root_etag = client.file_info('/').get_etag()
sub_etag = client.file_info('/sub').get_etag()
step(6, 'verify etag is NOT propagated')
root_etag2 = client.file_info('/').get_etag()
error_check(root_etag == root_etag2, 'owner uploads to /test/test2.txt '
'etag for / previous [%s] new [%s]' % (root_etag, root_etag2))
step(8, 'verify etag is NOT propagated')
root_etag3 = client.file_info('/').get_etag()
error_check(root_etag2 == root_etag3, 'recipient2 uploads to /test/test3.txt '
'etag for / previous [%s] new [%s]' % (root_etag2, root_etag3))
step(10, 'verify etag propagation')
root_etag4 = client.file_info('/').get_etag()
sub_etag2 = client.file_info('/sub').get_etag()
error_check(root_etag3 != root_etag4, 'owner uploads to /test/sub/test4.txt '
'etag for / previous [%s] new [%s]' % (root_etag3, root_etag4))
error_check(sub_etag != sub_etag2, 'owner uploads to /test/sub/test4.txt '
'etag for /sub previous [%s] new [%s]' % (sub_etag, sub_etag2))
step(12, 'verify etag propagation')
root_etag5 = client.file_info('/').get_etag()
sub_etag3 = client.file_info('/sub').get_etag()
error_check(root_etag4 != root_etag5, 'recipient 1 uploads to /test/sub/test5.txt '
'etag for / previous [%s] new [%s]' % (root_etag4, root_etag5))
error_check(sub_etag2 != sub_etag3, 'recipient 1 uploads to /test/sub/test5.txt '
'etag for /sub previous [%s] new [%s]' % (sub_etag2, sub_etag3))
step(13, 'Upload to /sub')
run_ocsync(d, user_num=5)
createfile(os.path.join(d, 'sub', 'test6.txt'), '6', count=1000, bs=10)
run_ocsync(d, user_num=5)
step(14, 'verify etag propagation')
root_etag6 = client.file_info('/').get_etag()
sub_etag4 = client.file_info('/sub').get_etag()
error_check(root_etag5 != root_etag6, 'recipient 4 uploads to /sub/test6.txt through reshare '
'etag for / previous [%s] new [%s]' % (root_etag5, root_etag6))
error_check(sub_etag3 != sub_etag4, 'recipient 4 uploads to /sub/test6.txt through reshare '
'etag for /sub previous [%s] new [%s]' % (sub_etag3, sub_etag4))
step(16, 'verify etag propagation')
root_etag7 = client.file_info('/').get_etag()
error_check(root_etag6 != root_etag7, 'recipient 2 unshares reshare '
'etag for / previous [%s] new [%s]' % (root_etag6, root_etag7))
# /sub folder should be deleted at this point, so no checking
|
owncloud/smashbox
|
lib/owncloud/test_sharePropagationInside.py
|
Python
|
agpl-3.0
| 20,720
|
#!/usr/bin/env python
import mclevelbase
import mclevel
import infiniteworld
import sys
import os
from box import BoundingBox, Vector
import numpy
from numpy import zeros, bincount
import logging
import itertools
import traceback
import shlex
import operator
import codecs
from math import floor
try:
import readline # if available, used by raw_input()
except:
pass
class UsageError(RuntimeError):
pass
class BlockMatchError(RuntimeError):
pass
class PlayerNotFound(RuntimeError):
pass
class mce(object):
"""
Block commands:
{commandPrefix}clone <sourceBox> <destPoint> [noair] [nowater]
{commandPrefix}fill <blockType> [ <box> ]
{commandPrefix}replace <blockType> [with] <newBlockType> [ <box> ]
{commandPrefix}export <filename> <sourceBox>
{commandPrefix}import <filename> <destPoint> [noair] [nowater]
{commandPrefix}createChest <point> <item> [ <count> ]
{commandPrefix}analyze
Player commands:
{commandPrefix}player [ <player> [ <point> ] ]
{commandPrefix}spawn [ <point> ]
Entity commands:
{commandPrefix}removeEntities [ <EntityID> ]
{commandPrefix}dumpSigns [ <filename> ]
{commandPrefix}dumpChests [ <filename> ]
Chunk commands:
{commandPrefix}createChunks <box>
{commandPrefix}deleteChunks <box>
{commandPrefix}prune <box>
{commandPrefix}relight [ <box> ]
World commands:
{commandPrefix}create <filename>
{commandPrefix}dimension [ <dim> ]
{commandPrefix}degrief
{commandPrefix}time [ <time> ]
{commandPrefix}worldsize
{commandPrefix}heightmap <filename>
{commandPrefix}randomseed [ <seed> ]
{commandPrefix}gametype [ <player> [ <gametype> ] ]
Editor commands:
{commandPrefix}save
{commandPrefix}reload
{commandPrefix}load <filename> | <world number>
{commandPrefix}execute <filename>
{commandPrefix}quit
Informational:
{commandPrefix}blocks [ <block name> | <block ID> ]
{commandPrefix}help [ <command> ]
**IMPORTANT**
{commandPrefix}box
Type 'box' to learn how to specify points and areas.
"""
random_seed = os.getenv('MCE_RANDOM_SEED', None)
last_played = os.getenv("MCE_LAST_PLAYED", None)
def commandUsage(self, command):
" returns usage info for the named command - just give the docstring for the handler func "
func = getattr(self, "_" + command)
return func.__doc__
commands = [
"clone",
"fill",
"replace",
"export",
"execute",
"import",
"createchest",
"player",
"spawn",
"removeentities",
"dumpsigns",
"dumpchests",
"createchunks",
"deletechunks",
"prune",
"relight",
"create",
"degrief",
"time",
"worldsize",
"heightmap",
"randomseed",
"gametype",
"save",
"load",
"reload",
"dimension",
"repair",
"quit",
"exit",
"help",
"blocks",
"analyze",
"region",
"debug",
"log",
"box",
]
debug = False
needsSave = False
def readInt(self, command):
try:
val = int(command.pop(0))
except ValueError:
raise UsageError("Cannot understand numeric input")
return val
def prettySplit(self, command):
cmdstring = " ".join(command)
lex = shlex.shlex(cmdstring)
lex.whitespace_split = True
lex.whitespace += "(),"
command[:] = list(lex)
def readBox(self, command):
self.prettySplit(command)
sourcePoint = self.readIntPoint(command)
if command[0].lower() == "to":
command.pop(0)
sourcePoint2 = self.readIntPoint(command)
sourceSize = sourcePoint2 - sourcePoint
else:
sourceSize = self.readIntPoint(command, isPoint=False)
if len([p for p in sourceSize if p <= 0]):
raise UsageError("Box size cannot be zero or negative")
box = BoundingBox(sourcePoint, sourceSize)
return box
def readIntPoint(self, command, isPoint=True):
point = self.readPoint(command, isPoint)
point = map(int, map(floor, point))
return Vector(*point)
def readPoint(self, command, isPoint=True):
self.prettySplit(command)
try:
word = command.pop(0)
if isPoint and (word in self.level.players):
x, y, z = self.level.getPlayerPosition(word)
if len(command) and command[0].lower() == "delta":
command.pop(0)
try:
x += int(command.pop(0))
y += int(command.pop(0))
z += int(command.pop(0))
except ValueError:
raise UsageError("Error decoding point input (expected a number).")
return x, y, z
except IndexError:
raise UsageError("Error decoding point input (expected more values).")
try:
try:
x = float(word)
except ValueError:
if isPoint:
raise PlayerNotFound(word)
raise
y = float(command.pop(0))
z = float(command.pop(0))
except ValueError:
raise UsageError("Error decoding point input (expected a number).")
except IndexError:
raise UsageError("Error decoding point input (expected more values).")
return x, y, z
def readBlockInfo(self, command):
keyword = command.pop(0)
matches = self.level.materials.blocksMatching(keyword)
blockInfo = None
if len(matches):
if len(matches) == 1:
blockInfo = matches[0]
# eat up more words that possibly specify a block. stop eating when 0 matching blocks.
while len(command):
newMatches = self.level.materials.blocksMatching(keyword + " " + command[0])
if len(newMatches) == 1:
blockInfo = newMatches[0]
if len(newMatches) > 0:
matches = newMatches
keyword = keyword + " " + command.pop(0)
else:
break
else:
try:
data = 0
if ":" in keyword:
blockID, data = map(int, keyword.split(":"))
else:
blockID = int(keyword)
blockInfo = self.level.materials.blockWithID(blockID, data)
except ValueError:
blockInfo = None
if blockInfo is None:
print "Ambiguous block specifier: ", keyword
if len(matches):
print "Matches: "
for m in matches:
if m == self.level.materials.defaultName:
continue
print "{0:3}:{1:<2} : {2}".format(m.ID, m.blockData, m.name)
else:
print "No blocks matched."
raise BlockMatchError
return blockInfo
def readBlocksToCopy(self, command):
blocksToCopy = range(256)
while len(command):
word = command.pop()
if word == "noair":
blocksToCopy.remove(0)
if word == "nowater":
blocksToCopy.remove(8)
blocksToCopy.remove(9)
return blocksToCopy
def _box(self, command):
"""
Boxes:
Many commands require a <box> as arguments. A box can be specified with
a point and a size:
(12, 5, 15), (5, 5, 5)
or with two points, making sure to put the keyword "to" between them:
(12, 5, 15) to (17, 10, 20)
The commas and parentheses are not important.
You may add them for improved readability.
Points:
Points and sizes are triplets of numbers ordered X Y Z.
X is position north-south, increasing southward.
Y is position up-down, increasing upward.
Z is position east-west, increasing westward.
Players:
A player's name can be used as a point - it will use the
position of the player's head. Use the keyword 'delta' after
the name to specify a point near the player.
Example:
codewarrior delta 0 5 0
This refers to a point 5 blocks above codewarrior's head.
"""
raise UsageError
def _debug(self, command):
self.debug = not self.debug
print "Debug", ("disabled", "enabled")[self.debug]
def _log(self, command):
"""
log [ <number> ]
Get or set the log threshold. 0 logs everything; 50 only logs major errors.
"""
if len(command):
try:
logging.getLogger().level = int(command[0])
except ValueError:
raise UsageError("Cannot understand numeric input.")
else:
print "Log level: {0}".format(logging.getLogger().level)
def _clone(self, command):
"""
clone <sourceBox> <destPoint> [noair] [nowater]
Clone blocks in a cuboid starting at sourcePoint and extending for
sourceSize blocks in each direction. Blocks and entities in the area
are cloned at destPoint.
"""
if len(command) == 0:
self.printUsage("clone")
return
box = self.readBox(command)
destPoint = self.readPoint(command)
destPoint = map(int, map(floor, destPoint))
blocksToCopy = self.readBlocksToCopy(command)
tempSchematic = self.level.extractSchematic(box)
self.level.copyBlocksFrom(tempSchematic, BoundingBox((0, 0, 0), box.origin), destPoint, blocksToCopy)
self.needsSave = True
print "Cloned 0 blocks."
def _fill(self, command):
"""
fill <blockType> [ <box> ]
Fill blocks with blockType in a cuboid starting at point and
extending for size blocks in each direction. Without a
destination, fills the whole world. blockType may be a
number from 0-255 or a name listed by the 'blocks' command.
"""
if len(command) == 0:
self.printUsage("fill")
return
blockInfo = self.readBlockInfo(command)
if len(command):
box = self.readBox(command)
else:
box = None
print "Filling with {0}".format(blockInfo.name)
self.level.fillBlocks(box, blockInfo)
self.needsSave = True
print "Filled {0} blocks.".format("all" if box is None else box.volume)
def _replace(self, command):
"""
replace <blockType> [with] <newBlockType> [ <box> ]
Replace all blockType blocks with newBlockType in a cuboid
starting at point and extending for size blocks in
each direction. Without a destination, replaces blocks over
the whole world. blockType and newBlockType may be numbers
from 0-255 or names listed by the 'blocks' command.
"""
if len(command) == 0:
self.printUsage("replace")
return
blockInfo = self.readBlockInfo(command)
if command[0].lower() == "with":
command.pop(0)
newBlockInfo = self.readBlockInfo(command)
if len(command):
box = self.readBox(command)
else:
box = None
print "Replacing {0} with {1}".format(blockInfo.name, newBlockInfo.name)
self.level.fillBlocks(box, newBlockInfo, blocksToReplace=[blockInfo])
self.needsSave = True
print "Done."
def _createchest(self, command):
"""
createChest <point> <item> [ <count> ]
Create a chest filled with the specified item.
Stacks are 64 if count is not given.
"""
point = map(lambda x: int(floor(float(x))), self.readPoint(command))
itemID = self.readInt(command)
count = 64
if len(command):
count = self.readInt(command)
chest = mclevel.MCSchematic.chestWithItemID(itemID, count)
self.level.copyBlocksFrom(chest, chest.bounds, point)
self.needsSave = True
def _analyze(self, command):
"""
analyze
Counts all of the block types in every chunk of the world.
"""
blockCounts = zeros((4096,), 'uint64')
sizeOnDisk = 0
print "Analyzing {0} chunks...".format(self.level.chunkCount)
# for input to bincount, create an array of uint16s by
# shifting the data left and adding the blocks
for i, cPos in enumerate(self.level.allChunks, 1):
ch = self.level.getChunk(*cPos)
btypes = numpy.array(ch.Data.ravel(), dtype='uint16')
btypes <<= 8
btypes += ch.Blocks.ravel()
counts = bincount(btypes)
blockCounts[:counts.shape[0]] += counts
if i % 100 == 0:
logging.info("Chunk {0}...".format(i))
for blockID in range(256):
block = self.level.materials.blockWithID(blockID, 0)
if block.hasVariants:
for data in range(16):
i = (data << 8) + blockID
if blockCounts[i]:
idstring = "({id}:{data})".format(id=blockID, data=data)
print "{idstring:9} {name:30}: {count:<10}".format(
idstring=idstring, name=self.level.materials.blockWithID(blockID, data).name, count=blockCounts[i])
else:
count = int(sum(blockCounts[(d << 8) + blockID] for d in range(16)))
if count:
idstring = "({id})".format(id=blockID)
print "{idstring:9} {name:30}: {count:<10}".format(
idstring=idstring, name=self.level.materials.blockWithID(blockID, 0).name, count=count)
self.needsSave = True
def _export(self, command):
"""
export <filename> <sourceBox>
Exports blocks in the specified region to a file in schematic format.
This file can be imported with mce or MCEdit.
"""
if len(command) == 0:
self.printUsage("export")
return
filename = command.pop(0)
box = self.readBox(command)
tempSchematic = self.level.extractSchematic(box)
tempSchematic.saveToFile(filename)
print "Exported {0} blocks.".format(tempSchematic.bounds.volume)
def _import(self, command):
"""
import <filename> <destPoint> [noair] [nowater]
Imports a level or schematic into this world, beginning at destPoint.
Supported formats include
- Alpha single or multiplayer world folder containing level.dat,
- Zipfile containing Alpha world folder,
- Classic single-player .mine,
- Classic multiplayer server_level.dat,
- Indev .mclevel
- Schematic from RedstoneSim, MCEdit, mce
- .inv from INVEdit (appears as a chest)
"""
if len(command) == 0:
self.printUsage("import")
return
filename = command.pop(0)
destPoint = self.readPoint(command)
blocksToCopy = self.readBlocksToCopy(command)
importLevel = mclevel.fromFile(filename)
self.level.copyBlocksFrom(importLevel, importLevel.bounds, destPoint, blocksToCopy, create=True)
self.needsSave = True
print "Imported {0} blocks.".format(importLevel.bounds.volume)
def _player(self, command):
"""
player [ <player> [ <point> ] ]
Move the named player to the specified point.
Without a point, prints the named player's position.
Without a player, prints all players and positions.
In a single-player world, the player is named Player.
"""
if len(command) == 0:
print "Players: "
for player in self.level.players:
print " {0}: {1}".format(player, self.level.getPlayerPosition(player))
return
player = command.pop(0)
if len(command) == 0:
print "Player {0}: {1}".format(player, self.level.getPlayerPosition(player))
return
point = self.readPoint(command)
self.level.setPlayerPosition(point, player)
self.needsSave = True
print "Moved player {0} to {1}".format(player, point)
def _spawn(self, command):
"""
spawn [ <point> ]
Move the world's spawn point.
Without a point, prints the world's spawn point.
"""
if len(command):
point = self.readPoint(command)
point = map(int, map(floor, point))
self.level.setPlayerSpawnPosition(point)
self.needsSave = True
print "Moved spawn point to ", point
else:
print "Spawn point: ", self.level.playerSpawnPosition()
def _dumpsigns(self, command):
"""
dumpSigns [ <filename> ]
Saves the text and location of every sign in the world to a text file.
With no filename, saves signs to <worldname>.signs
Output is newline-delimited. 5 lines per sign. Coordinates are
on the first line, followed by four lines of sign text. For example:
[229, 118, -15]
"To boldy go
where no man
has gone
before."
Coordinates are ordered the same as point inputs:
[North/South, Down/Up, East/West]
"""
if len(command):
filename = command[0]
else:
filename = self.level.displayName + ".signs"
# It appears that Minecraft interprets the sign text as UTF-8,
# so we should decode it as such too.
decodeSignText = codecs.getdecoder('utf-8')
# We happen to encode the output file in UTF-8 too, although
# we could use another UTF encoding. The '-sig' encoding puts
# a signature at the start of the output file that tools such
# as Microsoft Windows Notepad and Emacs understand to mean
# the file has UTF-8 encoding.
outFile = codecs.open(filename, "w", encoding='utf-8-sig')
print "Dumping signs..."
signCount = 0
for i, cPos in enumerate(self.level.allChunks):
try:
chunk = self.level.getChunk(*cPos)
except mclevelbase.ChunkMalformed:
continue
for tileEntity in chunk.TileEntities:
if tileEntity["id"].value == "Sign":
signCount += 1
outFile.write(str(map(lambda x: tileEntity[x].value, "xyz")) + "\n")
for i in range(4):
signText = tileEntity["Text{0}".format(i + 1)].value
outFile.write(decodeSignText(signText)[0] + u"\n")
if i % 100 == 0:
print "Chunk {0}...".format(i)
print "Dumped {0} signs to {1}".format(signCount, filename)
outFile.close()
def _region(self, command):
"""
region [rx rz]
List region files in this world.
"""
level = self.level
assert(isinstance(level, mclevel.MCInfdevOldLevel))
assert level.version
def getFreeSectors(rf):
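# Collect (start, length) runs of consecutive free sectors in this region file.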
runs = []
start = None
count = 0
for i, free in enumerate(rf.freeSectors):
if free:
if start is None:
start = i
count = 1
else:
count += 1
else:
if start is None:
pass
else:
runs.append((start, count))
start = None
count = 0
# don't drop a run of free sectors that extends to the end of the file
if start is not None:
    runs.append((start, count))
return runs
def printFreeSectors(runs):
for i, (start, count) in enumerate(runs):
if i % 4 == 3:
print ""
print "{start:>6}+{count:<4}".format(**locals()),
print ""
if len(command):
if len(command) > 1:
rx, rz = map(int, command[:2])
print "Calling allChunks to preload region files: %d chunks" % len(level.allChunks)
rf = level.regionFiles.get((rx, rz))
if rf is None:
print "Region {rx},{rz} not found.".format(**locals())
return
print "Region {rx:6}, {rz:6}: {used}/{sectors} sectors".format(used=rf.usedSectors, sectors=rf.sectorCount)
print "Offset Table:"
for cx in range(32):
for cz in range(32):
if cz % 4 == 0:
print ""
print "{0:3}, {1:3}: ".format(cx, cz),
off = rf.getOffset(cx, cz)
sector, length = off >> 8, off & 0xff
print "{sector:>6}+{length:<2} ".format(**locals()),
print ""
runs = getFreeSectors(rf)
if len(runs):
print "Free sectors:",
printFreeSectors(runs)
else:
if command[0] == "free":
print "Calling allChunks to preload region files: %d chunks" % len(level.allChunks)
for (rx, rz), rf in level.regionFiles.iteritems():
runs = getFreeSectors(rf)
if len(runs):
print "R {0:3}, {1:3}:".format(rx, rz),
printFreeSectors(runs)
else:
print "Calling allChunks to preload region files: %d chunks" % len(level.allChunks)
coords = (r for r in level.regionFiles)
for i, (rx, rz) in enumerate(coords):
print "({rx:6}, {rz:6}): {count}, ".format(count=level.regionFiles[rx, rz].chunkCount),
if i % 5 == 4:
print ""
def _repair(self, command):
"""
repair
Attempt to repair inconsistent region files.
MAKE A BACKUP. WILL DELETE YOUR DATA.
Scans for and repairs errors in region files:
Deletes chunks whose sectors overlap with another chunk
Rearranges chunks that are in the wrong slot in the offset table
Deletes completely unreadable chunks
Only usable with region-format saves.
"""
if self.level.version:
self.level.preloadRegions()
for rf in self.level.regionFiles.itervalues():
rf.repair()
def _dumpchests(self, command):
"""
dumpChests [ <filename> ]
Saves the content and location of every chest in the world to a text file.
With no filename, saves chests to <worldname>.chests
Output is delimited by brackets and newlines. A set of coordinates in
brackets begins a chest, followed by a line for each inventory slot.
For example:
[222, 51, 22]
2 String
3 String
3 Iron bar
Coordinates are ordered the same as point inputs:
[North/South, Down/Up, East/West]
"""
from items import items
if len(command):
filename = command[0]
else:
filename = self.level.displayName + ".chests"
outFile = file(filename, "w")
print "Dumping chests..."
chestCount = 0
for i, cPos in enumerate(self.level.allChunks):
try:
chunk = self.level.getChunk(*cPos)
except mclevelbase.ChunkMalformed:
continue
for tileEntity in chunk.TileEntities:
if tileEntity["id"].value == "Chest":
chestCount += 1
outFile.write(str(map(lambda x: tileEntity[x].value, "xyz")) + "\n")
itemsTag = tileEntity["Items"]
if len(itemsTag):
for itemTag in itemsTag:
try:
id = itemTag["id"].value
damage = itemTag["Damage"].value
item = items.findItem(id, damage)
itemname = item.name
except KeyError:
itemname = "Unknown Item {0}".format(itemTag)
except Exception, e:
itemname = repr(e)
outFile.write("{0} {1}\n".format(itemTag["Count"].value, itemname))
else:
outFile.write("Empty Chest\n")
if i % 100 == 0:
print "Chunk {0}...".format(i)
print "Dumped {0} chests to {1}".format(chestCount, filename)
outFile.close()
def _removeentities(self, command):
"""
removeEntities [ [except] [ <EntityID> [ <EntityID> ... ] ] ]
Remove all entities matching one or more entity IDs.
With the except keyword, removes all entities not
matching one or more entity IDs.
Without any IDs, removes all entities in the world,
except for Paintings.
Known Mob Entity IDs:
Mob Monster Creeper Skeleton Spider Giant
Zombie Slime Pig Sheep Cow Chicken
Known Item Entity IDs: Item Arrow Snowball Painting
Known Vehicle Entity IDs: Minecart Boat
Known Dynamic Tile Entity IDs: PrimedTnt FallingSand
"""
ENT_MATCHTYPE_ANY = 0
ENT_MATCHTYPE_EXCEPT = 1
ENT_MATCHTYPE_NONPAINTING = 2
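# Match modes: ANY removes entities whose id is listed, EXCEPT removes all
# entities except the listed ids, NONPAINTING removes everything but Paintings.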
def match(entityID, matchType, matchWords):
if ENT_MATCHTYPE_ANY == matchType:
return entityID.lower() in matchWords
elif ENT_MATCHTYPE_EXCEPT == matchType:
return not (entityID.lower() in matchWords)
else:
# ENT_MATCHTYPE_NONPAINTING == matchType
return entityID != "Painting"
removedEntities = {}
match_words = []
if len(command):
if command[0].lower() == "except":
command.pop(0)
print "Removing all entities except ", command
match_type = ENT_MATCHTYPE_EXCEPT
else:
print "Removing {0}...".format(", ".join(command))
match_type = ENT_MATCHTYPE_ANY
match_words = map(lambda x: x.lower(), command)
else:
print "Removing all entities except Painting..."
match_type = ENT_MATCHTYPE_NONPAINTING
for cx, cz in self.level.allChunks:
chunk = self.level.getChunk(cx, cz)
entitiesRemoved = 0
for entity in list(chunk.Entities):
entityID = entity["id"].value
if match(entityID, match_type, match_words):
removedEntities[entityID] = removedEntities.get(entityID, 0) + 1
chunk.Entities.remove(entity)
entitiesRemoved += 1
if entitiesRemoved:
chunk.chunkChanged(False)
if len(removedEntities) == 0:
print "No entities to remove."
else:
print "Removed entities:"
for entityID in sorted(removedEntities.keys()):
print " {0}: {1:6}".format(entityID, removedEntities[entityID])
self.needsSave = True
def _createchunks(self, command):
"""
createChunks <box>
Creates any chunks not present in the specified region.
New chunks are filled with only air. New chunks are written
to disk immediately.
"""
if len(command) == 0:
self.printUsage("createchunks")
return
box = self.readBox(command)
chunksCreated = self.level.createChunksInBox(box)
print "Created {0} chunks." .format(len(chunksCreated))
self.needsSave = True
def _deletechunks(self, command):
"""
deleteChunks <box>
Removes all chunks contained in the specified region.
Chunks are deleted from disk immediately.
"""
if len(command) == 0:
self.printUsage("deletechunks")
return
box = self.readBox(command)
deletedChunks = self.level.deleteChunksInBox(box)
print "Deleted {0} chunks." .format(len(deletedChunks))
def _prune(self, command):
"""
prune <box>
Removes all chunks not contained in the specified region. Useful for enforcing a finite map size.
Chunks are deleted from disk immediately.
"""
if len(command) == 0:
self.printUsage("prune")
return
box = self.readBox(command)
i = 0
for cx, cz in list(self.level.allChunks):
if cx < box.mincx or cx >= box.maxcx or cz < box.mincz or cz >= box.maxcz:
self.level.deleteChunk(cx, cz)
i += 1
print "Pruned {0} chunks." .format(i)
def _relight(self, command):
"""
relight [ <box> ]
Recalculates lights in the region specified. If omitted,
recalculates the entire world.
"""
if len(command):
box = self.readBox(command)
chunks = itertools.product(range(box.mincx, box.maxcx), range(box.mincz, box.maxcz))
else:
chunks = self.level.allChunks
self.level.generateLights(chunks)
print "Relit 0 chunks."
self.needsSave = True
def _create(self, command):
"""
create [ <filename> ]
Create and load a new Minecraft Alpha world. This world will have no
chunks and a random terrain seed. If run from the shell, filename is not
needed because you already specified a filename earlier in the command.
For example:
mce.py MyWorld create
"""
if len(command) < 1:
raise UsageError("Expected a filename")
filename = command[0]
if not os.path.exists(filename):
os.mkdir(filename)
if not os.path.isdir(filename):
raise IOError("{0} already exists".format(filename))
if mclevel.MCInfdevOldLevel.isLevel(filename):
raise IOError("{0} is already a Minecraft Alpha world".format(filename))
level = mclevel.MCInfdevOldLevel(filename, create=True)
self.level = level
def _degrief(self, command):
"""
degrief [ <height> ]
Reverse a few forms of griefing by removing
Adminium, Obsidian, Fire, and Lava wherever
they occur above the specified height.
Without a height, uses height level 32.
Removes natural surface lava.
Also see removeEntities
"""
box = self.level.bounds
box = BoundingBox(box.origin + (0, 32, 0), box.size - (0, 32, 0))
if len(command):
try:
box.miny = int(command[0])
except ValueError:
pass
print "Removing grief matter and surface lava above height {0}...".format(box.miny)
self.level.fillBlocks(box,
self.level.materials.Air,
blocksToReplace=[self.level.materials.Bedrock,
self.level.materials.Obsidian,
self.level.materials.Fire,
self.level.materials.LavaActive,
self.level.materials.Lava,
]
)
self.needsSave = True
def _time(self, command):
"""
time [time of day]
Set or display the time of day. Acceptable values are "morning", "noon",
"evening", "midnight", or a time of day such as 8:02, 12:30 PM, or 16:45.
"""
ticks = self.level.Time
timeOfDay = ticks % 24000
ageInTicks = ticks - timeOfDay
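# A Minecraft day is 24000 ticks and tick 0 corresponds to 6:00 AM, hence the
# +6 hour offset when displaying and the -6000 tick offset when setting the time.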
if len(command) == 0:
days = ageInTicks / 24000
hours = timeOfDay / 1000
clockHours = (hours + 6) % 24
ampm = ("AM", "PM")[clockHours > 11]
minutes = (timeOfDay % 1000) / 60
print "It is {0}:{1:02} {2} on Day {3}".format(clockHours % 12 or 12, minutes, ampm, days)
else:
times = {"morning": 6, "noon": 12, "evening": 18, "midnight": 24}
word = command[0]
minutes = 0
if word in times:
hours = times[word]
else:
try:
if ":" in word:
h, m = word.split(":")
hours = int(h)
minutes = int(m)
else:
hours = int(word)
except Exception, e:
raise UsageError(("Cannot interpret time, ", e))
if len(command) > 1:
if command[1].lower() == "pm":
hours += 12
ticks = ageInTicks + hours * 1000 + minutes * 1000 / 60 - 6000
if ticks < 0:
ticks += 24000  # wrap times before 6:00 AM into the same 24000-tick day
ampm = ("AM", "PM")[hours > 11 and hours < 24]
print "Changed time to {0}:{1:02} {2}".format(hours % 12 or 12, minutes, ampm)
self.level.Time = ticks
self.needsSave = True
def _randomseed(self, command):
"""
randomseed [ <seed> ]
Set or display the world's random seed, a 64-bit integer that uniquely
defines the world's terrain.
"""
if len(command):
try:
seed = long(command[0])
except ValueError:
raise UsageError("Expected a long integer.")
self.level.RandomSeed = seed
self.needsSave = True
else:
print "Random Seed: ", self.level.RandomSeed
def _gametype(self, command):
"""
gametype [ <player> [ <gametype> ] ]
Set or display the player's game type, an integer that identifies whether
their game is survival (0) or creative (1). On single-player worlds, the
player is just 'Player'.
"""
if len(command) == 0:
print "Players: "
for player in self.level.players:
print " {0}: {1}".format(player, self.level.getPlayerGameType(player))
return
player = command.pop(0)
if len(command) == 0:
print "Player {0}: {1}".format(player, self.level.getPlayerGameType(player))
return
try:
gametype = int(command[0])
except ValueError:
raise UsageError("Expected an integer.")
self.level.setPlayerGameType(gametype, player)
self.needsSave = True
def _worldsize(self, command):
"""
worldsize
Computes and prints the dimensions of the world. For infinite worlds,
also prints the most negative corner.
"""
bounds = self.level.bounds
if isinstance(self.level, mclevel.MCInfdevOldLevel):
print "\nWorld size: \n {0[0]:7} north to south\n {0[2]:7} east to west\n".format(bounds.size)
print "Smallest and largest points: ({0[0]},{0[2]}), ({1[0]},{1[2]})".format(bounds.origin, bounds.maximum)
else:
print "\nWorld size: \n {0[0]:7} wide\n {0[1]:7} tall\n {0[2]:7} long\n".format(bounds.size)
def _heightmap(self, command):
"""
heightmap <filename>
Takes a png and imports it as the terrain starting at chunk 0,0.
Data is internally converted to greyscale and scaled to the maximum height.
The game will fill the terrain with trees and mineral deposits the next
time you play the level.
Please please please try out a small test image before using a big source.
Using the levels tool to get a good heightmap is an art, not a science.
A smaller map lets you experiment and get it right before having to blow
all night generating the really big map.
Requires the PIL library.
"""
if len(command) == 0:
self.printUsage("heightmap")
return
if not sys.stdin.isatty() or raw_input(
"This will destroy a large portion of the map and may take a long time. Did you really want to do this?"
).lower() in ("yes", "y", "1", "true"):
from PIL import Image
import datetime
filename = command.pop(0)
imgobj = Image.open(filename)
greyimg = imgobj.convert("L") # luminance
del imgobj
width, height = greyimg.size
water_level = 64
xchunks = (height + 15) / 16
zchunks = (width + 15) / 16
start = datetime.datetime.now()
for cx in range(xchunks):
for cz in range(zchunks):
try:
self.level.createChunk(cx, cz)
except:
pass
c = self.level.getChunk(cx, cz)
imgarray = numpy.asarray(greyimg.crop((cz * 16, cx * 16, cz * 16 + 16, cx * 16 + 16)))
imgarray = imgarray / 2 # scale to 0-127
for x in range(16):
for z in range(16):
if z + (cz * 16) < width - 1 and x + (cx * 16) < height - 1:
# world dimension X goes north-south
# first array axis goes up-down
h = imgarray[x, z]
c.Blocks[x, z, h + 1:] = 0 # air
c.Blocks[x, z, h:h + 1] = 2 # grass
c.Blocks[x, z, h - 4:h] = 3 # dirt
c.Blocks[x, z, :h - 4] = 1 # rock
if h < water_level:
c.Blocks[x, z, h + 1:water_level] = 9 # water
if h < water_level + 2:
c.Blocks[x, z, h - 2:h + 1] = 12 # sand if it's near water level
c.Blocks[x, z, 0] = 7 # bedrock
c.chunkChanged()
c.TerrainPopulated = False
# the quick lighting from chunkChanged has already lit this simple terrain completely
c.needsLighting = False
logging.info("%s Just did chunk %d,%d" % (datetime.datetime.now().strftime("[%H:%M:%S]"), cx, cz))
logging.info("Done with mapping!")
self.needsSave = True
stop = datetime.datetime.now()
logging.info("Took %s." % str(stop - start))
spawnz = width / 2
spawnx = height / 2
spawny = greyimg.getpixel((spawnx, spawnz))
logging.info("You probably want to change your spawn point. I suggest {0}".format((spawnx, spawny, spawnz)))
def _execute(self, command):
"""
execute <filename>
Execute all commands in a file and save.
"""
if len(command) == 0:
print "You must give the file with commands to execute"
else:
commandFile = open(command[0], "r")
commandsFromFile = commandFile.readlines()
for commandFromFile in commandsFromFile:
print commandFromFile
self.processCommand(commandFromFile)
self._save("")
def _quit(self, command):
"""
quit [ yes | no ]
Quits the program.
Without 'yes' or 'no', prompts to save before quitting.
In batch mode, an end of file automatically saves the level.
"""
if len(command) == 0 or not (command[0].lower() in ("yes", "no")):
if raw_input("Save before exit? ").lower() in ("yes", "y", "1", "true"):
self._save(command)
raise SystemExit
        if len(command) and command[0].lower() == "yes":
self._save(command)
raise SystemExit
def _exit(self, command):
self._quit(command)
def _save(self, command):
if self.needsSave:
self.level.generateLights()
self.level.saveInPlace()
self.needsSave = False
def _load(self, command):
"""
load [ <filename> | <world number> ]
Loads another world, discarding all changes to this world.
"""
if len(command) == 0:
self.printUsage("load")
self.loadWorld(command[0])
def _reload(self, command):
self.level = mclevel.fromFile(self.level.filename)
def _dimension(self, command):
"""
dimension [ <dim> ]
Load another dimension, a sub-world of this level. Without options, lists
all of the dimensions found in this world. <dim> can be a number or one of
these keywords:
nether, hell, slip: DIM-1
earth, overworld, parent: parent world
end: DIM1
"""
if len(command):
if command[0].lower() in ("earth", "overworld", "parent"):
if self.level.parentWorld:
self.level = self.level.parentWorld
return
else:
print "You are already on earth."
return
elif command[0].lower() in ("hell", "nether", "slip"):
dimNo = -1
elif command[0].lower() == "end":
dimNo = 1
else:
dimNo = self.readInt(command)
if dimNo in self.level.dimensions:
self.level = self.level.dimensions[dimNo]
return
if self.level.parentWorld:
print u"Parent world: {0} ('dimension parent' to return)".format(self.level.parentWorld.displayName)
if len(self.level.dimensions):
print u"Dimensions in {0}:".format(self.level.displayName)
for k in self.level.dimensions:
print "{0}: {1}".format(k, infiniteworld.MCAlphaDimension.dimensionNames.get(k, "Unknown"))
def _help(self, command):
if len(command):
self.printUsage(command[0])
else:
self.printUsage()
def _blocks(self, command):
"""
blocks [ <block name> | <block ID> ]
Prints block IDs matching the name, or the name matching the ID.
With nothing, prints a list of all blocks.
"""
searchName = None
if len(command):
searchName = " ".join(command)
try:
searchNumber = int(searchName)
except ValueError:
searchNumber = None
matches = self.level.materials.blocksMatching(searchName)
else:
matches = [b for b in self.level.materials.allBlocks if b.ID == searchNumber]
# print "{0:3}: {1}".format(searchNumber, self.level.materials.names[searchNumber])
# return
else:
matches = self.level.materials.allBlocks
print "{id:9} : {name} {aka}".format(id="(ID:data)", name="Block name", aka="[Other names]")
for b in sorted(matches):
idstring = "({ID}:{data})".format(ID=b.ID, data=b.blockData)
aka = b.aka and " [{aka}]".format(aka=b.aka) or ""
print "{idstring:9} : {name} {aka}".format(idstring=idstring, name=b.name, aka=aka)
def printUsage(self, command=""):
if command.lower() in self.commands:
print "Usage: ", self.commandUsage(command.lower())
else:
print self.__doc__.format(commandPrefix=("", "mce.py <world> ")[not self.batchMode])
def printUsageAndQuit(self):
self.printUsage()
raise SystemExit
def loadWorld(self, world):
worldpath = os.path.expanduser(world)
if os.path.exists(worldpath):
self.level = mclevel.fromFile(worldpath)
else:
self.level = mclevel.loadWorld(world)
level = None
batchMode = False
def run(self):
logging.basicConfig(format=u'%(levelname)s:%(message)s')
logging.getLogger().level = logging.INFO
sys.argv.pop(0)
if len(sys.argv):
world = sys.argv.pop(0)
if world.lower() in ("-h", "--help"):
self.printUsageAndQuit()
if len(sys.argv) and sys.argv[0].lower() == "create":
# accept the syntax, "mce world3 create"
self._create([world])
print "Created world {0}".format(world)
sys.exit(0)
else:
self.loadWorld(world)
else:
self.batchMode = True
self.printUsage()
while True:
try:
world = raw_input("Please enter world name or path to world folder: ")
self.loadWorld(world)
except EOFError, e:
print "End of input."
raise SystemExit
except Exception, e:
print "Cannot open {0}: {1}".format(world, e)
else:
break
if len(sys.argv):
# process one command from command line
try:
self.processCommand(" ".join(sys.argv))
except UsageError:
self.printUsageAndQuit()
self._save([])
else:
# process many commands on standard input, maybe interactively
command = [""]
self.batchMode = True
while True:
try:
command = raw_input(u"{0}> ".format(self.level.displayName))
print
self.processCommand(command)
except EOFError, e:
print "End of file. Saving automatically."
self._save([])
raise SystemExit
except Exception, e:
if self.debug:
traceback.print_exc()
print 'Exception during command: {0!r}'.format(e)
print "Use 'debug' to enable tracebacks."
# self.printUsage()
def processCommand(self, command):
command = command.strip()
if len(command) == 0:
return
if command[0] == "#":
return
commandWords = command.split()
keyword = commandWords.pop(0).lower()
if not keyword in self.commands:
matches = filter(lambda x: x.startswith(keyword), self.commands)
if len(matches) == 1:
keyword = matches[0]
elif len(matches):
print "Ambiguous command. Matches: "
for k in matches:
print " ", k
return
else:
raise UsageError("Command {0} not recognized.".format(keyword))
func = getattr(self, "_" + keyword)
try:
func(commandWords)
except PlayerNotFound, e:
print "Cannot find player {0}".format(e.args[0])
self._player([])
except UsageError, e:
print e
if self.debug:
traceback.print_exc()
self.printUsage(keyword)
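# Illustrative sketch (not part of the original file): the unambiguous-prefix
# matching used by processCommand above, extracted as a standalone helper.
# Returns the resolved command name, or None when the prefix is unknown or
# matches more than one command.
def resolve_keyword(keyword, commands):
    matches = [name for name in commands if name.startswith(keyword)]
    if len(matches) == 1:
        return matches[0]
    return None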
def main(argv):
profile = os.getenv("MCE_PROFILE", None)
editor = mce()
if profile:
print "Profiling enabled"
import cProfile
cProfile.runctx('editor.run()', locals(), globals(), profile)
else:
editor.run()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
tinkerinestudio/Tinkerine-Suite
|
TinkerineSuite/Cura/util/pymclevel/mce.py
|
Python
|
agpl-3.0
| 47,733
|
class ProxyQuestion:
def __init__(self, *args, **kwargs):
self._meta.get_field("type").default = self.proxy_name
super().__init__(*args, **kwargs)
class Meta:
proxy = True
|
project-callisto/callisto-core
|
callisto_core/wizard_builder/model_helpers.py
|
Python
|
agpl-3.0
| 205
|
from openquake.commonlib.tests.calculators import CalculatorTestCase
class HelloTestCase(CalculatorTestCase):
def test(self):
out = self.run_calc(__file__, 'hello.ini')
self.assertGot('hello world', out['hello'])
|
raoanirudh/oq-risklib
|
docs/hello_test.py
|
Python
|
agpl-3.0
| 235
|
from main import *
class DateViewResearch(TemplatePage):
cacheName = "DateViewResearch"
def generateContent(self):
s = """<div class="tocbox">
<ul>
<li><a href="/bydate" title="Recent posts">All</a> </li>
<li><a href="/byresearchdate" title="Recent posts in Research">Researchers</a>
</li>
<li><a href="/byartvishisdate" title="Recent posts in Art,Visual,History">Art/Vis/His</a>
</li>
<li><a href="/byteacherdate" title="Recent posts from Teachers">Teachers</a>
</li>
</ul>
</div>
    <h2> The latest posts 'Pure, Applied' </h2>
<table class="bydate">
<thead>
<tr>
<th align="left" class="datecolumn">
Date
</th>
<th align="left" class="blogcolumn">
Blog
</th>
<th align="left" class="postcolumn">
Post
</th>
</tr>
</thead>
<tbody>"""
for post in Post.gql("WHERE category IN :1 ORDER BY timestamp_created DESC LIMIT 150", ['pure','applied']):
s = s + """
<tr>
<td valign="bottom" class="datecolumn">
<div>
%(time)s
</div>
</td>
<td valign="bottom" class="blogcolumn">
<div>
<a href="%(homepage)s" title="%(service)s">%(service)s</a>
</div>
</td>
<td valign="bottom" class="postcolumn">
<div>
<a href="%(link)s" title="%(title)s">%(title)s</a>
</div>
</td>
</tr>""" % {'time': post.printShortTime_created(), 'homepage': post.homepage, 'service': html_escape(post.service), 'title': html_escape(post.title), 'link': html_escape(post.link) }
return s + "</tbody></table>"
|
fbreuer/mathblogging
|
dateviewresearch.py
|
Python
|
agpl-3.0
| 1,631
|
# -*- coding: utf-8 -*-
# © 2015 Serv. Tecnol. Avanzados - Pedro M. Baeza
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from . import res_partner
|
open-synergy/account-payment
|
account_payment_extension/models/__init__.py
|
Python
|
agpl-3.0
| 167
|
from pymongo import Connection
import gridfs
from gridfs.errors import NoFile
from xmodule.modulestore import Location
from xmodule.modulestore.mongo.base import location_to_query
from xmodule.contentstore.content import XASSET_LOCATION_TAG
import logging
from .content import StaticContent, ContentStore, StaticContentStream
from xmodule.exceptions import NotFoundError
from fs.osfs import OSFS
import os
import json
class MongoContentStore(ContentStore):
def __init__(self, host, db, port=27017, user=None, password=None, bucket='fs', **kwargs):
logging.debug('Using MongoDB for static content serving at host={0} db={1}'.format(host, db))
_db = Connection(host=host, port=port, **kwargs)[db]
if user is not None and password is not None:
_db.authenticate(user, password)
self.fs = gridfs.GridFS(_db, bucket)
self.fs_files = _db[bucket + ".files"] # the underlying collection GridFS uses
def save(self, content):
content_id = content.get_id()
        # It seems that with GridFS we can't update existing IDs; we have to do a delete/add pair
self.delete(content_id)
with self.fs.new_file(_id=content_id, filename=content.get_url_path(), content_type=content.content_type,
displayname=content.name, thumbnail_location=content.thumbnail_location,
import_path=content.import_path,
# getattr b/c caching may mean some pickled instances don't have attr
locked=getattr(content, 'locked', False)) as fp:
if hasattr(content.data, '__iter__'):
for chunk in content.data:
fp.write(chunk)
else:
fp.write(content.data)
return content
def delete(self, content_id):
if self.fs.exists({"_id": content_id}):
self.fs.delete(content_id)
def find(self, location, throw_on_not_found=True, as_stream=False):
content_id = StaticContent.get_id_from_location(location)
try:
if as_stream:
fp = self.fs.get(content_id)
return StaticContentStream(
location, fp.displayname, fp.content_type, fp, last_modified_at=fp.uploadDate,
thumbnail_location=getattr(fp, 'thumbnail_location', None),
import_path=getattr(fp, 'import_path', None),
length=fp.length, locked=getattr(fp, 'locked', False)
)
else:
with self.fs.get(content_id) as fp:
return StaticContent(
location, fp.displayname, fp.content_type, fp.read(), last_modified_at=fp.uploadDate,
thumbnail_location=getattr(fp, 'thumbnail_location', None),
import_path=getattr(fp, 'import_path', None),
length=fp.length, locked=getattr(fp, 'locked', False)
)
except NoFile:
if throw_on_not_found:
raise NotFoundError()
else:
return None
def get_stream(self, location):
content_id = StaticContent.get_id_from_location(location)
try:
handle = self.fs.get(content_id)
except NoFile:
raise NotFoundError()
return handle
def close_stream(self, handle):
try:
handle.close()
except:
pass
def export(self, location, output_directory):
content = self.find(location)
if content.import_path is not None:
output_directory = output_directory + '/' + os.path.dirname(content.import_path)
if not os.path.exists(output_directory):
os.makedirs(output_directory)
disk_fs = OSFS(output_directory)
with disk_fs.open(content.name, 'wb') as asset_file:
asset_file.write(content.data)
def export_all_for_course(self, course_location, output_directory, assets_policy_file):
"""
Export all of this course's assets to the output_directory. Export all of the assets'
attributes to the policy file.
:param course_location: the Location of type 'course'
:param output_directory: the directory under which to put all the asset files
:param assets_policy_file: the filename for the policy file which should be in the same
directory as the other policy files.
"""
policy = {}
assets = self.get_all_content_for_course(course_location)
for asset in assets:
asset_location = Location(asset['_id'])
self.export(asset_location, output_directory)
for attr, value in asset.iteritems():
if attr not in ['_id', 'md5', 'uploadDate', 'length', 'chunkSize']:
policy.setdefault(asset_location.url(), {})[attr] = value
with open(assets_policy_file, 'w') as f:
json.dump(policy, f)
def get_all_content_thumbnails_for_course(self, location):
return self._get_all_content_for_course(location, get_thumbnails=True)
def get_all_content_for_course(self, location):
return self._get_all_content_for_course(location, get_thumbnails=False)
def _get_all_content_for_course(self, location, get_thumbnails=False):
'''
Returns a list of all static assets for a course. The return format is a list of dictionary elements. Example:
[
{u'displayname': u'profile.jpg', u'chunkSize': 262144, u'length': 85374,
u'uploadDate': datetime.datetime(2012, 10, 3, 5, 41, 54, 183000), u'contentType': u'image/jpeg',
u'_id': {u'category': u'asset', u'name': u'profile.jpg', u'course': u'6.002x', u'tag': u'c4x',
u'org': u'MITx', u'revision': None}, u'md5': u'36dc53519d4b735eb6beba51cd686a0e'},
{u'displayname': u'profile.thumbnail.jpg', u'chunkSize': 262144, u'length': 4073,
u'uploadDate': datetime.datetime(2012, 10, 3, 5, 41, 54, 196000), u'contentType': u'image/jpeg',
u'_id': {u'category': u'asset', u'name': u'profile.thumbnail.jpg', u'course': u'6.002x', u'tag': u'c4x',
u'org': u'MITx', u'revision': None}, u'md5': u'ff1532598830e3feac91c2449eaa60d6'},
....
]
'''
course_filter = Location(XASSET_LOCATION_TAG, category="asset" if not get_thumbnails else "thumbnail",
course=location.course, org=location.org)
# 'borrow' the function 'location_to_query' from the Mongo modulestore implementation
items = self.fs_files.find(location_to_query(course_filter))
return list(items)
def set_attr(self, location, attr, value=True):
"""
Add/set the given attr on the asset at the given location. Does not allow overwriting gridFS built in
attrs such as _id, md5, uploadDate, length. Value can be any type which pymongo accepts.
Returns nothing
Raises NotFoundError if no such item exists
        Raises AttributeError if attr is one of the built-in attrs.
:param location: a c4x asset location
:param attr: which attribute to set
:param value: the value to set it to (any type pymongo accepts such as datetime, number, string)
"""
self.set_attrs(location, {attr: value})
def get_attr(self, location, attr, default=None):
"""
Get the value of attr set on location. If attr is unset, it returns default. Unlike set, this accessor
does allow getting the value of reserved keywords.
:param location: a c4x asset location
"""
return self.get_attrs(location).get(attr, default)
def set_attrs(self, location, attr_dict):
"""
Like set_attr but sets multiple key value pairs.
Returns nothing.
Raises NotFoundError if no such item exists
        Raises AttributeError if attr_dict has any attrs which are one of the built-in attrs.
:param location: a c4x asset location
"""
for attr in attr_dict.iterkeys():
if attr in ['_id', 'md5', 'uploadDate', 'length']:
raise AttributeError("{} is a protected attribute.".format(attr))
item = self.fs_files.find_one(location_to_query(location))
if item is None:
raise NotFoundError()
self.fs_files.update({"_id": item["_id"]}, {"$set": attr_dict})
def get_attrs(self, location):
"""
Gets all of the attributes associated with the given asset. Note, returns even built in attrs
such as md5 which you cannot resubmit in an update; so, don't call set_attrs with the result of this
but only with the set of attrs you want to explicitly update.
The attrs will be a superset of _id, contentType, chunkSize, filename, uploadDate, & md5
:param location: a c4x asset location
"""
item = self.fs_files.find_one(location_to_query(location))
if item is None:
raise NotFoundError()
return item
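# Illustrative sketch (not part of the original module): the guard that
# set_attrs() above applies before updating GridFS metadata, extracted as a
# standalone helper so the protected names are easy to see.
GRIDFS_PROTECTED_ATTRS = ('_id', 'md5', 'uploadDate', 'length')
def check_writable_attrs(attr_dict):
    """Raise AttributeError if attr_dict tries to overwrite a GridFS built-in attr."""
    for attr in attr_dict:
        if attr in GRIDFS_PROTECTED_ATTRS:
            raise AttributeError("{} is a protected attribute.".format(attr))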
|
morpheby/levelup-by
|
common/lib/xmodule/xmodule/contentstore/mongo.py
|
Python
|
agpl-3.0
| 9,173
|
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2021 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from base.models.enums.entity_type import EntityType
from ddd.logic.learning_unit.builder.ucl_entity_identity_builder import UclEntityIdentityBuilder
from ddd.logic.learning_unit.domain.model.responsible_entity import UclEntity
from ddd.logic.learning_unit.dtos import UclEntityDataDTO
from osis_common.ddd.interface import CommandRequest, RootEntityBuilder
class UclEntityBuilder(RootEntityBuilder):
@classmethod
def build_from_command(cls, cmd: 'CommandRequest') -> 'UclEntity':
raise NotImplementedError
@classmethod
def build_from_repository_dto(cls, dto_object: 'UclEntityDataDTO') -> 'UclEntity':
return UclEntity(
entity_id=UclEntityIdentityBuilder.build_from_code(code=dto_object.code),
type=EntityType[dto_object.type],
)
|
uclouvain/OSIS-Louvain
|
ddd/logic/learning_unit/builder/ucl_entity_builder.py
|
Python
|
agpl-3.0
| 2,078
|
"""Auth pipeline definitions."""
from social.pipeline import partial
@partial.partial
def step(*args, **kwargs):
"""Fake pipeline step; just throws loudly for now."""
raise NotImplementedError('%s, %s' % (args, kwargs))
|
jswope00/GAI
|
common/djangoapps/third_party_auth/pipeline.py
|
Python
|
agpl-3.0
| 231
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from superdesk import get_resource_service
from superdesk.tests import TestCase
from nose.tools import assert_raises
class SequencesTestCase(TestCase):
def setUp(self):
with self.app.app_context():
self.service = get_resource_service("sequences")
self.min_seq_number = 1
self.max_seq_number = 10
def test_empty_sequence_name_fails(self):
with self.app.app_context():
            with assert_raises(KeyError):
                self.service.get_next_sequence_number(None)
            with assert_raises(KeyError):
                self.service.get_next_sequence_number("")
def test_next_sequence_number(self):
with self.app.app_context():
sequence_number1 = self.service.get_next_sequence_number("test_sequence_1")
sequence_number2 = self.service.get_next_sequence_number("test_sequence_1")
self.assertEqual(sequence_number1 + 1, sequence_number2)
def test_rotate_sequence_number(self):
with self.app.app_context():
last_sequence_number = None
for i in range(self.max_seq_number):
last_sequence_number = self.service.get_next_sequence_number(
"test_sequence_1", max_seq_number=self.max_seq_number, min_seq_number=self.min_seq_number
)
self.assertEqual(last_sequence_number, self.max_seq_number)
for i in range(0, 2):
last_sequence_number = self.service.get_next_sequence_number(
"test_sequence_1", max_seq_number=self.max_seq_number, min_seq_number=self.min_seq_number
)
self.assertEqual(last_sequence_number, self.min_seq_number + i, "failed for i={}".format(i))
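# Illustrative sketch (not part of the original tests): the rotation behaviour
# asserted above, i.e. sequence numbers wrap back to min_seq_number once
# max_seq_number has been handed out.
def rotate_sequence_number(current, min_seq_number=1, max_seq_number=10):
    if current >= max_seq_number:
        return min_seq_number
    return current + 1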
|
petrjasek/superdesk-core
|
tests/sequences_test.py
|
Python
|
agpl-3.0
| 2,014
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2015: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
#
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
# Copyright (C) 2009-2014:
# andrewmcgilvray, a.mcgilvray@gmail.com
# Frédéric MOHIER, frederic.mohier@ipmfrance.com
# Hartmut Goebel, h.goebel@goebel-consult.de
# Nicolas Dupeux, nicolas@dupeux.net
# Grégory Starck, g.starck@gmail.com
# Gerhard Lausser, gerhard.lausser@consol.de
# Sebastien Coavoux, s.coavoux@free.fr
# Christophe Simon, geektophe@gmail.com
# Jean Gabes, naparuba@gmail.com
# Olivier Hanesse, olivier.hanesse@gmail.com
# Romain Forlot, rforlot@yahoo.com
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
"""
This module provide Regenerator class used in
several Alignak modules to manage and regenerate objects
"""
import time
# Import all objects we will need
from alignak.objects.host import Host, Hosts
from alignak.objects.hostgroup import Hostgroup, Hostgroups
from alignak.objects.service import Service, Services
from alignak.objects.servicegroup import Servicegroup, Servicegroups
from alignak.objects.contact import Contact, Contacts
from alignak.objects.contactgroup import Contactgroup, Contactgroups
from alignak.objects.notificationway import NotificationWay, NotificationWays
from alignak.objects.timeperiod import Timeperiod, Timeperiods
from alignak.objects.command import Command, Commands
from alignak.objects.config import Config
from alignak.objects.schedulerlink import SchedulerLink, SchedulerLinks
from alignak.objects.reactionnerlink import ReactionnerLink, ReactionnerLinks
from alignak.objects.pollerlink import PollerLink, PollerLinks
from alignak.objects.brokerlink import BrokerLink, BrokerLinks
from alignak.objects.receiverlink import ReceiverLink, ReceiverLinks
from alignak.util import safe_print
from alignak.message import Message
class Regenerator(object):
"""
Class for a Regenerator.
It gets broks, and "regenerate" real objects from them
"""
def __init__(self):
# Our Real datas
self.configs = {}
self.hosts = Hosts([])
self.services = Services([])
self.notificationways = NotificationWays([])
self.contacts = Contacts([])
self.hostgroups = Hostgroups([])
self.servicegroups = Servicegroups([])
self.contactgroups = Contactgroups([])
self.timeperiods = Timeperiods([])
self.commands = Commands([])
self.schedulers = SchedulerLinks([])
self.pollers = PollerLinks([])
self.reactionners = ReactionnerLinks([])
self.brokers = BrokerLinks([])
self.receivers = ReceiverLinks([])
# From now we only look for realms names
self.realms = set()
self.tags = {}
self.services_tags = {}
# And in progress one
self.inp_hosts = {}
self.inp_services = {}
self.inp_hostgroups = {}
self.inp_servicegroups = {}
self.inp_contactgroups = {}
        # Do not ask for the full data to be resent too often
self.last_need_data_send = time.time()
# Flag to say if our data came from the scheduler or not
# (so if we skip *initial* broks)
self.in_scheduler_mode = False
# The Queue where to launch message, will be fill from the broker
self.from_q = None
def load_external_queue(self, from_q):
"""
Load an external queue for sending messages
Basically a from_q setter method.
:param from_q: queue to set
:type from_q: multiprocessing.Queue or Queue.Queue
:return: None
"""
self.from_q = from_q
def load_from_scheduler(self, sched):
"""
Load data from a scheduler
:param sched: the scheduler obj
:type sched: alignak.scheduler.Scheduler
:return: None
"""
# Ok, we are in a scheduler, so we will skip some useless
# steps
self.in_scheduler_mode = True
# Go with the data creation/load
conf = sched.conf
# Simulate a drop conf
brok = sched.get_program_status_brok()
brok.prepare()
self.manage_program_status_brok(brok)
# Now we will lie and directly map our objects :)
print "Regenerator::load_from_scheduler"
self.hosts = conf.hosts
self.services = conf.services
self.notificationways = conf.notificationways
self.contacts = conf.contacts
self.hostgroups = conf.hostgroups
self.servicegroups = conf.servicegroups
self.contactgroups = conf.contactgroups
self.timeperiods = conf.timeperiods
self.commands = conf.commands
# We also load the realm
for host in self.hosts:
self.realms.add(host.realm)
break
def want_brok(self, brok):
"""
Function to tell whether we need a specific type of brok or not.
Return always true if not in scheduler mode
:param brok: The brok to check
:type brok: alignak.objects.brok.Brok
        :return: A boolean meaning whether we want this brok
:rtype: bool
"""
if self.in_scheduler_mode:
return brok.type not in ['program_status', 'initial_host_status',
'initial_hostgroup_status', 'initial_service_status',
'initial_servicegroup_status', 'initial_contact_status',
'initial_contactgroup_status', 'initial_timeperiod_status',
'initial_command_status']
        # Ok, you are wondering why we don't add initial_broks_done?
        # It's because the LiveStatus modules need this part to do internal things.
        # But don't worry, the vanilla regenerator will just skip it in all_done_linking :D
        # Not in the "don't want" list? Then we want it! :)
return True
def manage_brok(self, brok):
"""Look for a manager function for a brok, and call it
:param brok:
:type brok: object
:return:
:rtype:
"""
manage = getattr(self, 'manage_' + brok.type + '_brok', None)
# If we can and want it, got for it :)
if manage and self.want_brok(brok):
return manage(brok)
def update_element(self, item, data):
"""
Update object attibute with value contained in data keys
:param item: A alignak object
:type item: alignak.object.Item
:param data: the dict containing attribute to update
:type data: dict
:return: None
"""
for prop in data:
setattr(item, prop, data[prop])
def all_done_linking(self, inst_id):
"""
Link all data (objects) in a specific instance
:param inst_id: Instance id from a config object
:type inst_id: int
:return: None
"""
# In a scheduler we are already "linked" so we can skip this
if self.in_scheduler_mode:
safe_print("Regenerator: We skip the all_done_linking phase "
"because we are in a scheduler")
return
start = time.time()
safe_print("In ALL Done linking phase for instance", inst_id)
        # check if the instance is really defined, so that we got ALL the
        # init phase
if inst_id not in self.configs.keys():
safe_print("Warning: the instance %d is not fully given, bailout" % inst_id)
return
# Try to load the in progress list and make them available for
# finding
try:
inp_hosts = self.inp_hosts[inst_id]
inp_hostgroups = self.inp_hostgroups[inst_id]
inp_contactgroups = self.inp_contactgroups[inst_id]
inp_services = self.inp_services[inst_id]
inp_servicegroups = self.inp_servicegroups[inst_id]
except Exception, exp:
print "Warning all done: ", exp
return
# Link HOSTGROUPS with hosts
for hostgroup in inp_hostgroups:
new_members = []
for (i, hname) in hostgroup.members:
host = inp_hosts.find_by_name(hname)
if host:
new_members.append(host)
hostgroup.members = new_members
# Merge HOSTGROUPS with real ones
for inphg in inp_hostgroups:
hgname = inphg.hostgroup_name
hostgroup = self.hostgroups.find_by_name(hgname)
            # If the hostgroup already exists, just add the new
# hosts into it
if hostgroup:
hostgroup.members.extend(inphg.members)
else: # else take the new one
self.hostgroups.add_item(inphg)
# Now link HOSTS with hostgroups, and commands
for host in inp_hosts:
# print "Linking %s groups %s" % (h.get_name(), h.hostgroups)
new_hostgroups = []
for hgname in host.hostgroups.split(','):
hgname = hgname.strip()
hostgroup = self.hostgroups.find_by_name(hgname)
if hostgroup:
new_hostgroups.append(hostgroup)
host.hostgroups = new_hostgroups
# Now link Command() objects
self.linkify_a_command(host, 'check_command')
self.linkify_a_command(host, 'event_handler')
# Now link timeperiods
self.linkify_a_timeperiod_by_name(host, 'notification_period')
self.linkify_a_timeperiod_by_name(host, 'check_period')
self.linkify_a_timeperiod_by_name(host, 'maintenance_period')
# And link contacts too
self.linkify_contacts(host, 'contacts')
# Linkify tags
for tag in host.tags:
if tag not in self.tags:
self.tags[tag] = 0
self.tags[tag] += 1
# We can really declare this host OK now
self.hosts.add_item(host)
# Link SERVICEGROUPS with services
for servicegroup in inp_servicegroups:
new_members = []
for (i, sname) in servicegroup.members:
if i not in inp_services:
continue
serv = inp_services[i]
new_members.append(serv)
servicegroup.members = new_members
# Merge SERVICEGROUPS with real ones
for inpsg in inp_servicegroups:
sgname = inpsg.servicegroup_name
servicegroup = self.servicegroups.find_by_name(sgname)
            # If the servicegroup already exists, just add the new
# services into it
if servicegroup:
servicegroup.members.extend(inpsg.members)
else: # else take the new one
self.servicegroups.add_item(inpsg)
# Now link SERVICES with hosts, servicesgroups, and commands
for serv in inp_services:
new_servicegroups = []
for sgname in serv.servicegroups.split(','):
sgname = sgname.strip()
servicegroup = self.servicegroups.find_by_name(sgname)
if servicegroup:
new_servicegroups.append(servicegroup)
serv.servicegroups = new_servicegroups
# Now link with host
hname = serv.host_name
serv.host = self.hosts.find_by_name(hname)
if serv.host:
serv.host.services.append(serv)
# Now link Command() objects
self.linkify_a_command(serv, 'check_command')
self.linkify_a_command(serv, 'event_handler')
# Now link timeperiods
self.linkify_a_timeperiod_by_name(serv, 'notification_period')
self.linkify_a_timeperiod_by_name(serv, 'check_period')
self.linkify_a_timeperiod_by_name(serv, 'maintenance_period')
# And link contacts too
self.linkify_contacts(serv, 'contacts')
# Linkify services tags
for tag in serv.tags:
if tag not in self.services_tags:
self.services_tags[tag] = 0
self.services_tags[tag] += 1
# We can really declare this host OK now
self.services.add_item(serv, index=True)
# Add realm of theses hosts. Only the first is useful
for host in inp_hosts:
self.realms.add(host.realm)
break
# Now we can link all impacts/source problem list
# but only for the new ones here of course
for host in inp_hosts:
self.linkify_dict_srv_and_hosts(host, 'impacts')
self.linkify_dict_srv_and_hosts(host, 'source_problems')
self.linkify_host_and_hosts(host, 'parents')
self.linkify_host_and_hosts(host, 'childs')
self.linkify_dict_srv_and_hosts(host, 'parent_dependencies')
self.linkify_dict_srv_and_hosts(host, 'child_dependencies')
# Now services too
for serv in inp_services:
self.linkify_dict_srv_and_hosts(serv, 'impacts')
self.linkify_dict_srv_and_hosts(serv, 'source_problems')
self.linkify_dict_srv_and_hosts(serv, 'parent_dependencies')
self.linkify_dict_srv_and_hosts(serv, 'child_dependencies')
# Linking TIMEPERIOD exclude with real ones now
for timeperiod in self.timeperiods:
new_exclude = []
for ex in timeperiod.exclude:
exname = ex.timeperiod_name
tag = self.timeperiods.find_by_name(exname)
if tag:
new_exclude.append(tag)
timeperiod.exclude = new_exclude
# Link CONTACTGROUPS with contacts
for contactgroup in inp_contactgroups:
new_members = []
for (i, cname) in contactgroup.members:
contact = self.contacts.find_by_name(cname)
if contact:
new_members.append(contact)
contactgroup.members = new_members
# Merge contactgroups with real ones
for inpcg in inp_contactgroups:
cgname = inpcg.contactgroup_name
contactgroup = self.contactgroups.find_by_name(cgname)
            # If the contactgroup already exists, just add the new
# contacts into it
if contactgroup:
contactgroup.members.extend(inpcg.members)
contactgroup.members = list(set(contactgroup.members))
else: # else take the new one
self.contactgroups.add_item(inpcg)
safe_print("ALL LINKING TIME" * 10, time.time() - start)
# clean old objects
del self.inp_hosts[inst_id]
del self.inp_hostgroups[inst_id]
del self.inp_contactgroups[inst_id]
del self.inp_services[inst_id]
del self.inp_servicegroups[inst_id]
def linkify_a_command(self, obj, prop):
"""
Replace the command_name by the command object in obj.prop
:param obj: A host or a service
:type obj: alignak.objects.schedulingitem.SchedulingItem
:param prop: an attribute to replace ("check_command" or "event_handler")
:type prop: str
:return: None
"""
commandcall = getattr(obj, prop, None)
# if the command call is void, bypass it
if not commandcall:
setattr(obj, prop, None)
return
cmdname = commandcall.command
command = self.commands.find_by_name(cmdname)
commandcall.command = command
def linkify_commands(self, obj, prop):
"""
Replace the command_name by the command object in obj.prop
:param obj: A notification way object
:type obj: alignak.objects.notificationway.NotificationWay
:param prop: an attribute to replace
('host_notification_commands' or 'service_notification_commands')
:type prop: str
:return: None
"""
commandcalls = getattr(obj, prop, None)
if not commandcalls:
            # If we do not have a command list, put a void list instead
setattr(obj, prop, [])
return
for commandcall in commandcalls:
cmdname = commandcall.command
command = self.commands.find_by_name(cmdname)
commandcall.command = command
def linkify_a_timeperiod(self, obj, prop):
"""
Replace the timeperiod_name by the timeperiod object in obj.prop
:param obj: A notification way object
:type obj: alignak.objects.notificationway.NotificationWay
:param prop: an attribute to replace
('host_notification_period' or 'service_notification_period')
:type prop: str
:return: None
"""
raw_timeperiod = getattr(obj, prop, None)
if not raw_timeperiod:
setattr(obj, prop, None)
return
tpname = raw_timeperiod.timeperiod_name
timeperiod = self.timeperiods.find_by_name(tpname)
setattr(obj, prop, timeperiod)
def linkify_a_timeperiod_by_name(self, obj, prop):
"""
Replace the timeperiod_name by the timeperiod object in obj.prop
:param obj: A host or a service
:type obj: alignak.objects.SchedulingItem
:param prop: an attribute to replace
('notification_period' or 'check_period')
:type prop: str
:return: None
"""
tpname = getattr(obj, prop, None)
if not tpname:
setattr(obj, prop, None)
return
timeperiod = self.timeperiods.find_by_name(tpname)
setattr(obj, prop, timeperiod)
def linkify_contacts(self, obj, prop):
"""
Replace the contact_name by the contact object in obj.prop
:param obj: A host or a service
:type obj: alignak.objects.SchedulingItem
:param prop: an attribute to replace ('contacts')
:type prop: str
:return: None
"""
contacts = getattr(obj, prop)
if not contacts:
return
new_v = []
for cname in contacts:
contact = self.contacts.find_by_name(cname)
if contact:
new_v.append(contact)
setattr(obj, prop, new_v)
def linkify_dict_srv_and_hosts(self, obj, prop):
"""
Replace the dict with host and service name by the host or service object in obj.prop
:param obj: A host or a service
:type obj: alignak.objects.SchedulingItem
:param prop: an attribute to replace
('impacts', 'source_problems', 'parent_dependencies' or 'child_dependencies'))
:type prop: str
:return: None
"""
problems = getattr(obj, prop)
if not problems:
            setattr(obj, prop, [])
            return
new_v = []
# print "Linkify Dict SRV/Host", v, obj.get_name(), prop
for name in problems['services']:
elts = name.split('/')
hname = elts[0]
sdesc = elts[1]
serv = self.services.find_srv_by_name_and_hostname(hname, sdesc)
if serv:
new_v.append(serv)
for hname in problems['hosts']:
host = self.hosts.find_by_name(hname)
if host:
new_v.append(host)
setattr(obj, prop, new_v)
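    # Illustrative note (an assumption about the brok payload, inferred from the
    # lookups above): the 'impacts', 'source_problems' and '*_dependencies' broks
    # carry a dict such as {'services': ['web01/HTTP'], 'hosts': ['db01']} (names
    # here are hypothetical); each 'host/service_description' entry is resolved
    # with find_srv_by_name_and_hostname() and each bare host name with
    # hosts.find_by_name(), so obj.prop ends up as a list of real objects.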
def linkify_host_and_hosts(self, obj, prop):
"""
Replace the host_name by the host object in obj.prop
:param obj: A host or a service
:type obj: alignak.objects.SchedulingItem
:param prop: an attribute to replace
        ('parents' or 'childs')
:type prop: str
:return: None
"""
hosts = getattr(obj, prop)
if not hosts:
            setattr(obj, prop, [])
            return
new_v = []
for hname in hosts:
host = self.hosts.find_by_name(hname)
if host:
new_v.append(host)
setattr(obj, prop, new_v)
###############
# Brok management part
###############
def before_after_hook(self, brok, obj):
"""
This can be used by derived classes to compare the data in the brok
with the object which will be updated by these data. For example,
it is possible to find out in this method whether the state of a
host or service has changed.
"""
pass
#######
# INITIAL PART
#######
def manage_program_status_brok(self, brok):
"""
Manage program_status brok : Reset objects for the given config id
:param brok: Brok containing new config
:type brok: alignak.objects.brok.Brok
:return: None
"""
data = brok.data
c_id = data['instance_id']
safe_print("Regenerator: Creating config:", c_id)
        # We get a real Conf object, and put our data in it
conf = Config()
self.update_element(conf, data)
# Clean all in_progress things.
# And in progress one
self.inp_hosts[c_id] = Hosts([])
self.inp_services[c_id] = Services([])
self.inp_hostgroups[c_id] = Hostgroups([])
self.inp_servicegroups[c_id] = Servicegroups([])
self.inp_contactgroups[c_id] = Contactgroups([])
# And we save it
self.configs[c_id] = conf
# Clean the old "hard" objects
# We should clean all previously added hosts and services
safe_print("Clean hosts/service of", c_id)
to_del_h = [h for h in self.hosts if h.instance_id == c_id]
to_del_srv = [s for s in self.services if s.instance_id == c_id]
safe_print("Cleaning host:%d srv:%d" % (len(to_del_h), len(to_del_srv)))
# Clean hosts from hosts and hostgroups
for host in to_del_h:
safe_print("Deleting", host.get_name())
del self.hosts[host._id]
# Now clean all hostgroups too
for hostgroup in self.hostgroups:
safe_print("Cleaning hostgroup %s:%d" % (hostgroup.get_name(), len(hostgroup.members)))
# Exclude from members the hosts with this inst_id
hostgroup.members = [host for host in hostgroup.members if host.instance_id != c_id]
safe_print("Len after", len(hostgroup.members))
for serv in to_del_srv:
safe_print("Deleting", serv.get_full_name())
del self.services[serv._id]
# Now clean service groups
for servicegroup in self.servicegroups:
servicegroup.members = [s for s in servicegroup.members if s.instance_id != c_id]
def manage_initial_host_status_brok(self, brok):
"""
Manage initial_host_status brok : Update host object
:param brok: Brok containing new data
:type brok: alignak.objects.brok.Brok
:return: None
"""
data = brok.data
hname = data['host_name']
inst_id = data['instance_id']
# Try to get the inp progress Hosts
try:
inp_hosts = self.inp_hosts[inst_id]
        except Exception, exp:  # not good. we will cry in the program update
print "Not good!", exp
return
# safe_print("Creating a host: %s in instance %d" % (hname, inst_id))
host = Host({})
self.update_element(host, data)
# We need to rebuild Downtime and Comment relationship
for dtc in host.downtimes + host.comments:
dtc.ref = host
# Ok, put in in the in progress hosts
inp_hosts[host._id] = host
def manage_initial_hostgroup_status_brok(self, brok):
"""
Manage initial_hostgroup_status brok : Update hostgroup object
:param brok: Brok containing new data
:type brok: alignak.objects.brok.Brok
:return: None
"""
data = brok.data
hgname = data['hostgroup_name']
inst_id = data['instance_id']
# Try to get the inp progress Hostgroups
try:
inp_hostgroups = self.inp_hostgroups[inst_id]
        except Exception, exp:  # not good. we will cry in the program update
print "Not good!", exp
return
safe_print("Creating a hostgroup: %s in instance %d" % (hgname, inst_id))
# With void members
hostgroup = Hostgroup([])
# populate data
self.update_element(hostgroup, data)
# We will link hosts into hostgroups later
# so now only save it
inp_hostgroups[hostgroup._id] = hostgroup
def manage_initial_service_status_brok(self, brok):
"""
Manage initial_service_status brok : Update service object
:param brok: Brok containing new data
:type brok: alignak.objects.brok.Brok
:return: None
"""
data = brok.data
hname = data['host_name']
sdesc = data['service_description']
inst_id = data['instance_id']
# Try to get the inp progress Hosts
try:
inp_services = self.inp_services[inst_id]
        except Exception, exp:  # not good. we will cry in the program update
print "Not good!", exp
return
# safe_print("Creating a service: %s/%s in instance %d" % (hname, sdesc, inst_id))
serv = Service({})
self.update_element(serv, data)
# We need to rebuild Downtime and Comment relationship
for dtc in serv.downtimes + serv.comments:
dtc.ref = serv
# Ok, put in in the in progress hosts
inp_services[serv._id] = serv
def manage_initial_servicegroup_status_brok(self, brok):
"""
Manage initial_servicegroup_status brok : Update servicegroup object
:param brok: Brok containing new data
:type brok: alignak.objects.brok.Brok
:return: None
"""
data = brok.data
sgname = data['servicegroup_name']
inst_id = data['instance_id']
# Try to get the inp progress Hostgroups
try:
inp_servicegroups = self.inp_servicegroups[inst_id]
        except Exception, exp:  # not good. we will cry in the program update
print "Not good!", exp
return
safe_print("Creating a servicegroup: %s in instance %d" % (sgname, inst_id))
# With void members
servicegroup = Servicegroup([])
# populate data
self.update_element(servicegroup, data)
        # We will link services into servicegroups later
# so now only save it
inp_servicegroups[servicegroup._id] = servicegroup
def manage_initial_contact_status_brok(self, brok):
"""
Manage initial_contact_status brok : Update contact object
:param brok: Brok containing new data
:type brok: alignak.objects.brok.Brok
:return: None
"""
data = brok.data
cname = data['contact_name']
safe_print("Contact with data", data)
contact = self.contacts.find_by_name(cname)
if contact:
self.update_element(contact, data)
else:
safe_print("Creating Contact:", cname)
contact = Contact({})
self.update_element(contact, data)
self.contacts.add_item(contact)
# Delete some useless contact values
del contact.host_notification_commands
del contact.service_notification_commands
del contact.host_notification_period
del contact.service_notification_period
# Now manage notification ways too
        # Same as for contacts. We create or
# update
nws = contact.notificationways
safe_print("Got notif ways", nws)
new_notifways = []
for cnw in nws:
nwname = cnw.notificationway_name
notifway = self.notificationways.find_by_name(nwname)
if not notifway:
safe_print("Creating notif way", nwname)
notifway = NotificationWay([])
self.notificationways.add_item(notifway)
# Now update it
for prop in NotificationWay.properties:
if hasattr(cnw, prop):
setattr(notifway, prop, getattr(cnw, prop))
new_notifways.append(notifway)
# Linking the notification way
# With commands
self.linkify_commands(notifway, 'host_notification_commands')
self.linkify_commands(notifway, 'service_notification_commands')
# Now link timeperiods
self.linkify_a_timeperiod(notifway, 'host_notification_period')
self.linkify_a_timeperiod(notifway, 'service_notification_period')
contact.notificationways = new_notifways
def manage_initial_contactgroup_status_brok(self, brok):
"""
Manage initial_contactgroup_status brok : Update contactgroup object
:param brok: Brok containing new data
:type brok: alignak.objects.brok.Brok
:return: None
"""
data = brok.data
cgname = data['contactgroup_name']
inst_id = data['instance_id']
# Try to get the inp progress Contactgroups
try:
inp_contactgroups = self.inp_contactgroups[inst_id]
        except Exception, exp:  # not good. we will cry in the program update
print "Not good!", exp
return
safe_print("Creating an contactgroup: %s in instance %d" % (cgname, inst_id))
# With void members
contactgroup = Contactgroup([])
# populate data
self.update_element(contactgroup, data)
# We will link contacts into contactgroups later
# so now only save it
inp_contactgroups[contactgroup._id] = contactgroup
def manage_initial_timeperiod_status_brok(self, brok):
"""
Manage initial_timeperiod_status brok : Update timeperiod object
:param brok: Brok containing new data
:type brok: alignak.objects.brok.Brok
:return: None
"""
data = brok.data
# print "Creating timeperiod", data
tpname = data['timeperiod_name']
timeperiod = self.timeperiods.find_by_name(tpname)
if timeperiod:
# print "Already existing timeperiod", tpname
self.update_element(timeperiod, data)
else:
# print "Creating Timeperiod:", tpname
timeperiod = Timeperiod({})
self.update_element(timeperiod, data)
self.timeperiods.add_item(timeperiod)
def manage_initial_command_status_brok(self, brok):
"""
Manage initial_command_status brok : Update command object
:param brok: Brok containing new data
:type brok: alignak.objects.brok.Brok
:return: None
"""
data = brok.data
cname = data['command_name']
command = self.commands.find_by_name(cname)
if command:
# print "Already existing command", cname, "updating it"
self.update_element(command, data)
else:
# print "Creating a new command", cname
command = Command({})
self.update_element(command, data)
self.commands.add_item(command)
def manage_initial_scheduler_status_brok(self, brok):
"""
Manage initial_scheduler_status brok : Update scheduler object
:param brok: Brok containing new data
:type brok: alignak.objects.brok.Brok
:return: None
"""
data = brok.data
scheduler_name = data['scheduler_name']
print "Creating Scheduler:", scheduler_name, data
sched = SchedulerLink({})
print "Created a new scheduler", sched
self.update_element(sched, data)
print "Updated scheduler"
# print "CMD:", c
self.schedulers[scheduler_name] = sched
print "scheduler added"
def manage_initial_poller_status_brok(self, brok):
"""
Manage initial_poller_status brok : Update poller object
:param brok: Brok containing new data
:type brok: alignak.objects.brok.Brok
:return: None
"""
data = brok.data
poller_name = data['poller_name']
print "Creating Poller:", poller_name, data
poller = PollerLink({})
print "Created a new poller", poller
self.update_element(poller, data)
print "Updated poller"
# print "CMD:", c
self.pollers[poller_name] = poller
print "poller added"
def manage_initial_reactionner_status_brok(self, brok):
"""
Manage initial_reactionner_status brok : Update reactionner object
:param brok: Brok containing new data
:type brok: alignak.objects.brok.Brok
:return: None
"""
data = brok.data
reactionner_name = data['reactionner_name']
print "Creating Reactionner:", reactionner_name, data
reac = ReactionnerLink({})
print "Created a new reactionner", reac
self.update_element(reac, data)
print "Updated reactionner"
# print "CMD:", c
self.reactionners[reactionner_name] = reac
print "reactionner added"
def manage_initial_broker_status_brok(self, brok):
"""
Manage initial_broker_status brok : Update broker object
:param brok: Brok containing new data
:type brok: alignak.objects.brok.Brok
:return: None
"""
data = brok.data
broker_name = data['broker_name']
print "Creating Broker:", broker_name, data
broker = BrokerLink({})
print "Created a new broker", broker
self.update_element(broker, data)
print "Updated broker"
# print "CMD:", c
self.brokers[broker_name] = broker
print "broker added"
def manage_initial_receiver_status_brok(self, brok):
"""
Manage initial_receiver_status brok : Update receiver object
:param brok: Brok containing new data
:type brok: alignak.objects.brok.Brok
:return: None
"""
data = brok.data
receiver_name = data['receiver_name']
print "Creating Receiver:", receiver_name, data
receiver = ReceiverLink({})
print "Created a new receiver", receiver
self.update_element(receiver, data)
print "Updated receiver"
# print "CMD:", c
self.receivers[receiver_name] = receiver
print "receiver added"
def manage_initial_broks_done_brok(self, brok):
"""
Manage initial_broks_done brok : Call all_done_linking with the instance_id in the brok
:param brok: Brok containing new data
:type brok: alignak.objects.brok.Brok
:return: None
"""
inst_id = brok.data['instance_id']
print "Finish the configuration of instance", inst_id
self.all_done_linking(inst_id)
#################
# Status Update part
#################
def manage_update_program_status_brok(self, brok):
"""
Manage update_program_status brok : Update config object
:param brok: Brok containing new data
:type brok: alignak.objects.brok.Brok
:return: None
"""
data = brok.data
c_id = data['instance_id']
# If we got an update about an unknown instance, cry and ask for a full
# version!
if c_id not in self.configs.keys():
            # Do not ask for data too quickly, very dangerous
            # once a minute at most
if time.time() - self.last_need_data_send > 60 and self.from_q is not None:
print "I ask the broker for instance id data:", c_id
msg = Message(_id=0, _type='NeedData', data={'full_instance_id': c_id})
self.from_q.put(msg)
self.last_need_data_send = time.time()
return
# Ok, good conf, we can update it
conf = self.configs[c_id]
self.update_element(conf, data)
def manage_update_host_status_brok(self, brok):
"""
Manage update_host_status brok : Update host object
:param brok: Brok containing new data
:type brok: alignak.objects.brok.Brok
:return: None
"""
# There are some properties that should not change and are already linked
# so just remove them
clean_prop = ['check_command', 'hostgroups',
'contacts', 'notification_period', 'contact_groups',
'check_period', 'event_handler',
'maintenance_period', 'realm', 'customs', 'escalations']
        # some are only used when a topology change happened
        topology_change = brok.data['topology_change']
        if not topology_change:
other_to_clean = ['childs', 'parents', 'child_dependencies', 'parent_dependencies']
clean_prop.extend(other_to_clean)
data = brok.data
for prop in clean_prop:
del data[prop]
hname = data['host_name']
host = self.hosts.find_by_name(hname)
if host:
self.before_after_hook(brok, host)
self.update_element(host, data)
# We can have some change in our impacts and source problems.
self.linkify_dict_srv_and_hosts(host, 'impacts')
self.linkify_dict_srv_and_hosts(host, 'source_problems')
# If the topology change, update it
            if topology_change:
print "Topology change for", host.get_name(), host.parent_dependencies
self.linkify_host_and_hosts(host, 'parents')
self.linkify_host_and_hosts(host, 'childs')
self.linkify_dict_srv_and_hosts(host, 'parent_dependencies')
self.linkify_dict_srv_and_hosts(host, 'child_dependencies')
# Relink downtimes and comments
for dtc in host.downtimes + host.comments:
dtc.ref = host
def manage_update_service_status_brok(self, brok):
"""
Manage update_service_status brok : Update service object
:param brok: Brok containing new data
:type brok: alignak.objects.brok.Brok
:return: None
"""
# There are some properties that should not change and are already linked
# so just remove them
clean_prop = ['check_command', 'servicegroups',
'contacts', 'notification_period', 'contact_groups',
'check_period', 'event_handler',
'maintenance_period', 'customs', 'escalations']
        # some are only used when a topology change happened
        topology_change = brok.data['topology_change']
        if not topology_change:
other_to_clean = ['child_dependencies', 'parent_dependencies']
clean_prop.extend(other_to_clean)
data = brok.data
for prop in clean_prop:
del data[prop]
hname = data['host_name']
sdesc = data['service_description']
serv = self.services.find_srv_by_name_and_hostname(hname, sdesc)
if serv:
self.before_after_hook(brok, serv)
self.update_element(serv, data)
# We can have some change in our impacts and source problems.
self.linkify_dict_srv_and_hosts(serv, 'impacts')
self.linkify_dict_srv_and_hosts(serv, 'source_problems')
# If the topology change, update it
            if topology_change:
self.linkify_dict_srv_and_hosts(serv, 'parent_dependencies')
self.linkify_dict_srv_and_hosts(serv, 'child_dependencies')
# Relink downtimes and comments with the service
for dtc in serv.downtimes + serv.comments:
dtc.ref = serv
def manage_update_broker_status_brok(self, brok):
"""
Manage update_broker_status brok : Update broker object
:param brok: Brok containing new data
:type brok: alignak.objects.brok.Brok
:return: None
"""
data = brok.data
broker_name = data['broker_name']
try:
broker = self.brokers[broker_name]
self.update_element(broker, data)
except Exception:
pass
def manage_update_receiver_status_brok(self, brok):
"""
Manage update_receiver_status brok : Update receiver object
:param brok: Brok containing new data
:type brok: alignak.objects.brok.Brok
:return: None
"""
data = brok.data
receiver_name = data['receiver_name']
try:
receiver = self.receivers[receiver_name]
self.update_element(receiver, data)
except Exception:
pass
def manage_update_reactionner_status_brok(self, brok):
"""
Manage update_reactionner_status brok : Update reactionner object
:param brok: Brok containing new data
:type brok: alignak.objects.brok.Brok
:return: None
"""
data = brok.data
reactionner_name = data['reactionner_name']
try:
reactionner = self.reactionners[reactionner_name]
self.update_element(reactionner, data)
except Exception:
pass
def manage_update_poller_status_brok(self, brok):
"""
Manage update_poller_status brok : Update poller object
:param brok: Brok containing new data
:type brok: alignak.objects.brok.Brok
:return: None
"""
data = brok.data
poller_name = data['poller_name']
try:
poller = self.pollers[poller_name]
self.update_element(poller, data)
except Exception:
pass
def manage_update_scheduler_status_brok(self, brok):
"""
Manage update_scheduler_status brok : Update scheduler object
:param brok: Brok containing new data
:type brok: alignak.objects.brok.Brok
:return: None
"""
data = brok.data
scheduler_name = data['scheduler_name']
try:
scheduler = self.schedulers[scheduler_name]
self.update_element(scheduler, data)
# print "S:", s
except Exception:
pass
#################
# Check result and schedule part
#################
def manage_host_check_result_brok(self, brok):
"""
Manage host_check_result brok : Update host object
:param brok: Brok containing new data
:type brok: alignak.objects.brok.Brok
:return: None
"""
data = brok.data
hname = data['host_name']
host = self.hosts.find_by_name(hname)
if host:
self.before_after_hook(brok, host)
self.update_element(host, data)
def manage_host_next_schedule_brok(self, brok):
"""
Manage initial_timeperiod_status brok : Same as manage_host_check_result_brok
:return: None
"""
self.manage_host_check_result_brok(brok)
def manage_service_check_result_brok(self, brok):
"""
Manage service_check_result brok : Update service object
:param brok: Brok containing new data
:type brok: alignak.objects.brok.Brok
:return: None
"""
data = brok.data
hname = data['host_name']
sdesc = data['service_description']
serv = self.services.find_srv_by_name_and_hostname(hname, sdesc)
if serv:
self.before_after_hook(brok, serv)
self.update_element(serv, data)
def manage_service_next_schedule_brok(self, brok):
"""
Manage service_next_schedule brok : Same as manage_service_check_result_brok
A service check update have just arrived, we UPDATE data info with this
:return: None
"""
self.manage_service_check_result_brok(brok)
|
gst/alignak
|
alignak/misc/regenerator.py
|
Python
|
agpl-3.0
| 45,353
|
# -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from django.utils.safestring import mark_safe
from django_jinja import library
from shoop.admin.template_helpers import shoop_admin as shoop_admin_template_helpers
from shoop.admin.utils.bs3_renderers import AdminFieldRenderer
from bootstrap3.renderers import FormRenderer
class Bootstrap3Namespace(object):
def field(self, field, **kwargs):
if not field:
return ""
return mark_safe(AdminFieldRenderer(field, **kwargs).render())
def form(self, form, **kwargs):
return mark_safe(FormRenderer(form, **kwargs).render())
library.global_function(name="shoop_admin", fn=shoop_admin_template_helpers)
library.global_function(name="bs3", fn=Bootstrap3Namespace())
|
janusnic/shoop
|
shoop/admin/templatetags/shoop_admin.py
|
Python
|
agpl-3.0
| 950
|