| code stringlengths 2-1.05M | repo_name stringlengths 5-104 | path stringlengths 4-251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2-1.05M |
|---|---|---|---|---|---|
# Author: Alexander Bokovoy <abokovoy@redhat.com>
#
# Copyright (C) 2011 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import time
from ipapython import ipautil, dogtag
from ipapython.platform import base, redhat, systemd
from ipapython.ipa_log_manager import root_logger
from ipalib import api
# All that we allow exporting directly from this module.
# Everything else is made available through these symbols when they are
# directly imported into ipapython.services:
# authconfig -- class reference for the platform-specific implementation of
#               authconfig(8)
# service -- class reference for the platform-specific implementation of a
#            PlatformService class
# knownservices -- factory instance to access named services IPA cares about;
#                  names are in ipapython.services.wellknownservices
# backup_and_replace_hostname -- platform-specific way to set the hostname and
#                                make it persistent over reboots
# restore_network_configuration -- platform-specific way of restoring network
#                                  configuration (e.g. static hostname)
# restore_context -- platform-specific way to restore the security context, if
#                    applicable
# check_selinux_status -- platform-specific way to see if SELinux is enabled
#                         and restorecon is installed.
__all__ = ['authconfig', 'service', 'knownservices',
'backup_and_replace_hostname', 'restore_context', 'check_selinux_status',
'restore_network_configuration', 'timedate_services']
# Just copy a referential list of timedate services
timedate_services = list(base.timedate_services)
# To begin with, just remap names to add .service.
# As more services migrate to systemd, unit names will deviate and the
# mapping will be kept in this dictionary.
system_units = dict(map(lambda x: (x, "%s.service" % (x)), base.wellknownservices))
system_units['rpcgssd'] = 'nfs-secure.service'
system_units['rpcidmapd'] = 'nfs-idmap.service'
# Rewrite dirsrv and pki-tomcatd services as they support instances via a
# separate service generator. To make this work, one needs to have both
# foo@.service and foo.target -- the latter is used when a request should go to
# all instances (like stop). systemd, unfortunately, does not allow one
# to request an action for all service instances at once if only the
# foo@.service unit is available. In addition, if any of those services need to
# be started/stopped automagically, one needs to manually create symlinks in
# /etc/systemd/system/foo.target.wants/ (look into systemd.py's enable()
# code).
system_units['dirsrv'] = 'dirsrv@.service'
# Our directory server instance for PKI is dirsrv@PKI-IPA.service
system_units['pkids'] = 'dirsrv@PKI-IPA.service'
# Old style PKI instance
system_units['pki-cad'] = 'pki-cad@pki-ca.service'
system_units['pki_cad'] = system_units['pki-cad']
# Our PKI instance is pki-tomcatd@pki-tomcat.service
system_units['pki-tomcatd'] = 'pki-tomcatd@pki-tomcat.service'
system_units['pki_tomcatd'] = system_units['pki-tomcatd']
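# A minimal sketch (added for illustration, not part of the original module)
# showing how a service name resolves to a systemd unit name with the mapping
# above; it mirrors the logic in Fedora16Service.__init__ below:
def _example_unit_name(service_name):
    # known services use the explicit remapping table
    if service_name in system_units:
        return system_units[service_name]
    # a bare name like 'foo' defaults to 'foo.service'
    if '.' not in service_name:
        return "%s.service" % (service_name)
    return service_name

# e.g. _example_unit_name('httpd') == 'httpd.service'
#      _example_unit_name('pkids') == 'dirsrv@PKI-IPA.service'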
class Fedora16Service(systemd.SystemdService):
def __init__(self, service_name):
systemd_name = service_name
if service_name in system_units:
systemd_name = system_units[service_name]
else:
if len(service_name.split('.')) == 1:
                # If service_name does not contain a dot, it is neither a
                # foo.service nor a foo.target name. That is not a valid
                # systemd unit name, so default to the foo.service style.
systemd_name = "%s.service" % (service_name)
super(Fedora16Service, self).__init__(service_name, systemd_name)
# Special handling of the directory server service
#
# We need to explicitly enable instances to install proper symlinks as
# dirsrv.target.wants/ dependencies. The standard systemd service class does it
# on the enable() method call. Unfortunately, ipa-server-install does not do an
# explicit dirsrv.enable() because the service startup is handled by ipactl.
#
# If we did not do this, our instances would not be started, as systemd would
# not have any clue about the instances (PKI-IPA and the domain we serve) at
# all. Thus, hook into dirsrv.restart().
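# Illustrative (assumed) layout after enabling an instance named EXAMPLE-COM;
# this is only a sketch of the symlink mechanism described above:
#
#   /etc/systemd/system/dirsrv.target.wants/dirsrv@EXAMPLE-COM.service
#       -> the dirsrv@.service template unit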
class Fedora16DirectoryService(Fedora16Service):
def enable(self, instance_name=""):
super(Fedora16DirectoryService, self).enable(instance_name)
dirsrv_systemd = "/etc/sysconfig/dirsrv.systemd"
if os.path.exists(dirsrv_systemd):
            # We need to set LimitNOFILE=8192 in the dirsrv@.service unit.
            # Since 389-ds-base-1.2.10-0.8.a7 the configuration of the
            # service parameters is performed via the
            # /etc/sysconfig/dirsrv.systemd file, which is imported by systemd
            # into the dirsrv@.service unit.
            replacevars = {'LimitNOFILE': '8192'}
            ipautil.inifile_replace_variables(dirsrv_systemd, 'service', replacevars=replacevars)
            restore_context(dirsrv_systemd)
            ipautil.run(["/bin/systemctl", "--system", "daemon-reload"], raiseonerr=False)
def restart(self, instance_name="", capture_output=True, wait=True):
if len(instance_name) > 0:
elements = self.systemd_name.split("@")
srv_etc = os.path.join(self.SYSTEMD_ETC_PATH, self.systemd_name)
srv_tgt = os.path.join(self.SYSTEMD_ETC_PATH, self.SYSTEMD_SRV_TARGET % (elements[0]))
srv_lnk = os.path.join(srv_tgt, self.service_instance(instance_name))
if not os.path.exists(srv_etc):
self.enable(instance_name)
elif not os.path.samefile(srv_etc, srv_lnk):
os.unlink(srv_lnk)
os.symlink(srv_etc, srv_lnk)
super(Fedora16DirectoryService, self).restart(instance_name, capture_output=capture_output, wait=wait)
# Enforce restart of IPA services when we enable them.
# This gets around the fact that after ipa-server-install systemd thinks
# ipa.service is not yet started, but all services were actually started
# already.
class Fedora16IPAService(Fedora16Service):
def enable(self, instance_name=""):
super(Fedora16IPAService, self).enable(instance_name)
self.restart(instance_name)
class Fedora16SSHService(Fedora16Service):
def get_config_dir(self, instance_name=""):
return '/etc/ssh'
class Fedora16CAService(Fedora16Service):
def __wait_until_running(self):
# We must not wait for the httpd proxy if httpd is not set up yet.
# Unfortunately, knownservices.httpd.is_installed() can return
# false positives, so check for existence of our configuration file.
# TODO: Use a cleaner solution
if not os.path.exists('/etc/httpd/conf.d/ipa.conf'):
root_logger.debug(
'The httpd proxy is not installed, skipping wait for CA')
return
if dogtag.install_constants.DOGTAG_VERSION < 10:
# The server status information isn't available on DT 9
root_logger.debug('Using Dogtag 9, skipping wait for CA')
return
root_logger.debug('Waiting until the CA is running')
timeout = api.env.startup_timeout
op_timeout = time.time() + timeout
while time.time() < op_timeout:
status = dogtag.ca_status()
root_logger.debug('The CA status is: %s' % status)
if status == 'running':
break
root_logger.debug('Waiting for CA to start...')
time.sleep(1)
else:
raise RuntimeError('CA did not start in %ss' % timeout)
def start(self, instance_name="", capture_output=True, wait=True):
super(Fedora16CAService, self).start(
instance_name, capture_output=capture_output, wait=wait)
if wait:
self.__wait_until_running()
def restart(self, instance_name="", capture_output=True, wait=True):
super(Fedora16CAService, self).restart(
instance_name, capture_output=capture_output, wait=wait)
if wait:
self.__wait_until_running()
# Redirect the directory server service through a special subclass due to its
# special handling of instances
def f16_service(name):
if name == 'dirsrv':
return Fedora16DirectoryService(name)
if name == 'ipa':
return Fedora16IPAService(name)
if name == 'sshd':
return Fedora16SSHService(name)
if name in ('pki-cad', 'pki_cad', 'pki-tomcatd', 'pki_tomcatd'):
return Fedora16CAService(name)
return Fedora16Service(name)
class Fedora16Services(base.KnownServices):
def __init__(self):
services = dict()
for s in base.wellknownservices:
services[s] = f16_service(s)
# Call base class constructor. This will lock services to read-only
super(Fedora16Services, self).__init__(services)
def restore_context(filepath, restorecon='/usr/sbin/restorecon'):
return redhat.restore_context(filepath, restorecon)
def check_selinux_status(restorecon='/usr/sbin/restorecon'):
return redhat.check_selinux_status(restorecon)
authconfig = redhat.authconfig
service = f16_service
knownservices = Fedora16Services()
backup_and_replace_hostname = redhat.backup_and_replace_hostname
restore_network_configuration = redhat.restore_network_configuration
| repo_name: hatchetation/freeipa | path: ipapython/platform/fedora16.py | language: Python | license: gpl-3.0 | size: 9,991 |
from factory.django import DjangoModelFactory
from booking.models import Building, Place, Booking, BookTime
from faker import Factory as FakerFactory
from factory import *
import factory
from factory.fuzzy import FuzzyChoice, FuzzyDate, FuzzyInteger
from user.factories import UserFactory, UserProfileFactory
from datetime import date, datetime, time
from random import randrange, randint
from django.core.management import call_command
fake = FakerFactory.create()
class BuildingFactory(DjangoModelFactory):
class Meta:
model = Building
django_get_or_create = ('name',)
name = factory.Sequence(lambda x: 'Building %s' % x)
class PlaceFactory(DjangoModelFactory):
class Meta:
model = Place
django_get_or_create = ('name', 'capacity', 'is_laboratory',
'building')
name = factory.Sequence(lambda x: 'I%s' % x)
capacity = factory.Sequence(lambda x: '%s' % x)
is_laboratory = False
building = factory.SubFactory(BuildingFactory)
class BookTimeFactory(DjangoModelFactory):
class Meta:
model = BookTime
django_get_or_create = ('start_hour', 'end_hour', 'date_booking')
    # NB: evaluated once at class-definition time, so every BookTimeFactory
    # instance shares the same randomly chosen start_hour.
    start_hour = time(hour=randrange(0, 22, 2), minute=randrange(0, 50, 10))
date_booking = date.today()
@factory.lazy_attribute
def end_hour(self):
hour = randrange(self.start_hour.hour, 22, 2)
minute = randrange(0, 50, 10)
return time(hour=hour, minute=minute)
class BookingFactory(DjangoModelFactory):
class Meta:
model = Booking
django_get_or_create = ('user', 'place', 'name', 'start_date',
'end_date')
place = factory.SubFactory(PlaceFactory)
name = factory.Sequence(lambda x: 'Testando %s' % x)
start_date = datetime.now()
end_date = fake.date_time_this_year(before_now=False, after_now=True)
@factory.lazy_attribute
def user(self):
userprofile = UserProfileFactory.create()
return userprofile.user
@factory.post_generation
def times(self, create, extracted, **kwargs):
if create:
num_created = 0
start_date = self.start_date
            while start_date.date() < self.end_date.date():
start_date = fake.date_time_between_dates(start_date,
self.end_date)
book = BookTimeFactory(date_booking=start_date)
book.save()
self.time.add(book)
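# A minimal usage sketch (assumes a configured Django test database; the
# 'time' accessor is taken from self.time.add above):
#
#   booking = BookingFactory.create()   # creates user, place and building too
#   booking.time.all()                  # BookTime rows added by the post_generation hook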
| repo_name: fga-gpp-mds/2016.2-SAS_FGA | path: sas/booking/factories.py | language: Python | license: gpl-3.0 | size: 2,595 |
# coding=utf-8
"""
Cadasta **Project Update Dialog.**
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
import logging
from cadasta.gui.tools.wizard.step_project_update01 import (
StepProjectUpdate01
)
from cadasta.gui.tools.wizard.step_project_update02 import (
StepProjectUpdate02
)
from cadasta.gui.tools.wizard.step_project_update03 import (
StepProjectUpdate03
)
from cadasta.utilities.i18n import tr
from cadasta.gui.tools.wizard.wizard_dialog import WizardDialog
__copyright__ = "Copyright 2016, Cadasta"
__license__ = "GPL version 3"
__email__ = "info@kartoza.org"
__revision__ = '$Format:%H$'
LOGGER = logging.getLogger('CadastaQGISPlugin')
class ProjectUpdateWizard(WizardDialog):
"""Dialog implementation class for Project Update Wizard"""
step_project_update01 = None
step_project_update02 = None
step_project_update03 = None
def __init__(self, parent=None, iface=None):
"""Constructor for the dialog.
.. note:: In QtDesigner the advanced editor's predefined keywords
            list should be shown in English always, so when adding entries to
cboKeyword, be sure to choose :safe_qgis:`Properties<<` and untick
the :safe_qgis:`translatable` property.
:param parent: Parent widget of this dialog.
:type parent: QWidget
:param iface: QGIS QGisAppInterface instance.
:type iface: QGisAppInterface
"""
super(ProjectUpdateWizard, self).__init__(parent, iface)
self.project = None
self.parties = None
self.locations = None
self.layer = None
self.set_subtitle(
tr('Cadasta project update wizard')
)
def first_step(self):
"""Returns the first step of wizard.
:return: First step of wizard.
:rtype: WizardStep
"""
return self.step_project_update01
def last_step(self):
"""Returns the last step of wizard.
:return: Last step of wizard.
:rtype: WizardStep
"""
return self.step_project_update03
def populate_stacked_widget(self):
"""Append widgets to stacked widget."""
self.step_project_update01 = StepProjectUpdate01(self)
self.step_project_update02 = StepProjectUpdate02(self)
self.step_project_update03 = StepProjectUpdate03(self)
self.stackedWidget.addWidget(self.step_project_update01)
self.stackedWidget.addWidget(self.step_project_update02)
self.stackedWidget.addWidget(self.step_project_update03)
def prepare_the_next_step(self, new_step):
"""Prepare the next tab.
:param new_step: New tab to be prepared.
:type new_step: WizardStep
"""
if new_step == self.step_project_update02:
self.project = \
self.step_project_update01.selected_project()
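# A minimal usage sketch (hypothetical QGIS context; only illustrative, since
# the base WizardDialog may drive these calls itself):
#
#   wizard = ProjectUpdateWizard(iface=iface)
#   wizard.populate_stacked_widget()
#   wizard.first_step()   # -> the StepProjectUpdate01 instance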
| repo_name: Cadasta/cadasta-qgis-plugin | path: cadasta/gui/tools/wizard/project_update_wizard.py | language: Python | license: gpl-3.0 | size: 3,103 |
def solution(x):
"""
:param int:
:return: print step
"""
if x == 0:
print 0
return
step = 1
length = 0
while length < x:
length += step
if length == x:
print step
return
step += 1
    # the loop overshot: length == 1 + 2 + ... + (step-1) is now greater than x
    step -= 1               # step = the last increment that was added
    length = length - step  # undo it ...
    step -= 1               # ... so that length == 1 + ... + step, the last sum below x
    ret1 = step + 2*(x-length)   # stop short, cover the remainder at cost 2 per unit
    step += 1
    length += step
    ret2 = step + 2*(length-x)   # overshoot by one more step, come back at 2 per unit
    print min(ret1, ret2)
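# A worked trace (illustrative): for x == 4 the loop exits with length == 6,
# giving ret1 = 2 + 2*(4-3) = 4 and ret2 = 3 + 2*(6-4) = 7, so 4 is printed.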
while 1:
s = raw_input()
if s != '':
if int(s) >= 0:
solution(int(s))
else:
solution(-int(s))
else:
break
| repo_name: zymtech/leetcode | path: interview/leshi/2.py | language: Python | license: gpl-3.0 | size: 617 |
#
# Copyright (c) 2013 Markus Eliasson, http://www.quarterapp.com/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
import functools
import tornado.web
from tornado.options import options
from ..domain import NotLoggedInError, User
def authenticated_user(method):
"""
Decorate methods with this to require that the user be logged in.
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
user = self.get_current_user()
if not user or not user.active() or not self.application.storage.authenticate_user(user.id):
if self.request.method in ("GET", "HEAD"):
url = self.get_login_url()
self.redirect(url)
return
raise tornado.web.HTTPError(403)
return method(self, *args, **kwargs)
return wrapper
def authenticated_admin(method):
"""
Decorate methods with this to require that user is admin.
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
user = self.get_current_user()
if not user or not self.application.storage.authenticate_admin(user.id):
raise tornado.web.HTTPError(403)
elif not user.is_admin():
raise tornado.web.HTTPError(403)
return method(self, *args, **kwargs)
return wrapper
class QuarterUserEncoder(json.JSONEncoder):
"""
JSON encoder for quarterapp's User object
"""
    def default(self, obj):
        if isinstance(obj, User):
            return {"id": obj.id, "username": obj.username, "password": obj.password, "last_login": obj.last_login, "type": obj.type, "state": obj.state}
        # Fall back to the base implementation, which raises TypeError for unsupported types
        return json.JSONEncoder.default(self, obj)
class QuarterUserDecoder(json.JSONDecoder):
"""
JSON decoder for quarterapp's User object
"""
def decode(self, user_string):
user_json = json.loads(user_string)
user = User(id=user_json["id"], username=user_json["username"], password=user_json["password"],
last_login=user_json["last_login"], type=user_json["type"], state=user_json["state"])
return user
class BaseHandler(tornado.web.RequestHandler):
"""
All handlers in quarterapp should be derived from this handler. Contains utility
functions regarding logging in and reading options.
"""
def get_current_user(self):
user_json = self.get_secure_cookie("user")
if not user_json:
return None
return QuarterUserDecoder().decode(user_json)
def set_current_user(self, user):
if user:
self.set_secure_cookie("user", QuarterUserEncoder().encode(user))
else:
self.clear_cookie("user")
def logged_in(self):
"""
        Check if the user of the current request is logged in or not.
@return True if logged in, else False
"""
        user = self.get_secure_cookie("user")
        return bool(user)
def enabled(self, setting):
"""
Check if the given setting is enabled
Args:
setting - The setting to check
Returns:
True if setting is enabled, else False
"""
return self.application.quarter_settings.get_value(setting) == "1"
class AuthenticatedHandler(BaseHandler):
"""
Base class for any handler that needs user to be authenticated
"""
def user(self):
"""
Get the current user as a User object, or raises a NotLoggedInError
"""
user = self.get_current_user()
if not user:
raise NotLoggedInError("Unauthorized")
return user
class NoCacheHandler(tornado.web.RequestHandler):
def set_extra_headers(self, path):
self.set_header('Cache-Control', 'no-cache, must-revalidate')
self.set_header('Expires', '0')
class Http404Handler(BaseHandler):
def get(self):
self.set_status(404)
self.render(u"../resources/templates/404.html",
path=self.request.path,
options=options)
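# A minimal usage sketch (hypothetical handler, not part of quarterapp)
# showing how the decorators above are meant to be applied:
class _ExampleReportHandler(BaseHandler):
    @authenticated_user
    def get(self):
        # Reached only when the cookie decodes to an active, authenticated
        # user; anonymous GETs are redirected to the login url instead.
        self.write(u"Hello, %s" % self.get_current_user().username)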
| repo_name: OlofFredriksson/quarterapp | path: quarterapp/handlers/base.py | language: Python | license: gpl-3.0 | size: 4,657 |
"""
Copyright 2008, 2009, 2015 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
class Element(object):
def __init__(self, parent=None):
self._parent = parent
self._error_messages = list()
##################################################
# Element Validation API
##################################################
def validate(self):
"""
Validate this element and call validate on all children.
Call this base method before adding error messages in the subclass.
"""
del self._error_messages[:]
for child in self.get_children():
child.validate()
def is_valid(self):
"""
Is this element valid?
Returns:
true when the element is enabled and has no error messages or is bypassed
"""
return (not self.get_error_messages() or not self.get_enabled()) or self.get_bypassed()
def add_error_message(self, msg):
"""
Add an error message to the list of errors.
Args:
msg: the error message string
"""
self._error_messages.append(msg)
def get_error_messages(self):
"""
Get the list of error messages from this element and all of its children.
Do not include the error messages from disabled or bypassed children.
Cleverly indent the children error messages for printing purposes.
Returns:
a list of error message strings
"""
error_messages = list(self._error_messages) # Make a copy
for child in filter(lambda c: c.get_enabled() and not c.get_bypassed(), self.get_children()):
for msg in child.get_error_messages():
error_messages.append("{}:\n\t{}".format(child, msg.replace("\n", "\n\t")))
return error_messages
def rewrite(self):
"""
Rewrite this element and call rewrite on all children.
Call this base method before rewriting the element.
"""
for child in self.get_children():
child.rewrite()
def get_enabled(self):
return True
def get_bypassed(self):
return False
##############################################
# Tree-like API
##############################################
def get_parent(self):
return self._parent
def get_children(self):
return list()
##############################################
# Type testing
##############################################
is_platform = False
is_flow_graph = False
is_block = False
is_dummy_block = False
is_connection = False
is_port = False
is_param = False
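# A minimal sketch (hypothetical subclass, not part of GRC) showing the
# validation API described above:
class _ExamplePort(Element):
    is_port = True

    def validate(self):
        # call the base method first: it clears old messages and validates children
        Element.validate(self)
        self.add_error_message("example failure: port type not set")

# p = _ExamplePort(); p.validate(); p.is_valid()  ->  False
# (the element is enabled, not bypassed, and carries an error message)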
| repo_name: glennlive/gnuradio-wg-grc | path: grc/core/Element.py | language: Python | license: gpl-3.0 | size: 3,410 |
"""
mediatum - a multimedia content repository
Copyright (C) 2007 Arne Seifert <seiferta@in.tum.de>
Copyright (C) 2007 Matthias Kramm <kramm@in.tum.de>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from core.translation import translate, lang
def makeList(req, own_ruleset_assocs, inherited_ruleset_assocs, special_ruleset, special_rule_assocs,
rulesetnamelist, private_ruleset_names, rule_type=''):
    already_shown_left = {}  # ruleset names in left-hand-side lists will not be shown on the right side
val_left = []
val_right = []
# inherited rulesets
inherited_ruleset_names = [r.ruleset_name for r in inherited_ruleset_assocs]
for rulesetname in inherited_ruleset_names:
if rulesetname in private_ruleset_names:
val_left.append(
"""<optgroup label="%s"></optgroup>""" % (translate("edit_acl_special_rule", lang(req))))
else:
val_left.append("""<optgroup label="%s"></optgroup>""" % rulesetname)
already_shown_left[rulesetname] = 1
# node level rulesets (not inherited)
own_ruleset_names = [r.ruleset_name for r in own_ruleset_assocs]
for rulesetname in own_ruleset_names:
if rulesetname in private_ruleset_names:
entry_text = translate("edit_acl_special_rule", lang(req))
            val_left.append(
                """<option value="__special_rule__">%s</option>""" % (entry_text, ))
else:
val_left.append("""<option value="%s">%s</option>""" % (rulesetname, rulesetname))
already_shown_left[rulesetname] = 1
for rulesetname in rulesetnamelist:
if rulesetname not in already_shown_left:
val_right.append("""<option value="%s">%s</option>""" % (rulesetname, rulesetname))
res = {"name": rule_type, "val_left": "".join(val_left), "val_right": "".join(val_right), "type": rule_type}
return res
| repo_name: mediatum/mediatum | path: web/common/acl_editor_web.py | language: Python | license: gpl-3.0 | size: 2,490 |
#! /usr/bin/env python
from pySecDec.loop_integral import loop_package
import pySecDec as psd
# Example used to demonstrate symmetry finder in Stephen Jones ACAT Proceedings 2017
li = psd.loop_integral.LoopIntegralFromGraph(
internal_lines = [ [0,[1,4]], [0,[1,5]], [0,[2,3]], [0,[2,7]], [0,[3,8]], [0,[4,6]], [0,[5,6]], [0,[5,7]], [0,[7,8]], [0,[6,8]] ],
external_lines = [['p1',1],['p2',2],['p3',3],['p4',4]],
powerlist=[1,1,1,1,1,1,1,1,1,1],
replacement_rules = [
('p1*p1', 0),
('p2*p2', 0),
('p3*p3', 0),
('p4*p4', 0),
('p1*p2', 's/2'),
('p2*p3', 't/2'),
('p1*p3', '-s/2-t/2')
]
)
Mandelstam_symbols = ['s', 't']
mass_symbols = []
loop_package(
name = 'box3L',
loop_integral = li,
real_parameters = Mandelstam_symbols + mass_symbols,
requested_order = 0,
decomposition_method = 'iterative',
contour_deformation = False,
use_Pak = True,
use_dreadnaut = False
)
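# Illustrative workflow (version-dependent, an assumption rather than part of
# this script): running it writes a 'box3L' package directory with generated
# integration code, which is then compiled and evaluated numerically for given
# values of the real parameters s and t.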
| repo_name: mppmu/secdec | path: nodist_examples/box3L/box3L.py | language: Python | license: gpl-3.0 | size: 1,060 |
from meowth.core.data_manager import schema
def setup(bot):
team_table = bot.dbi.table('teams')
# color_table = bot.dbi.table('colors')
team_table.new_columns = [
schema.IDColumn('team_id', primary_key=True),
schema.IDColumn('color_id', unique=True,
# foreign_key=colors['color_id']
),
schema.StringColumn('identifier', unique=True),
schema.StringColumn('emoji', unique=True)
]
team_names_table = bot.dbi.table('team_names')
languages = bot.dbi.table('languages')
team_names_table.new_columns = [
schema.IDColumn('team_id', primary_key=True),
schema.IDColumn('language_id', primary_key=True,
foreign_key=languages['language_id']),
schema.StringColumn('team_name')
]
team_table.initial_data = [
{
"team_id": 1,
"color_id": 2,
"identifier": "mystic",
"emoji": ':mystic:'
},
{
"team_id": 2,
"color_id": 10,
"identifier": "instinct",
"emoji": ':instinct:'
},
{
"team_id": 3,
"color_id": 8,
"identifier": "valor",
"emoji": ':valor:'
}
]
team_names_table.initial_data = [
{
"team_id": 1,
"language_id": 9,
"team_name": "mystic"
},
{
"team_id": 1,
"language_id": 12,
"team_name": "mystic",
},
{
"team_id": 1,
"language_id": 1,
"team_name": "ミスティック",
},
{
"team_id": 1,
"language_id": 2,
"team_name": "misutikku",
},
{
"team_id": 1,
"language_id": 5,
"team_name": "sagesse",
},
{
"team_id": 1,
"language_id": 6,
"team_name": "weisheit",
},
{
"team_id": 1,
"language_id": 7,
"team_name": "sabiduría",
},
{
"team_id": 1,
"language_id": 8,
"team_name": "saggezza",
},
{
"team_id": 2,
"language_id": 9,
"team_name": "instinct",
},
{
"team_id": 2,
"language_id": 12,
"team_name": "instinct",
},
{
"team_id": 2,
"language_id": 1,
"team_name": "インスティンクト",
},
{
"team_id": 2,
"language_id": 2,
"team_name": "insutinkuto",
},
{
"team_id": 2,
"language_id": 5,
"team_name": "intuition",
},
{
"team_id": 2,
"language_id": 6,
"team_name": "intuition",
},
{
"team_id": 2,
"language_id": 7,
"team_name": "instinto",
},
{
"team_id": 2,
"language_id": 8,
"team_name": "istinto",
},
{
"team_id": 3,
"language_id": 9,
"team_name": "valor",
},
{
"team_id": 3,
"language_id": 12,
"team_name": "valour",
},
{
"team_id": 3,
"language_id": 1,
"team_name": "ヴァーラー",
},
{
"team_id": 3,
"language_id": 2,
"team_name": "ba-ra",
},
{
"team_id": 3,
"language_id": 5,
"team_name": "bravoure",
},
{
"team_id": 3,
"language_id": 6,
"team_name": "wagemut",
},
{
"team_id": 3,
"language_id": 7,
"team_name": "valor",
},
{
"team_id": 3,
"language_id": 8,
"team_name": "coraggio",
}
]
return [
team_table,
team_names_table,
]
| repo_name: FoglyOgly/Meowth | path: meowth/exts/users/tables.py | language: Python | license: gpl-3.0 | size: 4,133 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
"""
coco.config
~~~~~~~~~~~~
the configuration related objects.
copy from flask
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import types
import errno
import json
import socket
import yaml
from werkzeug.utils import import_string
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
root_path = os.environ.get("COCO_PATH")
if not root_path:
root_path = BASE_DIR
class ConfigAttribute(object):
"""Makes an attribute forward to the config"""
def __init__(self, name, get_converter=None):
self.__name__ = name
self.get_converter = get_converter
def __get__(self, obj, type=None):
if obj is None:
return self
rv = obj.config[self.__name__]
if self.get_converter is not None:
rv = self.get_converter(rv)
return rv
def __set__(self, obj, value):
obj.config[self.__name__] = value
class Config(dict):
"""Works exactly like a dict but provides ways to fill it from files
or special dictionaries. There are two common patterns to populate the
config.
Either you can fill the config from a config file::
app.config.from_pyfile('yourconfig.cfg')
Or alternatively you can define the configuration options in the
module that calls :meth:`from_object` or provide an import path to
a module that should be loaded. It is also possible to tell it to
use the same module and with that provide the configuration values
just before the call::
DEBUG = True
SECRET_KEY = 'development key'
app.config.from_object(__name__)
In both cases (loading from any Python file or loading from modules),
only uppercase keys are added to the config. This makes it possible to use
lowercase values in the config file for temporary values that are not added
to the config or to define the config keys in the same file that implements
the application.
Probably the most interesting way to load configurations is from an
environment variable pointing to a file::
app.config.from_envvar('YOURAPPLICATION_SETTINGS')
In this case before launching the application you have to set this
environment variable to the file you want to use. On Linux and OS X
use the export statement::
export YOURAPPLICATION_SETTINGS='/path/to/config/file'
On windows use `set` instead.
:param root_path: path to which files are read relative from. When the
config object is created by the application, this is
the application's :attr:`~flask.Flask.root_path`.
:param defaults: an optional dictionary of default values
"""
def __init__(self, root_path, defaults=None):
self.defaults = defaults or {}
self.root_path = root_path
super(Config, self).__init__({})
def from_envvar(self, variable_name, silent=False):
"""Loads a configuration from an environment variable pointing to
a configuration file. This is basically just a shortcut with nicer
error messages for this line of code::
app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS'])
:param variable_name: name of the environment variable
:param silent: set to ``True`` if you want silent failure for missing
files.
:return: bool. ``True`` if able to load config, ``False`` otherwise.
"""
rv = os.environ.get(variable_name)
if not rv:
if silent:
return False
raise RuntimeError('The environment variable %r is not set '
'and as such configuration could not be '
'loaded. Set this variable and make it '
'point to a configuration file' %
variable_name)
return self.from_pyfile(rv, silent=silent)
def from_pyfile(self, filename, silent=False):
"""Updates the values in the config from a Python file. This function
behaves as if the file was imported as module with the
:meth:`from_object` function.
:param filename: the filename of the config. This can either be an
absolute filename or a filename relative to the
root path.
:param silent: set to ``True`` if you want silent failure for missing
files.
.. versionadded:: 0.7
`silent` parameter.
"""
filename = os.path.join(self.root_path, filename)
d = types.ModuleType('config')
d.__file__ = filename
try:
with open(filename, mode='rb') as config_file:
exec(compile(config_file.read(), filename, 'exec'), d.__dict__)
except IOError as e:
if silent and e.errno in (errno.ENOENT, errno.EISDIR):
return False
e.strerror = 'Unable to load configuration file (%s)' % e.strerror
raise
self.from_object(d)
return True
def from_object(self, obj):
"""Updates the values from the given object. An object can be of one
of the following two types:
- a string: in this case the object with that name will be imported
- an actual object reference: that object is used directly
Objects are usually either modules or classes. :meth:`from_object`
loads only the uppercase attributes of the module/class. A ``dict``
object will not work with :meth:`from_object` because the keys of a
``dict`` are not attributes of the ``dict`` class.
Example of module-based configuration::
app.config.from_object('yourapplication.default_config')
from yourapplication import default_config
app.config.from_object(default_config)
You should not use this function to load the actual configuration but
rather configuration defaults. The actual config should be loaded
with :meth:`from_pyfile` and ideally from a location not within the
package because the package might be installed system wide.
See :ref:`config-dev-prod` for an example of class-based configuration
using :meth:`from_object`.
:param obj: an import name or object
"""
if isinstance(obj, str):
obj = import_string(obj)
for key in dir(obj):
if key.isupper():
self[key] = getattr(obj, key)
def from_json(self, filename, silent=False):
"""Updates the values in the config from a JSON file. This function
behaves as if the JSON object was a dictionary and passed to the
:meth:`from_mapping` function.
:param filename: the filename of the JSON file. This can either be an
absolute filename or a filename relative to the
root path.
:param silent: set to ``True`` if you want silent failure for missing
files.
.. versionadded:: 0.11
"""
filename = os.path.join(self.root_path, filename)
try:
with open(filename) as json_file:
obj = json.loads(json_file.read())
except IOError as e:
if silent and e.errno in (errno.ENOENT, errno.EISDIR):
return False
e.strerror = 'Unable to load configuration file (%s)' % e.strerror
raise
return self.from_mapping(obj)
def from_yaml(self, filename, silent=False):
if self.root_path:
filename = os.path.join(self.root_path, filename)
try:
with open(filename) as f:
obj = yaml.safe_load(f)
except IOError as e:
if silent and e.errno in (errno.ENOENT, errno.EISDIR):
return False
e.strerror = 'Unable to load configuration file (%s)' % e.strerror
raise
if obj:
return self.from_mapping(obj)
return True
def from_mapping(self, *mapping, **kwargs):
"""Updates the config like :meth:`update` ignoring items with non-upper
keys.
.. versionadded:: 0.11
"""
mappings = []
if len(mapping) == 1:
if hasattr(mapping[0], 'items'):
mappings.append(mapping[0].items())
else:
mappings.append(mapping[0])
elif len(mapping) > 1:
raise TypeError(
'expected at most 1 positional argument, got %d' % len(mapping)
)
mappings.append(kwargs.items())
for mapping in mappings:
for (key, value) in mapping:
if key.isupper():
self[key] = value
return True
def get_namespace(self, namespace, lowercase=True, trim_namespace=True):
"""Returns a dictionary containing a subset of configuration options
that match the specified namespace/prefix. Example usage::
app.config['IMAGE_STORE_TYPE'] = 'fs'
app.config['IMAGE_STORE_PATH'] = '/var/app/images'
app.config['IMAGE_STORE_BASE_URL'] = 'http://img.website.com'
image_store_config = app.config.get_namespace('IMAGE_STORE_')
The resulting dictionary `image_store_config` would look like::
{
            'type': 'fs',
'path': '/var/app/images',
'base_url': 'http://img.website.com'
}
This is often useful when configuration options map directly to
keyword arguments in functions or class constructors.
:param namespace: a configuration namespace
:param lowercase: a flag indicating if the keys of the resulting
dictionary should be lowercase
:param trim_namespace: a flag indicating if the keys of the resulting
dictionary should not include the namespace
.. versionadded:: 0.11
"""
rv = {}
for k, v in self.items():
if not k.startswith(namespace):
continue
if trim_namespace:
key = k[len(namespace):]
else:
key = k
if lowercase:
key = key.lower()
rv[key] = v
return rv
def convert_type(self, k, v):
default_value = self.defaults.get(k)
if default_value is None:
return v
tp = type(default_value)
        # Special handling for bool values
if tp is bool and isinstance(v, str):
if v in ("true", "True", "1"):
return True
else:
return False
if tp in [list, dict] and isinstance(v, str):
try:
v = json.loads(v)
return v
except json.JSONDecodeError:
return v
try:
v = tp(v)
except Exception:
pass
return v
def __getitem__(self, item):
        # First, use explicitly set values
try:
value = super().__getitem__(item)
except KeyError:
value = None
if value is not None:
return value
        # Next, fall back to environment variables
value = os.environ.get(item, None)
if value is not None:
return self.convert_type(item, value)
return self.defaults.get(item)
def __getattr__(self, item):
return self.__getitem__(item)
def __setattr__(self, key, value):
return self.__setitem__(key, value)
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, dict.__repr__(self))
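# A minimal usage sketch (hypothetical file names; mirrors load_user_config()
# further below). Lookups prefer explicitly set values, then environment
# variables, then the defaults mapping:
#
#   cfg = Config(root_path, defaults)
#   cfg.from_yaml('config.yml')   # or cfg.from_object('conf.config')
#   cfg.CORE_HOST                 # __getattr__ falls through to __getitem__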
access_key_path = os.path.abspath(
os.path.join(root_path, 'data', 'keys', '.access_key')
)
host_key_path = os.path.abspath(
os.path.join(root_path, 'data', 'keys', 'host_rsa_key')
)
defaults = {
'NAME': socket.gethostname(),
'CORE_HOST': 'http://127.0.0.1:8080',
'BOOTSTRAP_TOKEN': '',
'ROOT_PATH': root_path,
'DEBUG': True,
'BIND_HOST': '0.0.0.0',
'SSHD_PORT': 2222,
'HTTPD_PORT': 5000,
'COCO_ACCESS_KEY': '',
'ACCESS_KEY_FILE': access_key_path,
'HOST_KEY_FILE': host_key_path,
'SECRET_KEY': 'SDK29K03%MM0ksf',
'LOG_LEVEL': 'INFO',
'LOG_DIR': os.path.join(root_path, 'data', 'logs'),
'REPLAY_DIR': os.path.join(root_path, 'data', 'replays'),
'ASSET_LIST_SORT_BY': 'hostname', # hostname, ip
'TELNET_REGEX': '',
'PASSWORD_AUTH': True,
'PUBLIC_KEY_AUTH': True,
'SSH_TIMEOUT': 15,
'ALLOW_SSH_USER': [],
'BLOCK_SSH_USER': [],
'HEARTBEAT_INTERVAL': 20,
'MAX_CONNECTIONS': 500, # Not use now
'ADMINS': '',
'COMMAND_STORAGE': {'TYPE': 'server'}, # server
'REPLAY_STORAGE': {'TYPE': 'server'},
'LANGUAGE_CODE': 'zh',
'SECURITY_MAX_IDLE_TIME': 60,
'ASSET_LIST_PAGE_SIZE': 'auto',
'SFTP_ROOT': '/tmp',
'SFTP_SHOW_HIDDEN_FILE': False,
'UPLOAD_FAILED_REPLAY_ON_START': True,
'REUSE_CONNECTION': True,
'FORCE_REMOVE_FOLDER': False,
'TELNET_TTYPE': 'XTERM-256COLOR',
'ENABLE_PROXY_PROTOCOL': False
}
def load_from_object(config):
try:
from conf import config as c
config.from_object(c)
return True
except ImportError:
pass
return False
def load_from_yml(config):
for i in ['config.yml', 'config.yaml']:
if not os.path.isfile(os.path.join(config.root_path, i)):
continue
loaded = config.from_yaml(i)
if loaded:
return True
return False
def load_user_config():
sys.path.insert(0, root_path)
config = Config(root_path, defaults)
loaded = load_from_object(config)
if not loaded:
loaded = load_from_yml(config)
if not loaded:
msg = """
Error: No config file found.
You can run `cp config_example.yml config.yml`, and edit it.
"""
raise ImportError(msg)
return config
config = load_user_config()
old_host_key_path = os.path.join(root_path, 'keys', 'host_rsa_key')
old_access_key_path = os.path.join(root_path, 'keys', '.access_key')
if os.path.isfile(old_host_key_path) and not os.path.isfile(config.HOST_KEY_FILE):
config.HOST_KEY_FILE = old_host_key_path
if os.path.isfile(old_access_key_path) and not os.path.isfile(config.ACCESS_KEY_FILE):
config.ACCESS_KEY_FILE = old_access_key_path
| repo_name: jumpserver/coco | path: coco/conf.py | language: Python | license: gpl-3.0 | size: 14,583 |
# -*- coding: utf-8 -*-
"""
HipparchiaServer: an interface to a database of Greek and Latin texts
Copyright: E Gunderson 2016-21
License: GNU GENERAL PUBLIC LICENSE 3
(see LICENSE in the top level directory of the distribution)
"""
import re
from flask import session
from server.hipparchiaobjects.searchobjects import SearchObject
from server.listsandsession.genericlistfunctions import tidyuplist, foundindict
from server.listsandsession.sessionfunctions import reducetosessionselections
from server.listsandsession.checksession import justlatin
from server.startup import allincerta, allvaria
def compilesearchlist(listmapper: dict, s: dict) -> list:
"""
master author dict + session selctions into a list of dbs to search
s = session, but feel free to send frozensession
getsearchlistcontents wants just session
executesearch might as well use frozensession
:param listmapper:
:param s:
:return:
"""
searching = s['auselections'] + s['agnselections'] + s['wkgnselections'] + s['psgselections'] + s['wkselections'] \
+ s['alocselections'] + s['wlocselections']
excluding = s['auexclusions'] + s['wkexclusions'] + s['agnexclusions'] + s['wkgnexclusions'] + s['psgexclusions'] \
+ s['alocexclusions'] + s['wlocexclusions']
# trim by active corpora
ad = reducetosessionselections(listmapper, 'a')
wd = reducetosessionselections(listmapper, 'w')
searchlist = list()
# [A] build the inclusion list
if len(searching) > 0:
# build lists up from specific items (passages) to more general classes (works, then authors)
for g in s['wkgnselections']:
searchlist += foundindict(wd, 'workgenre', g)
authorlist = list()
for g in s['agnselections']:
authorlist = foundindict(ad, 'genres', g)
for a in authorlist:
for w in ad[a].listofworks:
searchlist.append(w.universalid)
del authorlist
for l in s['wlocselections']:
searchlist += foundindict(wd, 'provenance', l)
authorlist = list()
for l in s['alocselections']:
# 'Italy, Africa and the West', but you asked for 'Italy'
exactmatch = False
authorlist = foundindict(ad, 'location', l, exactmatch)
for a in authorlist:
for w in ad[a].listofworks:
searchlist.append(w.universalid)
del authorlist
# a tricky spot: when/how to apply prunebydate()
# if you want to be able to seek 5th BCE oratory and Plutarch, then you need to let auselections take precedence
# accordingly we will do classes and genres first, then trim by date, then add in individual choices
searchlist = prunebydate(searchlist, ad, wd)
# now we look at things explicitly chosen:
authors = [a for a in s['auselections']]
try:
worksof = [w.universalid for a in authors for w in ad[a].listofworks]
except KeyError:
# e.g., you had a LAT list with Cicero and then deactivated that set of authors and works
worksof = list()
works = s['wkselections']
passages = s['psgselections']
searchlist += [w for w in works] + worksof + passages
searchlist = [aw for aw in searchlist if aw]
searchlist = list(set(searchlist))
else:
# you picked nothing and want everything. well, maybe everything...
# trim by active corpora
wd = reducetosessionselections(listmapper, 'w')
searchlist = wd.keys()
if s['latestdate'] != '1500' or s['earliestdate'] != '-850':
searchlist = prunebydate(searchlist, ad, wd, s)
# [B] now start subtracting from the list of inclusions
if not s['spuria']:
searchlist = removespuria(searchlist, wd)
if not s['incerta']:
searchlist = list(set(searchlist) - set(allincerta))
if not s['varia']:
searchlist = list(set(searchlist) - set(allvaria))
# build the exclusion list
# note that we are not handling excluded individual passages yet
excludedworks = list()
if len(excluding) > 0:
excludedauthors = [a for a in s['auexclusions']]
for g in s['agnexclusions']:
excludedauthors += foundindict(ad, 'genres', g)
for l in s['alocexclusions']:
excludedauthors += foundindict(ad, 'location', l)
excludedauthors = set(excludedauthors)
# all works of all excluded authors are themselves excluded
excludedworks = [w.universalid for a in excludedauthors for w in ad[a].listofworks]
excludedworks += s['wkexclusions']
for g in s['wkgnexclusions']:
excludedworks += foundindict(wd, 'workgenre', g)
for l in s['wlocexclusions']:
excludedworks += foundindict(wd, 'provenance', l)
searchlist = list(set(searchlist) - set(excludedworks))
# print('searchlist', searchlist)
return searchlist
def sortsearchlist(searchlist: list, authorsdict: dict) -> list:
"""
send me a list of workuniversalids and i will resort it via the session sortorder
:param searchlist:
:param authorsdict:
:return:
"""
sortby = session['sortorder']
templist = list()
newlist = list()
if sortby != 'universalid':
for a in searchlist:
auid = a[0:6]
crit = getattr(authorsdict[auid], sortby)
name = authorsdict[auid].shortname
if sortby == 'converted_date':
try:
crit = float(crit)
except TypeError:
crit = 9999
templist.append([crit, a, name])
# http://stackoverflow.com/questions/5212870/sorting-a-python-list-by-two-criteria#17109098
# sorted(list, key=lambda x: (x[0], -x[1]))
templist = sorted(templist, key=lambda x: (x[0], x[2], x[1]))
for t in templist:
newlist.append(t[1])
else:
newlist = searchlist
return newlist
def sortresultslist(hits: list, searchobject: SearchObject, authorsdict: dict, worksdict: dict) -> dict:
"""
take a list of hits (which is a list of line objects)
sort it by the session sort criterion
mark the list with index numbers (because an mp function will grab this next)
in:
[<server.hipparchiaclasses.dbWorkLine object at 0x10d6625f8>, <server.hipparchiaclasses.dbWorkLine object at 0x10d662470>,...]
out:
{0: <server.hipparchiaclasses.dbWorkLine object at 0x108981780>, 1: <server.hipparchiaclasses.dbWorkLine object at 0x108981a20>, 2: <server.hipparchiaclasses.dbWorkLine object at 0x108981b70>, ...}
:param hits:
:param searchobject:
:param authorsdict:
:param worksdict:
:return:
"""
sortby = searchobject.session['sortorder']
templist = list()
for hit in hits:
auid = hit.wkuinversalid[0:6]
wkid = hit.wkuinversalid
sortablestring = authorsdict[auid].shortname + worksdict[wkid].title
if sortby == 'converted_date':
try:
crit = int(worksdict[wkid].converted_date)
if crit > 2000:
try:
crit = int(authorsdict[auid].converted_date)
except TypeError:
crit = 9999
except:
try:
crit = int(authorsdict[auid].converted_date)
except TypeError:
crit = 9999
elif sortby == 'provenance':
crit = getattr(worksdict[wkid], sortby)
elif sortby == 'location' or sortby == 'shortname' or sortby == 'authgenre':
crit = getattr(authorsdict[auid], sortby)
else:
crit = hit.wkuinversalid+str(hit.index)
templist.append([crit, sortablestring, hit.index, hit])
# http://stackoverflow.com/questions/5212870/sorting-a-python-list-by-two-criteria#17109098
templist = sorted(templist, key=lambda x: (x[0], x[1], x[2]))
hitsdict = {idx: temp[3] for idx, temp in enumerate(templist)}
return hitsdict
def calculatewholeauthorsearches(searchlist: list, authordict: dict) -> list:
"""
we have applied all of our inclusions and exclusions by this point and we might well be sitting on a pile of authorsandworks
that is really a pile of full author dbs. for example, imagine we have not excluded anything from 'Cicero'
there is no reason to search that DB work by work since that just means doing a series of "WHERE" searches
instead of a single, faster search of the whole thing: hits are turned into full citations via the info contained in the
hit itself and there is no need to derive the work from the item name sent to the dispatcher
this function will figure out if the list of work uids contains all of the works for an author and can accordingly be collapsed
this function is *much* faster (50x?) than searching via 196K WHERE clauses
timing sample shows that E is the bit you need to get right: [all gk, in, dp from -850 to 200 (97836 works)]
compiletimeA = 0.02765798568725586
compiletimeB = 0.02765798568725586
compiletimeC = 0.021197080612182617
compiletimeD = 0.00425410270690918
compiletimeE = 3.2394540309906006
[all gk, in, dp from -850 to 1500 (195825 works)]
compiletimeE = 6.753650903701782
50x faster if you make sure that complete is a set and not a list when you hand it to part E
compiletimeE = 0.1252439022064209
:param searchlist:
:param authordict:
:return:
"""
# exclusionfinder = re.compile(r'......x')
# hasexclusion = [x[0:9] for x in searchlist if re.search(exclusionfinder,x)]
# A
authorspresent = [x[0:6] for x in searchlist]
authorspresent = set(authorspresent)
# B
theoreticalpoolofworks = {w.universalid: a for a in authorspresent for w in authordict[a].listofworks }
# for a in authorspresent:
# for w in authordict[a].listofworks:
# theoreticalpoolofworks[w.universalid] = a
# C
for a in searchlist:
if a in theoreticalpoolofworks:
del theoreticalpoolofworks[a]
# D
# any remaining works in this dict correspond to authors that we are not searching completely
incomplete = [x for x in theoreticalpoolofworks.values()]
incomplete = set(incomplete)
complete = authorspresent - incomplete
# E
wholes = [x[0:6] for x in searchlist if x[0:6] in complete]
parts = [x for x in searchlist if x[0:6] not in complete]
prunedlist = list(set(wholes)) + list(set(parts))
return prunedlist
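# An illustrative run (hypothetical ids), assuming author gr0001 has exactly
# two works and author gr0002 has more than one:
#
#   calculatewholeauthorsearches(['gr0001w001', 'gr0001w002', 'gr0002w003'], authordict)
#   ->  ['gr0001', 'gr0002w003']   # order aside: gr0001 collapses into a whole-author search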
def flagexclusions(searchlist: list, s=session) -> list:
"""
some works should only be searched partially
this flags those items on the searchlist by changing their workname format
gr0001w001 becomes gr0001x001 if session['wkexclusions'] mentions gr0001w001
this function profiles as relatively slow: likely a faster way to run the loops
:param searchlist:
:param s:
:return:
"""
if len(s['psgexclusions']) == 0:
return searchlist
else:
modifiedsearchlist = list()
for w in searchlist:
for x in s['psgexclusions']:
if '_AT_' not in w and w in x:
w = re.sub('(....)w(...)', r'\1x\2', w)
modifiedsearchlist.append(w)
else:
modifiedsearchlist.append(w)
# if you apply 3 restrictions you will now have 3 copies of gr0001x001
modifiedsearchlist = tidyuplist(modifiedsearchlist)
return modifiedsearchlist
def prunebydate(searchlist: list, authorobjectdict: dict, workobjectdict: dict, s=session) -> list:
"""
send me a list of authorsandworks and i will trim it via the session date limit variables
note that 'varia' and 'incerta' need to be handled here since they have special dates:
incerta = 2500
varia = 2000
[failedtoparse = 9999]
:param searchlist:
:param authorobjectdict:
:param workobjectdict:
:param s:
:return:
"""
trimmedlist = list()
if not justlatin() and (s['earliestdate'] != '-850' or s['latestdate'] != '1500'):
# [a] first prune the bad dates
minimum = int(s['earliestdate'])
maximum = int(s['latestdate'])
if minimum > maximum:
minimum = maximum
s['earliestdate'] = s['latestdate']
for universalid in searchlist:
w = workobjectdict[universalid]
try:
# does the work have a date? if not, we will throw an exception
if w.datefallsbetween(minimum, maximum):
trimmedlist.append(universalid)
except TypeError:
# no work date? then we will look inside the author for the date
authorid = universalid[0:6]
try:
if authorobjectdict[authorid].datefallsbetween(minimum, maximum):
trimmedlist.append(universalid)
except TypeError:
# the author can't tell you his date; you must be building a list with both latin authors and something else
trimmedlist.append(universalid)
# [b] then add back in any varia and/or incerta as needed
if s['varia']:
varia = list(allvaria.intersection(searchlist))
trimmedlist += varia
if s['incerta']:
incerta = list(allincerta.intersection(searchlist))
trimmedlist += incerta
else:
trimmedlist = searchlist
return trimmedlist
def removespuria(searchlist: list, worksdict: dict) -> list:
"""
at the moment pretty crude: just look for [Sp.] or [sp.] at the end of a title
toss it from the list if you find it
:param searchlist:
:param worksdict:
:return:
"""
trimmedlist = list()
sp = re.compile(r'\[[Ss]p\.\]')
for aw in searchlist:
        wk = re.sub(r'(......)x(...)', r'\1w\2', aw[0:10])
title = worksdict[wk].title
try:
if re.search(sp, title):
for w in session['wkselections']:
if w in aw:
trimmedlist.append(aw)
for w in session['psgselections']:
if w in aw:
trimmedlist.append(aw)
else:
trimmedlist.append(aw)
except:
trimmedlist.append(aw)
return trimmedlist
| repo_name: e-gun/HipparchiaServer | path: server/listsandsession/searchlistmanagement.py | language: Python | license: gpl-3.0 | size: 12,836 |
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2014 Brian Douglass bhdouglass@gmail.com
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
from agui import Object
class ASound(Object):
def __init__(self, filename, times_to_play = 1):
self.filename = filename
self.times_to_play = times_to_play
def play(self, length = 0):
raise NotImplementedError()
def stop(self):
raise NotImplementedError()
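# A minimal sketch (hypothetical backend, not part of agui) of a concrete
# implementation of the abstract interface above:
class _SilentSound(ASound):
    def play(self, length=0):
        # a real backend would start playback of self.filename here,
        # honouring self.times_to_play
        pass

    def stop(self):
        pass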
| repo_name: bhdouglass/agui | path: agui/aextras/sound.py | language: Python | license: gpl-3.0 | size: 1,065 |
# -*- coding: utf-8 -*-
value = input().split(" ")
cod, qtd = value
cod = int(cod)
qtd = int(qtd)
if cod == 1:
print('Total: R$ %.2f' %(qtd*4.00))
elif cod == 2:
print('Total: R$ %.2f' %(qtd*4.50))
elif cod == 3:
print('Total: R$ %.2f' %(qtd*5.00))
elif cod == 4:
print('Total: R$ %.2f' %(qtd*2.00))
elif cod == 5:
print('Total: R$ %.2f' %(qtd*1.50))
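# An equivalent table-driven sketch (same prices as the branches above):
#
#   PRICES = {1: 4.00, 2: 4.50, 3: 5.00, 4: 2.00, 5: 1.50}
#   print('Total: R$ %.2f' % (qtd * PRICES[cod]))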
| repo_name: lidymonteirowm/URI | path: Python3/URI1038.py | language: Python | license: gpl-3.0 | size: 371 |
#!/usr/bin/python3
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import contextlib
import os
import jsonschema
import yaml
from snapcraft.internal import common
class SnapcraftSchemaError(Exception):
@property
def message(self):
return self._message
def __init__(self, message):
self._message = message
class Validator:
def __init__(self, snapcraft_yaml=None):
"""Create a validation instance for snapcraft_yaml."""
self._snapcraft = snapcraft_yaml if snapcraft_yaml else {}
self._load_schema()
@property
def schema(self):
"""Return all schema properties."""
return self._schema['properties'].copy()
@property
def part_schema(self):
"""Return part-specific schema properties."""
sub = self.schema['parts']['patternProperties']
properties = sub['^(?!plugins$)[a-z0-9][a-z0-9+-\/]*$']['properties']
return properties
def _load_schema(self):
schema_file = os.path.abspath(os.path.join(
common.get_schemadir(), 'snapcraft.yaml'))
try:
with open(schema_file) as fp:
self._schema = yaml.load(fp)
except FileNotFoundError:
raise SnapcraftSchemaError(
'snapcraft validation file is missing from installation path')
def validate(self):
format_check = jsonschema.FormatChecker()
try:
jsonschema.validate(
self._snapcraft, self._schema, format_checker=format_check)
except jsonschema.ValidationError as e:
_handle_validation_error(e)
def _handle_validation_error(error):
"""Take a jsonschema.ValidationError and raise a SnapcraftSchemaError.
The validation errors coming from jsonschema are a nightmare. This function
tries to make them a bit more understandable.
"""
messages = [error.message]
# error.validator_value may contain a custom validation error message. If
# so, use it instead of the garbage message jsonschema gives us.
with contextlib.suppress(TypeError, KeyError):
messages = [error.validator_value['validation-failure'].format(error)]
path = []
while error.absolute_path:
element = error.absolute_path.popleft()
# assume numbers are indices and use 'xxx[123]' notation.
if isinstance(element, int):
path[-1] = '{}[{}]'.format(path[-1], element)
else:
path.append(str(element))
if path:
messages.insert(0, "The '{}' property does not match the "
"required schema:".format('/'.join(path)))
if error.cause:
messages.append('({})'.format(error.cause))
raise SnapcraftSchemaError(' '.join(messages))
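# An illustrative error path (hypothetical values): with error.absolute_path
# equal to deque(['parts', 'foo', 'after', 0]), the raised message begins
# "The 'parts/foo/after[0]' property does not match the required schema:".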
| repo_name: squidsoup/snapcraft | path: snapcraft/_schema.py | language: Python | license: gpl-3.0 | size: 3,387 |
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2020 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Import types used in the annotations below.
from typing import Any, Dict, Optional, Tuple
from typing_extensions import Final
from ._extension import Extension
_PLATFORM_SNAP = dict(core18="gnome-3-34-1804")
class ExtensionImpl(Extension):
"""Drives ROS 2 build and runtime environment for snap."""
ROS_VERSION: Final[str] = "2"
ROS_DISTRO: Final[str] = "foxy"
@staticmethod
def get_supported_bases() -> Tuple[str, ...]:
return ("core20",)
@staticmethod
def get_supported_confinement() -> Tuple[str, ...]:
return ("strict", "devmode")
@staticmethod
def is_experimental(base: Optional[str]) -> bool:
return True
def __init__(self, *, extension_name: str, yaml_data: Dict[str, Any]) -> None:
super().__init__(extension_name=extension_name, yaml_data=yaml_data)
python_paths = [
f"$SNAP/opt/ros/{self.ROS_DISTRO}/lib/python3.8/site-packages",
"$SNAP/usr/lib/python3/dist-packages",
"${PYTHONPATH}",
]
self.root_snippet = {
"package-repositories": [
{
"type": "apt",
"url": "http://repo.ros2.org/ubuntu/main",
"components": ["main"],
"formats": ["deb"],
"key-id": "C1CF6E31E6BADE8868B172B4F42ED6FBAB17C654",
"key-server": "keyserver.ubuntu.com",
"suites": ["focal"],
}
]
}
self.app_snippet = {
"command-chain": ["snap/command-chain/ros2-launch"],
"environment": {
"ROS_VERSION": self.ROS_VERSION,
"ROS_DISTRO": self.ROS_DISTRO,
"PYTHONPATH": ":".join(python_paths),
},
}
self.part_snippet = {
"build-environment": [
{"ROS_VERSION": self.ROS_VERSION},
{"ROS_DISTRO": self.ROS_DISTRO},
],
}
self.parts = {
f"ros2-{self.ROS_DISTRO}-extension": {
"source": "$SNAPCRAFT_EXTENSIONS_DIR/ros2",
"plugin": "nil",
"override-build": "install -D -m 0755 launch ${SNAPCRAFT_PART_INSTALL}/snap/command-chain/ros2-launch",
"build-packages": [f"ros-{self.ROS_DISTRO}-ros-core"],
}
}
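# --- Hedged usage sketch (illustrative only) ---
# Shows what the extension computes for a minimal snapcraft.yaml dict; the
# yaml_data below is hypothetical.
def _example_extension_snippets():
    ext = ExtensionImpl(extension_name="ros2-foxy", yaml_data={"base": "core20"})
    # the app snippet wires the ROS environment into every app entry
    return ext.app_snippet["environment"]["ROS_DISTRO"]  # "foxy"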
|
snapcore/snapcraft
|
snapcraft/internal/project_loader/_extensions/ros2_foxy.py
|
Python
|
gpl-3.0
| 3,074
|
import os
import sys
import math
import random
import pygame
from pygame.locals import *
SCREEN_SIZE = [720, 480]
FPS = 60
#print(FPS)
# Initialize screen
pygame.init()
screen = pygame.display.set_mode(SCREEN_SIZE)
pygame.display.set_caption("A really cool PyGame game")
clock = pygame.time.Clock()
class Background(pygame.sprite.Sprite):
"""The randomly created background
Returns: background object
"""
def __init__(self):
pygame.sprite.Sprite.__init__(self)
# Create image and rect variables, needed for the group
self.image = pygame.Surface(SCREEN_SIZE)
self.rect = self.image.get_rect()
# Draw the stars into the background
self.star_rate = 0.0025
self.star_points = int((SCREEN_SIZE[0]*SCREEN_SIZE[1]) * self.star_rate)
for i in range(self.star_points):
pos = [random.randrange(SCREEN_SIZE[0]),
random.randrange(SCREEN_SIZE[1])]
pygame.draw.line(self.image, (255, 255, 255), pos, pos)
background = Background()
class Ship(pygame.sprite.Sprite):
def __init__(self):
# Initialize the sprite and create general variables
pygame.sprite.Sprite.__init__(self)
self.cs = 10 # (Cube Size) this is used for changing the size of the ship
self.size = (self.cs*3*2, self.cs*3*2)
# Create image and rect variables, needed for the group
self.image = pygame.Surface(self.size, flags=SRCALPHA)
self.rect = self.image.get_rect()
self.rect.center = (SCREEN_SIZE[0]/2, SCREEN_SIZE[1]/2)
# Variables with different kinds of models of the ship
self.model = ((self.cs, 0), (self.cs*2, self.cs*3), (self.cs, self.cs*2), (0, self.cs*3)) # Model of the ship in a tuple (only for reference)
self.points = [] # Model of the ship in a list (this is the one that changes)
self.centered_model = [] # Model of the default ship located at (0, 0)
for i in self.model:
self.points.append(list(i))
self.centered_model.append([0, 0])
for i in range(len(self.points)):
self.centered_model[i][0] = self.model[i][0] - self.model[2][0]
self.centered_model[i][1] = self.model[i][1] - self.model[2][1]
# Movement and rotation
#self.move_velocity = 500 / FPS
self.move_velocity = 100 / FPS
self.change_xy = 0
self.change_x_p = 0 # Positive x
self.change_x_n = 0 # Negative x
self.change_y_p = 0 # Positive y
self.change_y_n = 0 # Negative y
self.angle = 0 # TODO: It would be nice to make this radians right away, instead of degrees. We'll keep it like this for now.
self.rotation_velocity = 400 / FPS
self.change_angle_p = 0
self.change_angle_n = 0
def update(self):
# Increment/decrease angle
self.angle += self.change_angle_p - self.change_angle_n
# Rotate the ship
radians = self.angle * (math.pi/180)
for i in range(len(self.points)):
self.points[i][0] = self.centered_model[i][0] * math.cos(radians) - self.centered_model[i][1] * math.sin(radians)
self.points[i][1] = self.centered_model[i][1] * math.cos(radians) + self.centered_model[i][0] * math.sin(radians)
self.points[i][0] = self.points[i][0] + self.size[0]/2
self.points[i][1] = self.points[i][1] + self.size[1]/2
# Move the ship. TODO: move it with angle
# self.rect.centerx += self.change_x_p - self.change_x_n
# self.rect.centery += self.change_y_p - self.change_y_n
self.rect.centerx += math.cos(radians-(math.pi/2)) * self.change_xy
self.rect.centery += math.sin(radians-(math.pi/2)) * self.change_xy
# Draw the ship with variables changed
self.image = pygame.Surface(self.size, flags=SRCALPHA)
pygame.draw.polygon(self.image, (255, 255, 255), self.points)
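# --- Hedged sketch of the rotation math used in Ship.update() above ---
# A point (x, y) rotated by `angle_degrees` about the origin follows the
# standard 2D rotation: x' = x*cos(a) - y*sin(a), y' = y*cos(a) + x*sin(a).
def _rotate_point(x, y, angle_degrees):
    a = math.radians(angle_degrees)
    return (x * math.cos(a) - y * math.sin(a),
            y * math.cos(a) + x * math.sin(a))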
ship = Ship()
# Group that contains the sprites needed for the game.
game = pygame.sprite.OrderedUpdates()
game.add(background)
game.add(ship)
# Group that contains the sprites needed for the main menu.
menu = pygame.sprite.Group()
def main():
while True:
for event in pygame.event.get():
if event.type == KEYDOWN:
# Movement
if event.key == K_w:
#ship.change_y_n = ship.move_velocity
ship.change_xy += ship.move_velocity
elif event.key == K_s:
#ship.change_y_p = ship.move_velocity
ship.change_xy -= ship.move_velocity
# Rotation
elif event.key == K_a:
ship.change_angle_n = ship.rotation_velocity
elif event.key == K_d:
ship.change_angle_p = ship.rotation_velocity
# Exiting
elif event.key == K_ESCAPE:
return 0
if event.type == KEYUP:
# Movement
# if event.key == K_w:
# ship.change_y_n = 0
# elif event.key == K_s:
# ship.change_y_p = 0
# Rotation
if event.key == K_a:
ship.change_angle_n = 0
elif event.key == K_d:
ship.change_angle_p = 0
elif event.type == QUIT:
return 0
game.update()
game.draw(screen)
pygame.display.flip()
clock.tick(FPS)
if __name__ == '__main__':
    status = main()
    sys.exit(status)
|
ElGoreLoco/Pygame-Testing
|
ship_asteroids.py
|
Python
|
gpl-3.0
| 5,612
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from DIRAC import S_OK
from DIRAC.AccountingSystem.Client.Types.DataOperation import DataOperation
from DIRAC.AccountingSystem.private.Plotters.BaseReporter import BaseReporter
class DataOperationPlotter(BaseReporter):
_typeName = "DataOperation"
_typeKeyFields = [dF[0] for dF in DataOperation().definitionKeyFields]
def _translateGrouping(self, grouping):
if grouping == "Channel":
return ("%s, %s", ['Source', 'Destination'], "CONCAT( %s, ' -> ', %s )")
else:
return ("%s", [grouping])
_reportSuceededTransfersName = "Successful transfers"
def _reportSuceededTransfers(self, reportRequest):
return self.__reportTransfers(reportRequest, 'Succeeded', ('Failed', 0))
_reportFailedTransfersName = "Failed transfers"
def _reportFailedTransfers(self, reportRequest):
return self.__reportTransfers(reportRequest, 'Failed', ('Succeeded', 1))
def __reportTransfers(self, reportRequest, titleType, togetherFieldsToPlot):
selectFields = (
self._getSelectStringForGrouping(
reportRequest['groupingFields']) + ", %s, %s, SUM(%s), SUM(%s)-SUM(%s)",
reportRequest['groupingFields'][1] + [
'startTime',
'bucketLength',
'TransferOK',
'TransferTotal',
'TransferOK',
])
retVal = self._getTimedData(reportRequest['startTime'],
reportRequest['endTime'],
selectFields,
reportRequest['condDict'],
reportRequest['groupingFields'],
{})
if not retVal['OK']:
return retVal
dataDict, granularity = retVal['Value']
strippedData = self.stripDataField(dataDict, togetherFieldsToPlot[1])
if strippedData:
dataDict[togetherFieldsToPlot[0]] = strippedData[0]
dataDict, maxValue = self._divideByFactor(dataDict, granularity)
dataDict = self._fillWithZero(granularity, reportRequest['startTime'], reportRequest['endTime'], dataDict)
baseDataDict, graphDataDict, maxValue, unitName = self._findSuitableRateUnit(
dataDict, self._getAccumulationMaxValue(dataDict), "files")
return S_OK({'data': baseDataDict, 'graphDataDict': graphDataDict,
'granularity': granularity, 'unit': unitName})
def _plotSuceededTransfers(self, reportRequest, plotInfo, filename):
return self.__plotTransfers(reportRequest, plotInfo, filename, 'Succeeded', ('Failed', 0))
def _plotFailedTransfers(self, reportRequest, plotInfo, filename):
return self.__plotTransfers(reportRequest, plotInfo, filename, 'Failed', ('Succeeded', 1))
def __plotTransfers(self, reportRequest, plotInfo, filename, titleType, togetherFieldsToPlot):
metadata = {'title': '%s Transfers by %s' % (titleType, reportRequest['grouping']),
'ylabel': plotInfo['unit'],
'starttime': reportRequest['startTime'],
'endtime': reportRequest['endTime'],
'span': plotInfo['granularity']}
return self._generateTimedStackedBarPlot(filename, plotInfo['graphDataDict'], metadata)
_reportQualityName = "Efficiency by protocol"
def _reportQuality(self, reportRequest):
selectFields = (self._getSelectStringForGrouping(reportRequest['groupingFields']) + ", %s, %s, SUM(%s), SUM(%s)",
reportRequest['groupingFields'][1] + ['startTime', 'bucketLength',
'TransferOK', 'TransferTotal'
]
)
retVal = self._getTimedData(reportRequest['startTime'],
reportRequest['endTime'],
selectFields,
reportRequest['condDict'],
reportRequest['groupingFields'],
{'checkNone': True,
'convertToGranularity': 'sum',
'calculateProportionalGauges': False,
'consolidationFunction': self._efficiencyConsolidation})
if not retVal['OK']:
return retVal
dataDict, granularity = retVal['Value']
self.stripDataField(dataDict, 0)
if len(dataDict) > 1:
# Get the total for the plot
selectFields = ("'Total', %s, %s, SUM(%s),SUM(%s)",
['startTime', 'bucketLength',
'TransferOK', 'TransferTotal'
]
)
retVal = self._getTimedData(reportRequest['startTime'],
reportRequest['endTime'],
selectFields,
reportRequest['condDict'],
reportRequest['groupingFields'],
{'checkNone': True,
'convertToGranularity': 'sum',
'calculateProportionalGauges': False,
'consolidationFunction': self._efficiencyConsolidation})
if not retVal['OK']:
return retVal
totalDict = retVal['Value'][0]
self.stripDataField(totalDict, 0)
for key in totalDict:
dataDict[key] = totalDict[key]
return S_OK({'data': dataDict, 'granularity': granularity})
def _plotQuality(self, reportRequest, plotInfo, filename):
metadata = {'title': 'Transfer quality by %s' % reportRequest['grouping'],
'starttime': reportRequest['startTime'],
'endtime': reportRequest['endTime'],
'span': plotInfo['granularity']}
return self._generateQualityPlot(filename, plotInfo['data'], metadata)
_reportTransferedDataName = "Cumulative transferred data"
def _reportTransferedData(self, reportRequest):
selectFields = (self._getSelectStringForGrouping(reportRequest['groupingFields']) + ", %s, %s, SUM(%s)",
reportRequest['groupingFields'][1] + ['startTime', 'bucketLength',
'TransferSize'
]
)
retVal = self._getTimedData(reportRequest['startTime'],
reportRequest['endTime'],
selectFields,
reportRequest['condDict'],
reportRequest['groupingFields'],
{})
if not retVal['OK']:
return retVal
dataDict, granularity = retVal['Value']
self.stripDataField(dataDict, 0)
dataDict = self._fillWithZero(granularity, reportRequest['startTime'], reportRequest['endTime'], dataDict)
dataDict = self._accumulate(granularity, reportRequest['startTime'], reportRequest['endTime'], dataDict)
baseDataDict, graphDataDict, maxValue, unitName = self._findSuitableUnit(dataDict,
self._getAccumulationMaxValue(dataDict),
"bytes")
return S_OK({'data': baseDataDict, 'graphDataDict': graphDataDict,
'granularity': granularity, 'unit': unitName})
def _plotTransferedData(self, reportRequest, plotInfo, filename):
        metadata = {'title': 'Transferred data by %s' % reportRequest['grouping'],
'starttime': reportRequest['startTime'],
'endtime': reportRequest['endTime'],
'span': plotInfo['granularity'],
'ylabel': plotInfo['unit'],
'sort_labels': 'last_value'}
return self._generateCumulativePlot(filename, plotInfo['graphDataDict'], metadata)
def _reportThroughput(self, reportRequest):
selectFields = (self._getSelectStringForGrouping(reportRequest['groupingFields']) + ", %s, %s, SUM(%s)",
reportRequest['groupingFields'][1] + ['startTime', 'bucketLength',
'TransferSize'
]
)
retVal = self._getTimedData(reportRequest['startTime'],
reportRequest['endTime'],
selectFields,
reportRequest['condDict'],
reportRequest['groupingFields'],
{})
if not retVal['OK']:
return retVal
dataDict, granularity = retVal['Value']
self.stripDataField(dataDict, 0)
dataDict, maxValue = self._divideByFactor(dataDict, granularity)
dataDict = self._fillWithZero(granularity, reportRequest['startTime'], reportRequest['endTime'], dataDict)
baseDataDict, graphDataDict, maxValue, unitName = self._findSuitableRateUnit(
dataDict, self._getAccumulationMaxValue(dataDict), "bytes")
return S_OK({'data': baseDataDict, 'graphDataDict': graphDataDict,
'granularity': granularity, 'unit': unitName})
def _plotThroughput(self, reportRequest, plotInfo, filename):
metadata = {'title': 'Throughput by %s' % reportRequest['grouping'],
'ylabel': plotInfo['unit'],
'starttime': reportRequest['startTime'],
'endtime': reportRequest['endTime'],
'span': plotInfo['granularity']}
return self._generateTimedStackedBarPlot(filename, plotInfo['graphDataDict'], metadata)
_reportDataTransferedName = "Pie chart of transferred data"
def _reportDataTransfered(self, reportRequest):
selectFields = (self._getSelectStringForGrouping(reportRequest['groupingFields']) + ", SUM(%s)",
reportRequest['groupingFields'][1] + ['TransferSize'
]
)
retVal = self._getSummaryData(reportRequest['startTime'],
reportRequest['endTime'],
selectFields,
reportRequest['condDict'],
reportRequest['groupingFields'],
{})
if not retVal['OK']:
return retVal
dataDict = retVal['Value']
for key in dataDict:
dataDict[key] = int(dataDict[key])
return S_OK({'data': dataDict})
def _plotDataTransfered(self, reportRequest, plotInfo, filename):
        metadata = {'title': 'Total data transferred by %s' % reportRequest['grouping'],
'ylabel': 'bytes',
'starttime': reportRequest['startTime'],
'endtime': reportRequest['endTime']
}
return self._generatePiePlot(filename, plotInfo['data'], metadata)
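# --- Hedged sketch (illustrative only): the reportRequest shape the methods
# above index into. Field values are hypothetical; the groupingFields tuple
# mirrors what _translateGrouping() returns for the 'Channel' grouping.
def _example_report_request():
    return {'startTime': 1420070400,
            'endTime': 1420156800,
            'grouping': 'Channel',
            'groupingFields': ("%s, %s", ['Source', 'Destination']),
            'condDict': {}}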
|
yujikato/DIRAC
|
src/DIRAC/AccountingSystem/private/Plotters/DataOperationPlotter.py
|
Python
|
gpl-3.0
| 10,965
|
# coding:utf-8
'''
Author : qbeenslee
Created : 2015/1/20
'''
import os
import re
import time
import datetime
from config.configuration import THUMB_SIZE_NORMAL, THUMB_SIZE_ORIGIN, THUMB_SIZE_LARGE
from config.setting import UPLOAD_PATH
from data.db import Share, Comment, Favorite, User
RE_MATCH_DEPWD = re.compile(r'^md5\$\d\d\$[a-zA-Z0-9_-]{20}\$[0-9a-f]{32}$')
RE_MATCH_EMAIL = re.compile(
r"[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+(?:[A-Z]{2}|asia|com|org|net|gov|mil|biz|info|mobi|name|aero|jobs|museum|travel)"
) # RFC 2822 (specific TLDs)
def clean_data(*args, **kwargs):
    '''
    Clean up input data
    :param args:
    :param kwargs:
    :return: dictionary
    '''
imei = kwargs.get('imei', None)
pass
def check_string_format(format_string, content):
    '''
    Match content against a compiled regular expression
    :param format_string: compiled regex to match with
    :param content: string to check
    :return: boolean
    '''
    if isinstance(content, (str, unicode)):
success = format_string.findall(content)
if success is not None and success != []:
return True
return False
# Better implemented on the client side; the server has no need for this fine-grained breakdown
def friendly_time():
    '''
    Friendly time expression (describe a past moment relative to now; could
    also distinguish spring and summer daylight periods if desired)
    :param: time or string/unicode
    :return: unicode
    :do:
        Split the expression by hour/day/week/month/year:
        1. Within one hour: "x minutes ago"
        2. Within 3 hours: "x hours ago"
        3. Same calendar day (but not within 24 hours): describe by period of
           the day, 0<=x<5 (night), 5<=x<7 (early morning), 7<=x<12 (morning),
           12<=x<13.5 (noon), 13.5<=x<17 (afternoon), 17<=x<18 (dusk),
           18<=x<21 (evening), 21<=x<24 (late night)
        4. Within 3 days: use "yesterday" and "the day before yesterday"
        5. Beyond 3 days: use the exact number of days
    '''
now = time.time()
now_time = datetime.datetime.fromtimestamp(now)
print now_time.hour
def check_pwd_format(pwd):
    '''
    Check password format
    :param pwd: string whose length must be 60 (never changes)
    :return: boolean
    '''
if len(pwd) == 60 and check_string_format(RE_MATCH_DEPWD, pwd):
return True
else:
return False
def get_image_file_name(time_stamp, thumb_size, postfix):
    '''
    Build an image file name
    default postfix: jpg
    :return: "timestamp$widthxheight$thumb.postfix"
    '''
if time_stamp is None:
time_stamp = get_time_stamp()
    # the original `or` test was always true and forced every postfix to 'jpg'
    if not isinstance(postfix, (str, unicode)):
        postfix = 'jpg'
if type(thumb_size) is not dict or thumb_size == THUMB_SIZE_LARGE:
thumb_size = THUMB_SIZE_ORIGIN
h = thumb_size.get('h', 100)
w = thumb_size.get('w', 100)
thumb = thumb_size.get('thumb', 'thumb')
return r'%s$%dx%d$%s.%s' % (time_stamp, w, h, thumb, postfix)
def get_image_save_path(time_stamp, thumb_size, postfix):
    '''
    Get the save path for an image file
    :param time_stamp: time stamp part of the file name
    :param thumb_size: thumbnail size dict
    :param postfix: file extension
    :return: absolute path under UPLOAD_PATH
    '''
file_name = get_image_file_name(time_stamp, thumb_size, postfix)
return os.path.join(UPLOAD_PATH, file_name)
def get_time_stamp():
    '''
    Get a time stamp string
    :return:
    '''
return str(time.time()).replace('.', '')
def get_data_for_photowall(resultdata):
resultdatas = []
for item_result in resultdata:
if len(item_result) > 3:
share_item = {}
if isinstance(item_result[0], Share):
share_item = item_result[0].__json__()
share_item['avatar'] = item_result[1].avatar
share_item['nickname'] = item_result[1].nickname
elif isinstance(item_result[1], Share):
share_item = item_result[1]
share_item['avatar'] = item_result[0].avatar
share_item['nickname'] = item_result[0].nickname
if item_result[2] is not None:
share_item['collected'] = True
else:
share_item['collected'] = False
if item_result[3] is not None:
share_item['praised'] = True
else:
share_item['praised'] = False
resultdatas.append(share_item)
return resultdatas
def get_data_for_comment(resultdata):
resultdatas = []
for item_result in resultdata:
if len(item_result) == 2:
share_item = {}
if isinstance(item_result[0], Comment):
share_item = item_result[0].__json__()
share_item['avatar'] = item_result[1].avatar
share_item['nickname'] = item_result[1].nickname
elif isinstance(item_result[1], Share):
share_item = item_result[1].__json__()
share_item['avatar'] = item_result[0].avatar
share_item['nickname'] = item_result[0].nickname
resultdatas.append(share_item)
return resultdatas
def get_data_for_collect(resultdata):
resultdatas = []
for item_result in resultdata:
if len(item_result) >= 2:
share_item = {}
if isinstance(item_result[0], Share):
share_item = item_result[0].__json__()
share_item['avatar'] = item_result[1].avatar
share_item['nickname'] = item_result[1].nickname
elif isinstance(item_result[1], Share):
share_item = item_result[1].__json__()
share_item['avatar'] = item_result[0].avatar
share_item['nickname'] = item_result[0].nickname
if item_result[2] is not None:
share_item['praised'] = True
else:
share_item['praised'] = False
resultdatas.append(share_item)
return resultdatas
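# --- Hedged usage sketch (illustrative only) ---
# get_image_file_name() yields names like "14200704001234$640x480$normal.jpg";
# the thumb-size dict below is hypothetical (the real ones come from
# config.configuration).
def _example_image_name():
    size = {'h': 480, 'w': 640, 'thumb': 'normal'}
    return get_image_file_name(get_time_stamp(), size, 'jpg')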
|
qbeenslee/Nepenthes-Server
|
utils/stringutil.py
|
Python
|
gpl-3.0
| 6,041
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import tempfile
from datetime import datetime
import time
from PIL import Image as ImagePIL
from django.conf import settings
from django.contrib import messages
from django.http import Http404, HttpResponseRedirect
from django.views.decorators.http import require_POST
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.shortcuts import redirect, get_object_or_404, render
from zds.gallery.forms import ArchiveImageForm, ImageForm, UpdateImageForm, \
GalleryForm, UpdateGalleryForm, UserGalleryForm, ImageAsAvatarForm
from zds.gallery.models import UserGallery, Image, Gallery
from zds.member.decorator import can_write_and_read_now
from zds.utils import slugify
from zds.utils.paginator import ZdSPagingListView
from django.core.exceptions import ObjectDoesNotExist
from django.core.files import File
import zipfile
import shutil
import os
from django.utils.translation import ugettext_lazy as _
from django.views.generic import DetailView, CreateView, UpdateView, DeleteView, FormView
from django.utils.decorators import method_decorator
from zds.tutorialv2.models.models_database import PublishableContent
class ListGallery(ZdSPagingListView):
"""Display the gallery list with all their images"""
object = UserGallery
template_name = "gallery/gallery/list.html"
context_object_name = "user_galleries"
paginate_by = settings.ZDS_APP['gallery']['gallery_per_page']
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(ListGallery, self).dispatch(*args, **kwargs)
def get_queryset(self):
return UserGallery.objects.filter(user=self.request.user).prefetch_related('gallery').all()
def get_context_data(self, **kwargs):
context = super(ListGallery, self).get_context_data(**kwargs)
# fetch content linked to galleries:
pk_list = [g.gallery.pk for g in context['user_galleries']]
contents_linked = {}
contents = PublishableContent.objects.prefetch_related('gallery').filter(gallery__pk__in=pk_list).all()
for content in contents:
contents_linked[content.gallery.pk] = content
# link galleries to contents
galleries = []
for g in context['user_galleries']:
content = None if g.gallery.pk not in contents_linked else contents_linked[g.gallery.pk]
galleries.append((g, g.gallery, content))
context['galleries'] = galleries
return context
class NewGallery(CreateView):
"""Create a new gallery"""
template_name = "gallery/gallery/new.html"
form_class = GalleryForm
@method_decorator(login_required)
@method_decorator(can_write_and_read_now)
def dispatch(self, *args, **kwargs):
return super(NewGallery, self).dispatch(*args, **kwargs)
def form_valid(self, form):
gallery = Gallery()
gallery.title = form.cleaned_data['title']
gallery.subtitle = form.cleaned_data['subtitle']
gallery.slug = slugify(form.cleaned_data['title'])
gallery.pubdate = datetime.now()
gallery.save()
# Attach user :
userg = UserGallery()
userg.gallery = gallery
userg.mode = "W"
userg.user = self.request.user
userg.save()
return HttpResponseRedirect(gallery.get_absolute_url())
def ensure_user_access(gallery, user, can_write=False):
"""
:param gallery: the gallery
:param user: user who want to access the gallery
:param can_write: check if the user has the writing access to the gallery
:return: the gallery of the user
:rtype: UserGallery
:raise PermissionDenied: if the user has not access or no write permission (if applicable)
"""
try:
user_gallery = UserGallery.objects.get(gallery=gallery, user=user)
if user_gallery:
if can_write and not user_gallery.can_write():
raise PermissionDenied
else:
raise PermissionDenied
    except ObjectDoesNotExist:  # the user_gallery does not exist
raise PermissionDenied
return user_gallery
class GalleryDetails(DetailView):
"""Gallery details"""
model = Gallery
template_name = "gallery/gallery/details.html"
context_object_name = "gallery"
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(GalleryDetails, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(GalleryDetails, self).get_context_data(**kwargs)
context['gallery_mode'] = ensure_user_access(self.object, self.request.user)
context['images'] = self.object.get_images()
context['form'] = UserGalleryForm
context['content_linked'] = PublishableContent.objects.filter(gallery__pk=self.object.pk).first()
return context
class EditGallery(UpdateView):
"""Update gallery information"""
model = Gallery
template_name = "gallery/gallery/edit.html"
form_class = UpdateGalleryForm
@method_decorator(login_required)
@method_decorator(can_write_and_read_now)
def dispatch(self, *args, **kwargs):
return super(EditGallery, self).dispatch(*args, **kwargs)
def get_object(self, queryset=None):
pkey = self.kwargs.pop('pk', None)
slug = self.kwargs.pop('slug', None)
gallery = get_object_or_404(Gallery, pk=pkey, slug=slug)
ensure_user_access(gallery, self.request.user, can_write=True)
return gallery
def form_valid(self, form):
self.object.slug = slugify(form.cleaned_data['title'])
return super(EditGallery, self).form_valid(form)
@can_write_and_read_now
@require_POST
@login_required
def modify_gallery(request):
"""Modify gallery instance: delete galleries or add user to them"""
# Global actions
if "delete_multi" in request.POST:
list_items = request.POST.getlist("items")
# Don't delete gallery when it's link to tutorial
free_galleries = []
for g_pk in list_items:
# check if the gallery is not linked to a content
v2_content = PublishableContent.objects.filter(gallery__pk=g_pk).first()
has_v2_content = v2_content is not None
if has_v2_content:
gallery = Gallery.objects.get(pk=g_pk)
_type = _(u'au tutoriel')
if v2_content.type == 'ARTICLE':
_type = _(u'à l\'article')
error_message = _(u"La galerie « {} » ne peut pas être supprimée car elle est liée {} « {} ».")\
.format(gallery.title, _type, v2_content.title)
messages.error(request, error_message)
else:
free_galleries.append(g_pk)
perms = UserGallery.objects.filter(gallery__pk__in=free_galleries, user=request.user, mode="W").count()
# Check that the user has the RW right on each gallery
if perms < len(free_galleries):
raise PermissionDenied
# Delete all the permissions on all the selected galleries
UserGallery.objects.filter(gallery__pk__in=free_galleries).delete()
# Delete all the images of the gallery (autodelete of file)
Image.objects.filter(gallery__pk__in=free_galleries).delete()
# Finally delete the selected galleries
Gallery.objects.filter(pk__in=free_galleries).delete()
return redirect(reverse("gallery-list"))
elif "adduser" in request.POST:
# Gallery-specific actions
try:
gal_pk = request.POST["gallery"]
except KeyError:
raise Http404
gallery = get_object_or_404(Gallery, pk=gal_pk)
# Disallow actions to read-only members
try:
gal_mode = UserGallery.objects.get(gallery=gallery,
user=request.user)
if gal_mode.mode != "W":
raise PermissionDenied
        except ObjectDoesNotExist:
raise PermissionDenied
form = UserGalleryForm(request.POST)
if form.is_valid():
user = get_object_or_404(User, username=request.POST["user"])
# If a user is already in a user gallery, we don't add him.
galleries = UserGallery.objects.filter(gallery=gallery,
user=user).all()
if galleries.count() > 0:
return redirect(gallery.get_absolute_url())
if user.profile.is_private():
return redirect(gallery.get_absolute_url())
user_gal = UserGallery()
user_gal.user = user
user_gal.gallery = gallery
user_gal.mode = request.POST["mode"]
user_gal.save()
else:
return render(request, "gallery/gallery/details.html", {
"gallery": gallery,
"gallery_mode": gal_mode,
"images": gallery.get_images(),
"form": form,
})
return redirect(gallery.get_absolute_url())
class GalleryMixin(object):
"""Mixin that ensure the access to the gallery and fill context data properly"""
can_write = False # if `True`, check for user write access
def get_context_data(self, **kwargs):
context = super(GalleryMixin, self).get_context_data(**kwargs)
pk_gallery = self.kwargs.pop('pk_gallery', None)
gallery = get_object_or_404(Gallery, pk=pk_gallery)
user_gallery = ensure_user_access(gallery, self.request.user, can_write=self.can_write)
context['gallery'] = gallery
context['gallery_mode'] = user_gallery
return context
class NewImage(GalleryMixin, CreateView):
"""Creates a new image."""
form_class = ImageForm
template_name = 'gallery/image/new.html'
can_write = True # only allowed users can insert images
@method_decorator(login_required)
@method_decorator(can_write_and_read_now)
def dispatch(self, *args, **kwargs):
return super(NewImage, self).dispatch(*args, **kwargs)
def form_valid(self, form):
context = self.get_context_data(**self.kwargs)
img = Image()
img.gallery = context['gallery']
img.title = form.cleaned_data['title']
if form.cleaned_data['legend'] and form.cleaned_data['legend'] != '':
img.legend = form.cleaned_data['legend']
else:
img.legend = img.title
img.physical = self.request.FILES['physical']
img.pubdate = datetime.now()
img.save()
return redirect(reverse("gallery-image-edit", args=[img.gallery.pk, img.pk]))
class EditImage(GalleryMixin, UpdateView):
"""Edit or view an existing image."""
model = Image
form_class = UpdateImageForm
template_name = "gallery/image/edit.html"
@method_decorator(login_required)
@method_decorator(can_write_and_read_now)
def dispatch(self, *args, **kwargs):
return super(EditImage, self).dispatch(*args, **kwargs)
def get_object(self, queryset=None):
pkey = self.kwargs.pop('pk', None)
return get_object_or_404(Image, pk=pkey)
def get_context_data(self, **kwargs):
context = super(EditImage, self).get_context_data(**kwargs)
context['as_avatar_form'] = ImageAsAvatarForm()
return context
def form_valid(self, form):
self.can_write = True # only allowed users can change images
context = self.get_context_data(**self.kwargs)
img = self.object
gallery = context['gallery']
if img.gallery != gallery:
raise PermissionDenied
can_change = True
if 'physical' in self.request.FILES: # the user request to change the image
if self.request.FILES["physical"].size > settings.ZDS_APP['gallery']['image_max_size']:
messages.error(
self.request,
_(u"Votre image est beaucoup trop lourde, réduisez sa taille à moins de {:.0f} "
u"<abbr title=\"kibioctet\">Kio</abbr> avant de l'envoyer.").format(
settings.ZDS_APP['gallery']['image_max_size'] / 1024))
can_change = False
else:
img.physical = self.request.FILES["physical"]
img.slug = slugify(self.request.FILES["physical"])
if can_change:
img.title = form.cleaned_data['title']
img.legend = form.cleaned_data['legend']
img.update = datetime.now()
img.save()
return redirect(reverse("gallery-image-edit", args=[img.gallery.pk, img.pk]))
class DeleteImages(DeleteView):
"""Delete a given image"""
model = Image
http_method_names = ['post', 'delete']
@method_decorator(login_required)
@method_decorator(can_write_and_read_now)
def dispatch(self, *args, **kwargs):
return super(DeleteImages, self).dispatch(*args, **kwargs)
def delete(self, request, *args, **kwargs):
pk_gallery = self.request.POST['gallery']
gallery = get_object_or_404(Gallery, pk=pk_gallery)
ensure_user_access(gallery, request.user, can_write=True)
if 'delete_multi' in request.POST:
list_items = request.POST.getlist("items")
Image.objects.filter(pk__in=list_items, gallery=gallery).delete()
elif 'delete' in request.POST:
pkey = self.request.POST['image']
img = get_object_or_404(Image, pk=pkey)
if img.gallery != gallery:
raise PermissionDenied
img.delete()
return redirect(gallery.get_absolute_url())
class ImportImages(GalleryMixin, FormView):
"""Create images from zip archive."""
form_class = ArchiveImageForm
http_method_names = ['get', 'post', 'put']
template_name = "gallery/image/import.html"
can_write = True # only allowed user can import new images
@method_decorator(login_required)
@method_decorator(can_write_and_read_now)
def dispatch(self, *args, **kwargs):
return super(ImportImages, self).dispatch(*args, **kwargs)
def form_valid(self, form):
context = self.get_context_data()
gallery = context['gallery']
archive = self.request.FILES["file"]
temp = os.path.join(tempfile.gettempdir(), str(time.time()))
if not os.path.exists(temp):
os.makedirs(temp)
zfile = zipfile.ZipFile(archive, "a")
for i in zfile.namelist():
filename = os.path.split(i)[1]
ph_temp = os.path.abspath(os.path.join(temp, os.path.basename(i)))
if filename.strip() == "": # don't deal with directory
continue
# create file for image
f_im = open(ph_temp, "wb")
f_im.write(zfile.read(i))
f_im.close()
(title, ext) = os.path.splitext(os.path.basename(i))
# if size is too large, don't save
if os.stat(ph_temp).st_size > settings.ZDS_APP['gallery']['image_max_size']:
messages.error(
self.request, _(u'Votre image "{}" est beaucoup trop lourde, réduisez sa taille à moins de {:.0f}'
u'Kio avant de l\'envoyer.').format(
title, settings.ZDS_APP['gallery']['image_max_size'] / 1024))
continue
# if it's not an image, pass
try:
ImagePIL.open(ph_temp)
except IOError:
continue
# create picture in database:
f_im = File(open(ph_temp, "rb"))
f_im.name = title + ext
pic = Image()
pic.gallery = gallery
pic.title = title
pic.legend = ""
pic.pubdate = datetime.now()
pic.physical = f_im
pic.save()
f_im.close()
if os.path.exists(ph_temp):
os.remove(ph_temp)
zfile.close()
if os.path.exists(temp):
shutil.rmtree(temp)
return redirect(gallery.get_absolute_url())
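# --- Hedged usage sketch (illustrative only) ---
# A function-based view would guard a gallery the same way the class-based
# views above do: resolve it, then let ensure_user_access() raise
# PermissionDenied for unauthorized users.
@login_required
def example_gallery_redirect(request, pk):
    gallery = get_object_or_404(Gallery, pk=pk)
    ensure_user_access(gallery, request.user, can_write=False)
    return redirect(gallery.get_absolute_url())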
|
DevHugo/zds-site
|
zds/gallery/views.py
|
Python
|
gpl-3.0
| 16,394
|
from django.db import models
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
class ScheduleAdmin(admin.ModelAdmin):
list_display = ('group', 'location', 'week_day', 'start_time', 'end_time')
fieldsets = ((None, {'fields': ('group', 'location', 'week_day', 'start_time', 'end_time')}),)
WEEK_DAYS = (
(1, _('Sunday')),
(2, _('Monday')),
(3, _('Tuesday')),
(4, _('Wednesday')),
(5, _('Thursday')),
(6, _('Friday')),
(7, _('Saturday')))
class Schedule(models.Model):
group = models.ForeignKey('Group', on_delete=models.CASCADE)
location = models.ForeignKey('Location', on_delete=models.CASCADE)
week_day = models.IntegerField(choices=WEEK_DAYS)
start_time = models.TimeField()
end_time = models.TimeField()
    def __str__(self):
        return "{}, {} - {}".format(self.get_week_day_display(), self.start_time, self.end_time)
def find_group_schedule(group):
return Schedule.objects.filter(group=group)
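# --- Hedged usage sketch (illustrative only) ---
# `Group` is defined elsewhere in this app; this helper narrows a group's
# schedule down to a single week day (4 = Wednesday in WEEK_DAYS above).
def find_group_schedule_for_day(group, week_day=4):
    return find_group_schedule(group).filter(week_day=week_day)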
|
htmfilho/boust
|
club/models/schedule.py
|
Python
|
gpl-3.0
| 996
|
# This file is part of Indico.
# Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from indico.core.db import db
from indico.util.console import cformat
from indico.util.struct.iterables import committing_iterator
from indico_zodbimport import Importer, convert_to_unicode
from indico_livesync.plugin import LiveSyncPlugin
from indico_livesync.models.agents import LiveSyncAgent
class LiveSyncImporter(Importer):
plugins = {'livesync'}
def pre_check(self):
return self.check_plugin_schema('livesync')
def has_data(self):
return LiveSyncAgent.find().count()
def migrate(self):
# noinspection PyAttributeOutsideInit
self.livesync_root = self.zodb_root['plugins']['livesync']._storage
with LiveSyncPlugin.instance.plugin_context():
self.migrate_settings()
self.migrate_agents()
print cformat('%{cyan!}Note: The old queue is not preserved!%{reset}')
def migrate_settings(self):
print cformat('%{white!}migrating settings')
LiveSyncPlugin.settings.delete_all()
opts = self.zodb_root['plugins']['livesync']._PluginBase__options
LiveSyncPlugin.settings.set('excluded_categories', [{'id': x} for x in opts['excludedCategories'].getValue()])
db.session.commit()
def migrate_agents(self):
print cformat('%{white!}migrating agents')
for old_agent in committing_iterator(self.livesync_root['agent_manager']._agents.itervalues()):
if not old_agent._active:
print cformat('%{yellow}skipping inactive agent {} ({})%{reset}').format(old_agent._id, old_agent._name)
continue
agent = LiveSyncAgent(name=convert_to_unicode(old_agent._name), initial_data_exported=True)
old_agent_class = old_agent.__class__.__name__
if old_agent_class == 'InvenioBatchUploaderAgent':
agent.backend_name = 'invenio'
agent.settings = {
'invenio_url': old_agent._url
}
elif old_agent_class == 'CERNSearchUploadAgent':
agent.backend_name = 'cernsearch'
agent.settings = {
'service_url': old_agent._url,
'service_username': old_agent._username,
'service_password': old_agent._password,
}
else:
print cformat('%{red!}skipping unknown agent type: {}%{reset}').format(old_agent_class)
continue
print cformat('- %{cyan}{} ({})').format(agent.name, agent.backend_name)
db.session.add(agent)
|
nop33/indico-plugin-livesync
|
indico_livesync/zodbimport.py
|
Python
|
gpl-3.0
| 3,329
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Node Class
The Node is based on the Basic class.
It is the base class of all items in a namespace;
Device and Value are based on the Node class.
"""
from pybush.constants import __dbug__
from pybush.functions import spacelessify
from pybush.basic import Basic
from pybush.functions import set_attributes
class Node(Basic):
"""
Base Class for all item in the namespace.
It offers a way to have information / notification / functions
about the namespace hierarchy
    It inherits from the Basic class and adds name, description and tags functions,
    plus export/import of JSON files for any Node with all its children.
"""
def __init__(self, **kwargs):
super(Node, self).__init__()
# initialise attributes/properties of this node
self._address = None
self._parameter = None
self._children = None
# kwargs setup attributes
set_attributes(self, kwargs)
def post_print(self, printer):
printer = 'Node' + printer
if self.children:
printer = printer + ' - children : ' + str(len(self.children))
if self.address:
printer = printer + ' - address : ' + self.address
if self.parameter:
val = str(self.parameter.value)
printer = printer + ' - parameter value is : ' + val
return printer
@property
def parameter(self):
"""
parameter of the node
"""
return self._parameter
@parameter.setter
def parameter(self, parameter):
if parameter.__class__.__name__ == 'Parameter':
self._parameter = parameter
return True
elif parameter.__class__.__name__ == 'dict':
self._parameter = self.get_device().add_param(parameter)
return bool(self._parameter)
else:
if __dbug__:
                print('ERROR 876 : this is not a Parameter instance, this is a ' + parameter.__class__.__name__)
return False
def get_device(self):
"""
get the root device of this node
"""
asker = self
def get_parent(asker):
"""
get the parent of the current node
"""
asker = asker.parent
return asker
while asker.service != 'Device':
asker = get_parent(asker)
return asker
@property
def children(self):
"""
Return the list of the children registered to this node
"""
if self._children:
return self._children
else:
return None
def new_child(self, dict_import=None, name=None, description=None, tags=None, children=None):
"""
        Create a new Node in its parent
        You can pass either a dict (dict_import) or individual attributes
:return node object if successful
:return False if name is not valid (already exists or is not provided)
"""
def append_child(new_child):
"""
append the child to the children list of this instance
"""
if not self.children:
self._children = [new_child]
else:
self._children.append(new_child)
if not isinstance(dict_import, dict):
dict_import = {'name':name, 'description':description, 'tags':tags}
if isinstance(dict_import, dict):
if 'name' in dict_import.keys():
# check that the name doesn't already exists
if self.children:
for child in self.children:
if isinstance(child, list):
child = child[0]
if isinstance(child, Node):
if dict_import['name'] == child.name:
# return the existing child if it already exists
return child
# we import a python dict to create the child
# be careful about children and parameter
# which needs to instanciate Classes Node and Value
the_new_child = Node(parent=self, **dict_import)
# Append this child in the self.children list
append_child(the_new_child)
# maybe the new_child contains children itself?
if 'children' in dict_import.keys():
if len(dict_import['children']) > 0:
for little_child in dict_import['children']:
                            # create a new child for each of the new_child.children items, recursively
the_new_child.new_child(little_child)
self.new_child_post_action(dict_import)
else:
print('for now we need a name to create a parameter')
else:
# if the child argument is only a string, this is the name of the new_child to create
the_new_child = Node(parent=self, name=name, description=description, tags=tags, children=children)
# Append this child in the self.children list
append_child(the_new_child)
return the_new_child
def new_child_post_action(self, dict_import):
"""
might be subclassed
"""
pass
def post_export(self, node):
"""
export Node to a dict with all its attributes
"""
if self.parameter:
node.setdefault('parameter', self.parameter.export())
else:
node.setdefault('parameter', None)
filiation = []
if self.children:
for child in self.children:
filiation.append(child.export())
node.setdefault('children', filiation)
return node
@property
def address(self):
"""
Current address of the node
"""
def get_address(self):
"""
recursive function to get into parent's hierarchy
"""
address = spacelessify(self.name)
if not address:
address = ''
            if self.__class__.__name__ != 'Device':
                if self.parent:
                    parent_address = get_address(self.parent)
                    if self.__class__.__name__ == 'Value':
address = parent_address
else:
address = parent_address + '/' + address
else:
address = '/' + address
return address
return get_address(self)
@address.setter
def address(self, address):
if __dbug__:
print('come back later for setting a new address for a node', address)
print(self.address)
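# --- Hedged usage sketch (illustrative only) ---
# Builds a tiny tree with new_child(); assuming Basic handles the name/parent
# kwargs the way the calls above suggest, nested nodes yield addresses such
# as '/root/leaf'.
def _example_tree():
    root = Node(name='root')
    leaf = root.new_child(name='leaf')
    return leaf.address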
|
PixelStereo/pybush
|
pybush/node.py
|
Python
|
gpl-3.0
| 6,766
|
#! /usr/bin/env python
"""
File: main.py
Author: Revolt
--------------------------
Desc:
This file initializes the wx application and starts the main
application loop.
--------------------------
Copyright (C) 2010 Revolt
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import wx, os, sys, logging, logging.handlers, traceback
from appinfo import *
from gui.frames.MainFrame import *
class MainApp(wx.App):
""" Our main application class """
def OnInit(self):
""" What to do on application load """
global appInfo
if "unicode" not in wx.PlatformInfo:
            # self.__logger was never initialized at this point; use the module logger
            logging.warning("wxPython isn't built as unicode")
self.SetAppName(appInfo['name'])
self.SetClassName(appInfo['class'])
#wx.Image.AddHandler(wx.PNGHandler())
#wx.Image.AddHandler(wx.JPEGHandler())
stdPaths = wx.StandardPaths.Get()
appDataFolder = stdPaths.GetUserLocalDataDir()
if not os.path.isdir(appDataFolder):
os.mkdir(appDataFolder)
LEVELS = {'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL}
if len(sys.argv) > 1:
level_name = sys.argv[1]
level = LEVELS.get(level_name, logging.ERROR)
logging.basicConfig(level=level)
logFolder = os.path.join(appDataFolder, "logs")
if not os.path.isdir(logFolder):
os.mkdir(logFolder)
logger = logging.getLogger("mhdd")
fileHandler = logging.handlers.RotatingFileHandler(os.path.join(logFolder,
"mhddorganizer.log"),
maxBytes = 512*1024,
backupCount = 2)
formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(name)s - %(message)s (%(funcName)s)")
fileHandler.setFormatter(formatter)
logger.addHandler(fileHandler)
self.config = wx.FileConfig(localFilename = os.path.join(appDataFolder, "config"),
style = wx.CONFIG_USE_LOCAL_FILE)
wx.Config.Set(self.config)
self.frame = MainFrame(None, "MHDD Organizer")
self.frame.Show(True)
self.SetTopWindow(self.frame)
        return True
def exceptionHandler(type, value, tb):
try:
message = "Unhandled exception: " + ''.join(traceback.format_exception(type, value, tb))
print >> sys.stderr, message
logger = logging.getLogger("mhdd")
logger.error(message)
wx.MessageBox(message, "Unhandled Exception", wx.OK | wx.ICON_ERROR)
except Exception, e:
pass
sys.exit(1)
scriptDir = os.path.dirname(sys.argv[0])
if scriptDir:
os.chdir(scriptDir)
sys.excepthook = exceptionHandler
app = MainApp(False)
app.MainLoop()
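# --- Hedged sketch (illustrative only) of the rotating-log setup used in
# OnInit() above; log_path is a caller-supplied file path.
def _example_rotating_logger(log_path):
    logger = logging.getLogger("mhdd.example")
    handler = logging.handlers.RotatingFileHandler(log_path,
                                                   maxBytes=512 * 1024,
                                                   backupCount=2)
    handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
    logger.addHandler(handler)
    return logger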
|
AlexJF/MHDD-Organizer
|
main.py
|
Python
|
gpl-3.0
| 3,567
|
# -*- coding: utf-8 -*-
"""
APT Motor Controller for Thorlabs.
Qudi is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Qudi is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Qudi. If not, see <http://www.gnu.org/licenses/>.
Copyright (c) the Qudi Developers. See the COPYRIGHT.txt file at the
top-level directory of this distribution and at <https://github.com/Ulm-IQO/qudi/>
"""
"""
This module was developed from PyAPT, written originally by Michael Leung
(mcleung@stanford.edu). Have a look in:
https://github.com/HaeffnerLab/Haeffner-Lab-LabRAD-Tools/blob/master/cdllservers/APTMotor/APTMotorServer.py
APT.dll and APT.lib were provided to PyAPT thanks to SeanTanner@ThorLabs .
All the specific error and status code are taken from:
https://github.com/UniNE-CHYN/thorpy
The rest of the documentation is based on the Thorlabs APT Server documentation
which can be obtained directly from
https://www.thorlabs.com/software_pages/ViewSoftwarePage.cfm?Code=APT
"""
from collections import OrderedDict
from core.base import Base
from ctypes import c_long, c_buffer, c_float, windll, pointer
from interface.motor_interface import MotorInterface
import os
import platform
class APTMotor():
""" Class to control Thorlabs APT motor. This class wrapps the low level
commands from a dll library in python methods.
"""
# all the possible hardware types that are available to be controlled by
# the apt.dll
    hwtype_dict = {}
hwtype_dict['HWTYPE_BSC001'] = 11 # 1 Ch benchtop stepper driver
hwtype_dict['HWTYPE_BSC101'] = 12 # 1 Ch benchtop stepper driver
hwtype_dict['HWTYPE_BSC002'] = 13 # 2 Ch benchtop stepper driver
hwtype_dict['HWTYPE_BDC101'] = 14 # 1 Ch benchtop DC servo driver
hwtype_dict['HWTYPE_SCC001'] = 21 # 1 Ch stepper driver card (used within BSC102,103 units)
hwtype_dict['HWTYPE_DCC001'] = 22 # 1 Ch DC servo driver card (used within BDC102,103 units)
hwtype_dict['HWTYPE_ODC001'] = 24 # 1 Ch DC servo driver cube
hwtype_dict['HWTYPE_OST001'] = 25 # 1 Ch stepper driver cube
hwtype_dict['HWTYPE_MST601'] = 26 # 2 Ch modular stepper driver module
hwtype_dict['HWTYPE_TST001'] = 29 # 1 Ch Stepper driver T-Cube
hwtype_dict['HWTYPE_TDC001'] = 31 # 1 Ch DC servo driver T-Cube
hwtype_dict['HWTYPE_LTSXXX'] = 42 # LTS300/LTS150 Long Travel Integrated Driver/Stages
hwtype_dict['HWTYPE_L490MZ'] = 43 # L490MZ Integrated Driver/Labjack
hwtype_dict['HWTYPE_BBD10X'] = 44 # 1/2/3 Ch benchtop brushless DC servo driver
# the error code is also comparable to the APT server documentation.
    error_code = {}
# General Error code:
error_code[10000] = 'An unknown Server error has occurred. '
error_code[10001] = 'A Server internal error has occurred. '
error_code[10002] = 'A Server call has failed. '
error_code[10003] = 'An attempt has been made to pass a parameter that is ' \
'invalid or out of range. In the case of motor ' \
'commands, this error may occur when a move is ' \
'requested that exceeds the stage travel or exceeds ' \
'the calibration data.'
error_code[10004] = 'An attempt has been made to save or load control ' \
'parameters to the registry (using the SaveParamSet ' \
'or LoadParamSet methods) when the unit serial number ' \
'has not been specified.'
# PC System:
error_code[10050] = 'An error has occurred whilst accessing the disk. ' \
'Check that the drive is not full, missing or ' \
'corrupted.'
error_code[10051] = 'An error has occurred with the ethernet connections ' \
'or the windows sockets. '
error_code[10052] = 'An error has occurred whilst accessing the ' \
'registry. '
error_code[10053] = 'An internal memory allocation error or ' \
'de-allocation error has occurred.'
error_code[10054] = 'An error has occurred with the COM system. ' \
'Restart the program.'
error_code[10055] = 'An error has occurred with the USB communications.'
# Rack and USB Units:
error_code[10100] = 'A serial number has been specified that is unknown ' \
'to the server.'
error_code[10101] = 'A duplicate serial number has been detected. ' \
'Serial numbers are required to be unique.'
error_code[10102] = 'A duplicate device identifier has been detected.'
error_code[10103] = 'An invalid message source has been detected.'
error_code[10104] = 'A message has been received with an unknown ' \
'identifier.'
error_code[10105] = 'An unknown hardware identifier has been encountered.'
error_code[10106] = 'An invalid serial number has been detected.'
error_code[10107] = 'An invalid message destination ident has been detected.'
error_code[10108] = 'An invalid index parameter has been passed.'
error_code[10109] = 'A software call has been made to a control which is ' \
'not currently communicating with any hardware. This ' \
'may be because the control has not been started or ' \
'may be due to an incorrect serial number or missing ' \
'hardware. '
error_code[10110] = 'A notification or response message has been ' \
                        'received from a hardware unit. This may indicate ' \
'a hardware fault or that an illegal ' \
'command/parameter has been sent to the hardware.'
error_code[10111] = 'A time out has occurred while waiting for a ' \
'hardware unit to respond. This may be due to ' \
'communications problems or a hardware fault. '
error_code[10112] = 'Some functions are applicable only to later ' \
'versions of embedded code. This error is returned ' \
'when a software call is made to a unit with an ' \
'incompatible version of embedded code installed.'
error_code[10115] = 'Some functions are applicable only to later versions ' \
'of hardware. This error is returned when a software ' \
'call is made to an incompatible version of hardware.'
# Motors:
error_code[10150] = 'The GetStageAxisInfo method has been called when ' \
'no stage has been assigned. '
error_code[10151] = 'An internal error has occurred when using an ' \
'encoded stage.'
error_code[10152] = 'An internal error has occurred when using an ' \
'encoded stage. '
error_code[10153] = 'A software call applicable only to encoded stages ' \
'has been made to a non-encoded stage.'
# The status is encodes in a 32bit word. Some bits in that word have no
# assigned meaning, or their meaning could not be deduced from the manual.
# The known status bits are stated below. The current status can also be a
# combination of status bits. Therefore you have to check with an AND
# bitwise comparison, which status your device has. The bit flags are
# returned in a single 32 bit integer parameter and can provide additional
# useful status information for client application development.
status_code = {}
# dict key as bit number = 'hex value, bit number, description'
status_code[1] = '0x00000001, 1, forward hardware limit switch is active. ' \
'CW hardware limit switch (0 - no contact, 1 - contact).'
status_code[2] = '0x00000002, 2, reverse hardware limit switch is active. ' \
'CCW hardware limit switch (0 - no contact, 1 - contact).'
status_code[3] = '0x00000004, 3, CW software limit switch (0 - no ' \
'contact, 1 - contact). Not applicable to Part Number ' \
'ODC001 and TDC001 controllers'
status_code[4] = '0x00000008, 4, CCW software limit switch (0 - no ' \
'contact, 1 - contact). Not applicable to Part Number ' \
'ODC001 and TDC001 controllers'
status_code[5] = '0x00000010, 5, in motion, moving forward, Motor shaft ' \
'moving clockwise (1 - moving, 0 - stationary).'
status_code[6] = '0x00000020, 6, in motion, moving reverse, Motor shaft ' \
'moving counterclockwise (1 - moving, 0 - stationary).'
status_code[7] = '0x00000040, 7, in motion, jogging forward, Shaft ' \
'jogging clockwise (1 - moving, 0 - stationary).'
status_code[8] = '0x00000080, 8, in motion, jogging reverse, Shaft ' \
'jogging counterclockwise (1 - moving, 0 - stationary).'
status_code[9] = '0x00000100, 9, Motor connected (1 - connected, 0 - ' \
'not connected). Not applicable to Part Number BMS001 ' \
'and BMS002 controllers. Not applicable to Part Number ' \
'ODC001 and TDC001 controllers.'
status_code[10] = '0x00000200, 10, in motion, homing, Motor homing ' \
'(1 - homing, 0 - not homing).'
status_code[11] = '0x00000400, 11, homed (homing has been completed)' \
'(1 - homed, 0 - not homed).'
status_code[12] = '0x00000800, 12, For Future Use.'
# NOTE: Bits 13 to 20 are applicable only to the BBD10x series brushless DC
# controllers!
status_code[13] = '0x00001000, 13, Trajectory within tracking window ' \
'(1 – within window, 0 – not within window).'
status_code[14] = '0x00002000, 14, settled, Axis within settled window ' \
'(1 – settled within window, 0 – not settled within' \
'window).'
status_code[15] = '0x00004000, 15, motion error (excessive position ' \
'error), Axis exceeds position error limit ' \
'(1 – limit exceeded, 0 – within limit).'
status_code[16] = '0x00008000, 16, Set when position module instruction ' \
'error exists (1 – instruction error exists, 0 – ' \
'no error).'
status_code[17] = '0x00010000, 17, Interlock link missing in motor ' \
'connector (1 – missing, 0 – present).'
status_code[18] = '0x00020000, 18, Position module over temperature ' \
'warning (1 – over temp, 0 – temp OK).'
status_code[19] = '0x00040000, 19, Position module bus voltage fault ' \
'(1 – fault exists, 0 – OK).'
status_code[20] = '0x00080000, 20, Axis commutation error ' \
'(1 – error, 0 – OK).'
# NOTE: Bits 21 to 26 (Digital Input States) are only applicable if the
# associated digital input is fitted to your controller – see the
# relevant handbook for more details.
status_code[21] = '0x00100000, 21, Digital input 1 state (1 - ' \
'logic high, 0 - logic low).'
status_code[22] = '0x00200000, 22, Digital input 2 state (1 - ' \
'logic high, 0 - logic low).'
status_code[23] = '0x00400000, 23, Digital input 3 state (1 - ' \
'logic high, 0 - logic low).'
status_code[24] = '0x00800000, 24, Digital input 4 state (1 - ' \
'logic high, 0 - logic low).'
status_code[25] = '0x01000000, 25, BBD10x Controllers: Axis phase ' \
'current limit (1 – current limit exceeded, ' \
'0 – below limit). Other Controllers: Digital input 5 ' \
'state (1 - logic high, 0 - logic low).'
status_code[26] = '0x02000000, 26, Digital input 6 state (1 - logic ' \
'high, 0 - logic low).'
status_code[27] = '0x04000000, 27, Unspecified, for Future Use.'
status_code[28] = '0x08000000, 28, Unspecified, for Future Use.'
status_code[29] = '0x10000000, 29, Unspecified, for Future Use.'
status_code[30] = '0x20000000, 30, Active (1 – indicates unit is active, ' \
'0 – not active).'
status_code[31] = '0x40000000, 31, Unspecified, for Future Use.'
status_code[32] = '0x80000000, Channel enabled (1 – enabled, 0- disabled).'
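    # --- Hedged helper sketch (not part of the original driver) ---
    # The notes above say the status word must be checked with a bitwise AND;
    # this decodes a raw 32 bit word into the known status_code descriptions.
    @classmethod
    def decode_status(cls, status_word):
        """Return the description of every known status bit set in status_word."""
        return [text for bit, text in cls.status_code.items()
                if status_word & (1 << (bit - 1))]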
def __init__(self, path_dll, serialnumber, hwtype, label='', unit='m'):
"""
@param str path_dll: the absolute path to the dll of the current
operating system
@param int serialnumber: serial number of the stage
@param str hwtype: name for the type of the hardware device you want to
control. The name must be available in hwtype_dict!
@param str label: a label which identifies the axis and gives
it a meaning.
@param str unit: the unit of this axis, possible entries are m, ° or
degree
"""
self.aptdll = windll.LoadLibrary(path_dll)
self.aptdll.EnableEventDlg(True)
self.aptdll.APTInit()
self._HWType = c_long(self.hwtype_dict[hwtype])
self.Connected = False
self.verbose = False
self.label = label
self.setSerialNumber(serialnumber)
self._wait_until_done = True
        self._unit = unit  # all apt stages are either in mm or in degree and
                           # since mm is not an SI unit it has to be converted
                           # here in this hardware file from m to mm.
def getNumberOfHardwareUnits(self):
""" Returns the number of connected external hardware (HW) units that
are available to be interfaced.
"""
numUnits = c_long()
self.aptdll.GetNumHWUnitsEx(self._HWType, pointer(numUnits))
return numUnits.value
def getSerialNumberByIdx(self, index):
""" Returns the Serial Number of the specified index """
HWSerialNum = c_long()
hardwareIndex = c_long(index)
self.aptdll.GetHWSerialNumEx(self._HWType, hardwareIndex, pointer(HWSerialNum))
return HWSerialNum
def setSerialNumber(self, SerialNum):
'''
Sets the Serial Number of the specified index
'''
if self.verbose:
print("Serial is", SerialNum)
self.SerialNum = c_long(SerialNum)
return self.SerialNum.value
def initializeHardwareDevice(self):
'''
Initialises the motor.
You can only get the position of the motor and move the motor after it
        has been initialised. Once initialised, it will not respond to other
objects trying to control it, until released.
'''
if self.verbose:
print('initializeHardwareDevice serial', self.SerialNum)
result = self.aptdll.InitHWDevice(self.SerialNum)
if result == 0:
self.Connected = True
if self.verbose:
                print('initializeHardwareDevice connection SUCCESS')
# need some kind of error reporting here
else:
raise Exception('Connection Failed. Check Serial Number!')
return True
# Interfacing with the motor settings
def getHardwareInformation(self):
''' Get information from the hardware'''
model = c_buffer(255)
softwareVersion = c_buffer(255)
hardwareNotes = c_buffer(255)
self.aptdll.GetHWInfo(self.SerialNum, model, 255, softwareVersion, 255, hardwareNotes, 255)
hwinfo = [model.value, softwareVersion.value, hardwareNotes.value]
return hwinfo
def get_stage_axis_info(self):
""" Get parameter configuration of the stage
        @return list: with the 4 entries:
                          float min_pos: minimum position in m or degree
                          float max_pos: maximum position in m or degree
                          int units: 1 = m and 2 = degree
                          float pitch: the angular distance to the next tooth
                                       of the stepper motor, which essentially
                                       determines the precision of the stepper
                                       motor's movement.
        This method handles the conversion between the controller's non-SI
        unit mm and m.
"""
minimumPosition = c_float()
maximumPosition = c_float()
units = c_long()
pitch = c_float()
self.aptdll.MOT_GetStageAxisInfo(self.SerialNum,
pointer(minimumPosition),
pointer(maximumPosition),
pointer(units),
pointer(pitch))
if self._unit == 'm':
stageAxisInformation = [minimumPosition.value/1000.0,
maximumPosition.value/1000.0,
units.value,
pitch.value]
else:
stageAxisInformation = [minimumPosition.value,
maximumPosition.value,
units.value,
pitch.value]
return stageAxisInformation
    def set_stage_axis_info(self, pos_min, pos_max, pitch, unit=1):
""" Set parameter configuration of the stage.
@param float pos_min: minimal position of the axis in m or degree.
@param float pos_max: maximal position of the axis in m or degree.
        @param float pitch: the pitch determines the full step angle of the
                            stepper motor, i.e. the resolution of the
                            stepper motor's movement.
@param int unit: unit of the axis, possible values:
1 = m
2 = degree
        This method handles the conversion from m to the controller's non-SI
        unit mm.
"""
if unit == 1:
self._unit = 'm'
elif unit == 2:
self._unit = 'degree'
else:
            raise Exception('The unit in method set_stage_axis_info is '
                            'invalid! Use either 1 (= m) or 2 (= degree)!')
if self._unit == 'm':
# the thorlabs stage takes just mm values, that is really a pity...
pos_min_c = c_float(pos_min*1000)
pos_max_c = c_float(pos_max*1000)
else:
pos_min_c = c_float(pos_min)
pos_max_c = c_float(pos_max)
        unit_c = c_long(unit)   # 1 = mm (controller-side), 2 = degree
        # the pitch of the lead screw differs between the stage types
pitch_c = c_float(pitch)
self.aptdll.MOT_SetStageAxisInfo(self.SerialNum, pos_min_c, pos_max_c,
unit_c, pitch_c)
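    # Usage sketch (hypothetical values, mirroring the rotation-stage
    # settings used by the stage classes below): configure an axis as a
    # rotation stage working in degrees.
    #
    #   motor.set_stage_axis_info(0, 360, pitch=7.5, unit=2)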
def getHardwareLimitSwitches(self):
reverseLimitSwitch = c_long()
forwardLimitSwitch = c_long()
self.aptdll.MOT_GetHWLimSwitches(self.SerialNum, pointer(reverseLimitSwitch), pointer(forwardLimitSwitch))
hardwareLimitSwitches = [reverseLimitSwitch.value, forwardLimitSwitch.value]
return hardwareLimitSwitches
def setHardwareLimitSwitches(self, switch_reverse, switch_forward):
""" Set the Switch Configuration of the axis.
@param int switch_reverse: sets the switch in reverse movement
@param int switch_forward: sets the switch in forward movement
The following values are allowed:
0x01 or 1: Ignore switch or switch not present.
0x02 or 2: Switch makes on contact.
0x03 or 3: Switch breaks on contact.
0x04 or 4: Switch makes on contact - only used for homes (e.g. limit switched rotation stages).
        0x05 or 5: Switch breaks on contact - only used for homes (e.g. limit switched rotation stages).
0x06 or 6: For PMD based brushless servo controllers only - uses index mark for homing.
"""
reverseLimitSwitch = c_long(switch_reverse)
forwardLimitSwitch = c_long(switch_forward)
self.aptdll.MOT_SetHWLimSwitches(self.SerialNum, reverseLimitSwitch, forwardLimitSwitch)
hardwareLimitSwitches = [reverseLimitSwitch.value, forwardLimitSwitch.value]
return hardwareLimitSwitches
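    # Example (the pair (2, 2) is what the stage classes below use): set
    # both limit switches to 'switch makes on contact'.
    #
    #   motor.setHardwareLimitSwitches(2, 2)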
def getVelocityParameters(self):
""" Retrieve the velocity parameter with the currently used acceleration.
        @return list: with 3 entries:
float min_vel: minimal velocity in m/s or degree/s
float curr_acc: currently set acceleration in m/s^2 or degree/s^2
float max_vel: maximal velocity in m/s or degree/s
"""
minimumVelocity = c_float()
acceleration = c_float()
maximumVelocity = c_float()
self.aptdll.MOT_GetVelParams(self.SerialNum, pointer(minimumVelocity), pointer(acceleration), pointer(maximumVelocity))
if self._unit == 'm':
            # the Thorlabs stage returns the values in mm/s and mm/s^2
velocityParameters = [minimumVelocity.value/1000.0,
acceleration.value/1000.0,
maximumVelocity.value/1000.0]
else:
velocityParameters = [minimumVelocity.value, acceleration.value,
maximumVelocity.value]
return velocityParameters
def get_velocity(self):
""" Get the current velocity setting
"""
if self.verbose:
print('get_velocity probing...')
minVel, acc, maxVel = self.getVelocityParameters()
if self.verbose:
            print('get_velocity:', maxVel)
return maxVel
def setVelocityParameters(self, minVel, acc, maxVel):
""" Set the velocity and acceleration parameter.
        @param float minVel: the minimum velocity at which to start and end a
move in m/s or degree/s
        @param float acc: the rate at which the velocity climbs from minimum
                          to maximum and slows from maximum to minimum, in
                          m/s^2 or degree/s^2
@param float maxVel: the maximum velocity at which to perform a move in
m/s or degree/s
Note: The minVel parameter value is locked at zero and cannot be
adjusted.
"""
if self._unit == 'm':
minimumVelocity = c_float(minVel*1000.0)
acceleration = c_float(acc*1000.0)
maximumVelocity = c_float(maxVel*1000.0)
else:
minimumVelocity = c_float(minVel)
acceleration = c_float(acc)
maximumVelocity = c_float(maxVel)
self.aptdll.MOT_SetVelParams(self.SerialNum, minimumVelocity, acceleration, maximumVelocity)
def set_velocity(self, maxVel):
""" Set the maximal velocity for the motor movement.
@param float maxVel: maximal velocity of the stage in m/s or degree/s.
"""
if self.verbose:
print('set_velocity', maxVel)
minVel, acc, oldVel = self.getVelocityParameters()
self.setVelocityParameters(minVel, acc, maxVel)
def getVelocityParameterLimits(self):
""" Get the current maximal velocity and acceleration parameter.
@return list: with 2 entries:
float max_acc: maximum acceleration in m/s^2 or degree/s^2
float max_vel: maximal velocity in m/s or degree/s
"""
maximumAcceleration = c_float()
maximumVelocity = c_float()
self.aptdll.MOT_GetVelParamLimits(self.SerialNum, pointer(maximumAcceleration), pointer(maximumVelocity))
if self._unit == 'm':
velocityParameterLimits = [maximumAcceleration.value/1000.0,
maximumVelocity.value/1000.0]
else:
velocityParameterLimits = [maximumAcceleration.value,
maximumVelocity.value]
return velocityParameterLimits
# Controlling the motors:
# =======================
# m = move
# c = controlled velocity
# b = backlash correction
#
# Rel = relative distance from current position.
# Abs = absolute position
def get_home_parameter(self):
""" Get the home parameter"""
home_direction = c_long()
limit_switch = c_long()
home_velocity = c_float()
zero_offset = c_float()
self.aptdll.MOT_GetHomeParams(self.SerialNum, pointer(home_direction),
pointer(limit_switch),
pointer(home_velocity),
pointer(zero_offset))
home_param = [home_direction.value, limit_switch.value,
home_velocity.value, zero_offset.value]
return home_param
def set_home_parameter(self, home_dir, switch_dir, home_vel, zero_offset):
""" Set the home parameters.
@param int home_dir: direction to the home position,
1 = Move forward
2 = Move backward
@param int switch_dir: Direction of the switch limit:
4 = Use forward limit switch for home datum
1 = Use forward limit switch for home datum.
@param float home_vel = default velocity
@param float zero_offset: the distance or offset (in mm or degrees) of
the limit switch from the Home position.
"""
home_dir_c = c_long(home_dir)
switch_dir_c = c_long(switch_dir)
home_vel_c = c_float(home_vel)
zero_offset_c = c_float(zero_offset)
self.aptdll.MOT_SetHomeParams(self.SerialNum, home_dir_c, switch_dir_c,
home_vel_c, zero_offset_c)
return True
def get_pos(self):
""" Obtain the current absolute position of the stage.
@return float: the value of the axis either in m or in degree.
"""
if self.verbose:
print('getPos probing...')
if not self.Connected:
raise Exception('Please connect first! Use initializeHardwareDevice')
position = c_float()
self.aptdll.MOT_GetPosition(self.SerialNum, pointer(position))
if self._unit == 'm':
if self.verbose:
print('getPos (m)', position.value/1000.0)
return position.value/1000.0
else:
if self.verbose:
print('getPos (degree)', position.value)
return position.value
def move_rel(self, relDistance):
""" Moves the motor a relative distance specified
@param float relDistance: Relative position desired, in m or in degree.
"""
if self.verbose:
print('move_rel ', relDistance, c_float(relDistance))
if not self.Connected:
# TODO: This should use our error message system
print('Please connect first! Use initializeHardwareDevice')
if self._unit == 'm':
relativeDistance = c_float(relDistance*1000.0)
else:
relativeDistance = c_float(relDistance)
self.aptdll.MOT_MoveRelativeEx(self.SerialNum, relativeDistance, self._wait_until_done)
if self.verbose:
            print('move_rel SUCCESS')
def move_abs(self, absPosition):
""" Moves the motor to the Absolute position specified
@param float absPosition: absolute Position desired, in m or degree.
"""
if self.verbose:
print('move_abs ', absPosition, c_float(absPosition))
if not self.Connected:
raise Exception('Please connect first! Use initializeHardwareDevice')
if self._unit == 'm':
absolutePosition = c_float(absPosition*1000.0)
else:
absolutePosition = c_float(absPosition)
self.aptdll.MOT_MoveAbsoluteEx(self.SerialNum, absolutePosition, self._wait_until_done)
if self.verbose:
            print('move_abs SUCCESS')
return True
def mcRel(self, relDistance, moveVel=0.5e-3):
""" Moves the motor a relative distance specified at a controlled velocity.
@param float relDistance: Relative position desired in m or in degree
@param float moveVel: Motor velocity, m/s or in degree/s
"""
if self.verbose:
print('mcRel ', relDistance, c_float(relDistance), 'mVel', moveVel)
if not self.Connected:
raise Exception('Please connect first! Use initializeHardwareDevice')
# Save velocities to reset after move
maxVel = self.get_velocity()
# Set new desired max velocity
self.set_velocity(moveVel)
self.move_rel(relDistance)
self.set_velocity(maxVel)
if self.verbose:
            print('mcRel SUCCESS')
return True
def mcAbs(self, absPosition, moveVel=0.5):
""" Moves the motor to the Absolute position specified at a controlled velocity.
@param float absPosition: Position desired in m or degree.
@param float moveVel: Motor velocity, m/s or degree/s
"""
if self.verbose:
print('mcAbs ', absPosition, c_float(absPosition), 'mVel', moveVel)
if not self.Connected:
raise Exception('Please connect first! Use initializeHardwareDevice')
# Save velocities to reset after move
minVel, acc, maxVel = self.getVelocityParameters()
# Set new desired max velocity
self.set_velocity(moveVel)
        self.move_abs(absPosition)
        self.set_velocity(maxVel)
        if self.verbose:
            print('mcAbs SUCCESS')
return True
def move_bc_rel(self, relDistance):
""" Moves the motor a relative distance specified, correcting for backlash.
@param float relDistance: Relative position desired in m or in degree
        NOTE: Be careful when using this method. If interactive mode is on,
              the stage reacts immediately to both inputs for the relative
              movement, which prevents the proper execution of the first
              command!
"""
if self.verbose:
print('mbRel ', relDistance, c_float(relDistance))
if not self.Connected:
# TODO: This should use our error message system
print('Please connect first! Use initializeHardwareDevice')
self.move_rel(relDistance - self._backlash)
self.move_rel(self._backlash)
if self.verbose:
            print('mbRel SUCCESS')
return True
def mbAbs(self, absPosition):
""" Moves the motor to the Absolute position specified
@param float absPosition: Position desired in m or degree
"""
if self.verbose:
print('mbAbs ', absPosition, c_float(absPosition))
if not self.Connected:
raise Exception('Please connect first! Use initializeHardwareDevice')
        if absPosition < self.get_pos():
            if self.verbose:
                print('backlash move_abs', absPosition - self._backlash)
            self.move_abs(absPosition - self._backlash)
        self.move_abs(absPosition)
        if self.verbose:
            print('mbAbs SUCCESS')
return True
# --------------------------- Miscellaneous --------------------------------
def _create_status_dict(self):
""" Extract from the status integer all possible states.
"return:
"""
status = {}
status[0] = 'magnet stopped'
status[1] = 'magnet moves forward'
status[2] = 'magnet moves backward'
return status
def get_status(self):
""" Get the status bits of the current axis.
@return tuple(int, dict): the current status as an integer and the
dictionary explaining the current status.
"""
status_bits = c_long()
self.aptdll.MOT_GetStatusBits(self.SerialNum, pointer(status_bits))
# Check at least whether magnet is moving:
if self._test_bit(status_bits.value, 4):
return 1, self._create_status_dict()
elif self._test_bit(status_bits.value, 5):
return 2, self._create_status_dict()
else:
return 0, self._create_status_dict()
def identify(self):
""" Causes the motor to blink the Active LED. """
self.aptdll.MOT_Identify(self.SerialNum)
def cleanUpAPT(self):
""" Releases the APT object. Use when exiting the program. """
self.aptdll.APTCleanUp()
if self.verbose:
print('APT cleaned up')
self.Connected = False
def abort(self):
""" Abort the movement. """
self.aptdll.MOT_StopProfiled(self.SerialNum)
def go_home(self):
if not self.Connected:
raise Exception('Please connect first! Use initializeHardwareDevice')
#TODO: a proper home position has to be set, not just zero.
self.move_abs(0.0)
def _test_bit(self, int_val, offset):
""" Check a bit in an integer number at position offset.
@param int int_val: an integer value, which is checked
@param int offset: the position which should be checked whether in
int_val for a bit of 1 is set.
@return bool: Check in an integer representation, whether the bin at the
position offset is set to 0 or to 1. If bit is set True
will be returned else False.
"""
mask = 1 << offset
        return (int_val & mask) != 0
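    # Minimal sketch (hypothetical raw status word): decode individual bits
    # with _test_bit, e.g. bit 4 (moving forward) and bit 5 (moving
    # backward) as evaluated in get_status above.
    #
    #   raw = 0x00000010        # assumed value for illustration
    #   self._test_bit(raw, 4)  # -> True  (moving forward)
    #   self._test_bit(raw, 5)  # -> False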
def set_backlash(self, backlash):
""" Set the provided backlash for the apt motor.
@param float backlash: the backlash in m or degree for the used stage.
"""
if self._unit == 'm':
# controller needs values in mm:
c_backlash = c_float(backlash*1000)
else:
c_backlash = c_float(backlash)
self.aptdll.MOT_SetBLashDist(self.SerialNum, c_backlash)
self._backlash = backlash
return backlash
def get_backlash(self):
""" Ask for the currently set backlash in the controller for the axis.
@return float: backlash in m or degree, depending on the axis config.
"""
backlash = c_float()
self.aptdll.MOT_GetBLashDist(self.SerialNum, pointer(backlash))
if self._unit == 'm':
self._backlash = backlash.value/1000
else:
self._backlash = backlash.value
return self._backlash
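# Usage sketch for APTMotor (all values are hypothetical: the DLL path, the
# serial number 83000001 and the hardware type have to match the actual
# setup, and the hwtype string must be a key of hwtype_dict):
#
#   motor = APTMotor('thirdparty/thorlabs/win64/APT.dll', 83000001,
#                    hwtype='TDC001', label='x', unit='m')
#   motor.initializeHardwareDevice()
#   motor.move_rel(1e-3)        # move 1 mm forward (distances are in m)
#   print(motor.get_pos())
#   motor.cleanUpAPT()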
# ==============================================================================
class APTStage(Base, MotorInterface):
""" Control class for an arbitrary collection of axis. Do not use this
Class directly but inherit this class to a new Class, where also the
method get_constraints() is specified for that specific set of a
hardware.
If it is really necessary to change an already existing interface
module, then overwrite it in the class, which inherited that class.
"""
def on_activate(self, e):
""" Initialize instance variables and connect to hardware as configured.
        @param object e: Event class object from Fysom.
                         An object created by the state machine module Fysom,
                         which is connected to a specific event (have a look
                         in the Base class). This object contains the passed
                         event, the state before the event happened and the
                         destination state which should be reached after the
                         event has happened.
"""
# create the magnet dump folder
self._magnet_dump_folder = self._get_magnet_dump()
# Load DLL
if platform.architecture()[0] == '64bit':
path_dll = os.path.join(self.get_main_dir(), 'thirdparty',
'thorlabs',
'win64',
'APT.dll')
        elif platform.architecture()[0] == '32bit':
            # note: the 32-bit build is expected to ship its DLL under 'win32'
            path_dll = os.path.join(self.get_main_dir(), 'thirdparty',
                                    'thorlabs',
                                    'win32',
                                    'APT.dll')
else:
self.log.error('Unknown platform, cannot load the Thorlabs dll.')
# Read HW from config
config = self.getConfiguration()
if 'motor_type_serial_label' in config.keys():
self._motor_type_serial_label = config['motor_type_serial_label']
else:
            self.log.error('Motor hardware controller type, serial number '
                           'and label for the axes not found in the '
                           'configuration.\n'
                           'These entries are essential; without them no '
                           'proper communication can be established!\n'
                           'The hardware controller type depends on the used '
                           'microcontroller, the serial number can be found '
                           'on the back of the step motor controller and a '
                           'label has to be chosen for each axis, like:\n'
                           '[("<hw_type>", <serial_num>, "<axis_label>"), '
                           '("<hw_type>", <serial_num>, "<axis_label>"), ...]\n'
                           'assigned to the config option '
                           'motor_type_serial_label.')
        # here all the references to the different axes are stored:
self._axis_dict = OrderedDict()
limits_dict = self.get_constraints()
        # The variable self._motor_type_serial_label is a list which contains
        # the information about each axis. Three pieces of information about
        # each axis have to be present:
        #   1. hw_type: hardware type of the controller; it must be one entry
        #      from the dict hwtype_dict of the generic class APTMotor
        #   2. serial_num: the serial number assigned to that axis
        #   3. label: the label you give that axis. Note that this label
        #      should coincide with the label defined in the get_constraints
        #      method.
        #
        # Therefore self._motor_type_serial_label looks like:
        #   [(hw_type, serial_num, label), (hw_type, serial_num, label), ...]
for (hw_type, serialnummer, label) in self._motor_type_serial_label:
if limits_dict.get(label) is not None:
unit = limits_dict[label]['unit']
self._axis_dict[label] = APTMotor(path_dll, serialnummer,
hw_type, label, unit)
self._axis_dict[label].initializeHardwareDevice()
else:
                self.log.error('The following label "{0}" cannot be found in '
                               'the constraints method!\nCheck whether the '
                               'label coincides with the label given in the '
                               'config!\nRestart the program!'.format(label))
self.custom_activation(e)
def custom_activation(self, e):
""" That activation method can be overwritten in the sub-classed file.
@param object e: Event class object from Fysom. A more detailed
explanation can be found in method activation.
"""
pass
def on_deactivate(self, e):
""" Disconnect from hardware and clean up.
@param object e: Event class object from Fysom. A more detailed
explanation can be found in method activation.
"""
for label_axis in self._axis_dict:
self._axis_dict[label_axis].cleanUpAPT()
self.custom_deactivation(e)
def custom_deactivation(self, e):
""" That deactivation method can be overwritten in the sub-classed file.
@param object e: Event class object from Fysom. A more detailed
explanation can be found in method activation.
"""
pass
def move_rel(self, param_dict):
""" Moves stage in given direction (relative movement)
@param dict param_dict: dictionary, which passes all the relevant
parameters, which should be changed.
With get_constraints() you can obtain all
possible parameters of that stage. According to
this parameter set you have to pass a dictionary
with keys that are called like the parameters
from get_constraints() and assign a SI value to
that. For a movement in x the dict should e.g.
have the form:
dict = { 'x' : 23 }
where the label 'x' corresponds to the chosen
axis label.
A smart idea would be to ask the position after the movement.
"""
curr_pos_dict = self.get_pos()
constraints = self.get_constraints()
for label_axis in self._axis_dict:
if param_dict.get(label_axis) is not None:
move = param_dict[label_axis]
curr_pos = curr_pos_dict[label_axis]
                if (curr_pos + move > constraints[label_axis]['pos_max']) or \
                        (curr_pos + move < constraints[label_axis]['pos_min']):
                    self.log.warning('Cannot make further relative movement '
                                     'of the axis "{0}" since the motor is '
                                     'at position {1} and the step {2} would '
                                     'exceed the allowed range [{3},{4}]! '
                                     'Movement is ignored!'.format(
                                         label_axis,
                                         curr_pos,
                                         move,
                                         constraints[label_axis]['pos_min'],
                                         constraints[label_axis]['pos_max']))
else:
self._save_pos({label_axis: curr_pos + move})
self._axis_dict[label_axis].move_rel(move)
def move_abs(self, param_dict):
""" Moves stage to absolute position (absolute movement)
@param dict param_dict: dictionary, which passes all the relevant
parameters, which should be changed. Usage:
{'axis_label': <a-value>}.
'axis_label' must correspond to a label given
to one of the axis.
A smart idea would be to ask the position after the movement.
"""
constraints = self.get_constraints()
for label_axis in self._axis_dict:
if param_dict.get(label_axis) is not None:
desired_pos = param_dict[label_axis]
constr = constraints[label_axis]
if not(constr['pos_min'] <= desired_pos <= constr['pos_max']):
                    self.log.warning('Cannot make absolute movement of the '
                                     'axis "{0}" to position {1}, since it '
                                     'exceeds the limits [{2},{3}]. Movement '
                                     'is ignored!'.format(
                                         label_axis, desired_pos,
                                         constr['pos_min'], constr['pos_max']))
else:
self._save_pos({label_axis:desired_pos})
self._axis_dict[label_axis].move_abs(desired_pos)
def abort(self):
""" Stops movement of the stage. """
for label_axis in self._axis_dict:
self._axis_dict[label_axis].abort()
        self.log.warning('Movement of all axes aborted! Stage stopped.')
def get_pos(self, param_list=None):
""" Gets current position of the stage arms
@param list param_list: optional, if a specific position of an axis
is desired, then the labels of the needed
axis should be passed as the param_list.
If nothing is passed, then from each axis the
position is asked.
@return dict: with keys being the axis labels and item the current
position.
"""
pos = {}
if param_list is not None:
for label_axis in param_list:
if label_axis in self._axis_dict:
pos[label_axis] = self._axis_dict[label_axis].get_pos()
else:
for label_axis in self._axis_dict:
pos[label_axis] = self._axis_dict[label_axis].get_pos()
return pos
def get_status(self, param_list=None):
""" Get the status of the position
@param list param_list: optional, if a specific status of an axis
is desired, then the labels of the needed
axis should be passed in the param_list.
If nothing is passed, then from each axis the
status is asked.
"""
status = {}
if param_list is not None:
for label_axis in param_list:
if label_axis in self._axis_dict:
status[label_axis] = self._axis_dict[label_axis].get_status()
else:
for label_axis in self._axis_dict:
status[label_axis] = self._axis_dict[label_axis].get_status()
return status
def calibrate(self, param_list=None):
""" Calibrates the stage.
        @param list param_list: optional; if a specific calibration of an
                                axis is desired, then the labels of the
                                needed axes should be passed in the
                                param_list. If nothing is passed, then all
                                connected axes will be calibrated.
@return int: error code (0:OK, -1:error)
After calibration the stage moves to home position which will be the
zero point for the passed axis. The calibration procedure will be
different for each stage.
"""
raise InterfaceImplementationError('MagnetStageInterface>calibrate')
#TODO: read out a saved home position in file and compare that with the
# last position saved also in file. The difference between these
# values will determine the absolute home position.
#
if param_list is not None:
for label_axis in param_list:
if label_axis in self._axis_dict:
self._axis_dict[label_axis].go_home()
else:
for label_axis in self._axis_dict:
self._axis_dict[label_axis].go_home()
def _save_pos(self, param_dict):
""" Save after each move the parameters to file, since the motor stage
looses any information if it is initialized. That might be a way to
store and retrieve the current position.
@param dict param_dict: dictionary, which passes all the relevant
parameters, which should be changed.
"""
for label_axis in param_dict:
if label_axis in self._axis_dict:
pos = param_dict[label_axis]
filename = os.path.join(self._magnet_dump_folder,
label_axis + '.dat')
with open(filename, 'w') as f:
f.write(str(pos))
def _get_magnet_dump(self):
""" Create the folder where the position file is saved, and check
whether it exists.
@return str: the path to the created folder."""
path = self.get_home_dir()
magnet_path = os.path.join(path, 'magnet')
if not os.path.exists(magnet_path):
os.makedirs(magnet_path)
self.log.info('Magnet dump was created in:\n'
'{}'.format(magnet_path))
return magnet_path
def get_velocity(self, param_list=None):
""" Gets the current velocity for all connected axes.
        @param list param_list: optional, if a specific velocity of an axis
is desired, then the labels of the needed
axis should be passed as the param_list.
If nothing is passed, then from each axis the
velocity is asked.
        @return dict: with the axis label as key and the velocity as item.
"""
vel = {}
if param_list is not None:
for label_axis in param_list:
if label_axis in self._axis_dict:
vel[label_axis] = self._axis_dict[label_axis].get_velocity()
else:
for label_axis in self._axis_dict:
vel[label_axis] = self._axis_dict[label_axis].get_velocity()
return vel
def set_velocity(self, param_dict):
""" Write new value for velocity.
@param dict param_dict: dictionary, which passes all the relevant
parameters, which should be changed. Usage:
{'axis_label': <the-velocity-value>}.
'axis_label' must correspond to a label given
to one of the axis.
"""
constraints = self.get_constraints()
for label_axis in param_dict:
if label_axis in self._axis_dict:
desired_vel = param_dict[label_axis]
constr = constraints[label_axis]
if not(constr['vel_min'] <= desired_vel <= constr['vel_max']):
                    self.log.warning('Cannot set velocity of the axis "{0}" '
                                     'to the desired velocity "{1}", since '
                                     'it exceeds the limits [{2},{3}]! '
                                     'Command is ignored!'.format(
                                         label_axis, desired_vel,
                                         constr['vel_min'], constr['vel_max']))
else:
self._axis_dict[label_axis].set_velocity(desired_vel)
class APTOneAxisStage(APTStage):
_modclass = 'APTOneAxis'
_modtype = 'hardware'
# connectors
_out = {'aptmotor': 'MotorInterface'}
def custom_activation(self, e):
""" That activation method can be overwritten in the sub-classed file.
@param object e: Event class object from Fysom. A more detailed
explanation can be found in method activation of the
parent class APTStage.
"""
        # my specific settings for the stage can be set here.
        # remember to set the units to degree if you want to use the axis as
        # a rotation stage, like that:
        #   min_pos, max_pos, unit_read, pitch = self.get_stage_axis_info()
        #   self._axis_dict[label].set_stage_axis_info(min_pos, max_pos, pitch, unit)
# my specific settings for the stage:
limits_dict = self.get_constraints()
for label_axis in self._axis_dict:
# adapt the hardware controller to the proper unit set:
if limits_dict[label_axis]['unit'] == '°' or limits_dict[label_axis]['unit'] == 'degree':
unit = 2 # for rotation movement
#FIXME: the backlash parameter has to be taken from the config and
# should not be hardcoded here!!
pitch = 7.5
backlash_correction = 0.2
else:
unit = 1 # default value for linear movement
pitch = 1
backlash_correction = 0.10e-3
self._axis_dict[label_axis].set_stage_axis_info(
limits_dict[label_axis]['pos_min'],
limits_dict[label_axis]['pos_max'],
pitch=pitch, unit=unit)
self._axis_dict[label_axis].setVelocityParameters(
limits_dict[label_axis]['vel_min'],
limits_dict[label_axis]['acc_max'],
limits_dict[label_axis]['vel_max'])
self._axis_dict[label_axis].set_velocity(limits_dict[label_axis]['vel_max'])
self._axis_dict[label_axis].setHardwareLimitSwitches(2, 2)
self._axis_dict[label_axis]._wait_until_done = False
            # set the backlash correction in m, since the forward movement is
            # more precise than the backward one:
self._axis_dict[label_axis].set_backlash(backlash_correction)
def custom_deactivation(self, e):
""" That deactivation method can be overwritten in the sub-classed file.
@param object e: Event class object from Fysom. A more detailed
explanation can be found in method activation of the
parent class APTStage.
"""
pass
def get_constraints(self):
""" Retrieve the hardware constrains from the motor device.
@return dict: dict with constraints for the magnet hardware. These
constraints will be passed via the logic to the GUI so
that proper display elements with boundary conditions
could be made.
Provides all the constraints for each axis of a motorized stage
(like total travel distance, velocity, ...)
Each axis has its own dictionary, where the label is used as the
identifier throughout the whole module. The dictionaries for each axis
are again grouped together in a constraints dictionary in the form
{'<label_axis0>': axis0 }
where axis0 is again a dict with the possible values defined below. The
possible keys in the constraint are defined here in the interface file.
If the hardware does not support the values for the constraints, then
insert just None. If you are not sure about the meaning, look in other
hardware files to get an impression.
"""
constraints = {}
        # Be careful: if the pitch is not set correctly, the units are not
        # the right ones! Check the pitch for the used travel unit in the
        # file MG17APTServer.ini.
#FIXME: the numbers for the constraints should be obtained from the
# configuration and should be not hardcoded here into this file!
# constraints for the axis of type CR1-Z7:
# set the constraints for the phi axis:
axis0 = {}
axis0['label'] = 'phi' # That name must coincide with the given
# name in the config. Otherwise there is no
# way of identifying the used axes.
        axis0['unit'] = '°'         # possible entries: m, ° or degree
axis0['ramp'] = ['Trapez'] # a possible list of ramps
axis0['pos_min'] = 0 # in °
axis0['pos_max'] = 360 # that is basically the traveling range
axis0['pos_step'] = 0.01 # in °
axis0['vel_min'] = 0.1 # in °/s
axis0['vel_max'] = 4.5 # in °/s
axis0['vel_step'] = 0.1 # in °/s (a rather arbitrary number)
axis0['acc_min'] = 4.0 # in °/s^2
axis0['acc_max'] = 5.0 # in °/s^2
axis0['acc_step'] = 0.01 # in °/s^2 (a rather arbitrary number)
constraints[axis0['label']] = axis0
return constraints
class APTThreeAxisStage(APTStage):
""" The module controlles three StepperStage56=NRT150 Enc Stage 150mm
"""
_modclass = 'APTThreeAxis'
_modtype = 'hardware'
# connectors
_out = {'aptmotor': 'MotorInterface'}
def custom_activation(self, e):
""" That activation method can be overwritten in the sub-classed file.
@param object e: Event class object from Fysom. A more detailed
explanation can be found in method activation of the
parent class APTStage.
"""
# my specific settings for the stage:
limits_dict = self.get_constraints()
for label_axis in self._axis_dict:
# adapt the hardware controller to the proper unit set:
if limits_dict[label_axis]['unit'] == '°' or limits_dict[label_axis]['unit'] == 'degree':
unit = 2 # for rotation movement
#FIXME: the backlash parameter has to be taken from the config and
# should not be hardcoded here!!
pitch = 7.5
backlash_correction = 0.2
else:
unit = 1 # default value for linear movement
pitch = 1
backlash_correction = 0.10e-3
self._axis_dict[label_axis].set_stage_axis_info(
limits_dict[label_axis]['pos_min'],
limits_dict[label_axis]['pos_max'],
pitch=pitch, unit=unit)
self._axis_dict[label_axis].setVelocityParameters(
limits_dict[label_axis]['vel_min'],
limits_dict[label_axis]['acc_max'],
limits_dict[label_axis]['vel_max'])
self._axis_dict[label_axis].set_velocity(limits_dict[label_axis]['vel_max'])
self._axis_dict[label_axis].setHardwareLimitSwitches(2, 2)
self._axis_dict[label_axis]._wait_until_done = False
            # set the backlash correction in m, since the forward movement is
            # more precise than the backward one:
self._axis_dict[label_axis].set_backlash(backlash_correction)
def custom_deactivation(self, e):
""" That deactivation method can be overwritten in the sub-classed file.
@param object e: Event class object from Fysom. A more detailed
explanation can be found in method activation of the
parent class APTStage.
"""
pass
def get_constraints(self):
""" Retrieve the hardware constrains from the motor device.
@return dict: dict with constraints for the magnet hardware. These
constraints will be passed via the logic to the GUI so
that proper display elements with boundary conditions
could be made.
Provides all the constraints for each axis of a motorized stage
(like total travel distance, velocity, ...)
Each axis has its own dictionary, where the label is used as the
identifier throughout the whole module. The dictionaries for each axis
are again grouped together in a constraints dictionary in the form
{'<label_axis0>': axis0 }
where axis0 is again a dict with the possible values defined below. The
possible keys in the constraint are defined here in the interface file.
If the hardware does not support the values for the constraints, then
insert just None. If you are not sure about the meaning, look in other
hardware files to get an impression.
"""
#FIXME: the numbers for the constraints should be obtained from the
# configuration and should be not hardcoded here into this file!
constraints = OrderedDict()
# set the constraints for the x axis:
axis0 = {}
axis0['label'] = 'x' # That name must coincide with the given
# name in the config. Otherwise there is no
# way of identifying the used axes.
axis0['unit'] = 'm' # the SI units, only possible mm or degree
axis0['ramp'] = ['Trapez'] # a possible list of ramps
axis0['pos_min'] = -65e-3 # in m
axis0['pos_max'] = 65e-3 # that is basically the traveling range
axis0['pos_step'] = 3.0e-6 # in m (a rather arbitrary number)
axis0['vel_min'] = 0.1e-3 # in m/s
axis0['vel_max'] = 2.0e-3 # in m/s
axis0['vel_step'] = 1.0e-6 # in m/s (a rather arbitrary number)
axis0['acc_min'] = 10e-6 # in m/s^2
axis0['acc_max'] = 500e-6 # in m/s^2
axis0['acc_step'] = 1.0e-6 # in m/s^2 (a rather arbitrary number)
# set the constraints for the y axis:
axis1 = {}
axis1['label'] = 'y' # That name must coincide with the given
# name in the config. Otherwise there is no
# way of identifying the used axes.
axis1['unit'] = 'm' # the SI units, only possible mm or degree
axis1['ramp'] = ['Trapez'] # a possible list of ramps
axis1['pos_min'] = -65e-3 # in m
axis1['pos_max'] = 65e-3 # that is basically the traveling range
axis1['pos_step'] = 3.0e-6 # in m (a rather arbitrary number)
axis1['vel_min'] = 0.1e-3 # in m/s
axis1['vel_max'] = 2.0e-3 # in m/s
axis1['vel_step'] = 1.0e-6 # in m/s (a rather arbitrary number)
axis1['acc_min'] = 10e-6 # in m/s^2
axis1['acc_max'] = 500e-6 # in m/s^2
axis1['acc_step'] = 1.0e-6 # in m/s^2 (a rather arbitrary number)
# set the constraints for the z axis:
axis2 = {}
        axis2['label'] = 'z'        # name is included just as a sanity check
axis2['unit'] = 'm' # the SI units
axis2['ramp'] = ['Trapez'] # a possible list of ramps
axis2['pos_min'] = -65e-3 # in m
axis2['pos_max'] = 65e-3 # that is basically the traveling range
axis2['pos_step'] = 3.0e-6 # in m (a rather arbitrary number)
axis2['vel_min'] = 0.1e-3 # in m/s
axis2['vel_max'] = 2.0e-3 # in m/s
axis2['vel_step'] = 1.0e-6 # in m/s (a rather arbitrary number)
axis2['acc_min'] = 10e-6 # in m/s^2
axis2['acc_max'] = 500e-6 # in m/s^2
axis2['acc_step'] = 1.0e-6 # in m/s^2 (a rather arbitrary number)
# assign the parameter container for x to a name which will identify it
constraints[axis0['label']] = axis0
constraints[axis1['label']] = axis1
constraints[axis2['label']] = axis2
return constraints
class APTFourAxisStage(APTStage):
""" The module controls three StepperStage56=NRT150 Enc Stage 150mm
together with CR1-Z7 rotation stage.
"""
    _modclass = 'APTFourAxis'
_modtype = 'hardware'
# connectors
_out = {'aptmotor': 'MotorInterface'}
def custom_activation(self, e):
""" That activation method can be overwritten in the sub-classed file.
@param object e: Event class object from Fysom. A more detailed
explanation can be found in method activation of the
parent class APTStage.
"""
# my specific settings for the stage:
limits_dict = self.get_constraints()
for label_axis in self._axis_dict:
            #FIXME: The pitch and backlash_correction have to be set from the
            #       config and not hardcoded here in the file!
# adapt the hardware controller to the proper unit set:
if (limits_dict[label_axis]['unit'] == '°') or (limits_dict[label_axis]['unit'] == 'degree'):
unit = 2 # for rotation movement, for the CR1-Z7 rotation stage
pitch = 7.5
backlash_correction = 0.2
else:
unit = 1 # default value for linear movement
pitch = 1
backlash_correction = 0.10e-3
self._axis_dict[label_axis].set_stage_axis_info(
limits_dict[label_axis]['pos_min'],
limits_dict[label_axis]['pos_max'],
pitch=pitch, unit=unit)
self._axis_dict[label_axis].setHardwareLimitSwitches(2, 2)
self._axis_dict[label_axis]._wait_until_done = False
            # set the backlash correction in m, since the forward movement is
            # more precise than the backward one.
self._axis_dict[label_axis].set_backlash(backlash_correction)
#FIXME: That is a hardcoded workaround, since the VelocityParameters
# cannot be set with the controller for whatever reasons...
if label_axis != 'phi':
self._axis_dict[label_axis].setVelocityParameters(
limits_dict[label_axis]['vel_min'],
limits_dict[label_axis]['acc_max']/2,
limits_dict[label_axis]['vel_max'])
self._axis_dict[label_axis].set_velocity(limits_dict[label_axis]['vel_max']/2)
def custom_deactivation(self, e):
""" That deactivation method can be overwritten in the sub-classed file.
@param object e: Event class object from Fysom. A more detailed
explanation can be found in method activation of the
parent class APTStage.
"""
pass
def get_constraints(self):
""" Retrieve the hardware constrains from the motor device.
@return dict: dict with constraints for the magnet hardware. These
constraints will be passed via the logic to the GUI so
that proper display elements with boundary conditions
could be made.
Provides all the constraints for each axis of a motorized stage
(like total travel distance, velocity, ...)
Each axis has its own dictionary, where the label is used as the
identifier throughout the whole module. The dictionaries for each axis
are again grouped together in a constraints dictionary in the form
{'<label_axis0>': axis0 }
where axis0 is again a dict with the possible values defined below. The
possible keys in the constraint are defined here in the interface file.
If the hardware does not support the values for the constraints, then
insert just None. If you are not sure about the meaning, look in other
hardware files to get an impression.
"""
#FIXME: the numbers for the constraints should be obtained from the
# configuration and should be not hardcoded here into this file!
constraints = OrderedDict()
# constraints for the axis of type NRT150M:
# set the constraints for the x axis:
axis0 = {}
axis0['label'] = 'x' # That name must coincide with the given
# name in the config. Otherwise there is no
# way of identifying the used axes.
axis0['unit'] = 'm' # the SI units, only possible mm or degree
axis0['ramp'] = ['Trapez'] # a possible list of ramps
axis0['pos_min'] = -65e-3 # in m
axis0['pos_max'] = 65e-3 # that is basically the traveling range
axis0['pos_step'] = 3.0e-6 # in m (a rather arbitrary number)
axis0['vel_min'] = 0.1e-3 # in m/s
axis0['vel_max'] = 2.0e-3 # in m/s
axis0['vel_step'] = 1.0e-6 # in m/s (a rather arbitrary number)
axis0['acc_min'] = 10e-6 # in m/s^2
axis0['acc_max'] = 500e-6 # in m/s^2
axis0['acc_step'] = 1.0e-6 # in m/s^2 (a rather arbitrary number)
# constraints for the axis of type NRT150M:
# set the constraints for the y axis:
axis1 = {}
axis1['label'] = 'y' # That name must coincide with the given
# name in the config. Otherwise there is no
# way of identifying the used axes.
axis1['unit'] = 'm' # the SI units, only possible mm or degree
axis1['ramp'] = ['Trapez'] # a possible list of ramps
axis1['pos_min'] = -65e-3 # in m
axis1['pos_max'] = 65e-3 # that is basically the traveling range
axis1['pos_step'] = 3.0e-6 # in m (a rather arbitrary number)
axis1['vel_min'] = 0.1e-3 # in m/s
axis1['vel_max'] = 2.0e-3 # in m/s
axis1['vel_step'] = 1.0e-6 # in m/s (a rather arbitrary number)
axis1['acc_min'] = 10e-6 # in m/s^2
axis1['acc_max'] = 500e-6 # in m/s^2
axis1['acc_step'] = 1.0e-6 # in m/s^2 (a rather arbitrary number)
# constraints for the axis of type NRT150M:
# set the constraints for the z axis:
axis2 = {}
        axis2['label'] = 'z'        # name is included just as a sanity check
axis2['unit'] = 'm' # the SI units
axis2['ramp'] = ['Trapez'] # a possible list of ramps
axis2['pos_min'] = -65e-3 # in m
axis2['pos_max'] = 65e-3 # that is basically the traveling range
axis2['pos_step'] = 3.0e-6 # in m (a rather arbitrary number)
axis2['vel_min'] = 0.1e-3 # in m/s
axis2['vel_max'] = 2.0e-3 # in m/s
axis2['vel_step'] = 1.0e-6 # in m/s (a rather arbitrary number)
axis2['acc_min'] = 10e-6 # in m/s^2
axis2['acc_max'] = 500e-6 # in m/s^2
axis2['acc_step'] = 1.0e-6 # in m/s^2 (a rather arbitrary number)
# constraints for the axis of type CR1-Z7:
# set the constraints for the phi axis:
axis3 = {}
        axis3['label'] = 'phi'      # name is included just as a sanity check
axis3['unit'] = '°' # the SI units, possible entries: m or ° or degree
axis3['ramp'] = ['Trapez'] # a possible list of ramps
axis3['pos_min'] = 0 # in °
axis3['pos_max'] = 360 # that is basically the traveling range
        axis3['pos_step'] = 0.01    # in ° (the stage's minimum incremental
                                    # movement is 2.19 arcsec)
        axis3['vel_min'] = 1/3600*22    # in °/s: the stage covers 22 arcsec/s
                                        # up to 6 °/s; 1 arcsec = 1/1296000 of
                                        # a circle (1 degree = 1/360 of a
                                        # circle)
        axis3['vel_max'] = 6.0          # in °/s
axis3['vel_step'] = 0.1 # in °/s (a rather arbitrary number)
axis3['acc_min'] = 4.0 # in °/s^2
axis3['acc_max'] = 5.0 # in °/s^2
axis3['acc_step'] = 0.01 # in °/s^2 (a rather arbitrary number)
# assign the parameter container for x to a name which will identify it
constraints[axis0['label']] = axis0
constraints[axis1['label']] = axis1
constraints[axis2['label']] = axis2
constraints[axis3['label']] = axis3
return constraints
|
drogenlied/qudi
|
hardware/motor/aptmotor.py
|
Python
|
gpl-3.0
| 72,081
|
__author__ = 'civa'
import re
class DictQuery(dict):
    def get(self, path, where='', single=False, default=None):
keys = path.split("/")
val = None
for key in keys:
if val:
if isinstance(val, list):
if where:
if '*' in where:
val = [v for v in val if v and (re.search(where.replace('*', '.'), v.get(key, default)) is not None)]
else:
val = [v for v in val if v and v.get(key, default) == where]
if single and val:
val = val[0]
else:
val = [v.get(key, default) if v else None for v in val]
else:
if '*' in key:
for k, v in val.iteritems():
x = re.search(key.replace('*', '.'), k)
if x:
val = v
break
else:
val = val.get(key, default)
if where and val == where:
break
else:
if '*' in key:
for k, v in self.iteritems():
x = re.search(key.replace('*', '.'), k)
if x:
val = v
break
else:
val = dict.get(self, key, default)
            if not val:
                break
return val
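    # Usage sketch (hypothetical data): '/'-separated paths traverse nested
    # dicts and lists; '*' inside a key segment acts as a wildcard.
    #
    #   d = DictQuery({'user': {'name': 'bob',
    #                           'roles': [{'id': 1}, {'id': 2}]}})
    #   d.get('user/name')         # -> 'bob'
    #   d.get('user/roles/id')     # -> [1, 2]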
'''
Find key with selected value recursively
DictQuery(some_dict).find('id', 5)
'''
def find(self, key, value):
if key in self and self[key] == value:
return self
else:
return self._find(self, key, value)
def _find(self, data, key, value):
if key in data and data[key] == value: return data
for k, v in data.iteritems():
            if isinstance(v, dict):
                ret_val = self._find(v, key, value)
                if ret_val is not None:
                    return ret_val
elif isinstance(v, list):
for item in v:
ret_val = self._find(item, key, value)
if ret_val is not None:
return ret_val
|
Civa/Zenith
|
src/Backend/Distributed/shared/utils.py
|
Python
|
gpl-3.0
| 2,328
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
from bpy.props import BoolProperty, FloatProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode, repeat_last
class SvMapRangeNode(bpy.types.Node, SverchCustomTreeNode):
''' Map a range from one to another'''
bl_idname = 'SvMapRangeNode'
bl_label = 'Map Range'
bl_icon = 'OUTLINER_OB_EMPTY'
old_min = FloatProperty(
name='Old Min', description='Old Min',
default=0,
options={'ANIMATABLE'}, update=updateNode)
old_max = FloatProperty(
name='Old Max', description='Old Max',
default=1,
options={'ANIMATABLE'}, update=updateNode)
new_min = FloatProperty(
name='New Min', description='New Min',
default=0,
options={'ANIMATABLE'}, update=updateNode)
new_max = FloatProperty(
name='New Max', description='New Max',
default=10,
options={'ANIMATABLE'}, update=updateNode)
value = FloatProperty(
        name='Value', description='Value',
default=.5,
options={'ANIMATABLE'}, update=updateNode)
clamp = BoolProperty(default=True, name='Clamp', update=updateNode)
def sv_init(self, context):
self.inputs.new('StringsSocket', "Value").prop_name = 'value'
self.inputs.new('StringsSocket', "Old Min").prop_name = 'old_min'
self.inputs.new('StringsSocket', "Old Max").prop_name = 'old_max'
self.inputs.new('StringsSocket', "New Min").prop_name = 'new_min'
self.inputs.new('StringsSocket', "New Max").prop_name = 'new_max'
self.outputs.new('StringsSocket', "Value")
def draw_buttons(self, context, layout):
layout.prop(self, "clamp")
def map_range(self, x_list, old_min, old_max, new_min, new_max):
old_d = old_max - old_min
new_d = new_max - new_min
scale = new_d/old_d
def f(x):
return new_min + (x-old_min)*scale
if self.clamp:
return [min(new_max, max(new_min, f(x))) for x in x_list]
else:
return [f(x) for x in x_list]
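    # Worked example: mapping x = 0.25 from the old range [0, 1] onto the
    # new range [0, 10]:
    #   scale = (10 - 0) / (1 - 0) = 10
    #   f(0.25) = 0 + (0.25 - 0) * 10 = 2.5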
def process(self):
inputs = self.inputs
outputs = self.outputs
# no outputs, end early.
if not outputs['Value'].is_linked:
return
value_in = iter(inputs[0].sv_get())
param = [repeat_last(inputs[i].sv_get()[0]) for i in range(1, 5)]
out = [self.map_range(*args) for args in zip(value_in, *param)]
self.outputs['Value'].sv_set(out)
def register():
bpy.utils.register_class(SvMapRangeNode)
def unregister():
bpy.utils.unregister_class(SvMapRangeNode)
|
kilon/sverchok
|
nodes/number/range_map.py
|
Python
|
gpl-3.0
| 3,434
|
#!/usr/bin/env python
vector_a = [1,1,1,1,1]
vector_b = [1,1,1,1,1]
vector_c = [0,0,0,0,0]
for i in range(len(vector_a)):
    vector_c[i] = vector_a[i] + vector_b[i]
print(vector_c)
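# Equivalent idiomatic one-liner (same result):
#   vector_c = [a + b for a, b in zip(vector_a, vector_b)]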
|
robertdfrench/tiny-vector-addition
|
Step1/vector-add-serial.py
|
Python
|
gpl-3.0
| 182
|
"""initialize invoice number format
Revision ID: 1e1a970ad004
Revises: 44f964dc36a2
Create Date: 2018-06-06 12:29:45.046659
"""
# revision identifiers, used by Alembic.
revision = '1e1a970ad004'
down_revision = '44f964dc36a2'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def update_database_structure():
pass
def migrate_datas():
from autonomie_base.models.base import DBSESSION
from autonomie.models.config import Config
session = DBSESSION()
Config.query().filter_by(
app='autonomie',
name='invoice_number_template'
).delete()
prefix = session.query(Config.value).filter_by(
app='autonomie',
name='invoiceprefix',
).scalar() or ''
default_format = Config(
app='autonomie',
name='invoice_number_template',
value=prefix + '{SEQYEAR}'
)
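    # e.g. with a previously stored prefix of 'FC' (hypothetical value) the
    # resulting template would be 'FC{SEQYEAR}'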
session.add(default_format)
session.flush()
def upgrade():
update_database_structure()
migrate_datas()
def downgrade():
pass
|
CroissanceCommune/autonomie
|
autonomie/alembic/versions/4_2_0_initialize_invoice_number_format_1e1a970ad004.py
|
Python
|
gpl-3.0
| 1,033
|
#!/usr/bin/python
# -*- coding: utf-8; tab-width: 4; indent-tabs-mode: t; python-indent: 4 -*-
"""
Role
====
The ``PluginFileLocator`` locates plugins when they are accessible via the filesystem.
Its default behaviour is to look for text files with the
'.yapsy-plugin' extension and to read the plugin's description in
them.
Customization
-------------
The behaviour of a ``PluginFileLocator`` can be customized by instantiating it with a specific 'analyzer'.
Two analyzers are already implemented and provided here:
``PluginFileAnalyzerWithInfoFile``
the default 'analyzer' that looks for plugin 'info files' as
        text files with a predefined extension. This implements the way
        yapsy has looked for plugins since version 1.
``PluginFileAnalyzerMathingRegex``
look for files matching a regex and considers them as being
the plugin itself.
It enforces the ``plugin locator`` policy as defined by ``IPluginLocator`` and used by ``PluginManager``.
``info_ext``
expects a plugin to be discovered through its *plugin info file*.
User just needs to provide an extension (without '.') to look
for *plugin_info_file*.
``regexp``
        looks for files matching the given regular expression.
        User just needs to provide the regular expression pattern.
All analyzers must enforce the policy represented by the ``IPluginFileAnalyzer`` interface.
"""
import os
import re
from yapsy import log
from yapsy.compat import ConfigParser, is_py2
from yapsy.PluginInfo import PluginInfo
from yapsy import PLUGIN_NAME_FORBIDEN_STRING
from yapsy.IPluginLocator import IPluginLocator
class IPluginFileAnalyzer(object):
"""
Define the methods expected by PluginFileLocator for its 'analyzer'.
"""
def __init__(self,name):
self.name = name
def isValidPlugin(self, filename):
"""
Check if the resource found at filename is a valid plugin.
"""
raise NotImplementedError("'isValidPlugin' must be reimplemented by %s" % self)
def getInfosDictFromPlugin(self, dirpath, filename):
"""
        Returns the extracted plugin information as a dictionary.
This function ensures that "name" and "path" are provided.
*dirpath* is the full path to the directory where the plugin file is
*filename* is the name (ie the basename) of the plugin file.
        If a *callback* function has not been provided for this strategy,
        the filename alone is used to extract minimal information.
"""
raise NotImplementedError("'getInfosDictFromPlugin' must be reimplemented by %s" % self)
class PluginFileAnalyzerWithInfoFile(IPluginFileAnalyzer):
"""
Consider plugins described by a textual description file.
A plugin is expected to be described by a text file ('ini' format) with a specific extension (.yapsy-plugin by default).
This file must contain at least the following information::
[Core]
Name = name of the module
Module = relative_path/to/python_file_or_directory
    Optionally the description file may also contain the following section (in addition to the above one)::
[Documentation]
Author = Author Name
Version = Major.minor
Website = url_for_plugin
Description = A simple one-sentence description
"""
def __init__(self, name, extensions="yapsy-plugin"):
"""
        Creates a new analyzer named *name*, dedicated to checking and analyzing plugins described by a textual "info file".
*name* name of the plugin.
*extensions* the expected extensions for the plugin info file. May be a string or a tuple of strings if several extensions are expected.
"""
IPluginFileAnalyzer.__init__(self,name)
self.setPluginInfoExtension(extensions)
def setPluginInfoExtension(self,extensions):
"""
Set the extension that will identify a plugin info file.
*extensions* May be a string or a tuple of strings if several extensions are expected.
"""
# Make sure extension is a tuple
if not isinstance(extensions, tuple):
extensions = (extensions, )
self.expectedExtensions = extensions
def isValidPlugin(self, filename):
"""
Check if it is a valid plugin based on the given plugin info file extension(s).
If several extensions are provided, the first matching will cause the function
to exit successfully.
"""
res = False
for ext in self.expectedExtensions:
if filename.endswith(".%s" % ext):
res = True
break
return res
def getPluginNameAndModuleFromStream(self, infoFileObject, candidate_infofile=None):
"""
Extract the name and module of a plugin from the
content of the info file that describes it and which
is stored in ``infoFileObject``.
.. note:: Prefer using ``_extractCorePluginInfo``
instead, whenever possible...
.. warning:: ``infoFileObject`` must be a file-like object:
either an opened file for instance or a string
buffer wrapped in a StringIO instance as another
example.
.. note:: ``candidate_infofile`` must be provided
whenever possible to get better error messages.
        Return a 3-tuple with the name of the plugin, its
module and the config_parser used to gather the core
data *in a tuple*, if the required info could be
localised, else return ``(None,None,None)``.
.. note:: This is supposed to be used internally by subclasses
and decorators.
"""
# parse the information buffer to get info about the plugin
config_parser = ConfigParser()
try:
if is_py2:
config_parser.readfp(infoFileObject)
else:
config_parser.read_file(infoFileObject)
except Exception as e:
log.debug("Could not parse the plugin file '%s' (exception raised was '%s')" % (candidate_infofile,e))
return (None, None, None)
# check if the basic info is available
if not config_parser.has_section("Core"):
log.debug("Plugin info file has no 'Core' section (in '%s')" % candidate_infofile)
return (None, None, None)
if not config_parser.has_option("Core","Name") or not config_parser.has_option("Core","Module"):
log.debug("Plugin info file has no 'Name' or 'Module' section (in '%s')" % candidate_infofile)
return (None, None, None)
# check that the given name is valid
name = config_parser.get("Core", "Name")
name = name.strip()
if PLUGIN_NAME_FORBIDEN_STRING in name:
log.debug("Plugin name contains forbiden character: %s (in '%s')" % (PLUGIN_NAME_FORBIDEN_STRING,
candidate_infofile))
return (None, None, None)
return (name, config_parser.get("Core", "Module"), config_parser)
def _extractCorePluginInfo(self,directory, filename):
"""
Gather the core information (name, and module to be loaded)
about a plugin described by it's info file (found at
'directory/filename').
Return a dictionary with name and path of the plugin as well
as the ConfigParser instance used to collect these info.
.. note:: This is supposed to be used internally by subclasses
and decorators.
"""
# now we can consider the file as a serious candidate
if not isinstance(filename, str):
# filename is a file object: use it
name, moduleName, config_parser = self.getPluginNameAndModuleFromStream(filename)
else:
candidate_infofile_path = os.path.join(directory, filename)
# parse the information file to get info about the plugin
with open(candidate_infofile_path) as candidate_infofile:
name, moduleName, config_parser = self.getPluginNameAndModuleFromStream(candidate_infofile,candidate_infofile_path)
if (name, moduleName, config_parser) == (None, None, None):
return (None,None)
infos = {"name":name, "path":os.path.join(directory, moduleName)}
return infos, config_parser
def _extractBasicPluginInfo(self,directory, filename):
"""
Gather some basic documentation about the plugin described by
        its info file (found at 'directory/filename').
Return a dictionary containing the core information (name and
path) as well as as the 'documentation' info (version, author,
description etc).
See also:
``self._extractCorePluginInfo``
"""
infos, config_parser = self._extractCorePluginInfo(directory, filename)
        # collect additional (but usually quite useful) information
if infos and config_parser and config_parser.has_section("Documentation"):
if config_parser.has_option("Documentation","Author"):
infos["author"] = config_parser.get("Documentation", "Author")
if config_parser.has_option("Documentation","Version"):
infos["version"] = config_parser.get("Documentation", "Version")
if config_parser.has_option("Documentation","Website"):
infos["website"] = config_parser.get("Documentation", "Website")
if config_parser.has_option("Documentation","Copyright"):
infos["copyright"] = config_parser.get("Documentation", "Copyright")
if config_parser.has_option("Documentation","Description"):
infos["description"] = config_parser.get("Documentation", "Description")
return infos, config_parser
def getInfosDictFromPlugin(self, dirpath, filename):
"""
        Returns the extracted plugin information as a dictionary.
This function ensures that "name" and "path" are provided.
        If a *callback* function has not been provided for this strategy,
        the filename alone is used to extract minimal information.
"""
infos, config_parser = self._extractBasicPluginInfo(dirpath, filename)
if not infos or infos.get("name", None) is None:
raise ValueError("Missing *name* of the plugin in extracted infos.")
if not infos or infos.get("path", None) is None:
raise ValueError("Missing *path* of the plugin in extracted infos.")
return infos, config_parser
class PluginFileAnalyzerMathingRegex(IPluginFileAnalyzer):
"""
    An analyzer that targets plugins described by files whose names match a given regex.
"""
def __init__(self, name, regexp):
IPluginFileAnalyzer.__init__(self,name)
self.regexp = regexp
def isValidPlugin(self, filename):
"""
Checks if the given filename is a valid plugin for this Strategy
"""
reg = re.compile(self.regexp)
if reg.match(filename) is not None:
return True
return False
def getInfosDictFromPlugin(self, dirpath, filename):
"""
        Returns the extracted plugin information as a dictionary.
        This function ensures that "name" and "path" are provided.
        """
        # use the filename alone to extract minimal information.
infos = {}
module_name = os.path.splitext(filename)[0]
plugin_filename = os.path.join(dirpath,filename)
if module_name == "__init__":
module_name = os.path.basename(dirpath)
plugin_filename = dirpath
infos["name"] = "%s" % module_name
infos["path"] = plugin_filename
cf_parser = ConfigParser()
cf_parser.add_section("Core")
cf_parser.set("Core","Name",infos["name"])
cf_parser.set("Core","Module",infos["path"])
return infos,cf_parser
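# --- Usage sketch (editor's illustration, not part of the original file) ---
# A regex-based analyzer treats every matching filename as a plugin module:
#
#   analyzer = PluginFileAnalyzerMathingRegex("py_files", r".*\.py$")
#   analyzer.isValidPlugin("my_plugin.py")   # -> True
#   analyzer.isValidPlugin("notes.txt")      # -> False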
class PluginFileLocator(IPluginLocator):
"""
Locates plugins on the file system using a set of analyzers to
    determine what files actually correspond to plugins.
    If more than one analyzer is used, the first to discover a
    new plugin prevents the other strategies from finding it too.
By default each directory set as a "plugin place" is scanned
recursively. You can change that by a call to
``disableRecursiveScan``.
"""
def __init__(self, analyzers=None, plugin_info_cls=PluginInfo):
"""
Defines the strategies, and the places for plugins to look into.
"""
IPluginLocator.__init__(self)
self._discovered_plugins = {}
self.setPluginPlaces(None)
self._analyzers = analyzers # analyzers used to locate plugins
if self._analyzers is None:
self._analyzers = [PluginFileAnalyzerWithInfoFile("info_ext")]
        self._default_plugin_info_cls = plugin_info_cls
self._plugin_info_cls_map = {}
        self._max_size = 1e3*1024  # size limit in bytes (1 MB by default)
self.recursive = True
def disableRecursiveScan(self):
"""
Disable recursive scan of the directories given as plugin places.
"""
self.recursive = False
def setAnalyzers(self, analyzers):
"""
Sets a new set of analyzers.
.. warning:: the new analyzers won't be aware of the plugin
info class that may have been set via a previous
call to ``setPluginInfoClass``.
"""
self._analyzers = analyzers
def removeAnalyzers(self, name):
"""
Removes analyzers of a given name.
"""
analyzersListCopy = self._analyzers[:]
foundAndRemoved = False
for obj in analyzersListCopy:
if obj.name == name:
self._analyzers.remove(obj)
foundAndRemoved = True
if not foundAndRemoved:
log.debug("'%s' is not a known strategy name: can't remove it." % name)
def removeAllAnalyzer(self):
"""
Remove all analyzers.
"""
self._analyzers = []
def appendAnalyzer(self, analyzer):
"""
Append an analyzer to the existing list.
"""
self._analyzers.append(analyzer)
def _getInfoForPluginFromAnalyzer(self,analyzer,dirpath, filename):
"""
Return an instance of plugin_info_cls filled with data extracted by the analyzer.
May return None if the analyzer fails to extract any info.
"""
plugin_info_dict,config_parser = analyzer.getInfosDictFromPlugin(dirpath, filename)
if plugin_info_dict is None:
return None
plugin_info_cls = self._plugin_info_cls_map.get(analyzer.name,self._default_plugin_info_cls)
plugin_info = plugin_info_cls(plugin_info_dict["name"],plugin_info_dict["path"])
plugin_info.details = config_parser
return plugin_info
def locatePlugins(self):
"""
Walk through the plugins' places and look for plugins.
Return the candidates and number of plugins found.
"""
# print "%s.locatePlugins" % self.__class__
_candidates = []
_discovered = {}
for directory in map(os.path.abspath, self.plugins_places):
# first of all, is it a directory :)
if not os.path.isdir(directory):
log.debug("%s skips %s (not a directory)" % (self.__class__.__name__, directory))
continue
if self.recursive:
debug_txt_mode = "recursively"
walk_iter = os.walk(directory, followlinks=True)
else:
debug_txt_mode = "non-recursively"
walk_iter = [(directory,[],os.listdir(directory))]
# iteratively walks through the directory
log.debug("%s walks (%s) into directory: %s" % (self.__class__.__name__, debug_txt_mode, directory))
for item in walk_iter:
dirpath = item[0]
for filename in item[2]:
# print("testing candidate file %s" % filename)
for analyzer in self._analyzers:
# print("... with analyzer %s" % analyzer.name)
# eliminate the obvious non plugin files
if not analyzer.isValidPlugin(filename):
log.debug("%s is not a valid plugin for strategy %s" % (filename, analyzer.name))
continue
candidate_infofile = os.path.join(dirpath, filename)
if candidate_infofile in _discovered:
log.debug("%s (with strategy %s) rejected because already discovered" % (candidate_infofile, analyzer.name))
continue
log.debug("%s found a candidate:\n %s" % (self.__class__.__name__, candidate_infofile))
# print candidate_infofile
plugin_info = self._getInfoForPluginFromAnalyzer(analyzer, dirpath, filename)
if plugin_info is None:
log.warning("Plugin candidate '%s' rejected by strategy '%s'" % (candidate_infofile, analyzer.name))
                            break # this was the right strategy for this file: it failed -> not a plugin -> don't try another strategy
# now determine the path of the file to execute,
                        # depending on whether the path indicated is a
# directory or a file
# print plugin_info.path
# Remember all the files belonging to a discovered
# plugin, so that strategies (if several in use) won't
# collide
if os.path.isdir(plugin_info.path):
candidate_filepath = os.path.join(plugin_info.path, "__init__")
# it is a package, adds all the files concerned
for _file in os.listdir(plugin_info.path):
if _file.endswith(".py"):
self._discovered_plugins[os.path.join(plugin_info.path, _file)] = candidate_filepath
_discovered[os.path.join(plugin_info.path, _file)] = candidate_filepath
elif (plugin_info.path.endswith(".py") and os.path.isfile(plugin_info.path)) or os.path.isfile(plugin_info.path+".py"):
candidate_filepath = plugin_info.path
if candidate_filepath.endswith(".py"):
candidate_filepath = candidate_filepath[:-3]
# it is a file, adds it
self._discovered_plugins[".".join((plugin_info.path, "py"))] = candidate_filepath
_discovered[".".join((plugin_info.path, "py"))] = candidate_filepath
else:
log.error("Plugin candidate rejected: cannot find the file or directory module for '%s'" % (candidate_infofile))
break
# print candidate_filepath
_candidates.append((candidate_infofile, candidate_filepath, plugin_info))
# finally the candidate_infofile must not be discovered again
_discovered[candidate_infofile] = candidate_filepath
self._discovered_plugins[candidate_infofile] = candidate_filepath
# print "%s found by strategy %s" % (candidate_filepath, analyzer.name)
return _candidates, len(_candidates)
def gatherCorePluginInfo(self, directory, filename):
"""
        Return a ``PluginInfo`` as well as the ``ConfigParser`` used to build it,
        if filename is a valid plugin discovered by any of the known
        strategies in use. Returns None,None otherwise.
"""
for analyzer in self._analyzers:
# eliminate the obvious non plugin files
if not analyzer.isValidPlugin(filename):
continue
plugin_info = self._getInfoForPluginFromAnalyzer(analyzer,directory, filename)
return plugin_info,plugin_info.details
return None,None
# -----------------------------------------------
# Backward compatible methods
    # Note: their implementation must conform to their
    # counterparts in yapsy<1.10
# -----------------------------------------------
def getPluginNameAndModuleFromStream(self, infoFileObject, candidate_infofile=None):
for analyzer in self._analyzers:
if analyzer.name == "info_ext":
return analyzer.getPluginNameAndModuleFromStream(infoFileObject)
else:
raise RuntimeError("No current file analyzer is able to provide plugin information from stream")
def setPluginInfoClass(self, picls, name=None):
"""
Set the class that holds PluginInfo. The class should inherit
from ``PluginInfo``.
If name is given, then the class will be used only by the corresponding analyzer.
If name is None, the class will be set for all analyzers.
"""
if name is None:
self._default_plugin_info_cls = picls
self._plugin_info_cls_map = {}
else:
self._plugin_info_cls_map[name] = picls
def setPluginPlaces(self, directories_list):
"""
        Set the list of directories where to look for plugins.
"""
if directories_list is None:
directories_list = [os.path.dirname(__file__)]
self.plugins_places = directories_list
def updatePluginPlaces(self, directories_list):
"""
        Updates the list of directories where to look for plugins.
"""
self.plugins_places = list(set.union(set(directories_list), set(self.plugins_places)))
def setPluginInfoExtension(self, ext):
"""
DEPRECATED(>1.9): for backward compatibility. Directly configure the
        IPluginLocator instance instead!
This will only work if the strategy "info_ext" is active
for locating plugins.
"""
for analyzer in self._analyzers:
if analyzer.name == "info_ext":
analyzer.setPluginInfoExtension(ext)
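# --- Usage sketch (editor's illustration; the plugin directory is hypothetical) ---
# Wire an analyzer into a locator and collect candidates; each candidate is a
# (info_file_path, module_path, PluginInfo) tuple:
#
#   locator = PluginFileLocator(
#       analyzers=[PluginFileAnalyzerMathingRegex("py_files", r".*\.py$")])
#   locator.setPluginPlaces(["/path/to/plugins"])
#   candidates, count = locator.locatePlugins()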
|
meyt/mehrcal
|
mehrcal/yapsy/PluginFileLocator.py
|
Python
|
gpl-3.0
| 19,587
|
# Copyright Daniel Dunn 2013-2015
# This file is part of Kaithem Automation.
# Kaithem Automation is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
# Kaithem Automation is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Kaithem Automation. If not, see <http://www.gnu.org/licenses/>.
"""This is the global general purpose utility thing that is accesable from almost anywhere in user code."""
import traceback
from . import tagpoints, geolocation
import time
import random
import subprocess
import threading
import json
import yaml
import os
import weakref
import scullery.persist
import cherrypy
from . import unitsofmeasure, workers, sound, messagebus, util, widgets, registry, directories, pages, config, persist, breakpoint,statemachines
from . import devices, alerts, midi, gpio, theming
from . import version_info
from . import astrallibwrapper as sky
bootTime = time.time()
# Persist is one of the modules we want to be usable outside of kaithem, so we add our path resolution stuff here.
def resolvePath(fn, expand=False):
    # Anchor relative paths that don't start with ~ or $ to the module data dir
    if not (os.path.isabs(fn) or fn.startswith("~") or fn.startswith("$")):
        fn = os.path.join(directories.moduledatadir, fn)
    return (os.path.expandvars(os.path.expanduser(fn))) if expand else fn
persist.resolvePath = resolvePath
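# e.g. (editor's sketch; file names are hypothetical):
#   resolvePath("foo/bar.yaml")              -> <moduledatadir>/foo/bar.yaml
#   resolvePath("~/bar.yaml", expand=True)   -> expanded under the user's home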
# This exception is what we raise from within the page handler to serve a static file
ServeFileInsteadOfRenderingPageException=pages.ServeFileInsteadOfRenderingPageException
plugins = weakref.WeakValueDictionary()
class TagInterface():
def __getitem__(self, k):
return tagpoints.Tag(k)
def StringTag(self, k):
return tagpoints.StringTag(k)
def ObjectTag(self, k):
return tagpoints.ObjectTag(k)
def BinaryTag(self, k):
return tagpoints.BinaryTag(k)
def __iter__(self):
return tagpoints.allTagsAtomic
TagClass = tagpoints._TagPoint
#HysteresisFilter = tagpoints.HysteresisFilter
LowpassFilter = tagpoints.LowpassFilter
HighpassFilter = tagpoints.HighpassFilter
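# --- Usage sketch (editor's illustration; the tag name is hypothetical) ---
# TagInterface above is exposed to user code as kaithem.tags:
#
#   t = kaithem.tags["/demo/temperature"]    # same as tagpoints.Tag(...)
#   s = kaithem.tags.StringTag("/demo/label")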
class SoundOutput():
pass
class Kaithem():
devices = devices.DeviceNamespace()
tags = TagInterface()
context = threading.local()
def __getattr__(self, name):
if name in plugins:
return pluginInterface(plugins[name])
else:
raise AttributeError(name)
class units():
convert = unitsofmeasure.convert
units = unitsofmeasure.units
getType = unitsofmeasure.getUnitType
define = unitsofmeasure.defineUnit
class users(object):
@staticmethod
def checkPermission(user, permission):
try:
if pages.canUserDoThis(permission, user):
return True
else:
return False
except KeyError:
return False
class alerts(object):
Alert = alerts.Alert
class gpio():
DigitalInput = gpio.DigitalInput
DigitalOutput = gpio.DigitalOutput
PWMOutput = gpio.PWMOutput
class logging(object):
@staticmethod
def flushsyslog():
import pylogginghandler
pylogginghandler.syslogger.flush()
class mqtt(object):
@staticmethod
def Connection(server, port=1883, password=None, alertPriority="info", alertAck=True, messageBusName=None,connectionID=None):
from src import mqtt as mqttPatch
from scullery import mqtt
return mqtt.getConnection(server=server, port=port, password=password, alertPriority=alertPriority, alertAck=alertAck, messageBusName=messageBusName,connectionID=connectionID)
@staticmethod
def listConnections():
from src import mqtt as mqttPatch
from scullery import mqtt
return mqttPatch.listConnections()
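    # --- Usage sketch (editor's illustration; the broker host is hypothetical) ---
    #   c = kaithem.mqtt.Connection("localhost", 1883)
    #   kaithem.mqtt.listConnections()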
class misc(object):
version = version_info.__version__
version_info = version_info.__version_info__
@staticmethod
def lorem():
return(random.choice(sentences))
# return ("""lorem ipsum dolor sit amet, consectetur adipiscing elit. Proin vitae laoreet eros. Integer nunc nisl, ultrices et commodo sit amet, dapibus vitae sem. Nam vel odio metus, ac cursus nulla. Pellentesque scelerisque consequat massa, non mollis dolor commodo ultrices. Vivamus sit amet sapien non metus fringilla pretium ut vitae lorem. Donec eu purus nulla, quis venenatis ipsum. Proin rhoncus laoreet ullamcorper. Etiam fringilla ligula ut erat feugiat et pulvinar velit fringilla.""")
@staticmethod
def do(f):
workers.do(f)
@staticmethod
def location():
lat,lon = geolocation.getCoords()
if not lon or not lat:
raise RuntimeError("No location set")
return((lat, lon))
@staticmethod
def uptime():
return time.time()-bootTime
@staticmethod
def errors(f):
try:
f()
except Exception as e:
return e
return None
@staticmethod
def breakpoint():
breakpoint.breakpoint()
@staticmethod
def mkdir(d):
util.ensure_dir2(d)
effwords = util.eff_wordlist
vardir = directories.vardir
# In modules.py, we insert a resource API object.
#kaithemobj.kaithem.resource = ResourceAPI()
class time(object):
@staticmethod
def lantime():
return time.time()
@staticmethod
def uptime():
return time.time()-bootTime
@staticmethod
def strftime(*args):
return unitsofmeasure.strftime(*args)
@staticmethod
def time():
return time.time()
@staticmethod
def month():
return(unitsofmeasure.Month())
@staticmethod
def day():
return(time.localtime().tm_mday)
@staticmethod
def year():
return(time.localtime().tm_year)
@staticmethod
def hour():
return(time.localtime().tm_hour)
@staticmethod
def minute():
return(time.localtime().tm_min)
@staticmethod
def second():
return(time.localtime().tm_sec)
@staticmethod
        def isdst():
            # tm_isdst is 1 or 0; cast to bool because a bare int here is just weird.
            return(bool(time.localtime().tm_isdst))
@staticmethod
def dayofweek():
return (unitsofmeasure.DayOfWeek())
@staticmethod
        def sunsetTime(lat=None, lon=None, date=None):
            if lat is None:
                if lon is None:
                    lat, lon = geolocation.getCoords()
                else:
                    raise ValueError("You set lon, but not lat?")
            if lat is None or lon is None:
                raise RuntimeError(
                    "No server location set, fix this in system settings")
            return (sky.sunset(lat, lon, date))
        @staticmethod
        def sunriseTime(lat=None, lon=None, date=None):
            if lat is None:
                if lon is None:
                    lat, lon = geolocation.getCoords()
                else:
                    raise ValueError("You set lon, but not lat?")
            if lat is None or lon is None:
                raise RuntimeError(
                    "No server location set, fix this in system settings")
            return (sky.sunrise(lat, lon, date))
        @staticmethod
        def civilDuskTime(lat=None, lon=None, date=None):
            if lat is None:
                if lon is None:
                    lat, lon = geolocation.getCoords()
                else:
                    raise ValueError("You set lon, but not lat?")
            if lat is None or lon is None:
                raise RuntimeError(
                    "No server location set, fix this in system settings")
            return (sky.dusk(lat, lon, date))
        @staticmethod
        def civilDawnTime(lat=None, lon=None, date=None):
            if lat is None:
                if lon is None:
                    lat, lon = geolocation.getCoords()
                else:
                    raise ValueError("You set lon, but not lat?")
            if lat is None or lon is None:
                raise RuntimeError(
                    "No server location set, fix this in system settings")
            return (sky.dawn(lat, lon, date))
        @staticmethod
        def rahuStart(lat=None, lon=None, date=None):
            if lat is None:
                if lon is None:
                    lat, lon = geolocation.getCoords()
                else:
                    raise ValueError("You set lon, but not lat?")
            if lat is None or lon is None:
                raise RuntimeError(
                    "No server location set, fix this in system settings")
            return (sky.rahu(lat, lon, date)[0])
        @staticmethod
        def rahuEnd(lat=None, lon=None, date=None):
            if lat is None:
                if lon is None:
                    lat, lon = geolocation.getCoords()
                else:
                    raise ValueError("You set lon, but not lat?")
            if lat is None or lon is None:
                raise RuntimeError(
                    "No server location set, fix this in system settings")
            return (sky.rahu(lat, lon, date)[1])
        @staticmethod
        def isDark(lat=None, lon=None):
            if lat is None:
                if lon is None:
                    lat, lon = geolocation.getCoords()
                else:
                    raise ValueError("You set lon, but not lat?")
            if lat is None or lon is None:
                raise RuntimeError(
                    "No server location set, fix this in system settings")
            return (sky.isDark(lat, lon))
        @staticmethod
        def isRahu(lat=None, lon=None):
            if lat is None:
                if lon is None:
                    lat, lon = geolocation.getCoords()
                else:
                    raise ValueError("You set lon, but not lat?")
            if lat is None or lon is None:
                raise RuntimeError(
                    "No server location set, fix this in system settings")
            return (sky.isRahu(lat, lon))
        @staticmethod
        def isDay(lat=None, lon=None):
            if lat is None:
                if lon is None:
                    lat, lon = geolocation.getCoords()
            if lat is None or lon is None:
                raise RuntimeError(
                    "No server location set, fix this in system settings")
            return (sky.isDay(lat, lon))
        @staticmethod
        def isNight(lat=None, lon=None):
            if lat is None:
                if lon is None:
                    lat, lon = geolocation.getCoords()
            if lat is None or lon is None:
                raise RuntimeError(
                    "No server location set, fix this in system settings")
            return (sky.isNight(lat, lon))
        @staticmethod
        def isLight(lat=None, lon=None):
            if lat is None:
                if lon is None:
                    lat, lon = geolocation.getCoords()
            if lat is None or lon is None:
                raise RuntimeError(
                    "No server location set, fix this in system settings")
            return (sky.isLight(lat, lon))
@staticmethod
def moonPhase():
return sky.moon()
@staticmethod
def moonPercent():
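            # Editor's note: sky.moon() is assumed to return the lunar phase in
            # days (0..~28, with 14 = full); phases past 14 are folded back so
            # the result peaks at 100 around the full moon.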
x = sky.moon()
if x > 14:
x -= 14
x = 14-x
return 100*(x/14.0)
@staticmethod
def accuracy():
return util.timeaccuracy()
class sys(object):
@staticmethod
def shellex(cmd):
return (subprocess.check_output(cmd, shell=True))
@staticmethod
def shellexbg(cmd):
subprocess.Popen(cmd, shell=True)
@staticmethod
def lsdirs(path):
return util.get_immediate_subdirectories(path)
@staticmethod
def lsfiles(path):
return util.get_files(path)
@staticmethod
def which(exe):
return util.which(exe)
@staticmethod
def sensors():
try:
if util.which('sensors'):
return (subprocess.check_output('sensors').decode('utf8'))
else:
return('"sensors" command failed(lm_sensors not available)')
except:
return('sensors call failed')
class registry(object):
@staticmethod
def set(key, value):
registry.set(key, value)
@staticmethod
def setschema(key, schema):
registry.setschema(key, schema)
@staticmethod
def delete(key):
registry.delete(key)
@staticmethod
def get(*args, **kwargs):
return registry.get(*args, **kwargs)
class states(object):
StateMachine = statemachines.StateMachine
class web(object):
# TODO: Deprecate webresource stuff
@staticmethod
def resource(name):
return pages.webResources[name].url
controllers = pages.nativeHandlers
navBarPlugins = pages.navBarPlugins
theming = theming
@staticmethod
def freeboard(page, kwargs, plugins=[]):
"Returns the ready-to-embed code for freeboard. Used to unclutter user created pages that use it."
if cherrypy.request.method == "POST":
import re
import html
pages.require("/admin/modules.edit")
c = re.sub(r"<\s*freeboard-data\s*>[\s\S]*<\s*\/freeboard-data\s*>", "<freeboard-data>\n" + html.escape(
yaml.dump(json.loads(kwargs['bd'])))+"\n</freeboard-data>", page.getContent())
page.setContent(c)
else:
return pages.get_template("freeboard/app.html").render(plugins=plugins)
@staticmethod
def unurl(s):
return util.unurl(s)
@staticmethod
def url(s):
return util.url(s)
@staticmethod
def goBack():
raise cherrypy.HTTPRedirect(cherrypy.request.headers['Referer'])
@staticmethod
def goto(url):
raise cherrypy.HTTPRedirect(url)
@staticmethod
def serveFile(*a,**k):
pages.serveFile(*a,**k)
@staticmethod
def user():
x = pages.getAcessingUser()
if x:
return x
else:
return ''
@staticmethod
def hasPermission(permission):
return pages.canUserDoThis(permission)
midi = midi.MidiAPI()
class sound(object):
builtinSounds = sound.builtinSounds
resolveSound = sound.resolveSound
oggTest = sound.oggSoundTest
directories = config.config['audio-paths']
@staticmethod
def outputs():
try:
from src import jackmanager
#Always
                try:
                    x = [i.name for i in jackmanager.getPorts(is_audio=True, is_input=True)]
                except Exception:
                    print(traceback.format_exc())
                    x = []
                prefixes = {}
                op = []
for i in x:
if not i.split(":")[0] in prefixes:
prefixes[i.split(":")[0]]=i
op.append(i.split(":")[0])
op.append(i)
return ['']+op
            except Exception:
print(traceback.format_exc())
return []
@staticmethod
def play(*args, **kwargs):
sound.playSound(*args, **kwargs)
@staticmethod
def stop(*args, **kwargs):
sound.stopSound(*args, **kwargs)
@staticmethod
def pause(*args, **kwargs):
sound.pause(*args, **kwargs)
@staticmethod
def resume(*args, **kwargs):
sound.resume(*args, **kwargs)
@staticmethod
def stopAll():
sound.stopAllSounds()
@staticmethod
def isPlaying(*args, **kwargs):
return sound.isPlaying(*args, **kwargs)
@staticmethod
def position(*args, **kwargs):
return sound.position(*args, **kwargs)
@staticmethod
def setvol(*args, **kwargs):
return sound.setvol(*args, **kwargs)
@staticmethod
def setEQ(*args, **kwargs):
return sound.setEQ(*args, **kwargs)
@staticmethod
def fadeTo(*args, **kwargs):
return sound.fadeTo(*args, **kwargs)
@staticmethod
def preload(*args, **kwargs):
return sound.preload(*args, **kwargs)
class message():
@staticmethod
def post(topic, message):
messagebus.postMessage(topic, message)
@staticmethod
def subscribe(topic, callback):
messagebus.subscribe(topic, callback)
@staticmethod
def unsubscribe(topic, callback):
messagebus.unsubscribe(topic, callback)
class pymessage():
@staticmethod
def post(topic, message):
messagebus.pyPostMessage(topic, message)
@staticmethod
def subscribe(topic, callback):
messagebus.pySubscribe(topic, callback)
class persist():
unsaved = scullery.persist.unsavedFiles
@staticmethod
def load(*args, **kwargs):
return persist.load(*args, **kwargs)
@staticmethod
def save(*args, **kwargs):
return persist.save(*args, **kwargs)
class string():
@staticmethod
def usrstrftime(*a):
return unitsofmeasure.strftime(*a)
@staticmethod
def SIFormat(number, d=2):
return unitsofmeasure.siFormatNumber(number, d)
@staticmethod
def formatTimeInterval(s, places=2, clock=False):
return unitsofmeasure.formatTimeInterval(s, places, clock)
class events():
pass
# Stuff gets inserted here externally
class obj():
pass
kaithem = Kaithem()
kaithem.widget = widgets
kaithem.globals = obj() # this is just a place to stash stuff.
if config.config['quotes-file'] == 'default':
sentences = kaithem.persist.load(
os.path.join(directories.datadir, "quotes.yaml"))
else:
sentences = kaithem.persist.load(config.config['quotes-file'])
|
EternityForest/KaithemAutomation
|
kaithem/src/kaithemobj.py
|
Python
|
gpl-3.0
| 19,149
|
import unittest
import os.path
import sys
import numpy as np
from opm.io.parser import Parser
from opm.io.parser import ParseContext
from opm.io.deck import DeckKeyword
try:
from tests.utils import test_path
except ImportError:
from utils import test_path
unit_foot = 0.3048 #meters
class TestParser(unittest.TestCase):
REGIONDATA = """
START -- 0
10 MAI 2007 /
RUNSPEC
FIELD
DIMENS
2 2 1 /
GRID
DX
4*0.25 /
DY
4*0.25 /
DZ
4*0.25 /
TOPS
4*0.25 /
REGIONS
OPERNUM
3 3 1 2 /
FIPNUM
1 1 2 3 /
"""
def setUp(self):
self.spe3fn = test_path('spe3/SPE3CASE1.DATA')
self.norne_fname = test_path('../examples/data/norne/NORNE_ATW2013.DATA')
def test_create(self):
parser = Parser()
deck = parser.parse(self.spe3fn)
active_unit_system = deck.active_unit_system()
default_unit_system = deck.default_unit_system()
self.assertEqual(active_unit_system.name, "Field")
context = ParseContext()
deck = parser.parse(self.spe3fn, context)
with open(self.spe3fn) as f:
string = f.read()
deck = parser.parse_string(string)
deck = parser.parse_string(string, context)
def test_deck_kw_records(self):
parser = Parser()
deck = parser.parse_string(self.REGIONDATA)
active_unit_system = deck.active_unit_system()
default_unit_system = deck.default_unit_system()
self.assertEqual(active_unit_system.name, "Field")
with self.assertRaises(ValueError):
kw = parser["NOT_A_VALID_KEYWORD"]
field = parser["FIELD"]
assert(field.name == "FIELD")
dkw_field = DeckKeyword(field)
assert(dkw_field.name == "FIELD")
DeckKeyword(parser["AQUCWFAC"], [[]], active_unit_system, default_unit_system)
with self.assertRaises(TypeError):
dkw_wrong = DeckKeyword(parser["AQUCWFAC"], [22.2, 0.25], active_unit_system, default_unit_system)
dkw_aqannc = DeckKeyword(parser["AQANNC"], [[12, 1, 2, 3, 0.89], [13, 4, 5, 6, 0.625]], active_unit_system, default_unit_system)
assert( len(dkw_aqannc[0]) == 5 )
assert( dkw_aqannc[0][2].get_int(0) == 2 )
assert( dkw_aqannc[1][1].get_int(0) == 4 )
with self.assertRaises(ValueError):
value = dkw_aqannc[1][1].get_raw(0)
with self.assertRaises(ValueError):
value = dkw_aqannc[1][1].get_SI(0)
assert( dkw_aqannc[1][4].get_raw(0) == 0.625 )
self.assertAlmostEqual( dkw_aqannc[1][4].get_SI(0), 0.625 * unit_foot**2 )
assert( dkw_aqannc[1][4].get_raw_data_list() == [0.625] )
self.assertAlmostEqual( dkw_aqannc[1][4].get_SI_data_list()[0], 0.625 * unit_foot**2 )
with self.assertRaises(ValueError):
value = dkw_aqannc[1][4].get_int(0)
dkw_aqantrc = DeckKeyword(parser["AQANTRC"], [[12, "ABC", 8]], active_unit_system, default_unit_system)
assert( dkw_aqantrc[0][1].get_str(0) == "ABC" )
assert( dkw_aqantrc[0][2].get_raw(0) == 8.0 )
dkw1 = DeckKeyword(parser["AQUCWFAC"], [["*", 0.25]], active_unit_system, default_unit_system)
assert( dkw1[0][0].get_raw(0) == 0.0 )
assert( dkw1[0][1].get_raw(0) == 0.25 )
dkw2 = DeckKeyword(parser["AQUCWFAC"], [[0.25, "*"]], active_unit_system, default_unit_system)
assert( dkw2[0][0].get_raw(0) == 0.25 )
assert( dkw2[0][1].get_raw(0) == 1.0 )
dkw3 = DeckKeyword(parser["AQUCWFAC"], [[0.50]], active_unit_system, default_unit_system)
assert( dkw3[0][0].get_raw(0) == 0.50 )
assert( dkw3[0][1].get_raw(0) == 1.0 )
dkw4 = DeckKeyword(parser["CBMOPTS"], [["3*", "A", "B", "C", "2*", 0.375]], active_unit_system, default_unit_system)
assert( dkw4[0][0].get_str(0) == "TIMEDEP" )
assert( dkw4[0][2].get_str(0) == "NOKRMIX" )
assert( dkw4[0][3].get_str(0) == "A" )
assert( dkw4[0][6].get_str(0) == "PMPVK" )
assert( dkw4[0][8].get_raw(0) == 0.375 )
with self.assertRaises(TypeError):
dkw4[0][8].get_data_list()
with self.assertRaises(TypeError):
DeckKeyword(parser["CBMOPTS"], [["3*", "A", "B", "C", "R2*", 0.77]], active_unit_system, default_unit_system)
with self.assertRaises(TypeError):
DeckKeyword(parser["CBMOPTS"], [["3*", "A", "B", "C", "2.2*", 0.77]], active_unit_system, default_unit_system)
dkw5 = DeckKeyword(parser["AQUCWFAC"], [["2*5.5"]], active_unit_system, default_unit_system)
assert( dkw5[0][0].get_raw(0) == 5.5 )
assert( dkw5[0][1].get_raw(0) == 5.5 )
with self.assertRaises(ValueError):
raise DeckKeyword(parser["AQANTRC"], [["1*2.2", "ABC", 8]], active_unit_system, default_unit_system)
def test_deck_kw_vector(self):
parser = Parser()
deck = parser.parse_string(self.REGIONDATA)
active_unit_system = deck.active_unit_system()
default_unit_system = deck.default_unit_system()
self.assertEqual(active_unit_system.name, "Field")
int_array = np.array([0, 1, 2, 3])
hbnum_kw = DeckKeyword( parser["HBNUM"], int_array)
assert( np.array_equal(hbnum_kw.get_int_array(), int_array) )
raw_array = np.array([1.1, 2.2, 3.3])
zcorn_kw = DeckKeyword( parser["ZCORN"], raw_array, active_unit_system, default_unit_system)
assert( np.array_equal(zcorn_kw.get_raw_array(), raw_array) )
si_array = zcorn_kw.get_SI_array()
self.assertAlmostEqual( si_array[0], 1.1 * unit_foot )
self.assertAlmostEqual( si_array[2], 3.3 * unit_foot )
assert( not( "ZCORN" in deck ) )
deck.add( zcorn_kw )
assert( "ZCORN" in deck )
if __name__ == "__main__":
unittest.main()
|
OPM/opm-cmake
|
python/tests/test_parser.py
|
Python
|
gpl-3.0
| 5,804
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division, unicode_literals
##
## This file is part of DaBroker, a distributed data access manager.
##
## DaBroker is Copyright © 2014 by Matthias Urlichs <matthias@urlichs.de>,
## it is licensed under the GPLv3. See the file `README.rst` for details,
## including optimistic statements by the author.
##
## This paragraph is auto-generated and may self-destruct at any time,
## courtesy of "make update". The original is in ‘utils/_boilerplate.py’.
## Thus, please do not remove the next line, or insert any blank lines.
##BP
# generic test setup
from weakref import ref, WeakValueDictionary
from dabroker.base.transport import BaseTransport
from dabroker.base.transport.local import RPCmessage,LocalQueue
from dabroker.util import format_msg
from dabroker.util.thread import spawned
import logging,sys,os
logger = logging.getLogger("dabroker.server.transport.local")
class Transport(BaseTransport):
"""Server side of the LocalQueue transport"""
def __init__(self,callbacks,cfg):
#logger.debug("Server: setting up")
        self.trace = cfg.get('trace', 0)
self.callbacks = callbacks
self.p = LocalQueue(cfg)
self.p.server = ref(self) # for clients to find me
self.clients = WeakValueDictionary() # clients add themselves here
@spawned
def _process(self,msg):
m = self.callbacks.recv(msg.msg)
msg.reply(m)
def run(self):
logger.debug("Server: wait for messages")
while self.p.request_q is not None:
msg = self.p.request_q.get()
#logger.debug("Server: received %r",msg)
self._process(msg)
def send(self,msg):
m = msg
msg = RPCmessage(msg, _trace=self.trace)
self.last_msgid -= 1
msg.msgid = self.last_msgid
if self.trace:
logger.debug("Server: send msg %s:\n%s",msg.msgid,format_msg(m))
for c in self.clients.values():
c.reply_q.put(msg)
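# --- Wiring sketch (editor's illustration; the callbacks object is hypothetical) ---
#   t = Transport(callbacks=my_callbacks, cfg={'trace': 0})
#   t.run()   # blocks, dispatching each queued request through callbacks.recv()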
|
smurfix/DaBroker
|
dabroker/server/transport/local.py
|
Python
|
gpl-3.0
| 1,877
|
def horner(a, x):
y = 0.0
i = a.length - 1
while i >= 0:
y = a[i] + x * y
i = i - 1
return y
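# Editor's note: `a` is assumed to be an array-like object exposing a
# 0-indexed __getitem__ and a `length` attribute. For coefficients
# (a[0], a[1], a[2]) = (1, 3, 2), i.e. 1 + 3x + 2x^2:
#   horner(a, 2) == 1 + 3*2 + 2*4 == 15.0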
|
wojtask/CormenPy
|
src/chapter02/textbook_problem2_3.py
|
Python
|
gpl-3.0
| 125
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
| This file is part of the web2py Web Framework
| Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
| Plural subsystem is created by Vladyslav Kozlovskyy (Ukraine) <dbdevelop@gmail.com>
Translation system
--------------------------------------------
"""
import os
import re
import sys
import pkgutil
import logging
from cgi import escape
from threading import RLock
try:
import copyreg as copy_reg # python 3
except ImportError:
import copy_reg # python 2
from gluon.portalocker import read_locked, LockedFile
from utf8 import Utf8
from gluon.fileutils import listdir
from gluon.cfs import getcfs
from gluon.html import XML, xmlescape
from gluon.contrib.markmin.markmin2html import render, markmin_escape
from string import maketrans
__all__ = ['translator', 'findT', 'update_all_languages']
ostat = os.stat
oslistdir = os.listdir
pjoin = os.path.join
pexists = os.path.exists
pdirname = os.path.dirname
isdir = os.path.isdir
DEFAULT_LANGUAGE = 'en'
DEFAULT_LANGUAGE_NAME = 'English'
# DEFAULT PLURAL-FORMS RULES:
# language doesn't use plural forms
DEFAULT_NPLURALS = 1
# only one singular/plural form is used
DEFAULT_GET_PLURAL_ID = lambda n: 0
# word is unchangeable
DEFAULT_CONSTRUCT_PLURAL_FORM = lambda word, plural_id: word
NUMBERS = (int, long, float)
# pattern to find T(blah blah blah) expressions
PY_STRING_LITERAL_RE = r'(?<=[^\w]T\()(?P<name>'\
+ r"[uU]?[rR]?(?:'''(?:[^']|'{1,2}(?!'))*''')|"\
+ r"(?:'(?:[^'\\]|\\.)*')|" + r'(?:"""(?:[^"]|"{1,2}(?!"))*""")|'\
+ r'(?:"(?:[^"\\]|\\.)*"))'
regex_translate = re.compile(PY_STRING_LITERAL_RE, re.DOTALL)
regex_param = re.compile(r'{(?P<s>.+?)}')
# pattern for a valid accept_language
regex_language = \
re.compile('([a-z]{2,3}(?:\-[a-z]{2})?(?:\-[a-z]{2})?)(?:[,;]|$)')
regex_langfile = re.compile('^[a-z]{2,3}(-[a-z]{2})?\.py$')
regex_backslash = re.compile(r"\\([\\{}%])")
regex_plural = re.compile('%({.+?})')
regex_plural_dict = re.compile('^{(?P<w>[^()[\]][^()[\]]*?)\((?P<n>[^()\[\]]+)\)}$') # %%{word(varname or number)}
regex_plural_tuple = re.compile(
'^{(?P<w>[^[\]()]+)(?:\[(?P<i>\d+)\])?}$') # %%{word[index]} or %%{word}
regex_plural_file = re.compile('^plural-[a-zA-Z]{2}(-[a-zA-Z]{2})?\.py$')
def is_writable():
""" returns True if and only if the filesystem is writable """
from gluon.settings import global_settings
return not global_settings.web2py_runtime_gae
def safe_eval(text):
if text.strip():
try:
import ast
return ast.literal_eval(text)
except ImportError:
return eval(text, {}, {})
return None
# used as default filter in translator.M()
def markmin(s):
def markmin_aux(m):
return '{%s}' % markmin_escape(m.group('s'))
return render(regex_param.sub(markmin_aux, s),
sep='br', autolinks=None, id_prefix='')
# UTF8 helper functions
def upper_fun(s):
return unicode(s, 'utf-8').upper().encode('utf-8')
def title_fun(s):
return unicode(s, 'utf-8').title().encode('utf-8')
def cap_fun(s):
return unicode(s, 'utf-8').capitalize().encode('utf-8')
ttab_in = maketrans("\\%{}", '\x1c\x1d\x1e\x1f')
ttab_out = maketrans('\x1c\x1d\x1e\x1f', "\\%{}")
# cache of translated messages:
# global_language_cache:
# { 'languages/xx.py':
# ( {"def-message": "xx-message",
# ...
# "def-message": "xx-message"}, lock_object )
# 'languages/yy.py': ( {dict}, lock_object )
# ...
# }
global_language_cache = {}
def get_from_cache(cache, val, fun):
lang_dict, lock = cache
lock.acquire()
try:
result = lang_dict.get(val)
finally:
lock.release()
if result:
return result
lock.acquire()
try:
result = lang_dict.setdefault(val, fun())
finally:
lock.release()
return result
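# e.g. (editor's sketch):
#   cache = ({}, RLock())
#   get_from_cache(cache, 'key', lambda: expensive())  # computed once,
#   get_from_cache(cache, 'key', lambda: expensive())  # then served from the dict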
def clear_cache(filename):
cache = global_language_cache.setdefault(
filename, ({}, RLock()))
lang_dict, lock = cache
lock.acquire()
try:
lang_dict.clear()
finally:
lock.release()
def read_dict_aux(filename):
lang_text = read_locked(filename).replace('\r\n', '\n')
clear_cache(filename)
try:
return safe_eval(lang_text) or {}
except Exception:
e = sys.exc_info()[1]
status = 'Syntax error in %s (%s)' % (filename, e)
logging.error(status)
return {'__corrupted__': status}
def read_dict(filename):
""" Returns dictionary with translation messages
"""
return getcfs('lang:' + filename, filename,
lambda: read_dict_aux(filename))
def read_possible_plural_rules():
"""
    Creates a dict of all possible plural rules files
    The result is cached in the PLURAL_RULES dictionary to increase speed
"""
plurals = {}
try:
import gluon.contrib.plural_rules as package
for importer, modname, ispkg in pkgutil.iter_modules(package.__path__):
if len(modname) == 2:
module = __import__(package.__name__ + '.' + modname,
fromlist=[modname])
lang = modname
pname = modname + '.py'
nplurals = getattr(module, 'nplurals', DEFAULT_NPLURALS)
get_plural_id = getattr(
module, 'get_plural_id',
DEFAULT_GET_PLURAL_ID)
construct_plural_form = getattr(
module, 'construct_plural_form',
DEFAULT_CONSTRUCT_PLURAL_FORM)
plurals[lang] = (lang, nplurals, get_plural_id,
construct_plural_form)
except ImportError:
e = sys.exc_info()[1]
logging.warn('Unable to import plural rules: %s' % e)
return plurals
PLURAL_RULES = read_possible_plural_rules()
def read_possible_languages_aux(langdir):
def get_lang_struct(lang, langcode, langname, langfile_mtime):
if lang == 'default':
real_lang = langcode.lower()
else:
real_lang = lang
(prules_langcode,
nplurals,
get_plural_id,
construct_plural_form
) = PLURAL_RULES.get(real_lang[:2], ('default',
DEFAULT_NPLURALS,
DEFAULT_GET_PLURAL_ID,
DEFAULT_CONSTRUCT_PLURAL_FORM))
if prules_langcode != 'default':
(pluraldict_fname,
pluraldict_mtime) = plurals.get(real_lang,
plurals.get(real_lang[:2],
('plural-%s.py' % real_lang, 0)))
else:
pluraldict_fname = None
pluraldict_mtime = 0
return (langcode, # language code from !langcode!
langname,
# language name in national spelling from !langname!
langfile_mtime, # m_time of language file
                pluraldict_fname, # name of plural dictionary file or None (when the file does not exist)
                pluraldict_mtime, # m_time of plural dictionary file or 0 if the file does not exist
prules_langcode, # code of plural rules language or 'default'
nplurals, # nplurals for current language
get_plural_id, # get_plural_id() for current language
construct_plural_form) # construct_plural_form() for current language
plurals = {}
flist = oslistdir(langdir) if isdir(langdir) else []
# scan languages directory for plural dict files:
for pname in flist:
if regex_plural_file.match(pname):
plurals[pname[7:-3]] = (pname,
ostat(pjoin(langdir, pname)).st_mtime)
langs = {}
# scan languages directory for langfiles:
for fname in flist:
if regex_langfile.match(fname) or fname == 'default.py':
fname_with_path = pjoin(langdir, fname)
d = read_dict(fname_with_path)
lang = fname[:-3]
langcode = d.get('!langcode!', lang if lang != 'default'
else DEFAULT_LANGUAGE)
langname = d.get('!langname!', langcode)
langfile_mtime = ostat(fname_with_path).st_mtime
langs[lang] = get_lang_struct(lang, langcode,
langname, langfile_mtime)
if 'default' not in langs:
# if default.py is not found,
# add DEFAULT_LANGUAGE as default language:
langs['default'] = get_lang_struct('default', DEFAULT_LANGUAGE,
DEFAULT_LANGUAGE_NAME, 0)
deflang = langs['default']
deflangcode = deflang[0]
if deflangcode not in langs:
# create language from default.py:
langs[deflangcode] = deflang[:2] + (0,) + deflang[3:]
return langs
def read_possible_languages(langpath):
return getcfs('langs:' + langpath, langpath,
lambda: read_possible_languages_aux(langpath))
def read_plural_dict_aux(filename):
lang_text = read_locked(filename).replace('\r\n', '\n')
try:
return eval(lang_text) or {}
except Exception:
e = sys.exc_info()[1]
status = 'Syntax error in %s (%s)' % (filename, e)
logging.error(status)
return {'__corrupted__': status}
def read_plural_dict(filename):
return getcfs('plurals:' + filename, filename,
lambda: read_plural_dict_aux(filename))
def write_plural_dict(filename, contents):
if '__corrupted__' in contents:
return
fp = None
try:
fp = LockedFile(filename, 'w')
fp.write('#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n{\n# "singular form (0)": ["first plural form (1)", "second plural form (2)", ...],\n')
for key in sorted(contents, sort_function):
forms = '[' + ','.join([repr(Utf8(form))
for form in contents[key]]) + ']'
fp.write('%s: %s,\n' % (repr(Utf8(key)), forms))
fp.write('}\n')
except (IOError, OSError):
if is_writable():
logging.warning('Unable to write to file %s' % filename)
return
finally:
if fp:
fp.close()
def sort_function(x, y):
return cmp(unicode(x, 'utf-8').lower(), unicode(y, 'utf-8').lower())
def write_dict(filename, contents):
if '__corrupted__' in contents:
return
fp = None
try:
fp = LockedFile(filename, 'w')
fp.write('# -*- coding: utf-8 -*-\n{\n')
for key in sorted(contents, sort_function):
fp.write('%s: %s,\n' % (repr(Utf8(key)),
repr(Utf8(contents[key]))))
fp.write('}\n')
except (IOError, OSError):
if is_writable():
logging.warning('Unable to write to file %s' % filename)
return
finally:
if fp:
fp.close()
class lazyT(object):
"""
Never to be called explicitly, returned by
translator.__call__() or translator.M()
"""
m = s = T = f = t = None
M = is_copy = False
def __init__(
self,
message,
symbols={},
T=None,
filter=None,
ftag=None,
M=False
):
if isinstance(message, lazyT):
self.m = message.m
self.s = message.s
self.T = message.T
self.f = message.f
self.t = message.t
self.M = message.M
self.is_copy = True
else:
self.m = message
self.s = symbols
self.T = T
self.f = filter
self.t = ftag
self.M = M
self.is_copy = False
def __repr__(self):
return "<lazyT %s>" % (repr(Utf8(self.m)), )
def __str__(self):
return str(self.T.apply_filter(self.m, self.s, self.f, self.t) if self.M else
self.T.translate(self.m, self.s))
def __eq__(self, other):
return str(self) == str(other)
def __ne__(self, other):
return str(self) != str(other)
def __add__(self, other):
return '%s%s' % (self, other)
def __radd__(self, other):
return '%s%s' % (other, self)
def __mul__(self, other):
return str(self) * other
def __cmp__(self, other):
return cmp(str(self), str(other))
def __hash__(self):
return hash(str(self))
def __getattr__(self, name):
return getattr(str(self), name)
def __getitem__(self, i):
return str(self)[i]
def __getslice__(self, i, j):
return str(self)[i:j]
def __iter__(self):
for c in str(self):
yield c
def __len__(self):
return len(str(self))
def xml(self):
return str(self) if self.M else escape(str(self))
def encode(self, *a, **b):
return str(self).encode(*a, **b)
def decode(self, *a, **b):
return str(self).decode(*a, **b)
def read(self):
return str(self)
def __mod__(self, symbols):
if self.is_copy:
return lazyT(self)
return lazyT(self.m, symbols, self.T, self.f, self.t, self.M)
def pickle_lazyT(c):
return str, (c.xml(),)
copy_reg.pickle(lazyT, pickle_lazyT)
class translator(object):
"""
This class is instantiated by gluon.compileapp.build_environment
as the T object
Example:
T.force(None) # turns off translation
T.force('fr, it') # forces web2py to translate using fr.py or it.py
T("Hello World") # translates "Hello World" using the selected file
Note:
- there is no need to force since, by default, T uses
http_accept_language to determine a translation file.
- en and en-en are considered different languages!
- if language xx-yy is not found force() probes other similar languages
using such algorithm: `xx-yy.py -> xx.py -> xx-yy*.py -> xx*.py`
"""
def __init__(self, langpath, http_accept_language):
self.langpath = langpath
self.http_accept_language = http_accept_language
# filled in self.force():
# ------------------------
# self.cache
# self.accepted_language
# self.language_file
# self.plural_language
# self.nplurals
# self.get_plural_id
# self.construct_plural_form
# self.plural_file
# self.plural_dict
# self.requested_languages
# ----------------------------------------
# filled in self.set_current_languages():
# ----------------------------------------
# self.default_language_file
# self.default_t
# self.current_languages
self.set_current_languages()
self.lazy = True
self.otherTs = {}
self.filter = markmin
self.ftag = 'markmin'
self.ns = None
self.is_writable = True
def get_possible_languages_info(self, lang=None):
"""
Returns info for selected language or dictionary with all
possible languages info from `APP/languages/*.py`
        Returns:
- a tuple containing::
langcode, langname, langfile_mtime,
pluraldict_fname, pluraldict_mtime,
prules_langcode, nplurals,
get_plural_id, construct_plural_form
or None
- if *lang* is NOT defined a dictionary with all possible
languages::
{ langcode(from filename):
( langcode, # language code from !langcode!
langname,
# language name in national spelling from !langname!
langfile_mtime, # m_time of language file
                pluraldict_fname,# name of plural dictionary file or None (when the file does not exist)
                pluraldict_mtime,# m_time of plural dictionary file or 0 if the file does not exist
prules_langcode, # code of plural rules language or 'default'
nplurals, # nplurals for current language
get_plural_id, # get_plural_id() for current language
construct_plural_form) # construct_plural_form() for current language
}
Args:
lang (str): language
"""
info = read_possible_languages(self.langpath)
if lang:
info = info.get(lang)
return info
def get_possible_languages(self):
""" Gets list of all possible languages for current application """
return list(set(self.current_languages +
[lang for lang in read_possible_languages(self.langpath).iterkeys()
if lang != 'default']))
def set_current_languages(self, *languages):
"""
Sets current AKA "default" languages
Setting one of this languages makes the force() function to turn
translation off
"""
if len(languages) == 1 and isinstance(languages[0], (tuple, list)):
languages = languages[0]
if not languages or languages[0] is None:
# set default language from default.py/DEFAULT_LANGUAGE
pl_info = self.get_possible_languages_info('default')
if pl_info[2] == 0: # langfile_mtime
# if languages/default.py is not found
self.default_language_file = self.langpath
self.default_t = {}
self.current_languages = [DEFAULT_LANGUAGE]
else:
self.default_language_file = pjoin(self.langpath,
'default.py')
self.default_t = read_dict(self.default_language_file)
self.current_languages = [pl_info[0]] # !langcode!
else:
self.current_languages = list(languages)
self.force(self.http_accept_language)
def plural(self, word, n):
"""
Gets plural form of word for number *n*
invoked from T()/T.M() in `%%{}` tag
Note:
"word" MUST be defined in current language (T.accepted_language)
Args:
word (str): word in singular
n (numeric): number plural form created for
Returns:
word (str): word in appropriate singular/plural form
"""
if int(n) == 1:
return word
elif word:
id = self.get_plural_id(abs(int(n)))
# id = 0 singular form
# id = 1 first plural form
# id = 2 second plural form
# etc.
if id != 0:
forms = self.plural_dict.get(word, [])
if len(forms) >= id:
# have this plural form:
return forms[id - 1]
else:
# guessing this plural form
forms += [''] * (self.nplurals - len(forms) - 1)
form = self.construct_plural_form(word, id)
forms[id - 1] = form
self.plural_dict[word] = forms
if self.is_writable and is_writable() and self.plural_file:
write_plural_dict(self.plural_file,
self.plural_dict)
return form
return word
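    # e.g. (editor's sketch, assuming a rule set where get_plural_id() maps
    # any n != 1 to plural form 1):
    #   T.plural('word', 1) -> 'word'
    #   T.plural('word', 5) -> first plural form from the plural dictionary
    #                          (or a constructed/guessed form if missing)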
def force(self, *languages):
"""
Selects language(s) for translation
        If a list of languages is passed as a parameter,
        the first language from this list that matches the ones
        from the possible_languages dictionary will be
        selected.
        The default language will be selected if none
        of them matches possible_languages.
"""
pl_info = read_possible_languages(self.langpath)
def set_plural(language):
"""
initialize plural forms subsystem
"""
lang_info = pl_info.get(language)
if lang_info:
(pname,
pmtime,
self.plural_language,
self.nplurals,
self.get_plural_id,
self.construct_plural_form
) = lang_info[3:]
pdict = {}
if pname:
pname = pjoin(self.langpath, pname)
if pmtime != 0:
pdict = read_plural_dict(pname)
self.plural_file = pname
self.plural_dict = pdict
else:
self.plural_language = 'default'
self.nplurals = DEFAULT_NPLURALS
self.get_plural_id = DEFAULT_GET_PLURAL_ID
self.construct_plural_form = DEFAULT_CONSTRUCT_PLURAL_FORM
self.plural_file = None
self.plural_dict = {}
language = ''
if len(languages) == 1 and isinstance(languages[0], str):
languages = regex_language.findall(languages[0].lower())
elif not languages or languages[0] is None:
languages = []
self.requested_languages = languages = tuple(languages)
if languages:
all_languages = set(lang for lang in pl_info.iterkeys()
if lang != 'default') \
| set(self.current_languages)
for lang in languages:
# compare "aa-bb" | "aa" from *language* parameter
                # with strings from langlist using the following algorithm:
# xx-yy.py -> xx.py -> xx*.py
lang5 = lang[:5]
if lang5 in all_languages:
language = lang5
else:
lang2 = lang[:2]
if len(lang5) > 2 and lang2 in all_languages:
language = lang2
else:
for l in all_languages:
if l[:2] == lang2:
language = l
if language:
if language in self.current_languages:
break
self.language_file = pjoin(self.langpath, language + '.py')
self.t = read_dict(self.language_file)
self.cache = global_language_cache.setdefault(
self.language_file,
({}, RLock()))
set_plural(language)
self.accepted_language = language
return languages
self.accepted_language = language
if not language:
if self.current_languages:
self.accepted_language = self.current_languages[0]
else:
self.accepted_language = DEFAULT_LANGUAGE
self.language_file = self.default_language_file
self.cache = global_language_cache.setdefault(self.language_file,
({}, RLock()))
self.t = self.default_t
set_plural(self.accepted_language)
return languages
def __call__(self, message, symbols={}, language=None, lazy=None, ns=None):
"""
        Gets a cached translated plain text message with inserted parameters (symbols);
        if lazy==True, a lazyT object is returned
"""
if lazy is None:
lazy = self.lazy
if not language and not ns:
if lazy:
return lazyT(message, symbols, self)
else:
return self.translate(message, symbols)
else:
if ns:
if ns != self.ns:
self.langpath = os.path.join(self.langpath, ns)
if self.ns is None:
self.ns = ns
otherT = self.__get_otherT__(language, ns)
return otherT(message, symbols, lazy=lazy)
def __get_otherT__(self, language=None, namespace=None):
if not language and not namespace:
raise Exception('Incorrect parameters')
if namespace:
if language:
index = '%s/%s' % (namespace, language)
else:
index = namespace
else:
index = language
try:
otherT = self.otherTs[index]
except KeyError:
otherT = self.otherTs[index] = translator(self.langpath,
self.http_accept_language)
if language:
otherT.force(language)
return otherT
def apply_filter(self, message, symbols={}, filter=None, ftag=None):
def get_tr(message, prefix, filter):
s = self.get_t(message, prefix)
return filter(s) if filter else self.filter(s)
if filter:
prefix = '@' + (ftag or 'userdef') + '\x01'
else:
prefix = '@' + self.ftag + '\x01'
message = get_from_cache(
self.cache, prefix + message,
lambda: get_tr(message, prefix, filter))
if symbols or symbols == 0 or symbols == "":
if isinstance(symbols, dict):
symbols.update(
(key, xmlescape(value).translate(ttab_in))
for key, value in symbols.iteritems()
if not isinstance(value, NUMBERS))
else:
if not isinstance(symbols, tuple):
symbols = (symbols,)
symbols = tuple(
value if isinstance(value, NUMBERS)
else xmlescape(value).translate(ttab_in)
for value in symbols)
message = self.params_substitution(message, symbols)
return XML(message.translate(ttab_out))
def M(self, message, symbols={}, language=None,
lazy=None, filter=None, ftag=None, ns=None):
"""
        Gets a cached translated markmin-message with inserted parameters;
        if lazy==True, a lazyT object is returned
"""
if lazy is None:
lazy = self.lazy
if not language and not ns:
if lazy:
return lazyT(message, symbols, self, filter, ftag, True)
else:
return self.apply_filter(message, symbols, filter, ftag)
else:
if ns:
self.langpath = os.path.join(self.langpath, ns)
otherT = self.__get_otherT__(language, ns)
return otherT.M(message, symbols, lazy=lazy)
def get_t(self, message, prefix=''):
"""
Use ## to add a comment into a translation string
        the comment can be useful to discriminate different possible
translations for the same string (for example different locations)::
T(' hello world ') -> ' hello world '
T(' hello world ## token') -> ' hello world '
T('hello ## world## token') -> 'hello ## world'
the ## notation is ignored in multiline strings and strings that
start with ##. This is needed to allow markmin syntax to be translated
"""
if isinstance(message, unicode):
message = message.encode('utf8')
if isinstance(prefix, unicode):
prefix = prefix.encode('utf8')
key = prefix + message
mt = self.t.get(key, None)
if mt is not None:
return mt
# we did not find a translation
if message.find('##') > 0 and not '\n' in message:
# remove comments
message = message.rsplit('##', 1)[0]
# guess translation same as original
self.t[key] = mt = self.default_t.get(key, message)
        # update language file for later translation
if self.is_writable and is_writable() and \
self.language_file != self.default_language_file:
write_dict(self.language_file, self.t)
return regex_backslash.sub(
lambda m: m.group(1).translate(ttab_in), mt)
def params_substitution(self, message, symbols):
"""
Substitutes parameters from symbols into message using %.
        Also parses `%%{}` placeholders for plural-forms processing.
Returns:
string with parameters
Note:
*symbols* MUST BE OR tuple OR dict of parameters!
"""
def sub_plural(m):
"""String in `%{}` is transformed by this rules:
If string starts with `\\`, `!` or `?` such transformations
take place::
"!string of words" -> "String of word" (Capitalize)
"!!string of words" -> "String Of Word" (Title)
"!!!string of words" -> "STRING OF WORD" (Upper)
"\\!string of words" -> "!string of word"
(remove \\ and disable transformations)
"?word?number" -> "word" (return word, if number == 1)
"?number" or "??number" -> "" (remove number,
if number == 1)
"?word?number" -> "number" (if number != 1)
"""
def sub_tuple(m):
""" word[number], !word[number], !!word[number], !!!word[number]
word, !word, !!word, !!!word, ?word?number, ??number, ?number
?word?word[number], ?word?[number], ??word[number]
"""
w, i = m.group('w', 'i')
c = w[0]
if c not in '!?':
return self.plural(w, symbols[int(i or 0)])
elif c == '?':
(p1, sep, p2) = w[1:].partition("?")
part1 = p1 if sep else ""
(part2, sep, part3) = (p2 if sep else p1).partition("?")
if not sep:
part3 = part2
if i is None:
# ?[word]?number[?number] or ?number
if not part2:
return m.group(0)
num = int(part2)
else:
# ?[word]?word2[?word3][number]
num = int(symbols[int(i or 0)])
return part1 if num == 1 else part3 if num == 0 else part2
elif w.startswith('!!!'):
word = w[3:]
fun = upper_fun
elif w.startswith('!!'):
word = w[2:]
fun = title_fun
else:
word = w[1:]
fun = cap_fun
if i is not None:
return fun(self.plural(word, symbols[int(i)]))
return fun(word)
def sub_dict(m):
""" word(var), !word(var), !!word(var), !!!word(var)
word(num), !word(num), !!word(num), !!!word(num)
?word2(var), ?word1?word2(var), ?word1?word2?word0(var)
?word2(num), ?word1?word2(num), ?word1?word2?word0(num)
"""
w, n = m.group('w', 'n')
c = w[0]
n = int(n) if n.isdigit() else symbols[n]
if c not in '!?':
return self.plural(w, n)
elif c == '?':
# ?[word1]?word2[?word0](var or num), ?[word1]?word2(var or num) or ?word2(var or num)
(p1, sep, p2) = w[1:].partition("?")
part1 = p1 if sep else ""
(part2, sep, part3) = (p2 if sep else p1).partition("?")
if not sep:
part3 = part2
num = int(n)
return part1 if num == 1 else part3 if num == 0 else part2
elif w.startswith('!!!'):
word = w[3:]
fun = upper_fun
elif w.startswith('!!'):
word = w[2:]
fun = title_fun
else:
word = w[1:]
fun = cap_fun
return fun(self.plural(word, n))
s = m.group(1)
part = regex_plural_tuple.sub(sub_tuple, s)
if part == s:
part = regex_plural_dict.sub(sub_dict, s)
if part == s:
return m.group(0)
return part
message = message % symbols
message = regex_plural.sub(sub_plural, message)
return message
def translate(self, message, symbols):
"""
Gets cached translated message with inserted parameters(symbols)
"""
message = get_from_cache(self.cache, message,
lambda: self.get_t(message))
if symbols or symbols == 0 or symbols == "":
if isinstance(symbols, dict):
symbols.update(
(key, str(value).translate(ttab_in))
for key, value in symbols.iteritems()
if not isinstance(value, NUMBERS))
else:
if not isinstance(symbols, tuple):
symbols = (symbols,)
symbols = tuple(
value if isinstance(value, NUMBERS)
else str(value).translate(ttab_in)
for value in symbols)
message = self.params_substitution(message, symbols)
return message.translate(ttab_out)
def findT(path, language=DEFAULT_LANGUAGE):
"""
Note:
Must be run by the admin app
"""
lang_file = pjoin(path, 'languages', language + '.py')
sentences = read_dict(lang_file)
mp = pjoin(path, 'models')
cp = pjoin(path, 'controllers')
vp = pjoin(path, 'views')
mop = pjoin(path, 'modules')
for filename in \
listdir(mp, '^.+\.py$', 0) + listdir(cp, '^.+\.py$', 0)\
+ listdir(vp, '^.+\.html$', 0) + listdir(mop, '^.+\.py$', 0):
data = read_locked(filename)
items = regex_translate.findall(data)
for item in items:
try:
message = safe_eval(item)
except:
                continue # silently ignore improperly formatted strings
if not message.startswith('#') and not '\n' in message:
tokens = message.rsplit('##', 1)
else:
# this allows markmin syntax in translations
tokens = [message]
if len(tokens) == 2:
message = tokens[0].strip() + '##' + tokens[1].strip()
if message and not message in sentences:
sentences[message] = message
if not '!langcode!' in sentences:
sentences['!langcode!'] = (
DEFAULT_LANGUAGE if language in ('default', DEFAULT_LANGUAGE) else language)
if not '!langname!' in sentences:
sentences['!langname!'] = (
DEFAULT_LANGUAGE_NAME if language in ('default', DEFAULT_LANGUAGE)
else sentences['!langcode!'])
write_dict(lang_file, sentences)
def update_all_languages(application_path):
"""
Note:
Must be run by the admin app
"""
path = pjoin(application_path, 'languages/')
for language in oslistdir(path):
if regex_langfile.match(language):
findT(application_path, language[:-3])
def update_from_langfile(target, source):
"""this will update untranslated messages in target from source (where both are language files)
this can be used as first step when creating language file for new but very similar language
or if you want update your app from welcome app of newer web2py version
or in non-standard scenarios when you work on target and from any reason you have partial translation in source
"""
src = read_dict(source)
sentences = read_dict(target)
for key in sentences:
val = sentences[key]
if not val or val == key:
new_val = src.get(key)
if new_val and new_val != val:
sentences[key] = new_val
write_dict(target, sentences)
if __name__ == '__main__':
import doctest
doctest.testmod()
|
urrego093/proyecto_mv
|
gluon/languages.py
|
Python
|
gpl-3.0
| 36,357
|
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import gzip
import os
import re
import shutil
import socket
import stat
import StringIO
import time
import traceback
import urllib
import urllib2
import zlib
from httplib import BadStatusLine
try:
import json
except ImportError:
from lib import simplejson as json
from xml.dom.minidom import Node
try:
import xml.etree.cElementTree as etree
except ImportError:
import elementtree.ElementTree as etree
import sickbeard
from sickbeard.exceptions import MultipleShowObjectsException, ex
from sickbeard import logger, classes
from sickbeard.common import USER_AGENT, mediaExtensions, XML_NSMAP
from sickbeard import db
from sickbeard import encodingKludge as ek
from sickbeard import notifiers
from lib.tvdb_api import tvdb_api, tvdb_exceptions
urllib._urlopener = classes.SickBeardURLopener()
def indentXML(elem, level=0):
'''
Does our pretty printing, makes Matt very happy
'''
i = "\n" + level * " "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indentXML(elem, level + 1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
# Strip out the newlines from text
if elem.text:
elem.text = elem.text.replace('\n', ' ')
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def replaceExtension(filename, newExt):
'''
>>> replaceExtension('foo.avi', 'mkv')
'foo.mkv'
>>> replaceExtension('.vimrc', 'arglebargle')
'.vimrc'
>>> replaceExtension('a.b.c', 'd')
'a.b.d'
>>> replaceExtension('', 'a')
''
>>> replaceExtension('foo.bar', '')
'foo.'
'''
sepFile = filename.rpartition(".")
if sepFile[0] == "":
return filename
else:
return sepFile[0] + "." + newExt
def isMediaFile(filename):
# ignore samples
if re.search('(^|[\W_])sample\d*[\W_]', filename.lower()):
return False
# ignore macOS "resource fork" files, which start with '._'
if filename.startswith('._'):
return False
sepFile = filename.rpartition(".")
if sepFile[2].lower() in mediaExtensions:
return True
else:
return False
def sanitizeFileName(name):
'''
>>> sanitizeFileName('a/b/c')
'a-b-c'
>>> sanitizeFileName('abc')
'abc'
>>> sanitizeFileName('a"b')
'ab'
>>> sanitizeFileName('.a.b..')
'a.b'
'''
# remove bad chars from the filename
name = re.sub(r'[\\/\*]', '-', name)
name = re.sub(r'[:"<>|?]', '', name)
# remove leading/trailing periods and spaces
name = name.strip(' .')
return name
def getURL(url, post_data=None, headers=[]):
"""
Returns a byte-string retrieved from the given url, or None on error.
"""
opener = urllib2.build_opener()
opener.addheaders = [('User-Agent', USER_AGENT), ('Accept-Encoding', 'gzip,deflate')]
for cur_header in headers:
opener.addheaders.append(cur_header)
try:
usock = opener.open(url, post_data)
url = usock.geturl()
encoding = usock.info().get("Content-Encoding")
if encoding in ('gzip', 'x-gzip', 'deflate'):
content = usock.read()
if encoding == 'deflate':
data = StringIO.StringIO(zlib.decompress(content))
else:
data = gzip.GzipFile('', 'rb', 9, StringIO.StringIO(content))
result = data.read()
else:
result = usock.read()
usock.close()
except urllib2.HTTPError, e:
logger.log(u"HTTP error " + str(e.code) + " while loading URL " + url, logger.WARNING)
return None
except urllib2.URLError, e:
logger.log(u"URL error " + str(e.reason) + " while loading URL " + url, logger.WARNING)
return None
except BadStatusLine:
logger.log(u"BadStatusLine error while loading URL " + url, logger.WARNING)
return None
except socket.timeout:
logger.log(u"Timed out while loading URL " + url, logger.WARNING)
return None
except ValueError:
logger.log(u"Unknown error while loading URL " + url, logger.WARNING)
return None
except Exception:
logger.log(u"Unknown exception while loading URL " + url + ": " + traceback.format_exc(), logger.WARNING)
return None
return result
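# Illustrative usage sketch (hypothetical URL and header):
#   result = getURL('http://example.com/rss',
#                   headers=[('Accept', 'application/xml')])
# `result` is the response body as a byte-string (gzip/deflate responses
# are decoded transparently), or None if any of the errors above occurred.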
def findCertainShow(showList, tvdbid):
results = filter(lambda x: x.tvdbid == tvdbid, showList)
if len(results) == 0:
return None
elif len(results) > 1:
raise MultipleShowObjectsException()
else:
return results[0]
def findCertainTVRageShow(showList, tvrid):
if tvrid == 0:
return None
results = filter(lambda x: x.tvrid == tvrid, showList)
if len(results) == 0:
return None
elif len(results) > 1:
raise MultipleShowObjectsException()
else:
return results[0]
def makeDir(path):
if not ek.ek(os.path.isdir, path):
try:
ek.ek(os.makedirs, path)
# do the library update for synoindex
notifiers.synoindex_notifier.addFolder(path)
except OSError:
return False
return True
def searchDBForShow(regShowName):
showNames = [re.sub('[. -]', ' ', regShowName)]
myDB = db.DBConnection()
yearRegex = "([^()]+?)\s*(\()?(\d{4})(?(2)\))$"
for showName in showNames:
sqlResults = myDB.select("SELECT * FROM tv_shows WHERE show_name LIKE ? OR tvr_name LIKE ?", [showName, showName])
if len(sqlResults) == 1:
return (int(sqlResults[0]["tvdb_id"]), sqlResults[0]["show_name"])
else:
# if we didn't get exactly one result then try again with the year stripped off if possible
match = re.match(yearRegex, showName)
if match and match.group(1):
logger.log(u"Unable to match original name but trying to manually strip and specify show year", logger.DEBUG)
sqlResults = myDB.select("SELECT * FROM tv_shows WHERE (show_name LIKE ? OR tvr_name LIKE ?) AND startyear = ?", [match.group(1) + '%', match.group(1) + '%', match.group(3)])
if len(sqlResults) == 0:
logger.log(u"Unable to match a record in the DB for " + showName, logger.DEBUG)
continue
elif len(sqlResults) > 1:
logger.log(u"Multiple results for " + showName + " in the DB, unable to match show name", logger.DEBUG)
continue
else:
return (int(sqlResults[0]["tvdb_id"]), sqlResults[0]["show_name"])
return None
def sizeof_fmt(num):
'''
>>> sizeof_fmt(2)
'2.0 bytes'
>>> sizeof_fmt(1024)
'1.0 KB'
>>> sizeof_fmt(2048)
'2.0 KB'
>>> sizeof_fmt(2**20)
'1.0 MB'
>>> sizeof_fmt(1234567)
'1.2 MB'
'''
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return "%3.1f %s" % (num, x)
num /= 1024.0
def listMediaFiles(path):
if not path or not ek.ek(os.path.isdir, path):
return []
files = []
for curFile in ek.ek(os.listdir, path):
fullCurFile = ek.ek(os.path.join, path, curFile)
# if it's a folder do it recursively
if ek.ek(os.path.isdir, fullCurFile) and not curFile.startswith('.') and not curFile == 'Extras':
files += listMediaFiles(fullCurFile)
elif isMediaFile(curFile):
files.append(fullCurFile)
return files
def copyFile(srcFile, destFile):
ek.ek(shutil.copyfile, srcFile, destFile)
try:
ek.ek(shutil.copymode, srcFile, destFile)
except OSError:
pass
def moveFile(srcFile, destFile):
try:
ek.ek(os.rename, srcFile, destFile)
fixSetGroupID(destFile)
except OSError:
copyFile(srcFile, destFile)
ek.ek(os.unlink, srcFile)
def make_dirs(path):
"""
Creates any folders that are missing and assigns them the permissions of their
parents
"""
logger.log(u"Checking if the path " + path + " already exists", logger.DEBUG)
if not ek.ek(os.path.isdir, path):
# Windows, create all missing folders
if os.name == 'nt' or os.name == 'ce':
try:
logger.log(u"Folder " + path + " didn't exist, creating it", logger.DEBUG)
ek.ek(os.makedirs, path)
except (OSError, IOError), e:
logger.log(u"Failed creating " + path + " : " + ex(e), logger.ERROR)
return False
# not Windows, create all missing folders and set permissions
else:
sofar = ''
folder_list = path.split(os.path.sep)
# look through each subfolder and make sure they all exist
for cur_folder in folder_list:
sofar += cur_folder + os.path.sep
# if it exists then just keep walking down the line
if ek.ek(os.path.isdir, sofar):
continue
try:
logger.log(u"Folder " + sofar + " didn't exist, creating it", logger.DEBUG)
ek.ek(os.mkdir, sofar)
# use normpath to remove end separator, otherwise checks permissions against itself
chmodAsParent(ek.ek(os.path.normpath, sofar))
# do the library update for synoindex
notifiers.synoindex_notifier.addFolder(sofar)
except (OSError, IOError), e:
logger.log(u"Failed creating " + sofar + " : " + ex(e), logger.ERROR)
return False
return True
def rename_ep_file(cur_path, new_path, old_path_length=0):
"""
Creates all folders needed to move a file to its new location, renames it, then cleans up any folders
left that are now empty.
cur_path: The absolute path to the file you want to move/rename
new_path: The absolute path to the destination for the file WITHOUT THE EXTENSION
old_path_length: The length of media file path (old name) WITHOUT THE EXTENSION
"""
new_dest_dir, new_dest_name = os.path.split(new_path) # @UnusedVariable
if old_path_length == 0 or old_path_length > len(cur_path):
# approach from the right
cur_file_name, cur_file_ext = os.path.splitext(cur_path) # @UnusedVariable
else:
# approach from the left
cur_file_ext = cur_path[old_path_length:]
# put the extension on the incoming file
new_path += cur_file_ext
make_dirs(os.path.dirname(new_path))
# move the file
try:
logger.log(u"Renaming file from " + cur_path + " to " + new_path)
ek.ek(os.rename, cur_path, new_path)
except (OSError, IOError), e:
logger.log(u"Failed renaming " + cur_path + " to " + new_path + ": " + ex(e), logger.ERROR)
return False
# clean up any old folders that are empty
delete_empty_folders(ek.ek(os.path.dirname, cur_path))
return True
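# Illustrative sketch (hypothetical paths):
#   rename_ep_file('/tv/Show/old.avi', '/tv/Show/Season 1/Show - 1x01')
# creates '/tv/Show/Season 1' if needed, moves the file to
# '/tv/Show/Season 1/Show - 1x01.avi' (the old extension is re-attached)
# and then prunes any now-empty folders above the old location.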
def delete_empty_folders(check_empty_dir, keep_dir=None):
"""
Walks backwards up the path and deletes any empty folders found.
check_empty_dir: The path to clean (absolute path to a folder)
keep_dir: Clean until this path is reached
"""
# treat check_empty_dir as empty when it only contains these items
ignore_items = []
logger.log(u"Trying to clean any empty folders under " + check_empty_dir)
# as long as the folder exists and doesn't contain any files, delete it
while ek.ek(os.path.isdir, check_empty_dir) and check_empty_dir != keep_dir:
check_files = ek.ek(os.listdir, check_empty_dir)
if not check_files or (len(check_files) <= len(ignore_items) and all([check_file in ignore_items for check_file in check_files])):
# directory is empty or contains only ignore_items
try:
logger.log(u"Deleting empty folder: " + check_empty_dir)
# need shutil.rmtree when ignore_items is really implemented
ek.ek(os.rmdir, check_empty_dir)
# do the library update for synoindex
notifiers.synoindex_notifier.deleteFolder(check_empty_dir)
except OSError, e:
logger.log(u"Unable to delete " + check_empty_dir + ": " + repr(e) + " / " + str(e), logger.WARNING)
break
check_empty_dir = ek.ek(os.path.dirname, check_empty_dir)
else:
break
def chmodAsParent(childPath):
if os.name == 'nt' or os.name == 'ce':
return
parentPath = ek.ek(os.path.dirname, childPath)
if not parentPath:
logger.log(u"No parent path provided in " + childPath + ", unable to get permissions from it", logger.DEBUG)
return
parentMode = stat.S_IMODE(os.stat(parentPath)[stat.ST_MODE])
childPathStat = ek.ek(os.stat, childPath)
childPath_mode = stat.S_IMODE(childPathStat[stat.ST_MODE])
if ek.ek(os.path.isfile, childPath):
childMode = fileBitFilter(parentMode)
else:
childMode = parentMode
if childPath_mode == childMode:
return
childPath_owner = childPathStat.st_uid
user_id = os.geteuid() # @UndefinedVariable - only available on UNIX
if user_id != 0 and user_id != childPath_owner:
logger.log(u"Not running as root or owner of " + childPath + ", not trying to set permissions", logger.DEBUG)
return
try:
ek.ek(os.chmod, childPath, childMode)
logger.log(u"Setting permissions for %s to %o as parent directory has %o" % (childPath, childMode, parentMode), logger.DEBUG)
except OSError:
logger.log(u"Failed to set permission for %s to %o" % (childPath, childMode), logger.ERROR)
def fileBitFilter(mode):
for bit in [stat.S_IXUSR, stat.S_IXGRP, stat.S_IXOTH, stat.S_ISUID, stat.S_ISGID]:
if mode & bit:
mode -= bit
return mode
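# Illustrative example: fileBitFilter(0o775) returns 0o664 -- the execute
# and set-uid/set-gid bits are stripped so files don't keep mode bits that
# only make sense on their parent directory.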
def fixSetGroupID(childPath):
if os.name == 'nt' or os.name == 'ce':
return
parentPath = ek.ek(os.path.dirname, childPath)
parentStat = os.stat(parentPath)
parentMode = stat.S_IMODE(parentStat[stat.ST_MODE])
if parentMode & stat.S_ISGID:
parentGID = parentStat[stat.ST_GID]
childStat = ek.ek(os.stat, childPath)
childGID = childStat[stat.ST_GID]
if childGID == parentGID:
return
childPath_owner = childStat.st_uid
user_id = os.geteuid() # @UndefinedVariable - only available on UNIX
if user_id != 0 and user_id != childPath_owner:
logger.log(u"Not running as root or owner of " + childPath + ", not trying to set the set-group-ID", logger.DEBUG)
return
try:
ek.ek(os.chown, childPath, -1, parentGID) # @UndefinedVariable - only available on UNIX
logger.log(u"Respecting the set-group-ID bit on the parent directory for %s" % (childPath), logger.DEBUG)
except OSError:
logger.log(u"Failed to respect the set-group-ID bit on the parent directory for %s (setting group ID %i)" % (childPath, parentGID), logger.ERROR)
def sanitizeSceneName(name, ezrss=False):
"""
Takes a show name and returns the "scenified" version of it.
ezrss: If true the scenified version will follow EZRSS's quirky naming rules as best as possible
Returns: A string containing the scene version of the show name given.
"""
if not ezrss:
bad_chars = u",:()'!?\u2019"
# ezrss leaves : and ! in their show names as far as I can tell
else:
bad_chars = u",()'?\u2019"
# strip out any bad chars
for x in bad_chars:
name = name.replace(x, "")
# tidy up stuff that doesn't belong in scene names
name = name.replace("- ", ".").replace(" ", ".").replace("&", "and").replace('/', '.')
name = re.sub("\.\.*", ".", name)
if name.endswith('.'):
name = name[:-1]
return name
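# Illustrative example (hypothetical show name):
#   >>> sanitizeSceneName('Mr. Robot: Part 1')
#   'Mr.Robot.Part.1'
# bad characters are dropped, spaces become dots and runs of dots collapse.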
def create_https_certificates(ssl_cert, ssl_key):
"""
Create self-signed HTTPS certificates and store them in the paths 'ssl_cert' and 'ssl_key'
"""
try:
from OpenSSL import crypto # @UnresolvedImport
from lib.certgen import createKeyPair, createCertRequest, createCertificate, TYPE_RSA, serial # @UnresolvedImport
except:
logger.log(u"pyopenssl module missing, please install for https access", logger.WARNING)
return False
# Create the CA Certificate
cakey = createKeyPair(TYPE_RSA, 1024)
careq = createCertRequest(cakey, CN='Certificate Authority')
cacert = createCertificate(careq, (careq, cakey), serial, (0, 60 * 60 * 24 * 365 * 10)) # ten years
cname = 'SickBeard'
pkey = createKeyPair(TYPE_RSA, 1024)
req = createCertRequest(pkey, CN=cname)
cert = createCertificate(req, (cacert, cakey), serial, (0, 60 * 60 * 24 * 365 * 10)) # ten years
# Save the key and certificate to disk
try:
open(ssl_key, 'w').write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
open(ssl_cert, 'w').write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
except:
logger.log(u"Error creating SSL key and certificate", logger.ERROR)
return False
return True
if __name__ == '__main__':
import doctest
doctest.testmod()
def parse_json(data):
"""
Parse json data into a python object
data: data string containing json
Returns: parsed data as json or None
"""
try:
parsedJSON = json.loads(data)
except ValueError:
logger.log(u"Error trying to decode json data:" + data, logger.ERROR)
return None
return parsedJSON
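# Illustrative examples:
#   parse_json('{"result": "success"}')  # -> {u'result': u'success'}
#   parse_json('not json')               # logs an error and returns None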
def parse_xml(data, del_xmlns=False):
"""
Parse data into an xml elementtree.ElementTree
data: data string containing xml
del_xmlns: if True, removes the xmlns namespace from data before parsing
Returns: parsed data as elementtree or None
"""
if del_xmlns:
data = re.sub(' xmlns="[^"]+"', '', data)
try:
parsedXML = etree.fromstring(data)
except Exception, e:
logger.log(u"Error trying to parse xml data: " + data + " to Elementtree, Error: " + ex(e), logger.DEBUG)
parsedXML = None
return parsedXML
def get_xml_text(element, mini_dom=False):
"""
Get all text inside an xml element
element: an xml element created either with elementtree.ElementTree or xml.dom.minidom
mini_dom: default False uses elementtree, True uses minidom
Returns: text
"""
text = ""
if mini_dom:
node = element
for child in node.childNodes:
if child.nodeType in (Node.CDATA_SECTION_NODE, Node.TEXT_NODE):
text += child.data
else:
if element is not None:
for child in [element] + element.findall('.//*'):
if child.text:
text += child.text
return text.strip()
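# Illustrative example (hypothetical document):
#   get_xml_text(etree.fromstring('<a>x<b>y</b></a>'))  # -> 'xy'
# with mini_dom=True the element is expected to come from xml.dom.minidom
# and only its direct text/CDATA child nodes are concatenated.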
def backupVersionedFile(old_file, version):
numTries = 0
new_file = old_file + '.' + 'v' + str(version)
while not ek.ek(os.path.isfile, new_file):
if not ek.ek(os.path.isfile, old_file):
logger.log(u"Not creating backup, " + old_file + " doesn't exist", logger.DEBUG)
break
try:
logger.log(u"Trying to back up " + old_file + " to " + new_file, logger.DEBUG)
shutil.copy(old_file, new_file)
logger.log(u"Backup done", logger.DEBUG)
break
except Exception, e:
logger.log(u"Error while trying to back up " + old_file + " to " + new_file + " : " + ex(e), logger.WARNING)
numTries += 1
time.sleep(1)
logger.log(u"Trying again.", logger.DEBUG)
if numTries >= 10:
logger.log(u"Unable to back up " + old_file + " to " + new_file + " please do it manually.", logger.ERROR)
return False
return True
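# Illustrative example (hypothetical file):
#   backupVersionedFile('sickbeard.db', 15)
# copies 'sickbeard.db' to 'sickbeard.db.v15', retrying up to 10 times
# before giving up and returning False.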
|
Zelgadis87/Sick-Beard
|
sickbeard/helpers.py
|
Python
|
gpl-3.0
| 21,438
|
# Django settings for FLAP project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Wacharabuhm', 'n.kimmy@gmail.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'FLAP', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': 'postgres',
'PASSWORD': 'password',
'HOST': '127.0.0.1', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '5433', # Set to empty string for default.
}
}
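# A minimal sqlite3 alternative would look like the following (illustrative
# sketch only, not used by this project; the filename is hypothetical):
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.sqlite3',
#         'NAME': 'flap.db',
#     }
# }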
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Asia/Bangkok'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'th-TH'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = 'static/'
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
"/Users/wt/Documents/FLAP/Static",
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'g&3)0+_+8w%9xm(r5-j)+me3nt(+dm9$8vfo_ei8fmb!1@$uw0'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'FLAP.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'FLAP.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
#'django.contrib.admindocs',
'acura'
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
javadevil/FLAP
|
FLAP/FLAP/settings.py
|
Python
|
gpl-3.0
| 5,444
|
# coding: utf-8
#
# Copyright (C) 2015 Google Inc.
# 2017 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Not installing aliases from python-future; it's unreliable and slow.
from builtins import * # noqa
from hamcrest import assert_that, calling, raises
from mock import patch
from nose.tools import eq_
import functools
import os
from ycmd.completers.go.go_completer import ( _ComputeOffset, GoCompleter,
GO_BINARIES, FindBinary )
from ycmd.request_wrap import RequestWrap
from ycmd import user_options_store
from ycmd.utils import ReadFile, ToBytes
TEST_DIR = os.path.dirname( os.path.abspath( __file__ ) )
DATA_DIR = os.path.join( TEST_DIR, 'testdata' )
PATH_TO_TEST_FILE = os.path.join( DATA_DIR, 'test2.go' )
# Use test file as dummy binary
DUMMY_BINARY = PATH_TO_TEST_FILE
PATH_TO_POS121_RES = os.path.join( DATA_DIR, 'gocode_output_offset_121.json' )
PATH_TO_POS215_RES = os.path.join( DATA_DIR, 'gocode_output_offset_215.json' )
PATH_TO_POS292_RES = os.path.join( DATA_DIR, 'gocode_output_offset_292.json' )
# Gocode output when a parsing error causes an internal panic.
PATH_TO_PANIC_OUTPUT_RES = os.path.join(
DATA_DIR, 'gocode_dontpanic_output_offset_10.json' )
REQUEST_DATA = {
'line_num': 1,
'filepath' : PATH_TO_TEST_FILE,
'file_data' : { PATH_TO_TEST_FILE : { 'filetypes' : [ 'go' ] } }
}
def BuildRequest( line_num, column_num ):
request = REQUEST_DATA.copy()
request[ 'line_num' ] = line_num
request[ 'column_num' ] = column_num
request[ 'file_data' ][ PATH_TO_TEST_FILE ][ 'contents' ] = ReadFile(
PATH_TO_TEST_FILE )
return RequestWrap( request )
def SetUpGoCompleter( test ):
@functools.wraps( test )
def Wrapper( *args, **kwargs ):
user_options = user_options_store.DefaultOptions()
user_options[ 'gocode_binary_path' ] = DUMMY_BINARY
with patch( 'ycmd.utils.SafePopen' ):
completer = GoCompleter( user_options )
return test( completer, *args, **kwargs )
return Wrapper
def FindGoCodeBinary_test():
user_options = user_options_store.DefaultOptions()
eq_( GO_BINARIES.get( "gocode" ), FindBinary( "gocode", user_options ) )
user_options[ 'gocode_binary_path' ] = DUMMY_BINARY
eq_( DUMMY_BINARY, FindBinary( "gocode", user_options ) )
user_options[ 'gocode_binary_path' ] = DATA_DIR
eq_( None, FindBinary( "gocode", user_options ) )
def ComputeOffset_OutOfBoundsOffset_test():
assert_that(
calling( _ComputeOffset ).with_args( 'test', 2, 1 ),
raises( RuntimeError, 'Go completer could not compute byte offset '
'corresponding to line 2 and column 1.' ) )
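# Illustrative note: _ComputeOffset maps a 1-based (line, column) pair to a
# byte offset into the buffer, so requesting line 2 of the single-line
# contents 'test' is out of bounds, hence the RuntimeError asserted above.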
# Test line-col to offset in the file before any unicode occurrences.
@SetUpGoCompleter
@patch( 'ycmd.completers.go.go_completer.GoCompleter._ExecuteCommand',
return_value = ReadFile( PATH_TO_POS215_RES ) )
def ComputeCandidatesInner_BeforeUnicode_test( completer, execute_command ):
# Col 8 corresponds to cursor at log.Pr^int("Line 7 ...
completer.ComputeCandidatesInner( BuildRequest( 7, 8 ) )
execute_command.assert_called_once_with(
[ DUMMY_BINARY, '-sock', 'tcp', '-addr', completer._gocode_host,
'-f=json', 'autocomplete', PATH_TO_TEST_FILE, '119' ],
contents = ToBytes( ReadFile( PATH_TO_TEST_FILE ) ) )
# Test line-col to offset in the file after a unicode occurrence.
@SetUpGoCompleter
@patch( 'ycmd.completers.go.go_completer.GoCompleter._ExecuteCommand',
return_value = ReadFile( PATH_TO_POS215_RES ) )
def ComputeCandidatesInner_AfterUnicode_test( completer, execute_command ):
# Col 9 corresponds to cursor at log.Pri^nt("Line 7 ...
completer.ComputeCandidatesInner( BuildRequest( 9, 9 ) )
execute_command.assert_called_once_with(
[ DUMMY_BINARY, '-sock', 'tcp', '-addr', completer._gocode_host,
'-f=json', 'autocomplete', PATH_TO_TEST_FILE, '212' ],
contents = ToBytes( ReadFile( PATH_TO_TEST_FILE ) ) )
# Test end to end parsing of completed results.
@SetUpGoCompleter
@patch( 'ycmd.completers.go.go_completer.GoCompleter._ExecuteCommand',
return_value = ReadFile( PATH_TO_POS292_RES ) )
def ComputeCandidatesInner_test( completer, execute_command ):
# Col 40 corresponds to cursor at ..., log.Prefi^x ...
result = completer.ComputeCandidatesInner( BuildRequest( 10, 40 ) )
execute_command.assert_called_once_with(
[ DUMMY_BINARY, '-sock', 'tcp', '-addr', completer._gocode_host,
'-f=json', 'autocomplete', PATH_TO_TEST_FILE, '287' ],
contents = ToBytes( ReadFile( PATH_TO_TEST_FILE ) ) )
eq_( result, [ {
'menu_text': u'Prefix',
'typed_text': u'Prefix',
'completion_chunks': [{
'text': u'Prefix',
'placeholder': False
}],
'extra_menu_info': u'func() string',
'detailed_info': u'Prefix func() string func',
'kind': u'func'
} ] )
# Test Gocode failure.
@SetUpGoCompleter
@patch( 'ycmd.completers.go.go_completer.GoCompleter._ExecuteCommand',
return_value = '' )
def ComputeCandidatesInner_GoCodeFailure_test( completer, *args ):
assert_that(
calling( completer.ComputeCandidatesInner ).with_args(
BuildRequest( 1, 1 ) ),
raises( RuntimeError, 'Gocode returned invalid JSON response.' ) )
# Test JSON parsing failure.
@SetUpGoCompleter
@patch( 'ycmd.completers.go.go_completer.GoCompleter._ExecuteCommand',
return_value = "{this isn't parseable" )
def ComputeCandidatesInner_ParseFailure_test( completer, *args ):
assert_that(
calling( completer.ComputeCandidatesInner ).with_args(
BuildRequest( 1, 1 ) ),
raises( RuntimeError, 'Gocode returned invalid JSON response.' ) )
# Test empty results error.
@SetUpGoCompleter
@patch( 'ycmd.completers.go.go_completer.GoCompleter._ExecuteCommand',
return_value = '[]' )
def ComputeCandidatesInner_NoResultsFailure_test( completer, *args ):
assert_that(
calling( completer.ComputeCandidatesInner ).with_args(
BuildRequest( 1, 1 ) ),
raises( RuntimeError, 'No completions found.' ) )
# Test panic error.
@SetUpGoCompleter
@patch( 'ycmd.completers.go.go_completer.GoCompleter._ExecuteCommand',
return_value = ReadFile( PATH_TO_PANIC_OUTPUT_RES ) )
def ComputeCandidatesInner_GoCodePanic_test( completer, *args ):
assert_that(
calling( completer.ComputeCandidatesInner ).with_args(
BuildRequest( 1, 1 ) ),
raises( RuntimeError,
'Gocode panicked trying to find completions, '
'you likely have a syntax error.' ) )
|
Qusic/ycmd
|
ycmd/tests/go/go_completer_test.py
|
Python
|
gpl-3.0
| 7,262
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2020 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Valerio Cosentino <valcos@bitergia.com>
# Santiago Dueñas <sduenas@bitergia.com>
# Jesus M. Gonzalez-Barahona <jgb@gsyc.es>
# Harshal Mittal <harshalmittal4@gmail.com>
#
import json
import logging
import requests
from grimoirelab_toolkit.datetime import datetime_to_utc
from grimoirelab_toolkit.uris import urijoin
from ...backend import (Backend,
BackendCommand,
BackendCommandArgumentParser)
from ...client import HttpClient, RateLimitHandler
from ...errors import RepositoryError
from ...utils import DEFAULT_DATETIME
CATEGORY_EVENT = "event"
MEETUP_URL = 'https://meetup.com/'
MEETUP_API_URL = 'https://api.meetup.com/'
MAX_ITEMS = 200
# Range before sleeping until rate limit reset
MIN_RATE_LIMIT = 1
# Time to sleep to avoid the too-many-requests exception
SLEEP_TIME = 30
logger = logging.getLogger(__name__)
class Meetup(Backend):
"""Meetup backend.
This class allows fetching the events of a group from the
Meetup server. Initialize this class passing the OAuth2 token needed
for authentication with the parameter `api_token`.
:param group: name of the group where data will be fetched
:param api_token: OAuth2 token to access the API
:param max_items: maximum number of items requested on the same query
:param tag: label used to mark the data
:param archive: archive to store/retrieve items
:param sleep_for_rate: sleep until rate limit is reset
:param min_rate_to_sleep: minimum rate needed to sleep until
it will be reset
:param sleep_time: time (in seconds) to sleep in case
of connection problems
:param ssl_verify: enable/disable SSL verification
"""
version = '0.17.0'
CATEGORIES = [CATEGORY_EVENT]
CLASSIFIED_FIELDS = [
['group', 'topics'],
['event_hosts'],
['rsvps'],
['venue']
]
EXTRA_SEARCH_FIELDS = {
'group_name': ['group', 'name'],
'group_id': ['group', 'id']
}
def __init__(self, group, api_token,
max_items=MAX_ITEMS, tag=None, archive=None,
sleep_for_rate=False, min_rate_to_sleep=MIN_RATE_LIMIT,
sleep_time=SLEEP_TIME, ssl_verify=True):
origin = MEETUP_URL
super().__init__(origin, tag=tag, archive=archive, ssl_verify=ssl_verify)
self.group = group
self.max_items = max_items
self.api_token = api_token
self.sleep_for_rate = sleep_for_rate
self.min_rate_to_sleep = min_rate_to_sleep
self.sleep_time = sleep_time
self.client = None
def fetch(self, category=CATEGORY_EVENT, from_date=DEFAULT_DATETIME, to_date=None,
filter_classified=False):
"""Fetch the events from the server.
This method fetches those events of a group stored on the server
that were updated since the given date. Comments and rsvps data
are included within each event.
:param category: the category of items to fetch
:param from_date: obtain events updated since this date
:param to_date: obtain events updated before this date
:param filter_classified: remove classified fields from the resulting items
:returns: a generator of events
"""
if not from_date:
from_date = DEFAULT_DATETIME
from_date = datetime_to_utc(from_date)
kwargs = {"from_date": from_date, "to_date": to_date}
items = super().fetch(category,
filter_classified=filter_classified,
**kwargs)
return items
def fetch_items(self, category, **kwargs):
"""Fetch the events
:param category: the category of items to fetch
:param kwargs: backend arguments
:returns: a generator of items
"""
from_date = kwargs['from_date']
to_date = kwargs['to_date']
logger.info("Fetching events of '%s' group from %s to %s",
self.group, str(from_date),
str(to_date) if to_date else '--')
to_date_ts = datetime_to_utc(to_date).timestamp() if to_date else None
nevents = 0
stop_fetching = False
ev_pages = self.client.events(self.group, from_date=from_date)
for evp in ev_pages:
events = [event for event in self.parse_json(evp)]
for event in events:
event_id = event['id']
event['comments'] = self.__fetch_and_parse_comments(event_id)
event['rsvps'] = self.__fetch_and_parse_rsvps(event_id)
# Check events updated before 'to_date'
event_ts = self.metadata_updated_on(event)
if to_date_ts and event_ts >= to_date_ts:
stop_fetching = True
continue
yield event
nevents += 1
if stop_fetching:
break
logger.info("Fetch process completed: %s events fetched", nevents)
@classmethod
def has_archiving(cls):
"""Returns whether it supports archiving items on the fetch process.
:returns: this backend supports items archive
"""
return True
@classmethod
def has_resuming(cls):
"""Returns whether it supports to resume the fetch process.
:returns: this backend supports items resuming
"""
return True
@staticmethod
def metadata_id(item):
"""Extracts the identifier from a Meetup item."""
return str(item['id'])
@staticmethod
def metadata_updated_on(item):
"""Extracts and coverts the update time from a Meetup item.
The timestamp is extracted from 'updated' field and converted
to a UNIX timestamp.
:param item: item generated by the backend
:returns: a UNIX timestamp
"""
# Time is in milliseconds, convert it to seconds
ts = item['updated']
ts = ts / 1000.0
return ts
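# Illustrative example: an item with item['updated'] == 1500000000000
# (milliseconds) yields 1500000000.0, a UNIX timestamp in seconds.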
@staticmethod
def metadata_category(item):
"""Extracts the category from a Meetup item.
This backend only generates one type of item which is
'event'.
"""
return CATEGORY_EVENT
@staticmethod
def parse_json(raw_json):
"""Parse a Meetup JSON stream.
The method parses a JSON stream and returns a list
with the parsed data.
:param raw_json: JSON string to parse
:returns: a list with the parsed data
"""
result = json.loads(raw_json)
return result
def _init_client(self, from_archive=False):
"""Init client"""
return MeetupClient(self.api_token, self.max_items,
self.sleep_for_rate, self.min_rate_to_sleep, self.sleep_time,
self.archive, from_archive, self.ssl_verify)
def __fetch_and_parse_comments(self, event_id):
logger.debug("Fetching and parsing comments from group '%s' event '%s'",
self.group, str(event_id))
comments = []
raw_pages = self.client.comments(self.group, event_id)
for raw_page in raw_pages:
for comment in self.parse_json(raw_page):
comments.append(comment)
return comments
def __fetch_and_parse_rsvps(self, event_id):
logger.debug("Fetching and parsing rsvps from group '%s' event '%s'",
self.group, str(event_id))
rsvps = []
raw_pages = self.client.rsvps(self.group, event_id)
for raw_page in raw_pages:
for rsvp in self.parse_json(raw_page):
rsvps.append(rsvp)
return rsvps
class MeetupCommand(BackendCommand):
"""Class to run Meetup backend from the command line."""
BACKEND = Meetup
@classmethod
def setup_cmd_parser(cls):
"""Returns the Meetup argument parser."""
parser = BackendCommandArgumentParser(cls.BACKEND,
from_date=True,
to_date=True,
token_auth=True,
archive=True,
ssl_verify=True)
# Meetup options
group = parser.parser.add_argument_group('Meetup arguments')
group.add_argument('--max-items', dest='max_items',
type=int, default=MAX_ITEMS,
help="Maximum number of items requested on the same query")
group.add_argument('--sleep-for-rate', dest='sleep_for_rate',
action='store_true',
help="sleep for getting more rate")
group.add_argument('--min-rate-to-sleep', dest='min_rate_to_sleep',
default=MIN_RATE_LIMIT, type=int,
help="sleep until reset when the rate limit reaches this value")
group.add_argument('--sleep-time', dest='sleep_time',
default=SLEEP_TIME, type=int,
help="minimun sleeping time to avoid too many request exception")
# Required arguments
parser.parser.add_argument('group',
help="Meetup group name")
return parser
class MeetupClient(HttpClient, RateLimitHandler):
"""Meetup API client.
Client for fetching information from the Meetup server
using its REST API v3.
:param api_token: OAuth2 token needed to access the API
:param max_items: maximum number of items per request
:param sleep_for_rate: sleep until rate limit is reset
:param min_rate_to_sleep: minimum rate needed to sleep until
it will be reset
:param sleep_time: time (in seconds) to sleep in case
of connection problems
:param archive: an archive to store/read fetched data
:param from_archive: it tells whether to write/read the archive
:param ssl_verify: enable/disable SSL verification
"""
EXTRA_STATUS_FORCELIST = [429]
RCOMMENTS = 'comments'
REVENTS = 'events'
RRSVPS = 'rsvps'
PFIELDS = 'fields'
PKEY_OAUTH2 = 'Authorization'
PORDER = 'order'
PPAGE = 'page'
PRESPONSE = 'response'
PSCROLL = 'scroll'
PSTATUS = 'status'
VEVENT_FIELDS = ['event_hosts', 'featured', 'group_topics',
'plain_text_description', 'rsvpable', 'series']
VRSVP_FIELDS = ['attendance_status']
VRESPONSE = ['yes', 'no']
# FIXME: Add 'draft' status when the bug in the Meetup API gets fixed.
# More info in https://github.com/meetup/api/issues/260
VSTATUS = ['cancelled', 'upcoming', 'past', 'proposed', 'suggested']
VUPDATED = 'updated'
def __init__(self, api_token, max_items=MAX_ITEMS,
sleep_for_rate=False, min_rate_to_sleep=MIN_RATE_LIMIT, sleep_time=SLEEP_TIME,
archive=None, from_archive=False, ssl_verify=True):
self.api_token = api_token
self.max_items = max_items
super().__init__(MEETUP_API_URL, sleep_time=sleep_time,
extra_status_forcelist=self.EXTRA_STATUS_FORCELIST,
archive=archive, from_archive=from_archive, ssl_verify=ssl_verify)
super().setup_rate_limit_handler(sleep_for_rate=sleep_for_rate, min_rate_to_sleep=min_rate_to_sleep)
def calculate_time_to_reset(self):
"""Number of seconds to wait. They are contained in the rate limit reset header"""
time_to_reset = 0 if self.rate_limit_reset_ts < 0 else self.rate_limit_reset_ts
return time_to_reset
def events(self, group, from_date=DEFAULT_DATETIME):
"""Fetch the events pages of a given group."""
date = datetime_to_utc(from_date)
date = date.strftime("since:%Y-%m-%dT%H:%M:%S.000Z")
resource = urijoin(group, self.REVENTS)
# Hack required because the Meetup API does not support list
# values in the format `?param=value1&param=value2`;
# it only works with `?param=value1,value2`.
# Moreover, urllib3 encodes comma characters when values
# are given using the params dict, which doesn't work
# with Meetup either.
fixed_params = '?' + self.PFIELDS + '=' + ','.join(self.VEVENT_FIELDS)
fixed_params += '&' + self.PSTATUS + '=' + ','.join(self.VSTATUS)
resource += fixed_params
params = {
self.PORDER: self.VUPDATED,
self.PSCROLL: date,
self.PPAGE: self.max_items
}
try:
for page in self._fetch(resource, params):
yield page
except requests.exceptions.HTTPError as error:
if error.response.status_code == 410:
msg = "Group is no longer accessible: {}".format(error)
raise RepositoryError(cause=msg)
else:
raise error
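# Illustrative usage sketch (hypothetical group name): iterating
#   client.events('python-users')
# yields raw JSON pages (strings) that Meetup.parse_json() turns into
# lists of event dicts; a 410 response becomes a RepositoryError above.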
def comments(self, group, event_id):
"""Fetch the comments of a given event."""
resource = urijoin(group, self.REVENTS, event_id, self.RCOMMENTS)
params = {
self.PPAGE: self.max_items
}
for page in self._fetch(resource, params):
yield page
def rsvps(self, group, event_id):
"""Fetch the rsvps of a given event."""
resource = urijoin(group, self.REVENTS, event_id, self.RRSVPS)
# Same hack as in the 'events' method
fixed_params = '?' + self.PFIELDS + '=' + ','.join(self.VRSVP_FIELDS)
fixed_params += '&' + self.PRESPONSE + '=' + ','.join(self.VRESPONSE)
resource += fixed_params
params = {
self.PPAGE: self.max_items
}
for page in self._fetch(resource, params):
yield page
@staticmethod
def sanitize_for_archive(url, headers, payload):
"""Sanitize payload of a HTTP request by removing the token information
before storing/retrieving archived items
:param: url: HTTP url request
:param: headers: HTTP headers request
:param: payload: HTTP payload request
:returns url, headers and the sanitized payload
"""
if MeetupClient.PKEY_OAUTH2 in headers:
headers.pop(MeetupClient.PKEY_OAUTH2)
return url, headers, payload
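# Illustrative example (hypothetical token):
#   sanitize_for_archive(url, {'Authorization': 'Bearer secret'}, payload)
# returns the same url and payload with the 'Authorization' header removed.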
def _fetch(self, resource, params):
"""Fetch a resource.
Method to fetch and to iterate over the contents of a
type of resource. The method returns a generator of
pages for that resource and parameters.
:param resource: type of the resource
:param params: parameters to filter
:returns: a generator of pages for the requested resource
"""
url = urijoin(self.base_url, resource)
headers = {
self.PKEY_OAUTH2: 'Bearer {}'.format(self.api_token)
}
do_fetch = True
while do_fetch:
logger.debug("Meetup client calls resource: %s params: %s",
resource, str(params))
if not self.from_archive:
self.sleep_for_rate_limit()
r = self.fetch(url, payload=params, headers=headers)
if not self.from_archive:
self.update_rate_limit(r)
yield r.text
if r.links and 'next' in r.links:
url = r.links['next']['url']
else:
do_fetch = False
|
grimoirelab/perceval
|
perceval/backends/core/meetup.py
|
Python
|
gpl-3.0
| 16,239
|
#!/usr/bin/python
#-*- coding: utf-8 -*-
"""
(c) 2012 - Copyright Pierre-Yves Chibon
Author: Pierre-Yves Chibon <pingou@pingoured.fr>
Distributed under License GPLv3 or later
You can find a copy of this license on the website
http://www.gnu.org/licenses/gpl.html
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
fedocal.model test script
"""
from __future__ import unicode_literals, absolute_import, print_function
import unittest
import sys
import os
from datetime import time
from datetime import timedelta
sys.path.insert(0, os.path.join(os.path.dirname(
os.path.abspath(__file__)), '..'))
from fedocal.fedocallib import model
from . import Modeltests, TODAY
from .test_calendar import Calendartests
# pylint: disable=R0904
class Meetingtests(Modeltests):
""" Meeting tests. """
session = None
def test_init_meeting(self):
""" Test the Meeting init function. """
caltest = Calendartests('test_init_calendar')
caltest.session = self.session
caltest.test_init_calendar()
obj = model.Meeting( # id:1
meeting_name='Fedora-fr-test-meeting',
meeting_date=TODAY,
meeting_date_end=TODAY,
meeting_time_start=time(19, 50),
meeting_time_stop=time(20, 50),
meeting_information='This is a test meeting',
calendar_name='test_calendar')
obj.save(self.session)
obj.add_manager(self.session, 'pingou, shaiton,')
self.session.commit()
self.assertNotEqual(obj, None)
obj = model.Meeting( # id:2
meeting_name='test-meeting2',
meeting_date=TODAY + timedelta(days=10),
meeting_date_end=TODAY + timedelta(days=10),
meeting_time_start=time(14, 15),
meeting_time_stop=time(16, 15),
meeting_information='This is another test meeting',
calendar_name='test_calendar')
obj.save(self.session)
obj.add_manager(self.session, ['pingou'])
self.session.commit()
self.assertNotEqual(obj, None)
obj = model.Meeting( # id:3
meeting_name='test-meeting23h59',
meeting_date=TODAY + timedelta(days=20),
meeting_date_end=TODAY + timedelta(days=20),
meeting_time_start=time(23, 00),
meeting_time_stop=time(23, 59),
meeting_information='This is another test meeting',
calendar_name='test_calendar')
obj.save(self.session)
obj.add_manager(self.session, ['pingou23h'])
self.session.commit()
self.assertNotEqual(obj, None)
# Meeting with end_recursion in the past
obj = model.Meeting( # id:4
meeting_name='test-meeting3',
meeting_date=TODAY - timedelta(days=16),
meeting_date_end=TODAY - timedelta(days=16),
meeting_time_start=time(14, 45),
meeting_time_stop=time(16, 35),
meeting_information='Test meeting with past end_recursion.',
calendar_name='test_calendar3',
recursion_frequency=7,
recursion_ends=TODAY - timedelta(days=7))
obj.save(self.session)
obj.add_manager(self.session, ['test2'])
self.session.commit()
self.assertNotEqual(obj, None)
# Two meetings at the same time
obj = model.Meeting( # id:5
meeting_name='test-meeting-st-1',
meeting_date=TODAY + timedelta(days=1),
meeting_date_end=TODAY + timedelta(days=1),
meeting_time_start=time(14, 00),
meeting_time_stop=time(16, 00),
meeting_information='This is a test meeting at the same time',
calendar_name='test_calendar4',
meeting_location='NA')
obj.save(self.session)
obj.add_manager(self.session, ['test'])
self.session.commit()
self.assertNotEqual(obj, None)
obj = model.Meeting( # id:6
meeting_name='test-meeting-st-2',
meeting_date=TODAY + timedelta(days=1),
meeting_date_end=TODAY + timedelta(days=1),
meeting_time_start=time(14, 00),
meeting_time_stop=time(16, 00),
meeting_information='This is a second test meeting at the'
' same time',
calendar_name='test_calendar4',
meeting_location='EMEA')
obj.save(self.session)
obj.add_manager(self.session, ['test'])
self.session.commit()
self.assertNotEqual(obj, None)
# Meeting with a recursion
obj = model.Meeting( # id:7
meeting_name='Another test meeting',
meeting_date=TODAY + timedelta(days=10),
meeting_date_end=TODAY + timedelta(days=10),
meeting_time_start=time(2, 00),
meeting_time_stop=time(3, 00),
meeting_information='This is a test meeting with recursion',
calendar_name='test_calendar',
recursion_frequency=7,
recursion_ends=TODAY + timedelta(days=90))
obj.save(self.session)
obj.add_manager(self.session, ['pingou'])
self.session.commit()
obj = model.Meeting( # id:8
meeting_name='Another test meeting2',
meeting_date=TODAY,
meeting_date_end=TODAY,
meeting_time_start=time(12, 00),
meeting_time_stop=time(13, 00),
meeting_information='This is a test meeting with recursion2',
calendar_name='test_calendar',
recursion_frequency=14,
recursion_ends=TODAY + timedelta(days=90))
obj.save(self.session)
obj.add_manager(self.session, ['pingou'])
self.session.commit()
self.assertNotEqual(obj, None)
# Meeting with a reminder
remobj = model.Reminder(
'H-12', 'pingou@fp.o', 'root@localhost',
'Come to our test meeting')
remobj.save(self.session)
self.session.flush()
obj = model.Meeting( # id:9
meeting_name='Test meeting with reminder',
meeting_date=TODAY + timedelta(days=11),
meeting_date_end=TODAY + timedelta(days=11),
meeting_time_start=time(11, 00),
meeting_time_stop=time(12, 00),
meeting_information='This is a test meeting with reminder',
calendar_name='test_calendar',
reminder_id=remobj.reminder_id)
obj.save(self.session)
obj.add_manager(self.session, ['pingou'])
self.session.commit()
self.assertNotEqual(obj, None)
# Meeting with a recursion and reminder
self.session.flush()
remobj = model.Reminder(
'H-12', 'pingou@fp.o', 'root@localhost',
'Come to our test meeting')
remobj.save(self.session)
self.session.flush()
obj = model.Meeting( # id:10
meeting_name='Test meeting with reminder and recursion',
meeting_date=TODAY + timedelta(days=12),
meeting_date_end=TODAY + timedelta(days=12),
meeting_time_start=time(10, 00),
meeting_time_stop=time(11, 00),
meeting_information='This is a test meeting with recursion'
' and reminder',
calendar_name='test_calendar',
reminder_id=remobj.reminder_id,
recursion_frequency=7,
recursion_ends=TODAY + timedelta(days=60))
obj.save(self.session)
obj.add_manager(self.session, ['pingou'])
self.session.commit()
self.assertNotEqual(obj, None)
obj = model.Meeting( # id:11
meeting_name='test-meeting-11',
meeting_date=TODAY + timedelta(days=20),
meeting_date_end=TODAY + timedelta(days=20),
meeting_time_start=time(17, 00),
meeting_time_stop=time(18, 00),
meeting_information='This is a second test meeting in EMEA',
calendar_name='test_calendar4',
meeting_location='EMEA')
obj.save(self.session)
obj.add_manager(self.session, ['test'])
self.session.commit()
self.assertNotEqual(obj, None)
# Past meeting with a recursion
obj = model.Meeting( # id:12
meeting_name='Another past test meeting',
meeting_date=TODAY - timedelta(days=14),
meeting_date_end=TODAY - timedelta(days=14),
meeting_time_start=time(4, 00),
meeting_time_stop=time(5, 00),
meeting_information='This is a past meeting with recursion',
calendar_name='test_calendar',
recursion_frequency=7,
recursion_ends=TODAY + timedelta(days=90),
full_day=False)
obj.save(self.session)
obj.add_manager(self.session, ['pingou'])
self.session.commit()
self.assertNotEqual(obj, None)
# Full day meeting
obj = model.Meeting( # id:13
meeting_name='Full-day meeting',
meeting_date=TODAY + timedelta(days=3),
meeting_date_end=TODAY + timedelta(days=3),
meeting_time_start=time(0, 00),
meeting_time_stop=time(23, 59),
meeting_information='This is a full day meeting',
calendar_name='test_calendar',
recursion_frequency=None,
recursion_ends=None,
full_day=True)
obj.save(self.session)
obj.add_manager(self.session, ['pingou'])
self.session.commit()
self.assertNotEqual(obj, None)
# Full day meeting with recursion
obj = model.Meeting( # id:14
meeting_name='Full-day meeting with recursion',
meeting_date=TODAY + timedelta(days=10),
meeting_date_end=TODAY + timedelta(days=10),
meeting_time_start=time(0, 00),
meeting_time_stop=time(23, 59),
meeting_information='Full day meeting with recursion',
calendar_name='test_calendar',
recursion_frequency=7,
recursion_ends=TODAY + timedelta(days=30),
full_day=True)
obj.save(self.session)
obj.add_manager(self.session, ['pingou'])
self.session.commit()
self.assertNotEqual(obj, None)
# Full day meeting
obj = model.Meeting( # id:15
meeting_name='Full-day meeting2',
meeting_date=TODAY + timedelta(days=2),
meeting_date_end=TODAY + timedelta(days=3),
meeting_time_start=time(0, 00),
meeting_time_stop=time(23, 59),
meeting_information='Full day meeting 2',
calendar_name='test_calendar2',
full_day=True)
obj.save(self.session)
obj.add_manager(self.session, ['toshio'])
self.session.commit()
self.assertNotEqual(obj, None)
def test_repr_meeting(self):
""" Test the Meeting string representation function. """
self.test_init_meeting()
obj = model.Meeting.by_id(self.session, 1)
self.assertNotEqual(obj, None)
self.assertEqual(
str(obj),
"<Meeting('1' - '<Calendar('test_calendar')>', "
"'Fedora-fr-test-meeting', '" + str(TODAY) + "')>")
def test_repr_meeting_user(self):
""" Test the Meeting string representation function. """
self.test_init_meeting()
obj = model.Meeting.by_id(self.session, 1)
self.assertNotEqual(obj, None)
self.assertEqual(
str(obj.meeting_manager_user[0]),
"<MeetingsUsers('1', 'pingou')>")
self.assertEqual(
str(obj.meeting_manager_user[0].user),
"<User('pingou')>")
def test_delete_meeting(self):
""" Test the Meeting delete function. """
self.test_init_meeting()
obj = model.Meeting.by_id(self.session, 1)
self.assertNotEqual(obj, None)
obj.delete(self.session)
self.session.commit()
obj = model.Meeting.by_id(self.session, 1)
self.assertEqual(obj, None)
def test_copy_meeting(self):
""" Test the Meeting copy function. """
self.test_init_meeting()
obj = model.Meeting.by_id(self.session, 1)
self.assertNotEqual(obj, None)
obj2 = obj.copy()
self.assertNotEqual(obj2, None)
self.assertEqual(obj.meeting_name, obj2.meeting_name)
self.assertEqual([], obj2.meeting_manager)
self.assertEqual(obj.meeting_date, obj2.meeting_date)
self.assertEqual(obj.meeting_time_start, obj2.meeting_time_start)
self.assertEqual(obj.meeting_time_stop, obj2.meeting_time_stop)
self.assertEqual(obj.meeting_information, obj2.meeting_information)
self.assertEqual(obj.calendar_name, obj2.calendar_name)
self.assertEqual(obj.reminder_id, obj2.reminder_id)
self.assertEqual(obj.meeting_location, obj2.meeting_location)
self.assertEqual(obj.recursion_frequency, obj2.recursion_frequency)
self.assertEqual(obj.recursion_ends, obj2.recursion_ends)
def test_copy_meeting_to_existing(self):
""" Test the Meeting copy existing function. """
self.test_init_meeting()
obj = model.Meeting.by_id(self.session, 1)
self.assertNotEqual(obj, None)
obj2 = model.Meeting.by_id(self.session, 2)
self.assertNotEqual(obj2, None)
# Check that before the copy the objects are different:
self.assertNotEqual(obj.meeting_name, obj2.meeting_name)
self.assertNotEqual([], obj2.meeting_manager)
self.assertNotEqual(obj.meeting_date, obj2.meeting_date)
self.assertNotEqual(obj.meeting_time_start, obj2.meeting_time_start)
self.assertNotEqual(obj.meeting_time_stop, obj2.meeting_time_stop)
self.assertNotEqual(obj.meeting_information, obj2.meeting_information)
self.assertEqual(obj.reminder_id, obj2.reminder_id)
self.assertEqual(obj.meeting_location, obj2.meeting_location)
self.assertEqual(obj.recursion_frequency, obj2.recursion_frequency)
self.assertEqual(obj.recursion_ends, obj2.recursion_ends)
obj.copy(obj2)
# Check that after the copy the objects are equal
self.assertEqual(obj.meeting_name, obj2.meeting_name)
self.assertEqual(obj.meeting_manager, ['pingou', 'shaiton'])
self.assertEqual(obj.meeting_manager, obj2.meeting_manager)
# The date remains unchanged
self.assertNotEqual(obj.meeting_date, obj2.meeting_date)
self.assertEqual(obj.meeting_time_start, obj2.meeting_time_start)
self.assertEqual(obj.meeting_time_stop, obj2.meeting_time_stop)
# The meeting information also remains unchanged
self.assertNotEqual(obj.meeting_information, obj2.meeting_information)
def test_get_meeting(self):
""" Test the query of a meeting by its identifier. """
self.test_init_meeting()
obj = model.Meeting.by_id(self.session, 1)
self.assertNotEqual(obj, None)
self.assertEqual(obj.meeting_name, 'Fedora-fr-test-meeting')
self.assertEqual(obj.meeting_manager, ['pingou', 'shaiton'])
self.assertEqual(obj.calendar.calendar_name, 'test_calendar')
self.assertEqual(
obj.calendar.calendar_description, 'This is a test calendar')
self.assertEqual(obj.reminder, None)
def test_to_json_meeting(self):
""" Test the to_json method a meeting. """
self.test_init_meeting()
obj = model.Meeting.by_id(self.session, 1)
exp = {
"meeting_name": "Fedora-fr-test-meeting",
"meeting_manager": ['pingou', 'shaiton'],
"meeting_date": TODAY.strftime('%Y-%m-%d'),
"meeting_date_end": TODAY.strftime('%Y-%m-%d'),
"meeting_time_start": "19:50:00",
"meeting_time_stop": "20:50:00",
"meeting_information": "This is a test meeting",
"meeting_location": None,
"calendar_name": "test_calendar"
}
obs = obj.to_json()
self.assertEqual(len(set(obs.keys()).intersection(exp.keys())), 9)
keys = exp.keys()
for key in keys:
self.assertEqual(obs[key], exp[key])
def test_get_at_date(self):
""" Test the get_at_date function. """
self.test_init_meeting()
cal = model.Calendar.by_id(self.session, 'test_calendar')
obj = model.Meeting.get_at_date(
self.session, cal, TODAY)
self.assertNotEqual(obj, None)
self.assertEqual(len(obj), 2)
self.assertEqual(obj[0].meeting_name, 'Another test meeting2')
self.assertEqual(obj[0].meeting_manager, ['pingou'])
self.assertEqual(obj[0].calendar.calendar_name, 'test_calendar')
self.assertEqual(
obj[0].meeting_information,
'This is a test meeting with recursion2')
self.assertEqual(obj[0].reminder, None)
self.assertEqual(obj[1].meeting_name, 'Fedora-fr-test-meeting')
self.assertEqual(obj[1].meeting_manager, ['pingou', 'shaiton'])
self.assertEqual(obj[1].calendar.calendar_name, 'test_calendar')
self.assertEqual(
obj[1].meeting_information, 'This is a test meeting')
self.assertEqual(obj[1].reminder, None)
obj = model.Meeting.get_at_date(
self.session, cal, TODAY, full_day=True)
self.assertNotEqual(obj, None)
self.assertEqual(len(obj), 0)
obj = model.Meeting.get_at_date(
self.session, cal, TODAY, full_day=False)
self.assertEqual(obj[0].meeting_name, 'Another test meeting2')
self.assertEqual(obj[0].meeting_manager, ['pingou'])
self.assertEqual(obj[0].calendar.calendar_name, 'test_calendar')
self.assertEqual(
obj[0].meeting_information,
'This is a test meeting with recursion2')
self.assertEqual(obj[0].reminder, None)
self.assertEqual(obj[1].meeting_name, 'Fedora-fr-test-meeting')
self.assertEqual(obj[1].meeting_manager, ['pingou', 'shaiton'])
self.assertEqual(obj[1].calendar.calendar_name, 'test_calendar')
self.assertEqual(
obj[1].meeting_information, 'This is a test meeting')
self.assertEqual(obj[1].reminder, None)
def test_get_by_date(self):
""" Test the query of a list of meetings between two dates. """
self.test_init_meeting()
week_day = TODAY
week_start = week_day - timedelta(days=week_day.weekday())
week_stop = week_day + timedelta(days=6)
cal = model.Calendar.by_id(self.session, 'test_calendar')
obj = model.Meeting.get_by_date(
self.session, cal, week_start, week_stop)
self.assertNotEqual(obj, None)
self.assertEqual(len(obj), 3)
self.assertEqual(obj[0].meeting_name, 'Another test meeting2')
self.assertEqual(obj[0].meeting_manager, ['pingou'])
self.assertEqual(obj[0].calendar.calendar_name, 'test_calendar')
self.assertEqual(
obj[0].meeting_information,
'This is a test meeting with recursion2')
self.assertEqual(obj[0].reminder, None)
self.assertEqual(obj[1].meeting_name, 'Fedora-fr-test-meeting')
self.assertEqual(obj[1].meeting_manager, ['pingou', 'shaiton'])
self.assertEqual(obj[1].calendar.calendar_name, 'test_calendar')
self.assertEqual(
obj[1].meeting_information, 'This is a test meeting')
self.assertEqual(obj[1].reminder, None)
self.assertEqual(obj[2].meeting_name, 'Full-day meeting')
self.assertEqual(obj[2].meeting_manager, ['pingou'])
self.assertEqual(obj[2].calendar.calendar_name, 'test_calendar')
self.assertEqual(
obj[2].meeting_information, 'This is a full day meeting')
self.assertEqual(obj[2].reminder, None)
obj = model.Meeting.get_by_date(
self.session, cal, week_start, week_stop, no_recursive=True)
self.assertNotEqual(obj, None)
self.assertEqual(len(obj), 2)
self.assertEqual(obj[0].meeting_name, 'Fedora-fr-test-meeting')
self.assertEqual(obj[0].meeting_manager, ['pingou', 'shaiton'])
self.assertEqual(obj[0].calendar.calendar_name, 'test_calendar')
self.assertEqual(
obj[0].meeting_information, 'This is a test meeting')
self.assertEqual(obj[0].reminder, None)
self.assertEqual(obj[1].meeting_name, 'Full-day meeting')
self.assertEqual(obj[1].meeting_manager, ['pingou'])
self.assertEqual(obj[1].calendar.calendar_name, 'test_calendar')
self.assertEqual(
obj[1].meeting_information, 'This is a full day meeting')
self.assertEqual(obj[1].reminder, None)
week_stop = week_day + timedelta(days=12)
obj = model.Meeting.get_by_date(
self.session, cal, week_start, week_stop)
self.assertNotEqual(obj, None)
self.assertEqual(len(obj), 8)
def test_get_by_date_and_region(self):
""" Test the query of a list of meetings between two dates. """
self.test_init_meeting()
week_day = TODAY
week_start = week_day - timedelta(days=week_day.weekday())
week_stop = week_day + timedelta(days=2)
cal = model.Calendar.by_id(self.session, 'test_calendar4')
obj = model.Meeting.get_by_date_and_location(
self.session, cal, week_start, week_stop, 'EMEA')
self.assertNotEqual(obj, None)
self.assertEqual(len(obj), 1)
self.assertEqual(obj[0].meeting_name, 'test-meeting-st-2')
self.assertEqual(obj[0].meeting_manager, ['test'])
self.assertEqual(obj[0].calendar.calendar_name, 'test_calendar4')
self.assertEqual(
obj[0].meeting_information,
'This is a second test meeting at the same time')
self.assertEqual(obj[0].reminder, None)
obj = model.Meeting.get_by_date_and_location(
self.session, cal, week_start, week_stop, 'NA')
self.assertNotEqual(obj, None)
self.assertEqual(len(obj), 1)
self.assertEqual(obj[0].meeting_name, 'test-meeting-st-1')
self.assertEqual(obj[0].meeting_manager, ['test'])
self.assertEqual(obj[0].calendar.calendar_name, 'test_calendar4')
self.assertEqual(
obj[0].meeting_information,
'This is a test meeting at the same time')
self.assertEqual(obj[0].reminder, None)
obj = model.Meeting.get_by_date_and_location(
self.session, cal, week_start, week_stop, 'APAC')
self.assertNotEqual(obj, None)
self.assertEqual(len(obj), 0)
def test_get_past_meeting_of_user(self):
""" Test the Meeting get_past_meeting_of_user function. """
self.test_init_meeting()
meetings = model.Meeting.get_past_meeting_of_user(
self.session, 'pingou', TODAY + timedelta(days=1))
self.assertNotEqual(meetings, None)
self.assertEqual(len(meetings), 3)
self.assertEqual(
meetings[0].meeting_name, 'Another past test meeting')
self.assertEqual(
meetings[1].meeting_name, 'Another test meeting2')
self.assertEqual(
meetings[2].meeting_name, 'Fedora-fr-test-meeting')
meetings = model.Meeting.get_past_meeting_of_user(
self.session, 'shaiton', TODAY + timedelta(days=1))
self.assertNotEqual(meetings, None)
self.assertEqual(len(meetings), 1)
self.assertEqual(
meetings[0].meeting_name, 'Fedora-fr-test-meeting')
# pylint: disable=C0103
def test_get_past_meeting_of_user_fail(self):
""" Test the Meeting get_past_meeting_of_user function when
the user does not exist or there is nothing on that day. """
self.test_init_meeting()
meetings = model.Meeting.get_past_meeting_of_user(
self.session, 'fakeuser', TODAY + timedelta(days=1))
self.assertNotEqual(meetings, None)
self.assertEqual(len(meetings), 0)
self.assertEqual(meetings, [])
meetings = model.Meeting.get_past_meeting_of_user(
self.session, 'fakeuser', TODAY - timedelta(days=1))
self.assertNotEqual(meetings, None)
self.assertEqual(len(meetings), 0)
self.assertEqual(meetings, [])
def test_get_regular_meeting_at_date(self):
""" Test the get_regular_meeting_at_date function. """
self.test_init_meeting()
cal = model.Calendar.by_id(self.session, 'test_calendar')
obj = model.Meeting.get_regular_meeting_at_date(
self.session, cal, TODAY + timedelta(days=19))
self.assertNotEqual(obj, None)
self.assertEqual(len(obj), 1)
self.assertEqual(
obj[0].meeting_name,
'Test meeting with reminder and recursion')
self.assertEqual(obj[0].meeting_manager, ['pingou'])
self.assertEqual(obj[0].calendar.calendar_name, 'test_calendar')
self.assertEqual(
obj[0].meeting_information,
'This is a test meeting with recursion and reminder')
self.assertNotEqual(obj[0].reminder, None)
obj = model.Meeting.get_regular_meeting_at_date(
self.session, cal, TODAY + timedelta(days=19), full_day=True)
self.assertNotEqual(obj, None)
self.assertEqual(len(obj), 0)
self.assertEqual(obj, [])
obj = model.Meeting.get_regular_meeting_at_date(
self.session, cal, TODAY + timedelta(days=19), full_day=False)
self.assertNotEqual(obj, None)
self.assertEqual(len(obj), 1)
self.assertEqual(
obj[0].meeting_name,
'Test meeting with reminder and recursion')
self.assertEqual(obj[0].meeting_manager, ['pingou'])
self.assertEqual(obj[0].calendar.calendar_name, 'test_calendar')
self.assertEqual(
obj[0].meeting_information,
'This is a test meeting with recursion and reminder')
self.assertNotEqual(obj[0].reminder, None)
# pylint: disable=C0103
def test_get_future_single_meeting_of_user(self):
""" Test the Meeting get_future_single_meeting_of_user function.
"""
self.test_init_meeting()
meetings = model.Meeting.get_future_single_meeting_of_user(
self.session, 'pingou', TODAY)
self.assertNotEqual(meetings, None)
self.assertEqual(len(meetings), 4)
self.assertEqual(
meetings[0].meeting_name, 'Fedora-fr-test-meeting')
self.assertEqual(
meetings[1].meeting_name, 'Full-day meeting')
self.assertEqual(
meetings[2].meeting_name, 'test-meeting2')
self.assertEqual(
meetings[3].meeting_name, 'Test meeting with reminder')
# pylint: disable=C0103
def test_get_future_single_meeting_of_user_fail(self):
""" Test the Meeting get_future_single_meeting_of_user function
when the user does not exist or there is simply nothing on that date
"""
self.test_init_meeting()
meetings = model.Meeting.get_future_single_meeting_of_user(
self.session, 'fakeuser', TODAY)
self.assertNotEqual(meetings, None)
self.assertEqual(len(meetings), 0)
self.assertEqual(meetings, [])
meetings = model.Meeting.get_future_single_meeting_of_user(
self.session, 'pingou', TODAY + timedelta(days=100))
self.assertNotEqual(meetings, None)
self.assertEqual(len(meetings), 0)
self.assertEqual(meetings, [])
# pylint: disable=C0103
def test_get_future_regular_meeting_of_user(self):
""" Test the Meeting get_future_regular_meeting_of_user function.
"""
self.test_init_meeting()
meetings = model.Meeting.get_future_regular_meeting_of_user(
self.session, 'pingou', TODAY)
self.assertNotEqual(meetings, None)
self.assertEqual(len(meetings), 5)
self.assertEqual(
meetings[0].meeting_name, 'Another past test meeting')
self.assertEqual(
meetings[1].meeting_name, 'Another test meeting2')
self.assertEqual(
meetings[2].meeting_name, 'Full-day meeting with recursion')
self.assertEqual(
meetings[3].meeting_name, 'Another test meeting')
self.assertEqual(
meetings[4].meeting_name,
'Test meeting with reminder and recursion')
# pylint: disable=C0103
def test_get_future_regular_meeting_of_user_fail(self):
""" Test the Meeting get_future_regular_meeting_of_user function
when the user does not exist or there is simply nothing on that date
"""
self.test_init_meeting()
meetings = model.Meeting.get_future_regular_meeting_of_user(
self.session, 'fakeuser', TODAY)
self.assertNotEqual(meetings, None)
self.assertEqual(len(meetings), 0)
self.assertEqual(meetings, [])
meetings = model.Meeting.get_future_regular_meeting_of_user(
self.session, 'pingou', TODAY + timedelta(days=100))
self.assertNotEqual(meetings, None)
self.assertEqual(len(meetings), 0)
self.assertEqual(meetings, [])
# pylint: disable=C0103
def test_get_meeting_with_reminder(self):
""" Test the Meeting get_meeting_with_reminder function. """
self.test_init_meeting()
for delta in [0, 7, 14, 21]:
meetings = model.Meeting.get_meeting_with_reminder(
self.session, TODAY + timedelta(days=12 + delta),
time(10, 00), time(10, 30), 'H-12')
self.assertNotEqual(meetings, None)
self.assertEqual(len(meetings), 1)
self.assertEqual(
meetings[0].meeting_name,
'Test meeting with reminder and recursion')
meetings = model.Meeting.get_meeting_with_reminder(
self.session, TODAY + timedelta(days=11),
time(11, 00), time(11, 30), 'H-12')
self.assertNotEqual(meetings, None)
self.assertEqual(len(meetings), 1)
self.assertEqual(
meetings[0].meeting_name, 'Test meeting with reminder')
def test_get_meeting_with_reminder_fail(self):
""" Test the Meeting get_meeting_with_reminder function
when the offset is invalid, or when nothing matches the given
start time or day.
"""
self.test_init_meeting()
meetings = model.Meeting.get_meeting_with_reminder(
self.session, TODAY + timedelta(days=11),
time(11, 00), time(11, 30), 'H-168')
self.assertNotEqual(meetings, None)
self.assertEqual(len(meetings), 0)
self.assertEqual(meetings, [])
meetings = model.Meeting.get_meeting_with_reminder(
self.session, TODAY + timedelta(days=11),
time(9, 00), time(9, 30), 'H-12')
self.assertNotEqual(meetings, None)
self.assertEqual(len(meetings), 0)
self.assertEqual(meetings, [])
meetings = model.Meeting.get_meeting_with_reminder(
self.session, TODAY + timedelta(days=100),
time(11, 00), time(11, 30), 'H-12')
self.assertNotEqual(meetings, None)
self.assertEqual(len(meetings), 0)
self.assertEqual(meetings, [])
meetings = model.Meeting.get_meeting_with_reminder(
self.session, TODAY + timedelta(days=11),
time(10, 30), time(11, 00), 'H-12')
self.assertNotEqual(meetings, None)
self.assertEqual(len(meetings), 0)
self.assertEqual(meetings, [])
meetings = model.Meeting.get_meeting_with_reminder(
self.session, TODAY + timedelta(days=11),
time(11, 30), time(12, 00), 'H-12')
self.assertNotEqual(meetings, None)
self.assertEqual(len(meetings), 0)
self.assertEqual(meetings, [])
for delta in [0, 7, 14, 21]:
meetings = model.Meeting.get_meeting_with_reminder(
self.session, TODAY + timedelta(days=12 + delta),
time(9, 30), time(10, 00), 'H-12')
self.assertNotEqual(meetings, None)
self.assertEqual(len(meetings), 0)
self.assertEqual(meetings, [])
meetings = model.Meeting.get_meeting_with_reminder(
self.session, TODAY + timedelta(days=12 + delta),
time(10, 30), time(11, 00), 'H-12')
self.assertNotEqual(meetings, None)
self.assertEqual(len(meetings), 0)
self.assertEqual(meetings, [])
if __name__ == '__main__':
SUITE = unittest.TestLoader().loadTestsFromTestCase(Meetingtests)
unittest.TextTestRunner(verbosity=2).run(SUITE)
|
fedora-infra/fedocal
|
tests/test_meeting.py
|
Python
|
gpl-3.0
| 33,466
|
# -*- coding: utf-8 -*-
""" Exception classes
"""
class DontwiNotImplementedError(NotImplementedError):
"""Raised when not implemented method was called.
"""
pass
class DontwiConfigError(AttributeError):
"""Raised when reading or parsing error occurred.
Attributes:
message -- explanation of the configuration error
"""
def __init__(self, message):
self.message = message
class DontwiMediaError(IOError):
"""Raised when reading or processing error occurred.
"""
pass
class StatusTextError(AttributeError):
"""Raised when processing error occurred.
"""
def __init__(self, message):
self.message = message
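# Illustrative usage (hypothetical call site, not from this module):
#     raise DontwiConfigError("missing 'operation' section in the config")
# Callers can catch the class and report exc.message to the user.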
|
vocalodon/dontwi
|
dontwi/exception.py
|
Python
|
gpl-3.0
| 706
|
# coding: utf-8
#
# Copyright 2014 NAMD-EMAP-FGV
#
# This file is part of PyPLN. You can get more information at: http://pypln.org/.
#
# PyPLN is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyPLN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyPLN. If not, see <http://www.gnu.org/licenses/>.
import base64
from StringIO import StringIO
from PIL import Image
from pypln.backend.workers import WordCloud
from utils import TaskTest
class TestWordCloudWorker(TaskTest):
name = "WordCloud"
def test_wordcloud_should_return_a_base64_encoded_png(self):
self.document['freqdist'] = [('is', 2), ('the', 2), ('blue', 1), ('sun', 1),
('sky', 1), (',', 1), ('yellow', 1), ('.', 1)]
self.document['language'] = 'en'
WordCloud().delay(self.fake_id).get()
raw_png_data = base64.b64decode(self.document['wordcloud'])
fake_file = StringIO(raw_png_data)
img = Image.open(fake_file)
img.verify()
self.assertEqual(img.format, 'PNG')
|
fccoelho/pypln.backend
|
tests/test_worker_wordcloud.py
|
Python
|
gpl-3.0
| 1,468
|
# sample code for the Sieve of Eratosthenes
def sieve(limit):
composites = set()  # a set makes the "already marked" lookups O(1)
primes = []
for i in range(2, limit + 1):  # Iterate from 2 up to the limit
if i not in composites:  # check if number is not already marked
primes.append(i)  # add the unmarked number to the prime list!
for j in range(i * i, limit + 1, i):  # starting at i squared, find multiples
composites.add(j)  # mark these numbers as non-prime
return primes
if __name__ == "__main__": # if you run this file, it'll print the sieve.
print(sieve(100)) # if you import it, this line is not executed.
|
willybh11/python
|
Euler/primesSieve.py
|
Python
|
gpl-3.0
| 637
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# streamondemand.- XBMC Plugin
# Channel for italiaserie
# http://blog.tvalacarta.info/plugin-xbmc/streamondemand.
# ------------------------------------------------------------
import re
import sys
import urlparse
from core import config
from core import logger
from core import scrapertools
from core.item import Item
__channel__ = "italiaserie"
__category__ = "S,A"
__type__ = "generic"
__title__ = "italiaserie"
__language__ = "IT"
DEBUG = config.get_setting("debug")
host = "http://www.italiaserie.com"
def isGeneric():
return True
def mainlist(item):
logger.info("streamondemand.filmpertutti mainlist")
itemlist = [Item(channel=__channel__,
title="[COLOR azure]Tutte Le Serie Tv[/COLOR]",
action="peliculas",
url=host,
thumbnail="http://xbmc-repo-ackbarr.googlecode.com/svn/trunk/dev/skin.cirrus%20extended%20v2/extras/moviegenres/New%20TV%20Shows.png"),
Item(channel=__channel__,
title="[COLOR azure]Serie TV - Top 10[/COLOR]",
action="peliculas2",
url="%s/top-10/" % host,
thumbnail="http://i.imgur.com/cnnUCXh.png"),
Item(channel=__channel__,
title="[COLOR azure]Sezione Cartoni Animati - Anime[/COLOR]",
action="peliculas",
url="%s/genere/anime-e-cartoni/" % host,
thumbnail="http://orig09.deviantart.net/df5a/f/2014/169/2/a/fist_of_the_north_star_folder_icon_by_minacsky_saya-d7mq8c8.png"),
Item(channel=__channel__,
title="[COLOR yellow]Cerca...[/COLOR]",
action="search",
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
return itemlist
def peliculas(item):
logger.info("streamondemand.italiaserie peliculas")
itemlist = []
# Download the page
data = scrapertools.cache_page(item.url)
# Extract the entries (folders)
patron = '<div class="post-thumb">\s*<a href="([^"]+)" title="([^"]+)">\s*<img src="([^"]+)"[^>]+>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
# scrapedplot = ""
html = scrapertools.cache_page(scrapedurl)
start = html.find("<div class=\"entry-content\">")
end = html.find("</p>", start)
scrapedplot = html[start:end]
scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)
scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
if (DEBUG): logger.info(
"title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(
Item(channel=__channel__,
action="findvideos",
fulltitle=scrapedtitle,
show=scrapedtitle,
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
folder=True))
# Extract the pager
patronvideos = '<a class="next page-numbers" href="([^"]+)">Avanti »</a>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
if len(matches) > 0:
scrapedurl = urlparse.urljoin(item.url, matches[0])
itemlist.append(
Item(channel=__channel__,
action="HomePage",
title="[COLOR yellow]Torna Home[/COLOR]",
folder=True))
itemlist.append(
Item(channel=__channel__,
action="peliculas",
title="[COLOR orange]Successivo >>[/COLOR]",
url=scrapedurl,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
return itemlist
def HomePage(item):
import xbmc
xbmc.executebuiltin("ReplaceWindow(10024,plugin://plugin.video.streamondemand-pureita-master)")
def peliculas2(item):
logger.info("streamondemand.italiaserie peliculas")
itemlist = []
# Download the page
data = scrapertools.cache_page(item.url)
# Extract the entries (folders)
patron = '<h3><a href="([^"]+)">(.*?)</a></h3>.*?<img.*?src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle.replace("Streaming", "")
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
if scrapedtitle.startswith("Link to "):
scrapedtitle = scrapedtitle[8:]
if (DEBUG): logger.info(
"title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(
Item(channel=__channel__,
action="findvideos",
fulltitle=scrapedtitle,
show=scrapedtitle,
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
folder=True))
return itemlist
def anime(item):
itemlist = []
data = scrapertools.cache_page(item.url)
patron = '</h2><ul><li><strong>Categoria:</strong>(.*?)</ul></li><li>'
data = scrapertools.find_single_match(data, patron)
patron = '<li><a href="([^"]+)" title="([^"]+)">.*?</a></li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
if (DEBUG): logger.info(
"title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(
Item(channel=__channel__,
action="animelink",
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
folder=True))
return itemlist
def search(item, texto):
logger.info("[italiaserie.py] " + item.url + " search " + texto)
item.url = host + "/?s=" + texto
try:
return peliculas(item)
# Catch the exception so that a failing channel does not break the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
|
orione7/Italorione
|
channels/italiaserie.py
|
Python
|
gpl-3.0
| 6,895
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, unittest
import frappe.defaults
from frappe.utils import flt, nowdate, nowtime, add_days
from erpnext.stock.doctype.serial_no.serial_no import *
from erpnext.stock.doctype.purchase_receipt.test_purchase_receipt \
import set_perpetual_inventory
from erpnext.stock.doctype.stock_ledger_entry.stock_ledger_entry import StockFreezeError
from erpnext.stock.stock_ledger import get_previous_sle
from erpnext.stock.doctype.stock_reconciliation.test_stock_reconciliation import create_stock_reconciliation
from frappe.tests.test_permissions import set_user_permission_doctypes
from erpnext.stock.doctype.stock_entry.stock_entry_utils import make_stock_entry
def get_sle(**args):
condition, values = "", []
for key, value in args.iteritems():
condition += " and " if condition else " where "
condition += "`{0}`=%s".format(key)
values.append(value)
return frappe.db.sql("""select * from `tabStock Ledger Entry` %s
order by timestamp(posting_date, posting_time) desc, name desc limit 1"""% condition,
values, as_dict=1)
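# Illustrative call (hypothetical values): get_sle(item_code="_Test Item",
# warehouse="_Test Warehouse - _TC") expands the kwargs into
# " where `item_code`=%s and `warehouse`=%s" and returns the most recent
# matching Stock Ledger Entry as a one-row list of dicts (hence the [0]
# at the call sites).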
class TestStockEntry(unittest.TestCase):
def tearDown(self):
frappe.set_user("Administrator")
set_perpetual_inventory(0)
for role in ("Stock User", "Sales User"):
set_user_permission_doctypes(doctype="Stock Entry", role=role,
apply_user_permissions=0, user_permission_doctypes=None)
def test_fifo(self):
frappe.db.set_value("Stock Settings", None, "allow_negative_stock", 1)
item_code = "_Test Item 2"
warehouse = "_Test Warehouse - _TC"
create_stock_reconciliation(item_code="_Test Item 2", warehouse="_Test Warehouse - _TC",
qty=0, rate=100)
make_stock_entry(item_code=item_code, target=warehouse, qty=1, basic_rate=10)
sle = get_sle(item_code = item_code, warehouse = warehouse)[0]
self.assertEqual([[1, 10]], eval(sle.stock_queue))
# negative qty
make_stock_entry(item_code=item_code, source=warehouse, qty=2, basic_rate=10)
sle = get_sle(item_code = item_code, warehouse = warehouse)[0]
self.assertEqual([[-1, 10]], eval(sle.stock_queue))
# further negative
make_stock_entry(item_code=item_code, source=warehouse, qty=1)
sle = get_sle(item_code = item_code, warehouse = warehouse)[0]
self.assertEqual([[-2, 10]], eval(sle.stock_queue))
# move stock to positive
make_stock_entry(item_code=item_code, target=warehouse, qty=3, basic_rate=20)
sle = get_sle(item_code = item_code, warehouse = warehouse)[0]
self.assertEqual([[1, 20]], eval(sle.stock_queue))
# incoming entry with diff rate
make_stock_entry(item_code=item_code, target=warehouse, qty=1, basic_rate=30)
sle = get_sle(item_code = item_code, warehouse = warehouse)[0]
self.assertEqual([[1, 20],[1, 30]], eval(sle.stock_queue))
frappe.db.set_default("allow_negative_stock", 0)
def test_auto_material_request(self):
from erpnext.stock.doctype.item.test_item import make_item_variant
make_item_variant()
self._test_auto_material_request("_Test Item")
self._test_auto_material_request("_Test Item", material_request_type="Transfer")
def test_auto_material_request_for_variant(self):
self._test_auto_material_request("_Test Variant Item-S")
def test_auto_material_request_for_warehouse_group(self):
self._test_auto_material_request("_Test Item Warehouse Group Wise Reorder", warehouse="_Test Warehouse Group-C1 - _TC")
def _test_auto_material_request(self, item_code, material_request_type="Purchase", warehouse="_Test Warehouse - _TC"):
item = frappe.get_doc("Item", item_code)
if item.variant_of:
template = frappe.get_doc("Item", item.variant_of)
else:
template = item
projected_qty, actual_qty = frappe.db.get_value("Bin", {"item_code": item_code,
"warehouse": warehouse}, ["projected_qty", "actual_qty"]) or [0, 0]
# stock entry required for auto-reorder
create_stock_reconciliation(item_code=item_code, warehouse=warehouse,
qty = actual_qty + abs(projected_qty) + 10, rate=100)
projected_qty = frappe.db.get_value("Bin", {"item_code": item_code,
"warehouse": warehouse}, "projected_qty") or 0
frappe.db.set_value("Stock Settings", None, "auto_indent", 1)
# update reorder level qty so that it is more than projected_qty
if projected_qty >= template.reorder_levels[0].warehouse_reorder_level:
template.reorder_levels[0].warehouse_reorder_level += projected_qty
template.reorder_levels[0].material_request_type = material_request_type
template.save()
from erpnext.stock.reorder_item import reorder_item
mr_list = reorder_item()
frappe.db.set_value("Stock Settings", None, "auto_indent", 0)
items = []
for mr in mr_list:
for d in mr.items:
items.append(d.item_code)
self.assertTrue(item_code in items)
def test_material_receipt_gl_entry(self):
set_perpetual_inventory()
mr = make_stock_entry(item_code="_Test Item", target="_Test Warehouse - _TC",
qty=50, basic_rate=100, expense_account="Stock Adjustment - _TC")
stock_in_hand_account = frappe.db.get_value("Account", {"account_type": "Stock",
"warehouse": mr.get("items")[0].t_warehouse})
self.check_stock_ledger_entries("Stock Entry", mr.name,
[["_Test Item", "_Test Warehouse - _TC", 50.0]])
self.check_gl_entries("Stock Entry", mr.name,
sorted([
[stock_in_hand_account, 5000.0, 0.0],
["Stock Adjustment - _TC", 0.0, 5000.0]
])
)
mr.cancel()
self.assertFalse(frappe.db.sql("""select * from `tabStock Ledger Entry`
where voucher_type='Stock Entry' and voucher_no=%s""", mr.name))
self.assertFalse(frappe.db.sql("""select * from `tabGL Entry`
where voucher_type='Stock Entry' and voucher_no=%s""", mr.name))
def test_material_issue_gl_entry(self):
set_perpetual_inventory()
make_stock_entry(item_code="_Test Item", target="_Test Warehouse - _TC",
qty=50, basic_rate=100, expense_account="Stock Adjustment - _TC")
mi = make_stock_entry(item_code="_Test Item", source="_Test Warehouse - _TC",
qty=40, expense_account="Stock Adjustment - _TC")
self.check_stock_ledger_entries("Stock Entry", mi.name,
[["_Test Item", "_Test Warehouse - _TC", -40.0]])
stock_in_hand_account = frappe.db.get_value("Account", {"account_type": "Stock",
"warehouse": "_Test Warehouse - _TC"})
stock_value_diff = abs(frappe.db.get_value("Stock Ledger Entry", {"voucher_type": "Stock Entry",
"voucher_no": mi.name}, "stock_value_difference"))
self.check_gl_entries("Stock Entry", mi.name,
sorted([
[stock_in_hand_account, 0.0, stock_value_diff],
["Stock Adjustment - _TC", stock_value_diff, 0.0]
])
)
mi.cancel()
self.assertFalse(frappe.db.sql("""select name from `tabStock Ledger Entry`
where voucher_type='Stock Entry' and voucher_no=%s""", mi.name))
self.assertFalse(frappe.db.sql("""select name from `tabGL Entry`
where voucher_type='Stock Entry' and voucher_no=%s""", mi.name))
def test_material_transfer_gl_entry(self):
set_perpetual_inventory()
create_stock_reconciliation(qty=100, rate=100)
mtn = make_stock_entry(item_code="_Test Item", source="_Test Warehouse - _TC",
target="_Test Warehouse 1 - _TC", qty=45)
self.check_stock_ledger_entries("Stock Entry", mtn.name,
[["_Test Item", "_Test Warehouse - _TC", -45.0], ["_Test Item", "_Test Warehouse 1 - _TC", 45.0]])
stock_in_hand_account = frappe.db.get_value("Account", {"account_type": "Stock",
"warehouse": mtn.get("items")[0].s_warehouse})
fixed_asset_account = frappe.db.get_value("Account", {"account_type": "Stock",
"warehouse": mtn.get("items")[0].t_warehouse})
stock_value_diff = abs(frappe.db.get_value("Stock Ledger Entry", {"voucher_type": "Stock Entry",
"voucher_no": mtn.name, "warehouse": "_Test Warehouse - _TC"}, "stock_value_difference"))
self.check_gl_entries("Stock Entry", mtn.name,
sorted([
[stock_in_hand_account, 0.0, stock_value_diff],
[fixed_asset_account, stock_value_diff, 0.0],
])
)
mtn.cancel()
self.assertFalse(frappe.db.sql("""select * from `tabStock Ledger Entry`
where voucher_type='Stock Entry' and voucher_no=%s""", mtn.name))
self.assertFalse(frappe.db.sql("""select * from `tabGL Entry`
where voucher_type='Stock Entry' and voucher_no=%s""", mtn.name))
def test_repack_no_change_in_valuation(self):
set_perpetual_inventory(0)
make_stock_entry(item_code="_Test Item", target="_Test Warehouse - _TC", qty=50, basic_rate=100)
make_stock_entry(item_code="_Test Item Home Desktop 100", target="_Test Warehouse - _TC",
qty=50, basic_rate=100)
repack = frappe.copy_doc(test_records[3])
repack.posting_date = nowdate()
repack.posting_time = nowtime()
repack.insert()
repack.submit()
self.check_stock_ledger_entries("Stock Entry", repack.name,
[["_Test Item", "_Test Warehouse - _TC", -50.0],
["_Test Item Home Desktop 100", "_Test Warehouse - _TC", 1]])
gl_entries = frappe.db.sql("""select account, debit, credit
from `tabGL Entry` where voucher_type='Stock Entry' and voucher_no=%s
order by account desc""", repack.name, as_dict=1)
self.assertFalse(gl_entries)
set_perpetual_inventory(0)
def test_repack_with_additional_costs(self):
set_perpetual_inventory()
make_stock_entry(item_code="_Test Item", target="_Test Warehouse - _TC", qty=50, basic_rate=100)
repack = frappe.copy_doc(test_records[3])
repack.posting_date = nowdate()
repack.posting_time = nowtime()
repack.set("additional_costs", [
{
"description": "Actual Oerating Cost",
"amount": 1000
},
{
"description": "additional operating costs",
"amount": 200
},
])
repack.insert()
repack.submit()
stock_in_hand_account = frappe.db.get_value("Account", {"account_type": "Stock",
"warehouse": repack.get("items")[1].t_warehouse})
rm_stock_value_diff = abs(frappe.db.get_value("Stock Ledger Entry", {"voucher_type": "Stock Entry",
"voucher_no": repack.name, "item_code": "_Test Item"}, "stock_value_difference"))
fg_stock_value_diff = abs(frappe.db.get_value("Stock Ledger Entry", {"voucher_type": "Stock Entry",
"voucher_no": repack.name, "item_code": "_Test Item Home Desktop 100"}, "stock_value_difference"))
stock_value_diff = flt(fg_stock_value_diff - rm_stock_value_diff, 2)
self.assertEqual(stock_value_diff, 1200)
self.check_gl_entries("Stock Entry", repack.name,
sorted([
[stock_in_hand_account, 1200, 0.0],
["Expenses Included In Valuation - _TC", 0.0, 1200.0]
])
)
set_perpetual_inventory(0)
def check_stock_ledger_entries(self, voucher_type, voucher_no, expected_sle):
expected_sle.sort(key=lambda x: x[0])
# check stock ledger entries
sle = frappe.db.sql("""select item_code, warehouse, actual_qty
from `tabStock Ledger Entry` where voucher_type = %s
and voucher_no = %s order by item_code, warehouse, actual_qty""",
(voucher_type, voucher_no), as_list=1)
self.assertTrue(sle)
sle.sort(key=lambda x: x[0])
for i, row in enumerate(sle):
self.assertEquals(expected_sle[i][0], row[0])
self.assertEquals(expected_sle[i][1], row[1])
self.assertEquals(expected_sle[i][2], row[2])
def check_gl_entries(self, voucher_type, voucher_no, expected_gl_entries):
expected_gl_entries.sort(key=lambda x: x[0])
gl_entries = frappe.db.sql("""select account, debit, credit
from `tabGL Entry` where voucher_type=%s and voucher_no=%s
order by account asc, debit asc""", (voucher_type, voucher_no), as_list=1)
self.assertTrue(gl_entries)
gl_entries.sort(key=lambda x: x[0])
for i, gle in enumerate(gl_entries):
self.assertEquals(expected_gl_entries[i][0], gle[0])
self.assertEquals(expected_gl_entries[i][1], gle[1])
self.assertEquals(expected_gl_entries[i][2], gle[2])
def test_serial_no_not_reqd(self):
se = frappe.copy_doc(test_records[0])
se.get("items")[0].serial_no = "ABCD"
se.insert()
self.assertRaises(SerialNoNotRequiredError, se.submit)
def test_serial_no_reqd(self):
se = frappe.copy_doc(test_records[0])
se.get("items")[0].item_code = "_Test Serialized Item"
se.get("items")[0].qty = 2
se.get("items")[0].transfer_qty = 2
se.insert()
self.assertRaises(SerialNoRequiredError, se.submit)
def test_serial_no_qty_more(self):
se = frappe.copy_doc(test_records[0])
se.get("items")[0].item_code = "_Test Serialized Item"
se.get("items")[0].qty = 2
se.get("items")[0].serial_no = "ABCD\nEFGH\nXYZ"
se.get("items")[0].transfer_qty = 2
se.insert()
self.assertRaises(SerialNoQtyError, se.submit)
def test_serial_no_qty_less(self):
se = frappe.copy_doc(test_records[0])
se.get("items")[0].item_code = "_Test Serialized Item"
se.get("items")[0].qty = 2
se.get("items")[0].serial_no = "ABCD"
se.get("items")[0].transfer_qty = 2
se.insert()
self.assertRaises(SerialNoQtyError, se.submit)
def test_serial_no_transfer_in(self):
se = frappe.copy_doc(test_records[0])
se.get("items")[0].item_code = "_Test Serialized Item"
se.get("items")[0].qty = 2
se.get("items")[0].serial_no = "ABCD\nEFGH"
se.get("items")[0].transfer_qty = 2
se.insert()
se.submit()
self.assertTrue(frappe.db.exists("Serial No", "ABCD"))
self.assertTrue(frappe.db.exists("Serial No", "EFGH"))
se.cancel()
self.assertFalse(frappe.db.get_value("Serial No", "ABCD", "warehouse"))
def test_serial_no_not_exists(self):
frappe.db.sql("delete from `tabSerial No` where name in ('ABCD', 'EFGH')")
make_serialized_item(target_warehouse="_Test Warehouse 1 - _TC")
se = frappe.copy_doc(test_records[0])
se.purpose = "Material Issue"
se.get("items")[0].item_code = "_Test Serialized Item With Series"
se.get("items")[0].qty = 2
se.get("items")[0].s_warehouse = "_Test Warehouse 1 - _TC"
se.get("items")[0].t_warehouse = None
se.get("items")[0].serial_no = "ABCD\nEFGH"
se.get("items")[0].transfer_qty = 2
se.insert()
self.assertRaises(SerialNoNotExistsError, se.submit)
def test_serial_duplicate(self):
se, serial_nos = self.test_serial_by_series()
se = frappe.copy_doc(test_records[0])
se.get("items")[0].item_code = "_Test Serialized Item With Series"
se.get("items")[0].qty = 1
se.get("items")[0].serial_no = serial_nos[0]
se.get("items")[0].transfer_qty = 1
se.insert()
self.assertRaises(SerialNoDuplicateError, se.submit)
def test_serial_by_series(self):
se = make_serialized_item()
serial_nos = get_serial_nos(se.get("items")[0].serial_no)
self.assertTrue(frappe.db.exists("Serial No", serial_nos[0]))
self.assertTrue(frappe.db.exists("Serial No", serial_nos[1]))
return se, serial_nos
def test_serial_item_error(self):
se, serial_nos = self.test_serial_by_series()
make_serialized_item("_Test Serialized Item", "ABCD\nEFGH")
se = frappe.copy_doc(test_records[0])
se.purpose = "Material Transfer"
se.get("items")[0].item_code = "_Test Serialized Item"
se.get("items")[0].qty = 1
se.get("items")[0].transfer_qty = 1
se.get("items")[0].serial_no = serial_nos[0]
se.get("items")[0].s_warehouse = "_Test Warehouse - _TC"
se.get("items")[0].t_warehouse = "_Test Warehouse 1 - _TC"
se.insert()
self.assertRaises(SerialNoItemError, se.submit)
def test_serial_move(self):
se = make_serialized_item()
serial_no = get_serial_nos(se.get("items")[0].serial_no)[0]
se = frappe.copy_doc(test_records[0])
se.purpose = "Material Transfer"
se.get("items")[0].item_code = "_Test Serialized Item With Series"
se.get("items")[0].qty = 1
se.get("items")[0].transfer_qty = 1
se.get("items")[0].serial_no = serial_no
se.get("items")[0].s_warehouse = "_Test Warehouse - _TC"
se.get("items")[0].t_warehouse = "_Test Warehouse 1 - _TC"
se.insert()
se.submit()
self.assertTrue(frappe.db.get_value("Serial No", serial_no, "warehouse"), "_Test Warehouse 1 - _TC")
se.cancel()
self.assertTrue(frappe.db.get_value("Serial No", serial_no, "warehouse"), "_Test Warehouse - _TC")
def test_serial_warehouse_error(self):
make_serialized_item(target_warehouse="_Test Warehouse 1 - _TC")
t = make_serialized_item()
serial_nos = get_serial_nos(t.get("items")[0].serial_no)
se = frappe.copy_doc(test_records[0])
se.purpose = "Material Transfer"
se.get("items")[0].item_code = "_Test Serialized Item With Series"
se.get("items")[0].qty = 1
se.get("items")[0].transfer_qty = 1
se.get("items")[0].serial_no = serial_nos[0]
se.get("items")[0].s_warehouse = "_Test Warehouse 1 - _TC"
se.get("items")[0].t_warehouse = "_Test Warehouse - _TC"
se.insert()
self.assertRaises(SerialNoWarehouseError, se.submit)
def test_serial_cancel(self):
se, serial_nos = self.test_serial_by_series()
se.cancel()
serial_no = get_serial_nos(se.get("items")[0].serial_no)[0]
self.assertFalse(frappe.db.get_value("Serial No", serial_no, "warehouse"))
def test_warehouse_company_validation(self):
set_perpetual_inventory(0)
frappe.get_doc("User", "test2@example.com")\
.add_roles("Sales User", "Sales Manager", "Stock User", "Stock Manager")
frappe.set_user("test2@example.com")
from erpnext.stock.utils import InvalidWarehouseCompany
st1 = frappe.copy_doc(test_records[0])
st1.get("items")[0].t_warehouse="_Test Warehouse 2 - _TC1"
st1.insert()
self.assertRaises(InvalidWarehouseCompany, st1.submit)
# permission tests
def test_warehouse_user(self):
set_perpetual_inventory(0)
for role in ("Stock User", "Sales User"):
set_user_permission_doctypes(doctype="Stock Entry", role=role,
apply_user_permissions=1, user_permission_doctypes=["Warehouse"])
frappe.defaults.add_default("Warehouse", "_Test Warehouse 1 - _TC", "test@example.com", "User Permission")
frappe.defaults.add_default("Warehouse", "_Test Warehouse 2 - _TC1", "test2@example.com", "User Permission")
test_user = frappe.get_doc("User", "test@example.com")
test_user.add_roles("Sales User", "Sales Manager", "Stock User")
test_user.remove_roles("Stock Manager")
frappe.get_doc("User", "test2@example.com")\
.add_roles("Sales User", "Sales Manager", "Stock User", "Stock Manager")
frappe.set_user("test@example.com")
st1 = frappe.copy_doc(test_records[0])
st1.company = "_Test Company 1"
st1.get("items")[0].t_warehouse="_Test Warehouse 2 - _TC1"
self.assertRaises(frappe.PermissionError, st1.insert)
frappe.set_user("test2@example.com")
st1 = frappe.copy_doc(test_records[0])
st1.company = "_Test Company 1"
st1.get("items")[0].t_warehouse="_Test Warehouse 2 - _TC1"
st1.insert()
st1.submit()
frappe.defaults.clear_default("Warehouse", "_Test Warehouse 1 - _TC",
"test@example.com", parenttype="User Permission")
frappe.defaults.clear_default("Warehouse", "_Test Warehouse 2 - _TC1",
"test2@example.com", parenttype="User Permission")
def test_freeze_stocks(self):
frappe.db.set_value('Stock Settings', None,'stock_auth_role', '')
# test freeze_stocks_upto
frappe.db.set_value("Stock Settings", None, "stock_frozen_upto", add_days(nowdate(), 5))
se = frappe.copy_doc(test_records[0]).insert()
self.assertRaises(StockFreezeError, se.submit)
frappe.db.set_value("Stock Settings", None, "stock_frozen_upto", '')
# test freeze_stocks_upto_days
frappe.db.set_value("Stock Settings", None, "stock_frozen_upto_days", 7)
se = frappe.copy_doc(test_records[0])
se.set_posting_time = 1
se.posting_date = add_days(nowdate(), -15)
se.insert()
self.assertRaises(StockFreezeError, se.submit)
frappe.db.set_value("Stock Settings", None, "stock_frozen_upto_days", 0)
def test_production_order(self):
from erpnext.manufacturing.doctype.production_order.production_order \
import make_stock_entry as _make_stock_entry
bom_no, bom_operation_cost = frappe.db.get_value("BOM", {"item": "_Test FG Item 2",
"is_default": 1, "docstatus": 1}, ["name", "operating_cost"])
production_order = frappe.new_doc("Production Order")
production_order.update({
"company": "_Test Company",
"fg_warehouse": "_Test Warehouse 1 - _TC",
"production_item": "_Test FG Item 2",
"bom_no": bom_no,
"qty": 1.0,
"stock_uom": "_Test UOM",
"wip_warehouse": "_Test Warehouse - _TC",
"additional_operating_cost": 1000
})
production_order.insert()
production_order.submit()
make_stock_entry(item_code="_Test Item", target="_Test Warehouse - _TC", qty=50, basic_rate=100)
stock_entry = _make_stock_entry(production_order.name, "Manufacture", 1)
rm_cost = 0
for d in stock_entry.get("items"):
if d.s_warehouse:
rm_cost += flt(d.amount)
fg_cost = filter(lambda x: x.item_code=="_Test FG Item 2", stock_entry.get("items"))[0].amount
self.assertEqual(fg_cost,
flt(rm_cost + bom_operation_cost + production_order.additional_operating_cost, 2))
def test_variant_production_order(self):
bom_no = frappe.db.get_value("BOM", {"item": "_Test Variant Item",
"is_default": 1, "docstatus": 1})
production_order = frappe.new_doc("Production Order")
production_order.update({
"company": "_Test Company",
"fg_warehouse": "_Test Warehouse 1 - _TC",
"production_item": "_Test Variant Item-S",
"bom_no": bom_no,
"qty": 1.0,
"stock_uom": "_Test UOM",
"wip_warehouse": "_Test Warehouse - _TC"
})
production_order.insert()
production_order.submit()
from erpnext.manufacturing.doctype.production_order.production_order import make_stock_entry
stock_entry = frappe.get_doc(make_stock_entry(production_order.name, "Manufacture", 1))
stock_entry.insert()
self.assertTrue("_Test Variant Item-S" in [d.item_code for d in stock_entry.items])
def test_same_serial_nos_in_repack_or_manufacture_entries(self):
s1 = make_serialized_item(target_warehouse="_Test Warehouse - _TC")
serial_nos = s1.get("items")[0].serial_no
s2 = make_stock_entry(item_code="_Test Serialized Item With Series", source="_Test Warehouse - _TC",
qty=2, basic_rate=100, purpose="Repack", serial_no=serial_nos, do_not_save=True)
s2.append("items", {
"item_code": "_Test Serialized Item",
"t_warehouse": "_Test Warehouse - _TC",
"qty": 2,
"basic_rate": 120,
"expense_account": "Stock Adjustment - _TC",
"conversion_factor": 1.0,
"cost_center": "_Test Cost Center - _TC",
"serial_no": serial_nos
})
s2.submit()
s2.cancel()
def make_serialized_item(item_code=None, serial_no=None, target_warehouse=None):
se = frappe.copy_doc(test_records[0])
se.get("items")[0].item_code = item_code or "_Test Serialized Item With Series"
se.get("items")[0].serial_no = serial_no
se.get("items")[0].qty = 2
se.get("items")[0].transfer_qty = 2
if target_warehouse:
se.get("items")[0].t_warehouse = target_warehouse
se.insert()
se.submit()
return se
def get_qty_after_transaction(**args):
args = frappe._dict(args)
last_sle = get_previous_sle({
"item_code": args.item_code or "_Test Item",
"warehouse": args.warehouse or "_Test Warehouse - _TC",
"posting_date": args.posting_date or nowdate(),
"posting_time": args.posting_time or nowtime()
})
return flt(last_sle.get("qty_after_transaction"))
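# Illustrative call (hypothetical values): get_qty_after_transaction(
# item_code="_Test Item", warehouse="_Test Warehouse - _TC") looks up the
# latest Stock Ledger Entry up to the given posting date/time and returns
# its qty_after_transaction as a float (0.0 when no entry exists).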
test_records = frappe.get_test_records('Stock Entry')
|
KanchanChauhan/erpnext
|
erpnext/stock/doctype/stock_entry/test_stock_entry.py
|
Python
|
gpl-3.0
| 23,016
|
# -*- coding: utf-8 -*-
"""
Qt4 TS file parser for Python
"""
from __future__ import absolute_import
import re, collections
import time
import xml.dom.minidom
import xml.parsers.expat
from xml.sax.saxutils import escape as xml_escape
from django.db import transaction
from django.db.models import get_model
from django.utils.translation import ugettext, ugettext_lazy as _
from transifex.txcommon.log import logger
from transifex.txcommon.exceptions import FileCheckError
from transifex.resources.formats.core import ParseError, CompileError, \
Handler, STRICT
from .compilation import PluralCompiler, SimpleCompilerFactory
from transifex.resources.formats.resource_collections import StringSet, \
GenericTranslation
from suggestions.models import Suggestion
from transifex.resources.formats.utils.decorators import *
from transifex.resources.formats.utils.hash_tag import hash_tag,\
escape_context, hash_regex
# Resources models
Resource = get_model('resources', 'Resource')
Translation = get_model('resources', 'Translation')
SourceEntity = get_model('resources', 'SourceEntity')
class LinguistParseError(ParseError):
pass
class LinguistCompileError(CompileError):
pass
def _context_of_message(message):
"""Get the context value of a message node."""
context_node = message.parentNode
context_name_element = _getElementByTagName(context_node, "name")
if context_name_element.firstChild:
if context_name_element.firstChild.nodeValue:
context_name = escape_context(
[context_name_element.firstChild.nodeValue])
else:
context_name = []
else:
context_name = []
try:
c_node = _getElementByTagName(message, "comment")
comment_text = _getText(c_node.childNodes)
if comment_text:
comment = escape_context([comment_text])
else:
comment = []
except LinguistParseError, e:
comment = []
return (context_name + comment) or "None"
def _getElementByTagName(element, tagName, noneAllowed = False):
elements = element.getElementsByTagName(tagName)
if not noneAllowed and not elements:
raise LinguistParseError(_("Element '%s' not found!" % tagName))
if len(elements) > 1:
raise LinguistParseError(_("Multiple '%s' elements found!" % tagName))
return elements[0]
def _get_attribute(element, key, die = False):
if element.attributes.has_key(key):
return element.attributes[key].value
elif die:
raise LinguistParseError("Could not find attribute '%s' "\
"for element '%s'" % (key, element.tagName))
else:
return None
def _getText(nodelist):
rc = []
for node in nodelist:
if hasattr(node, 'data'):
rc.append(node.data)
else:
rc.append(node.toxml())
return ''.join(rc)
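# Illustrative behaviour (hypothetical input): for the child nodes of
# <source>a<b/>c</source>, text nodes contribute their .data ("a", "c")
# while other nodes contribute their XML ("<b/>"), giving "a<b/>c".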
class QtCompiler(PluralCompiler):
"""Compiler for Qt resources."""
def _update_plural_hashes(self, translations, content):
"""Add plurals hashes"""
language = self.language
doc = xml.dom.minidom.parseString(content.encode('utf-8'))
root = doc.documentElement
root.attributes["language"] = language.code
md5_pattern = r'[0-9a-f]{32}'
plural_pattern = r'(?P<md5>%s)_pl_\d' % md5_pattern
plural_regex = re.compile(plural_pattern, re.IGNORECASE)
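# The compiled template stores every pluralized string as placeholders of
# the form "<md5>_pl_<n>" (e.g. "0cc175b9c0f1b6a831c399e269772661_pl_0" --
# an illustrative hash); the loop below rewrites one <numerusform> per
# plural rule of the target language from that hash.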
for message in doc.getElementsByTagName("message"):
translation = _getElementByTagName(message, "translation")
if message.attributes.has_key("numerus") and \
message.attributes['numerus'].value=='yes':
source = _getElementByTagName(message, "source")
numerusforms = message.getElementsByTagName('numerusform')
# If we have an id for the message use this as the source
# string, otherwise use the actual source string
if message.attributes.has_key("id"):
sourceString = message.attributes['id'].value
else:
sourceString = _getText(source.childNodes)
#update plural hashes for target language
plural_keys = {}
lang_rules = language.get_pluralrules_numbers()
m = plural_regex.search(numerusforms[0].firstChild.data)
if m:
translation.childNodes = []
string_hash = m.group('md5')
# Initialize all plural rules up to the last one
for p,n in enumerate(lang_rules):
plural_keys[p] = "%s_pl_%d" % (string_hash, p)
message.setAttribute('numerus', 'yes')
for key in plural_keys.iterkeys():
e = doc.createElement("numerusform")
e.appendChild(
doc.createTextNode(plural_keys[key])
)
translation.appendChild(e)
if not translations[plural_keys[key]]:
translation.attributes['type'] = 'unfinished'
else:
if not translation.childNodes:
# The translation element should have only one child node, the text
translation.attributes['type'] = 'unfinished'
elif not translations.get(translation.childNodes[0].nodeValue, None):
translation.attributes['type'] = 'unfinished'
translation.childNodes = []
return doc.toxml()
def _post_compile(self):
super(QtCompiler, self)._post_compile(self.compiled_template)
esc_template_text = re.sub("'(?=(?:(?!>).)*<\/source>)",
r"'", self.compiled_template)
esc_template_text = re.sub("'(?=(?:(?!>).)*<\/translation>)",
r"'", esc_template_text)
self.compiled_template = esc_template_text
class LinguistHandler(SimpleCompilerFactory, Handler):
name = "Qt4 TS parser"
format = "Qt4 Translation XML files (*.ts)"
method_name = 'QT'
HandlerParseError = LinguistParseError
HandlerCompileError = LinguistCompileError
CompilerClass = QtCompiler
def _escape(self, s):
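# Illustrative (hypothetical input): _escape('it\'s "ok"') returns
# 'it&apos;s &quot;ok&quot;', so quotes survive inside XML attributes.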
return xml_escape(s, {"'": "&apos;", '"': "&quot;"})
def _parse(self, is_source, lang_rules):
"""
Parses Qt file and exports all entries as GenericTranslations.
"""
def clj(s, w):
return s[:w].replace("\n", " ").ljust(w)
if lang_rules:
nplural = len(lang_rules)
else:
nplural = self.language.get_pluralrules_numbers()
try:
doc = xml.dom.minidom.parseString(
self.content.encode(self.format_encoding)
)
except Exception, e:
logger.warning("QT parsing: %s" % e.message, exc_info=True)
raise LinguistParseError(_(
"Your file doesn't seem to contain valid xml: %s!" % e.message
))
if hasattr(doc, 'doctype') and hasattr(doc.doctype, 'name'):
if doc.doctype.name != "TS":
raise LinguistParseError(_("Incorrect doctype!"))
else:
raise LinguistParseError(_("Uploaded file has no Doctype!"))
root = doc.documentElement
if root.tagName != "TS":
raise LinguistParseError(_("Root element is not 'TS'"))
# This needed to be commented out due to the 'is_source' parameter.
# When is_source=True we return the value of the <source> node as the
# translation for the given file, instead of the <translation> node(s).
#stringset.target_language = language
#language = get_attribute(root, "language", die = STRICT)
i = 1
# There can be many <message> elements, they might have
# 'encoding' or 'numerus' = 'yes' | 'no' attributes
# if 'numerus' = 'yes' then 'translation' element contains 'numerusform' elements
for context in root.getElementsByTagName("context"):
context_name_element = _getElementByTagName(context, "name")
if context_name_element.firstChild:
if context_name_element.firstChild.nodeValue:
context_name = escape_context(
[context_name_element.firstChild.nodeValue])
else:
context_name = []
else:
context_name = []
for message in context.getElementsByTagName("message"):
occurrences = []
# NB! There can be zero to many <location> elements, but all
# of them must have 'filename' and 'line' attributes
for location in message.getElementsByTagName("location"):
if location.attributes.has_key("filename") and \
location.attributes.has_key("line"):
occurrences.append("%s:%i" % (
location.attributes["filename"].value,
int(location.attributes["line"].value)))
elif STRICT:
raise LinguistParseError(_("Malformed 'location' element"))
pluralized = False
if message.attributes.has_key("numerus") and \
message.attributes['numerus'].value=='yes':
pluralized = True
source = _getElementByTagName(message, "source")
try:
translation = _getElementByTagName(message, "translation")
except LinguistParseError:
translation = None
try:
ec_node = _getElementByTagName(message, "extracomment")
extracomment = _getText(ec_node.childNodes)
except LinguistParseError, e:
extracomment = None
# <comment> elements in ts files are also used to distinguish entries,
# so we append it to the context to make the entry unique
try:
c_node = _getElementByTagName(message, "comment")
comment_text = _getText(c_node.childNodes)
if comment_text:
comment = escape_context([comment_text])
else:
comment = []
except LinguistParseError, e:
comment = []
status = None
if source.firstChild:
sourceString = _getText(source.childNodes)
else:
sourceString = None # WTF?
# Check whether the message is using logical id
if message.attributes.has_key("id"):
sourceStringText = sourceString
sourceString = message.attributes['id'].value
else:
sourceStringText = None
same_nplural = True
obsolete, fuzzy = False, False
messages = []
if is_source:
if translation and translation.attributes.has_key("variants") and \
translation.attributes['variants'].value == 'yes':
logger.error("Source file has unsupported"
" variants.")
raise LinguistParseError(_("Qt Linguist variants are"
" not yet supported."))
# Skip obsolete strings.
if translation and translation.attributes.has_key("type"):
status = translation.attributes["type"].value.lower()
if status == "obsolete":
continue
translation_text = None
if translation:
translation_text = _getText(translation.childNodes)
messages = [(5, translation_text or sourceStringText or sourceString)]
# remove unfinished/obsolete attrs from template
if translation and translation.attributes.has_key("type"):
status = translation.attributes["type"].value.lower()
if status == "unfinished":
del translation.attributes["type"]
if pluralized:
if translation:
try:
numerusforms = translation.getElementsByTagName('numerusform')
messages = []
for n,f in enumerate(numerusforms):
if numerusforms[n].attributes.has_key("variants") and \
numerusforms[n].attributes['variants'].value == 'yes':
logger.error("Source file has unsupported"
" variants.")
raise LinguistParseError(_("Source file"
" could not be imported: Qt Linguist"
" variants are not supported."))
for n,f in enumerate(numerusforms):
if numerusforms[n].attributes.has_key("variants") and \
numerusforms[n].attributes['variants'].value == 'yes':
continue
for n,f in enumerate(numerusforms):
nf=numerusforms[n]
messages.append((nplural[n], _getText(nf.childNodes)
or sourceStringText or sourceString ))
except LinguistParseError, e:
pass
else:
plural_numbers = self.language.get_pluralrules_numbers()
for p in plural_numbers:
if p != 5:
messages.append((p, sourceStringText or sourceString))
elif translation and translation.firstChild:
# For messages with variants set to 'yes', we skip them
# altogether. We can't support variants at the moment...
if translation.attributes.has_key("variants") and \
translation.attributes['variants'].value == 'yes':
continue
# Skip obsolete strings.
if translation.attributes.has_key("type"):
status = translation.attributes["type"].value.lower()
if status == "obsolete":
continue
if translation.attributes.has_key("type"):
status = translation.attributes["type"].value.lower()
if status == "unfinished" and\
not pluralized:
suggestion = GenericTranslation(sourceString,
_getText(translation.childNodes),
context=context_name + comment,
occurrences= ";".join(occurrences))
self.suggestions.add(suggestion)
else:
logger.error("Element 'translation' attribute "\
"'type' is neither 'unfinished' nor 'obsolete'")
continue
if not pluralized:
messages = [(5, _getText(translation.childNodes))]
else:
numerusforms = translation.getElementsByTagName('numerusform')
try:
for n,f in enumerate(numerusforms):
if numerusforms[n].attributes.has_key("variants") and \
numerusforms[n].attributes['variants'].value == 'yes':
raise StopIteration
except StopIteration:
continue
if nplural:
nplural_file = len(numerusforms)
if len(nplural) != nplural_file:
logger.error("Passed plural rules has nplurals=%s"
", but '%s' file has nplurals=%s. String '%s'"
"skipped." % (nplural, self.filename,
nplural_file, sourceString))
same_nplural = False
else:
same_nplural = False
if not same_nplural:
# If we're missing plurals, skip them altogether
continue
for n,f in enumerate(numerusforms):
nf=numerusforms[n]
if nf.firstChild:
messages.append((nplural[n], _getText(nf.childNodes)))
# NB! If <translation> doesn't have a type attribute, it means that the string is finished
if sourceString and messages:
for msg in messages:
self._add_translation_string(
sourceString, msg[1],
context = context_name + comment, rule=msg[0],
occurrences = ";".join(occurrences),
pluralized=pluralized, fuzzy=fuzzy,
comment=extracomment, obsolete=obsolete)
i += 1
if is_source:
if sourceString is None:
continue
if message.attributes.has_key("numerus") and \
message.attributes['numerus'].value=='yes' and translation:
numerusforms = translation.getElementsByTagName('numerusform')
for n,f in enumerate(numerusforms):
f.appendChild(doc.createTextNode(
"%(hash)s_pl_%(key)s" %
{
'hash': hash_tag(sourceString,
context_name + comment),
'key': n
}
))
else:
if not translation:
translation = doc.createElement("translation")
# Delete all child nodes. This is useful for xml-like
# strings (e.g. html) where the translation text is split
# across multiple nodes.
translation.childNodes = []
translation.appendChild(doc.createTextNode(
("%(hash)s_tr" % {'hash': hash_tag(
sourceString, context_name + comment)})
))
return doc
def _generate_template(self, doc):
# Ugly fix to revert single quotes back to the escaped version
template_text = doc.toxml().encode('utf-8')
esc_template_text = re.sub(
"'(?=(?:(?!>).)*<\/source>)",
r"&apos;", template_text
)
return esc_template_text
|
tymofij/adofex
|
transifex/resources/formats/qt.py
|
Python
|
gpl-3.0
| 19,864
|
# Authors: Rich Megginson <richm@redhat.com>
# Rob Crittenden <rcritten@redhat.com>
# John Dennis <jdennis@redhat.com>
#
# Copyright (C) 2007 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import os
import os.path
import socket
import ldif
import re
import string
import ldap
import cStringIO
import time
import struct
import ldap.sasl
import ldapurl
from ldap.controls import LDAPControl
from ldap.ldapobject import SimpleLDAPObject
from ipapython.ipa_log_manager import *
from ipapython import ipautil
from ipalib import errors
from ipapython.ipautil import format_netloc, wait_for_open_socket, wait_for_open_ports
from ipapython.dn import DN
from ipapython.entity import Entity
from ipaserver.plugins.ldap2 import IPASimpleLDAPObject
# Global variable to define SASL auth
SASL_AUTH = ldap.sasl.sasl({},'GSSAPI')
DEFAULT_TIMEOUT = 10
class IPAEntryLDAPObject(IPASimpleLDAPObject):
def __init__(self, *args, **kwds):
IPASimpleLDAPObject.__init__(self, *args, **kwds)
def result(self, msgid=ldap.RES_ANY, all=1, timeout=None):
objtype, data = IPASimpleLDAPObject.result(self, msgid, all, timeout)
# data is either a 2-tuple or a list of 2-tuples
if data:
if isinstance(data, tuple):
return objtype, Entry(data)
elif isinstance(data, list):
return objtype, [Entry(x) for x in data]
else:
raise TypeError, "unknown data type %s returned by result" % type(data)
else:
return objtype, data
def add(self, dn, modlist):
if isinstance(dn, Entry):
return IPASimpleLDAPObject.add(self, dn.dn, dn.toTupleList())
else:
return IPASimpleLDAPObject.add(self, dn, modlist)
def add_s(self, dn, modlist):
if isinstance(dn, Entry):
return IPASimpleLDAPObject.add_s(self, dn.dn, dn.toTupleList())
else:
return IPASimpleLDAPObject.add_s(self, dn, modlist)
def add_ext(self, dn, modlist, serverctrls=None, clientctrls=None):
if isinstance(dn, Entry):
return IPASimpleLDAPObject.add_ext(self, dn.dn, dn.toTupleList(), serverctrls, clientctrls)
else:
return IPASimpleLDAPObject.add_ext(self, dn, modlist, serverctrls, clientctrls)
def add_ext_s(self, dn, modlist, serverctrls=None, clientctrls=None):
if isinstance(dn, Entry):
return IPASimpleLDAPObject.add_ext_s(self, dn.dn, dn.toTupleList(), serverctrls, clientctrls)
else:
return IPASimpleLDAPObject.add_ext_s(self, dn, modlist, serverctrls, clientctrls)
class Entry:
"""
This class represents an LDAP Entry object. An LDAP entry consists of
a DN and a list of attributes. Each attribute consists of a name and
a list of values. In python-ldap, entries are returned as a list of
2-tuples. Instance variables:
* dn - DN object - the DN of the entry
* data - CIDict - case insensitive dict of the attributes and values
"""
def __init__(self,entrydata):
"""data is the raw data returned from the python-ldap result method, which is
a search result entry or a reference or None.
If creating a new empty entry, data is the string DN."""
if entrydata:
if isinstance(entrydata,tuple):
self.dn = entrydata[0]
self.data = ipautil.CIDict(entrydata[1])
elif isinstance(entrydata,DN):
self.dn = entrydata
self.data = ipautil.CIDict()
elif isinstance(entrydata, basestring):
self.dn = DN(entrydata)
self.data = ipautil.CIDict()
else:
raise TypeError("entrydata must be 2-tuple, DN, or basestring, got %s" % type(entrydata))
else:
self.dn = DN()
self.data = ipautil.CIDict()
assert isinstance(self.dn, DN)
dn = ipautil.dn_attribute_property('_dn')
def __nonzero__(self):
"""This allows us to do tests like if entry: returns false if there is no data,
true otherwise"""
return self.data != None and len(self.data) > 0
def hasAttr(self,name):
"""Return True if this entry has an attribute named name, False otherwise"""
return self.data and self.data.has_key(name)
def getValues(self,name):
"""Get the list (array) of values for the attribute named name"""
return self.data.get(name)
def getValue(self,name, default=None):
"""Get the first value for the attribute named name"""
value = self.data.get(name, default)
if isinstance(value, (list, tuple)):
return value[0]
return value
def setValue(self, name, *value):
"""
Set a value on this entry.
The value passed in may be a single value, several values, or a
single sequence. For example:
* ent.setValue('name', 'value')
* ent.setValue('name', 'value1', 'value2', ..., 'valueN')
* ent.setValue('name', ['value1', 'value2', ..., 'valueN'])
* ent.setValue('name', ('value1', 'value2', ..., 'valueN'))
Since value is a tuple, we may have to extract a list or tuple from
that tuple as in the last two examples above.
"""
if isinstance(value[0],list) or isinstance(value[0],tuple):
self.data[name] = value[0]
else:
self.data[name] = value
setValues = setValue
def delAttr(self, name):
"""
Entirely remove an attribute of this entry.
"""
if self.hasAttr(name):
del self.data[name]
def toTupleList(self):
"""Convert the attrs and values to a list of 2-tuples. The first element
of the tuple is the attribute name. The second element is either a
single value or a list of values."""
r = []
for i in self.data.iteritems():
n = ipautil.utf8_encode_values(i[1])
r.append((i[0], n))
return r
def toDict(self):
"""Convert the attrs and values to a dict. The dict is keyed on the
attribute name. The value is either single value or a list of values."""
assert isinstance(self.dn, DN)
result = ipautil.CIDict(self.data)
for i in result.keys():
result[i] = ipautil.utf8_encode_values(result[i])
result['dn'] = self.dn
return result
def __str__(self):
"""Convert the Entry to its LDIF representation"""
return self.__repr__()
# the ldif class base64 encodes some attrs which I would rather see in
# raw form - to encode specific attrs as base64, add them to the list below
ldif.safe_string_re = re.compile('^$')
base64_attrs = ['nsstate', 'krbprincipalkey', 'krbExtraData']
def __repr__(self):
"""Convert the Entry to its LDIF representation"""
sio = cStringIO.StringIO()
# what's all this then? the unparse method will currently only accept
# a list or a dict, not a class derived from them. self.data is a
# cidict, so unparse barfs on it. I've filed a bug against python-ldap,
# but in the meantime, we have to convert to a plain old dict for
# printing
# I also don't want to see wrapping, so set the line width really high
# (1000)
newdata = {}
newdata.update(self.data)
ldif.LDIFWriter(sio,Entry.base64_attrs,1000).unparse(str(self.dn),newdata)
return sio.getvalue()
class IPAdmin(IPAEntryLDAPObject):
def __localinit(self):
if self.protocol == 'ldaps':
IPAEntryLDAPObject.__init__(self,'ldaps://%s' % format_netloc(self.host, self.port))
elif self.protocol == 'ldapi':
IPAEntryLDAPObject.__init__(self,'ldapi://%%2fvar%%2frun%%2fslapd-%s.socket' % "-".join(self.realm.split(".")))
elif self.protocol == 'ldap':
IPAEntryLDAPObject.__init__(self,'ldap://%s' % format_netloc(self.host, self.port))
else:
raise ValueError('Protocol %r not supported' % self.protocol)
def __guess_protocol(self):
"""Return the protocol to use based on flags passed to the constructor
Only used when "protocol" is not specified explicitly.
If a CA certificate is provided then it is assumed that we are
doing SSL client authentication with proxy auth.
If a CA certificate is not present then it is assumed that we are
using a forwarded kerberos ticket for SASL auth. SASL provides
its own encryption.
"""
if self.cacert is not None:
return 'ldaps'
elif self.ldapi:
return 'ldapi'
else:
return 'ldap'
def __init__(self, host='', port=389, cacert=None, bindcert=None,
bindkey=None, proxydn=None, debug=None, ldapi=False,
realm=None, protocol=None):
"""We just set our instance variables and wrap the methods - the real
work is done in __localinit. This is separated out this way so
that we can call it from places other than instance creation
e.g. when we just need to reconnect
"""
log_mgr.get_logger(self, True)
if debug and debug.lower() == "on":
ldap.set_option(ldap.OPT_DEBUG_LEVEL,255)
if cacert is not None:
ldap.set_option(ldap.OPT_X_TLS_CACERTFILE,cacert)
if bindcert is not None:
ldap.set_option(ldap.OPT_X_TLS_CERTFILE,bindcert)
if bindkey is not None:
ldap.set_option(ldap.OPT_X_TLS_KEYFILE,bindkey)
self.port = port
self.host = host
self.cacert = cacert
self.bindcert = bindcert
self.bindkey = bindkey
self.proxydn = proxydn
self.ldapi = ldapi
self.realm = realm
self.suffixes = {}
self.protocol = protocol or self.__guess_protocol()
self.__localinit()
def __lateinit(self):
"""
This is executed after the connection is bound to fill in some useful
values.
"""
try:
ent = self.getEntry(DN(('cn', 'config'), ('cn', 'ldbm database'), ('cn', 'plugins'), ('cn', 'config')),
ldap.SCOPE_BASE, '(objectclass=*)',
[ 'nsslapd-directory' ])
self.dbdir = os.path.dirname(ent.getValue('nsslapd-directory'))
except ldap.LDAPError, e:
self.__handle_errors(e)
def __str__(self):
return self.host + ":" + str(self.port)
def __get_server_controls(self):
"""Create the proxy user server control. The control has the form
0x04 = Octet String
4|0x80 sets the length of the string length field at 4 bytes
the struct() gets us the length in bytes of string self.proxydn
self.proxydn is the proxy dn to send"""
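# Note: struct.pack('l', socket.htonl(...)) is meant to emit the 4-byte
# big-endian length field of a long-form BER octet string (on platforms
# with a 4-byte C long it is equivalent to struct.pack('!L', len(self.proxydn))).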
if self.proxydn is not None:
proxydn = chr(0x04) + chr(4|0x80) + struct.pack('l', socket.htonl(len(self.proxydn))) + self.proxydn
# Create the proxy control
sctrl=[]
sctrl.append(LDAPControl('2.16.840.1.113730.3.4.18',True,proxydn))
else:
sctrl=None
return sctrl
def __handle_errors(self, e, **kw):
"""
Centralize error handling in one place.
e is the error to be raised
**kw is an exception-specific list of options
"""
if not isinstance(e,ldap.TIMEOUT):
desc = e.args[0]['desc'].strip()
info = e.args[0].get('info','').strip()
arg_desc = kw.get('arg_desc')
if arg_desc is not None:
info += " arguments: %s" % arg_desc
else:
desc = ''
info = ''
try:
# re-raise the error so we can handle it
raise e
except ldap.NO_SUCH_OBJECT, e:
arg_desc = kw.get('arg_desc', "entry not found")
raise errors.NotFound(reason=arg_desc)
except ldap.ALREADY_EXISTS, e:
raise errors.DuplicateEntry()
except ldap.CONSTRAINT_VIOLATION, e:
# This error gets thrown by the uniqueness plugin
if info.startswith('Another entry with the same attribute value already exists'):
raise errors.DuplicateEntry()
else:
raise errors.DatabaseError(desc=desc,info=info)
except ldap.INSUFFICIENT_ACCESS, e:
raise errors.ACIError(info=info)
except ldap.NO_SUCH_ATTRIBUTE:
# this is raised when a 'delete' attribute isn't found.
# it indicates the previous attribute was removed by another
# update, making the oldentry stale.
raise errors.MidairCollision()
except ldap.ADMINLIMIT_EXCEEDED, e:
raise errors.LimitsExceeded()
except ldap.SIZELIMIT_EXCEEDED, e:
raise errors.LimitsExceeded()
except ldap.TIMELIMIT_EXCEEDED, e:
raise errors.LimitsExceeded()
except ldap.LDAPError, e:
raise errors.DatabaseError(desc=desc,info=info)
def __wait_for_connection(self, timeout):
lurl = ldapurl.LDAPUrl(self.uri)
if lurl.urlscheme == 'ldapi':
wait_for_open_socket(lurl.hostport, timeout)
else:
(host,port) = lurl.hostport.split(':')
wait_for_open_ports(host, int(port), timeout)
def __bind_with_wait(self, bind_func, timeout, *args, **kwargs):
try:
bind_func(*args, **kwargs)
except (ldap.CONNECT_ERROR, ldap.SERVER_DOWN), e:
if not timeout or 'TLS' in e.args[0].get('info', ''):
# No connection to continue on if we have a TLS failure
# https://bugzilla.redhat.com/show_bug.cgi?id=784989
raise e
try:
self.__wait_for_connection(timeout)
except:
raise e
bind_func(*args, **kwargs)
def toLDAPURL(self):
return "ldap://%s/" % format_netloc(self.host, self.port)
def set_proxydn(self, proxydn):
self.proxydn = proxydn
def set_krbccache(self, krbccache, principal):
try:
if krbccache is not None:
os.environ["KRB5CCNAME"] = krbccache
self.sasl_interactive_bind_s(None, SASL_AUTH)
self.principal = principal
self.proxydn = None
except ldap.LDAPError, e:
self.__handle_errors(e)
def do_simple_bind(self, binddn=DN(('cn', 'directory manager')), bindpw="", timeout=DEFAULT_TIMEOUT):
self.binddn = binddn # FIXME, self.binddn & self.bindpwd never referenced.
self.bindpwd = bindpw
self.__bind_with_wait(self.simple_bind_s, timeout, binddn, bindpw)
self.__lateinit()
def do_sasl_gssapi_bind(self, timeout=DEFAULT_TIMEOUT):
self.__bind_with_wait(self.sasl_interactive_bind_s, timeout, None, SASL_AUTH)
self.__lateinit()
def do_external_bind(self, user_name=None, timeout=DEFAULT_TIMEOUT):
auth_tokens = ldap.sasl.external(user_name)
self.__bind_with_wait(self.sasl_interactive_bind_s, timeout, None, auth_tokens)
self.__lateinit()
def getEntry(self, base, scope, filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
"""This wraps the search function. It is common to just get one entry"""
sctrl = self.__get_server_controls()
if sctrl is not None:
self.set_option(ldap.OPT_SERVER_CONTROLS, sctrl)
try:
res = self.search(base, scope, filterstr, attrlist, attrsonly)
objtype, obj = self.result(res)
except ldap.LDAPError, e:
arg_desc = 'base="%s", scope=%s, filterstr="%s"' % (base, scope, filterstr)
self.__handle_errors(e, arg_desc=arg_desc)
if not obj:
arg_desc = 'base="%s", scope=%s, filterstr="%s"' % (base, scope, filterstr)
raise errors.NotFound(reason=arg_desc)
elif isinstance(obj,Entry):
return obj
else: # assume list/tuple
return obj[0]
def getList(self, base, scope, filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
"""This wraps the search function to find multiple entries."""
sctrl = self.__get_server_controls()
if sctrl is not None:
self.set_option(ldap.OPT_SERVER_CONTROLS, sctrl)
try:
res = self.search(base, scope, filterstr, attrlist, attrsonly)
objtype, obj = self.result(res)
except ldap.LDAPError, e:
arg_desc = 'base="%s", scope=%s, filterstr="%s"' % (base, scope, filterstr)
self.__handle_errors(e, arg_desc=arg_desc)
if not obj:
arg_desc = 'base="%s", scope=%s, filterstr="%s"' % (base, scope, filterstr)
raise errors.NotFound(reason=arg_desc)
entries = []
for s in obj:
entries.append(s)
return entries
def getListAsync(self, base, scope, filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
serverctrls=None, clientctrls=None, timeout=-1, sizelimit=0):
"""This version performs an asynchronous search, to allow
results even if we hit a limit.
It returns a list: counter followed by the results.
If the results are truncated, counter will be set to -1.
"""
sctrl = self.__get_server_controls()
if sctrl is not None:
self.set_option(ldap.OPT_SERVER_CONTROLS, sctrl)
entries = []
partial = 0
try:
msgid = self.search_ext(base, scope, filterstr, attrlist, attrsonly,
serverctrls, clientctrls, timeout, sizelimit)
objtype, result_list = self.result(msgid, 0)
while result_list:
for result in result_list:
entries.append(result)
objtype, result_list = self.result(msgid, 0)
except (ldap.ADMINLIMIT_EXCEEDED, ldap.SIZELIMIT_EXCEEDED,
ldap.TIMELIMIT_EXCEEDED), e:
partial = 1
except ldap.LDAPError, e:
arg_desc = 'base="%s", scope=%s, filterstr="%s", timeout=%s, sizelimit=%s' % \
(base, scope, filterstr, timeout, sizelimit)
self.__handle_errors(e, arg_desc=arg_desc)
if not entries:
arg_desc = 'base="%s", scope=%s, filterstr="%s"' % (base, scope, filterstr)
raise errors.NotFound(reason=arg_desc)
if partial == 1:
counter = -1
else:
counter = len(entries)
return [counter] + entries
def addEntry(self, entry):
"""This wraps the add function. It assumes that the entry is already
populated with all of the desired objectclasses and attributes"""
if not isinstance(entry, (Entry, Entity)):
raise TypeError('addEntry expected an Entry or Entity object, got %s instead' % entry.__class__)
sctrl = self.__get_server_controls()
try:
if sctrl is not None:
self.set_option(ldap.OPT_SERVER_CONTROLS, sctrl)
self.add_s(entry.dn, entry.toTupleList())
except ldap.LDAPError, e:
arg_desc = 'entry=%s: %s' % (entry.dn, entry.toTupleList())
self.__handle_errors(e, arg_desc=arg_desc)
return True
def updateRDN(self, dn, newrdn):
"""Wrap the modrdn function."""
assert isinstance(dn, DN)
assert isinstance(newrdn, DN)
sctrl = self.__get_server_controls()
if dn == newrdn:
# no need to report an error
return True
try:
if sctrl is not None:
self.set_option(ldap.OPT_SERVER_CONTROLS, sctrl)
self.modrdn_s(dn, newrdn, delold=1)
except ldap.LDAPError, e:
self.__handle_errors(e)
return True
def updateEntry(self,dn,oldentry,newentry):
"""This wraps the mod function. It assumes that the entry is already
populated with all of the desired objectclasses and attributes"""
assert isinstance(dn, DN)
sctrl = self.__get_server_controls()
modlist = self.generateModList(oldentry, newentry)
if len(modlist) == 0:
raise errors.EmptyModlist
try:
if sctrl is not None:
self.set_option(ldap.OPT_SERVER_CONTROLS, sctrl)
self.modify_s(dn, modlist)
except ldap.LDAPError, e:
self.__handle_errors(e)
return True
def generateModList(self, old_entry, new_entry):
"""A mod list generator that computes more precise modification lists
than the python-ldap version. For single-value attributes always
use a REPLACE operation, otherwise use ADD/DEL.
"""
# Some attributes, like those in cn=config, need to be replaced
# not deleted/added.
FORCE_REPLACE_ON_UPDATE_ATTRS = ('nsslapd-ssl-check-hostname', 'nsslapd-lookthroughlimit', 'nsslapd-idlistscanlimit', 'nsslapd-anonlimitsdn', 'nsslapd-minssf-exclude-rootdse')
modlist = []
old_entry = ipautil.CIDict(old_entry)
new_entry = ipautil.CIDict(new_entry)
keys = set(map(string.lower, old_entry.keys()))
keys.update(map(string.lower, new_entry.keys()))
for key in keys:
new_values = new_entry.get(key, [])
if not isinstance(new_values, (list, tuple)):
new_values = [new_values]
new_values = [value for value in new_values if value is not None]
old_values = old_entry.get(key, [])
if not isinstance(old_values, (list, tuple)):
old_values = [old_values]
old_values = [value for value in old_values if value is not None]
# We used to convert to sets and use difference to calculate
# the changes but this did not preserve order which is important
# particularly for schema
adds = [x for x in new_values if x not in old_values]
removes = [x for x in old_values if x not in new_values]
if len(adds) == 0 and len(removes) == 0:
continue
is_single_value = self.get_single_value(key)
force_replace = False
if key in FORCE_REPLACE_ON_UPDATE_ATTRS or is_single_value:
force_replace = True
# You can't remove schema online. An add will automatically
# replace any existing schema.
if old_entry.get('dn', DN()) == DN(('cn', 'schema')):
if len(adds) > 0:
modlist.append((ldap.MOD_ADD, key, adds))
else:
if adds:
if force_replace:
modlist.append((ldap.MOD_REPLACE, key, adds))
else:
modlist.append((ldap.MOD_ADD, key, adds))
if removes:
if not force_replace:
modlist.append((ldap.MOD_DELETE, key, removes))
return modlist
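# A sketch of the behaviour (assuming 'uidNumber' is single-valued and
# 'description' multi-valued in the schema):
#   old = {'uidNumber': ['1000'], 'description': ['a', 'b']}
#   new = {'uidNumber': ['1001'], 'description': ['a', 'c']}
# yields (MOD_REPLACE, 'uidNumber', ['1001']) for the single-valued key,
# plus (MOD_ADD, 'description', ['c']) and (MOD_DELETE, 'description', ['b'])
# for the multi-valued one.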
def inactivateEntry(self,dn, has_key):
"""Rather than deleting entries we mark them as inactive.
has_key defines whether the entry already has nsAccountlock
set so we can determine which type of mod operation to run."""
assert isinstance(dn, DN)
sctrl = self.__get_server_controls()
modlist=[]
if has_key:
operation = ldap.MOD_REPLACE
else:
operation = ldap.MOD_ADD
modlist.append((operation, "nsAccountlock", "TRUE"))
try:
if sctrl is not None:
self.set_option(ldap.OPT_SERVER_CONTROLS, sctrl)
self.modify_s(dn, modlist)
except ldap.LDAPError, e:
self.__handle_errors(e)
return True
def deleteEntry(self, dn):
"""This wraps the delete function. Use with caution."""
assert isinstance(dn, DN)
sctrl = self.__get_server_controls()
try:
if sctrl is not None:
self.set_option(ldap.OPT_SERVER_CONTROLS, sctrl)
self.delete_s(dn)
except ldap.LDAPError, e:
arg_desc = 'dn=%s' % (dn)
self.__handle_errors(e, arg_desc=arg_desc)
return True
def modifyPassword(self, dn, oldpass, newpass):
"""Set the user password using RFC 3062, LDAP Password Modify Extended
Operation. This ends up calling the IPA password slapi plugin
handler so the Kerberos password gets set properly.
oldpass is not mandatory
"""
assert isinstance(dn, DN)
sctrl = self.__get_server_controls()
try:
if sctrl is not None:
self.set_option(ldap.OPT_SERVER_CONTROLS, sctrl)
self.passwd_s(dn, oldpass, newpass)
except ldap.LDAPError, e:
self.__handle_errors(e)
return True
def waitForEntry(self, dn, timeout=7200, attr='', quiet=True):
scope = ldap.SCOPE_BASE
filter = "(objectclass=*)"
attrlist = []
if attr:
filter = "(%s=*)" % attr
attrlist.append(attr)
timeout += int(time.time())
if isinstance(dn,Entry):
dn = dn.dn
assert isinstance(dn, DN)
# wait for entry and/or attr to show up
if not quiet:
sys.stdout.write("Waiting for %s %s:%s " % (self,dn,attr))
sys.stdout.flush()
entry = None
while not entry and int(time.time()) < timeout:
try:
entry = self.getEntry(dn, scope, filter, attrlist)
except ldap.NO_SUCH_OBJECT:
pass # no entry yet
except ldap.LDAPError, e: # badness
print "\nError reading entry", dn, e
break
if not entry:
if not quiet:
sys.stdout.write(".")
sys.stdout.flush()
time.sleep(1)
if not entry and int(time.time()) > timeout:
print "\nwaitForEntry timeout for %s for %s" % (self,dn)
elif entry and not quiet:
print "\nThe waited for entry is:", entry
elif not entry:
print "\nError: could not read entry %s from %s" % (dn,self)
return entry
def checkTask(self, dn, dowait=False, verbose=False):
"""check task status - task is complete when the nsTaskExitCode attr
is set return a 2 tuple (true/false,code) first is false if task is
running, true if done - if true, second is the exit code - if dowait
is True, this function will block until the task is complete
"""
assert isinstance(dn, DN)
attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode', 'nsTaskCurrentItem', 'nsTaskTotalItems']
done = False
exitCode = 0
while not done:
try:
entry = self.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)", attrlist)
except errors.NotFound:
break
if verbose:
print entry
if entry.getValue('nsTaskExitCode'):
exitCode = int(entry.getValue('nsTaskExitCode'))
done = True
if dowait: time.sleep(1)
else: break
return (done, exitCode)
def get_single_value(self, attr):
"""
Check the schema to see if the attribute is single-valued.
If the attribute is in the schema then returns True/False
If there is a problem loading the schema or the attribute is
not in the schema return None
"""
obj = self.schema.get_obj(ldap.schema.AttributeType, attr)
return obj and obj.single_value
def get_dns_sorted_by_length(self, entries, reverse=False):
"""
Sorts a list of entries [(dn, entry_attrs)] based on their DN.
Entries within the same node are not sorted in any meaningful way.
If reverse is set to True, leaf entries are returned first. This is
useful for recursive deletes, where you need to delete entries
starting from the leaves and only delete a node once all of its
leaves are removed.
Returns a list of lists of DNs. Every DN in an inner list has
the same number of RDNs, and the outer list is sorted according
to the number of RDNs in each inner list.
Example:
[['cn=bob', 'cn=tom'], ['cn=bob,ou=people', 'cn=tom,ou=people']]
DNs in list[0] have 1 RDN
DNs in list[1] have 2 RDNs
"""
res = dict()
for e in entries:
dn = e.dn
assert isinstance(dn, DN)
rdn_count = len(dn)
rdn_count_list = res.setdefault(rdn_count, [])
if dn not in rdn_count_list:
rdn_count_list.append(dn)
keys = res.keys()
keys.sort(reverse=reverse)
return map(res.get, keys)
|
hatchetation/freeipa
|
ipaserver/ipaldap.py
|
Python
|
gpl-3.0
| 29,998
|
import sys
sys.path.append('flsk_app/malayalam_lesk/classes/')
sys.path.append('classes/')
import mysqldaccess as _m_acc
class mysqldbwordnet:
def __init__(self,HOST=None,USER=None,PASSWORD=None,DB=None):
self._ml_access_obj = _m_acc.DbAccess()
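# NOTE: the HOST/USER/PASSWORD/DB constructor arguments above are currently
# unused, and the queries below interpolate raw strings directly into SQL.
# If the DbAccess API (not shown here) supports placeholders, parameterized
# queries would avoid SQL injection.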
def getIdFromSense(self,sense):
qr = "SELECT word_id FROM sense_table WHERE sense ='"+sense+"'"
rows =self._ml_access_obj.selectDB(qr,"ERROR ON ID SELECTION")
return rows[0]
def getDefinitions(self,word):
qr = "SELECT sense FROM sense_table WHERE word ='"+word+"'"
rows=self._ml_access_obj.selectDB(qr,"ERROR ON SELECTION !@#")
return rows
|
omrehman/padam
|
flsk_app/malayalam_lesk/MalayalamWordnet.py
|
Python
|
gpl-3.0
| 692
|
# coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: https://sickrage.github.io
# Git: https://github.com/SickRage/SickRage.git
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
"""
Custom Logger for SickRage
"""
from __future__ import unicode_literals
import io
import locale
import logging
import logging.handlers
import os
import platform
import re
import sys
import threading
import traceback
from logging import NullHandler
from urllib import quote
from github import InputFileContent
import sickbeard
from sickbeard import classes
from sickrage.helper.common import dateTimeFormat
from sickrage.helper.encoding import ek, ss
from sickrage.helper.exceptions import ex
# pylint: disable=line-too-long
# log levels
ERROR = logging.ERROR
WARNING = logging.WARNING
INFO = logging.INFO
DEBUG = logging.DEBUG
DB = 5
LOGGING_LEVELS = {
'ERROR': ERROR,
'WARNING': WARNING,
'INFO': INFO,
'DEBUG': DEBUG,
'DB': DB,
}
censored_items = {} # pylint: disable=invalid-name
class CensoredFormatter(logging.Formatter, object):
"""
Censor information such as API keys, user names, and passwords from the log
"""
def __init__(self, fmt=None, datefmt=None, encoding='utf-8'):
super(CensoredFormatter, self).__init__(fmt, datefmt)
self.encoding = encoding
def format(self, record):
"""
Strips censored items from the formatted string
:param record: the log record to censor
"""
msg = super(CensoredFormatter, self).format(record)
if not isinstance(msg, unicode):
msg = msg.decode(self.encoding, 'replace') # Convert to unicode
# set of censored items
censored = {item for _, item in censored_items.iteritems() if item}
# set of censored items and urlencoded counterparts
censored = censored | {quote(item) for item in censored}
# convert set items to unicode and typecast to list
censored = list({
item.decode(self.encoding, 'replace')
if not isinstance(item, unicode) else item
for item in censored
})
# sort the list in order of descending length so that entire item is censored
# e.g. password and password_1 both get censored instead of getting ********_1
censored.sort(key=len, reverse=True)
for item in censored:
msg = msg.replace(item, len(item) * '*')
# Needed because Newznab apikey isn't stored as key=value in a section.
msg = re.sub(r'([&?]r|[&?]apikey|[&?]api_key)(?:=|%3D)[^&]*([&\w]?)', r'\1=**********\2', msg, flags=re.I)
return msg
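# Illustrative example (assumed values): with censored_items containing
# {'git_password': 'hunter2'}, a record formatted as "login hunter2 ok"
# comes out as "login ******* ok"; the urlencoded form is masked as well.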
class Logger(object): # pylint: disable=too-many-instance-attributes
"""
Logger to create log entries
"""
def __init__(self):
self.logger = logging.getLogger('sickrage')
self.loggers = [
logging.getLogger('sickrage'),
logging.getLogger('tornado.general'),
logging.getLogger('tornado.application'),
# logging.getLogger('subliminal'),
# logging.getLogger('tornado.access'),
# logging.getLogger('tvdb_api'),
# logging.getLogger("requests.packages.urllib3")
]
self.console_logging = False
self.file_logging = False
self.debug_logging = False
self.database_logging = False
self.log_file = None
self.submitter_running = False
def init_logging(self, console_logging=False, file_logging=False, debug_logging=False, database_logging=False):
"""
Initialize logging
:param console_logging: True if logging to console
:param file_logging: True if logging to file
:param debug_logging: True if debug logging is enabled
:param database_logging: True if logging database access
"""
self.log_file = self.log_file or ek(os.path.join, sickbeard.LOG_DIR, 'sickrage.log')
self.debug_logging = debug_logging
self.console_logging = console_logging
self.file_logging = file_logging
self.database_logging = database_logging
logging.addLevelName(DB, 'DB') # add a new logging level DB
logging.getLogger().addHandler(NullHandler()) # nullify root logger
# set custom root logger
for logger in self.loggers:
if logger is not self.logger:
logger.root = self.logger
logger.parent = self.logger
log_level = DB if self.database_logging else DEBUG if self.debug_logging else INFO
# set minimum logging level allowed for loggers
for logger in self.loggers:
logger.setLevel(log_level)
logging.getLogger("tornado.general").setLevel('ERROR')
# console log handler
if self.console_logging:
console = logging.StreamHandler()
console.setFormatter(CensoredFormatter('%(asctime)s %(levelname)s::%(message)s', '%H:%M:%S'))
console.setLevel(log_level)
for logger in self.loggers:
logger.addHandler(console)
# rotating log file handler
if self.file_logging:
rfh = logging.handlers.RotatingFileHandler(self.log_file, maxBytes=int(sickbeard.LOG_SIZE * 1048576), backupCount=sickbeard.LOG_NR, encoding='utf-8')
rfh.setFormatter(CensoredFormatter('%(asctime)s %(levelname)-8s %(message)s', dateTimeFormat))
rfh.setLevel(log_level)
for logger in self.loggers:
logger.addHandler(rfh)
def set_level(self):
self.debug_logging = sickbeard.DEBUG
self.database_logging = sickbeard.DBDEBUG
level = DB if self.database_logging else DEBUG if self.debug_logging else INFO
for logger in self.loggers:
logger.setLevel(level)
for handler in logger.handlers:
handler.setLevel(level)
@staticmethod
def shutdown():
"""
Shut down the logger
"""
logging.shutdown()
def log(self, msg, level=INFO, *args, **kwargs):
"""
Create log entry
:param msg: the message to log
:param level: the log level, e.g. DEBUG, INFO, etc.
:param args: positional arguments to pass to the logger
:param kwargs: keyword arguments to pass to the logger
"""
cur_thread = threading.currentThread().getName()
cur_hash = ''
if level == ERROR and sickbeard.CUR_COMMIT_HASH and len(sickbeard.CUR_COMMIT_HASH) > 6:
cur_hash = '[{0}] '.format(
sickbeard.CUR_COMMIT_HASH[:7]
)
message = '{thread} :: {hash}{message}'.format(
thread=cur_thread, hash=cur_hash, message=msg)
# Change the SSL error to a warning with a link to information about how to fix it.
# Check for u'error [SSL: SSLV3_ALERT_HANDSHAKE_FAILURE] sslv3 alert handshake failure (_ssl.c:590)'
ssl_errors = [
r'error \[Errno \d+\] _ssl.c:\d+: error:\d+\s*:SSL routines:SSL23_GET_SERVER_HELLO:tlsv1 alert internal error',
r'error \[SSL: SSLV3_ALERT_HANDSHAKE_FAILURE\] sslv3 alert handshake failure \(_ssl\.c:\d+\)',
]
for ssl_error in ssl_errors:
check = re.sub(ssl_error, 'See: http://git.io/vuU5V', message)
if check != message:
message = check
level = WARNING
if level == ERROR:
classes.ErrorViewer.add(classes.UIError(message))
elif level == WARNING:
classes.WarningViewer.add(classes.UIError(message))
if level == ERROR:
self.logger.exception(message, *args, **kwargs)
else:
self.logger.log(level, message, *args, **kwargs)
def log_error_and_exit(self, error_msg, *args, **kwargs):
self.log(error_msg, ERROR, *args, **kwargs)
if not self.console_logging:
sys.exit(error_msg.encode(sickbeard.SYS_ENCODING, 'xmlcharrefreplace'))
else:
sys.exit(1)
def submit_errors(self): # pylint: disable=too-many-branches,too-many-locals
submitter_result = ''
issue_id = None
if not all((sickbeard.GIT_USERNAME, sickbeard.GIT_PASSWORD, sickbeard.DEBUG, sickbeard.gh, classes.ErrorViewer.errors)):
submitter_result = 'Please set your GitHub username and password in the config and enable debug. Unable to submit issue ticket to GitHub!'
return submitter_result, issue_id
try:
from sickbeard.versionChecker import CheckVersion
checkversion = CheckVersion()
checkversion.check_for_new_version()
commits_behind = checkversion.updater.get_num_commits_behind()
except Exception: # pylint: disable=broad-except
submitter_result = 'Could not check if your SickRage is updated, unable to submit issue ticket to GitHub!'
return submitter_result, issue_id
if commits_behind is None or commits_behind > 0:
submitter_result = 'Please update SickRage, unable to submit issue ticket to GitHub with an outdated version!'
return submitter_result, issue_id
if self.submitter_running:
submitter_result = 'Issue submitter is running, please wait for it to complete'
return submitter_result, issue_id
self.submitter_running = True
try:
# read log file
log_data = None
if ek(os.path.isfile, self.log_file):
with io.open(self.log_file, encoding='utf-8') as log_f:
log_data = log_f.readlines()
for i in range(1, int(sickbeard.LOG_NR)):
f_name = '{0}.{1:d}'.format(self.log_file, i)
if ek(os.path.isfile, f_name) and (len(log_data) <= 500):
with io.open(f_name, encoding='utf-8') as log_f:
log_data += log_f.readlines()
log_data = [line for line in reversed(log_data)]
# parse and submit errors to issue tracker
for cur_error in sorted(classes.ErrorViewer.errors, key=lambda error: error.time, reverse=True)[:500]:
try:
title_error = ss(str(cur_error.title))
if not title_error or title_error == 'None':
title_error = re.match(r'^[A-Z0-9\-\[\] :]+::\s*(.*)(?: \[[\w]{7}\])$', ss(cur_error.message)).group(1)
if len(title_error) > 1000:
title_error = title_error[0:1000]
except Exception as err_msg: # pylint: disable=broad-except
self.log('Unable to get error title : {0}'.format(ex(err_msg)), ERROR)
title_error = 'UNKNOWN'
gist = None
regex = r'^({0})\s+([A-Z]+)\s+([0-9A-Z\-]+)\s*(.*)(?: \[[\w]{{7}}\])$'.format(cur_error.time)
for i, data in enumerate(log_data):
match = re.match(regex, data)
if match:
level = match.group(2)
if LOGGING_LEVELS[level] == ERROR:
paste_data = ''.join(log_data[i:i + 50])
if paste_data:
gist = sickbeard.gh.get_user().create_gist(False, {'sickrage.log': InputFileContent(paste_data)})
break
else:
gist = 'No ERROR found'
try:
locale_name = locale.getdefaultlocale()[1]
except Exception: # pylint: disable=broad-except
locale_name = 'unknown'
if gist and gist != 'No ERROR found':
log_link = 'Link to Log: {0}'.format(gist.html_url)
else:
log_link = 'No Log available with ERRORS:'
msg = [
'### INFO',
'Python Version: **{0}**'.format(sys.version[:120].replace('\n', '')),
'Operating System: **{0}**'.format(platform.platform()),
'Locale: {0}'.format(locale_name),
'Branch: **{0}**'.format(sickbeard.BRANCH),
'Commit: SickRage/SickRage@{0}'.format(sickbeard.CUR_COMMIT_HASH),
log_link,
'### ERROR',
'```',
cur_error.message,
'```',
'---',
'_STAFF NOTIFIED_: @SickRage/owners @SickRage/moderators',
]
message = '\n'.join(msg)
title_error = '[APP SUBMITTED]: {0}'.format(title_error)
repo = sickbeard.gh.get_organization(sickbeard.GIT_ORG).get_repo(sickbeard.GIT_REPO)
reports = repo.get_issues(state='all')
def is_ascii_error(title):
# [APP SUBMITTED]: 'ascii' codec can't encode characters in position 00-00: ordinal not in range(128)
# [APP SUBMITTED]: 'charmap' codec can't decode byte 0x00 in position 00: character maps to <undefined>
return re.search(r'.* codec can\'t .*code .* in position .*:', title) is not None
def is_malformed_error(title):
# [APP SUBMITTED]: not well-formed (invalid token): line 0, column 0
return re.search(r'.* not well-formed \(invalid token\): line .* column .*', title) is not None
ascii_error = is_ascii_error(title_error)
malformed_error = is_malformed_error(title_error)
issue_found = False
for report in reports:
if title_error.rsplit(' :: ')[-1] in report.title or \
(malformed_error and is_malformed_error(report.title)) or \
(ascii_error and is_ascii_error(report.title)):
issue_id = report.number
if not report.raw_data['locked']:
if report.create_comment(message):
submitter_result = 'Commented on existing issue #{0} successfully!'.format(issue_id)
else:
submitter_result = 'Failed to comment on found issue #{0}!'.format(issue_id)
else:
submitter_result = 'Issue #{0} is locked, check GitHub to find info about the error.'.format(issue_id)
issue_found = True
break
if not issue_found:
issue = repo.create_issue(title_error, message)
if issue:
issue_id = issue.number
submitter_result = 'Your issue ticket #{0} was submitted successfully!'.format(issue_id)
else:
submitter_result = 'Failed to create a new issue!'
if issue_id and cur_error in classes.ErrorViewer.errors:
# clear error from error list
classes.ErrorViewer.errors.remove(cur_error)
except Exception: # pylint: disable=broad-except
self.log(traceback.format_exc(), ERROR)
submitter_result = 'Exception generated in issue submitter, please check the log'
issue_id = None
finally:
self.submitter_running = False
return submitter_result, issue_id
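# Sketch of the flow above: each recent ERROR in ErrorViewer gets a gist of
# ~50 surrounding log lines, then either a comment on a matching existing
# issue or a brand-new issue; (submitter_result, issue_id) reports the outcome.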
# pylint: disable=too-few-public-methods
class Wrapper(object):
instance = Logger()
def __init__(self, wrapped):
self.wrapped = wrapped
def __getattr__(self, name):
try:
return getattr(self.wrapped, name)
except AttributeError:
return getattr(self.instance, name)
_globals = sys.modules[__name__] = Wrapper(sys.modules[__name__]) # pylint: disable=invalid-name
def log(*args, **kwargs):
return Wrapper.instance.log(*args, **kwargs)
|
jackkiej/SickRage
|
sickbeard/logger.py
|
Python
|
gpl-3.0
| 16,593
|
# https://www.codewars.com/kata/tribonacci-sequence/train/python
def tribonacci(signature, n):
# Print arguments
print('signature = {}'.format(signature))
print('n = {}'.format(n))
# The final tribonacci sequence. Start with the provided signature.
# If n is less than 3, then [:n] will shorten it
tribonacci = signature[:n]
# Extend the tribonacci sequence (the loop only runs when n >= 4)
for i in xrange(3, n):
next_value = tribonacci[i-3] + tribonacci[i-2] + tribonacci[i-1]
tribonacci.append(next_value)
# Display the final value
print('tribonacci = {}'.format(tribonacci))
return tribonacci
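# Example run (standard kata sample):
# tribonacci([1, 1, 1], 10) -> [1, 1, 1, 3, 5, 9, 17, 31, 57, 105]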
|
pcampese/codewars
|
tribonacci.py
|
Python
|
gpl-3.0
| 591
|
from flask import render_template, Response
import json
from valar import app
from valar.utils import get_summaries, get_devices
@app.route('/')
def index():
return render_template("index.html",
title = 'Valar',
)
@app.route('/miner')
def get_miners():
results = []
sums = get_summaries()
devs = get_devices()
for k, v in sums.iteritems():
if k != "err":
data = dict(
[('name', k)] + \
v['STATUS'][0].items() + \
v['SUMMARY'][0].items()
)
if k in devs:
data['devices'] = devs[k]
results.append(data)
return Response(json.dumps(results), mimetype='application/json')
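# Sketch of the response shape (field names depend on the miner API behind
# get_summaries/get_devices; cgminer-style names assumed here):
# GET /miner -> [{"name": "rig1", ...STATUS[0] fields..., ...SUMMARY[0]
# fields..., "devices": [...]}, ...]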
|
BoulderBTC/valar
|
valar/views.py
|
Python
|
gpl-3.0
| 723
|
#@+leo-ver=5-thin
#@+node:2014fall.20141212095015.1775: * @file wsgi.py
# coding=utf-8
# The encoding declaration above must be on the first or second line of the file to take effect
################# (1) Module imports
# Import the cherrypy module; to use cherrypy on the OpenShift platform it must be installed via setup.py
#@@language python
#@@tabwidth -4
#@+<<declarations>>
#@+node:2014fall.20141212095015.1776: ** <<declarations>> (wsgi)
import cherrypy
# Import Python's built-in os module; since os is built into Python, no setup.py installation is needed
import os
# Import the random module
import random
################# (2) Global variable settings
# Determine the directory containing this program file (on Windows it ends with a trailing backslash)
_curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
# Set the data storage directories for cloud and local execution
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
# The program is running in the cloud
download_root_dir = os.environ['OPENSHIFT_DATA_DIR']
data_dir = os.environ['OPENSHIFT_DATA_DIR']
else:
# The program is running locally
download_root_dir = _curdir + "/local_data/"
data_dir = _curdir + "/local_data/"
'''The following is local input()/for-loop example code; to run it on OpenShift it must use the CherryPy web framework and render the output as HTML
# Data obtained with input() is of type string
toprint = input("What content should be printed?")
# To use the string obtained from input() as an integer, convert it with int()
repeat_no = int(input("How many times should it be printed?"))
for i in range(repeat_no):
print(toprint)
'''
#@-<<declarations>>
#@+others
#@+node:2014fall.20141212095015.1777: ** class Hello
################# (3) Class definitions
# From here on the program uses the CherryPy web framework structure
# Below is the design of the Hello class; using object means Hello inherits all of object's features, including its methods and attributes
class Hello(object):
# Startup configuration for the Hello class
_cp_config = {
'tools.encode.encoding': 'utf-8',
'tools.sessions.on' : True,
'tools.sessions.storage_type' : 'file',
#'tools.sessions.locking' : 'explicit',
# Sessions are stored as files, in the tmp directory under data_dir
'tools.sessions.storage_path' : data_dir+'/tmp',
# Session lifetime is set to 60 minutes
'tools.sessions.timeout' : 60
}
#@+others
#@+node:2014fall.20141212095015.2004: *3* __init__
def __init__(self):
# Create the required directories when the instance is initialized
if not os.path.isdir(data_dir+'/tmp'):
os.mkdir(data_dir+'/tmp')
if not os.path.isdir(data_dir+"/downloads"):
os.mkdir(data_dir+"/downloads")
if not os.path.isdir(data_dir+"/images"):
os.mkdir(data_dir+"/images")
#@+node:2014fall.20141212095015.1778: *3* index_orig
# cherrypy.expose (used with a leading @) is a decorator marking the following member method as directly executable by the user via a URL
@cherrypy.expose
# index is the built-in (default) method among a CherryPy class's member methods; when the user does not specify a method, index is executed first
# Methods taking self are member methods of the class; Python passes the object's contents between member methods through this self
def index_orig(self, toprint="Hello World!"):
return toprint
#@+node:2014fall.20141212095015.1779: *3* hello
@cherrypy.expose
def hello(self, toprint="Hello World!"):
return toprint
#@+node:2014fall.20141215194146.1791: *3* index
@cherrypy.expose
def index(self, guess=None):
# Store the correct answer in the 'answer' session slot
theanswer = random.randint(1, 100)
thecount = 0
# Store the answer and the guess-count variables in session variables
cherrypy.session['answer'] = theanswer
cherrypy.session['count'] = thecount
# Print the hypertext form for user input
outstring = '''
Group 7 - 40223137 - 黃柏學
'''
return outstring
#@+node:2014fall.20141215194146.1793: *3* doCheck
@cherrypy.expose
def doCheck(self, guess=None):
# If the user runs doCheck directly, redirect back to the root method
if guess is None:
raise cherrypy.HTTPRedirect("/")
# Fetch the answer from the session, and handle the case where no session value is available because doCheck was run directly
try:
theanswer = int(cherrypy.session.get('answer'))
except:
raise cherrypy.HTTPRedirect("/")
# The guess value obtained from the form is of type string
try:
theguess = int(guess)
except:
return "error " + self.guessform()
# Each time doCheck is executed, increment the count once
cherrypy.session['count'] += 1
# Compare the answer with the guessed number
if theanswer < theguess:
return "big " + self.guessform()
elif theanswer > theguess:
return "small " + self.guessform()
else:
# Guessed correctly; fetch the accumulated number of guesses from the session
thecount = cherrypy.session.get('count')
return "exact: <a href=''>再猜</a>"
#@+node:2014fall.20141215194146.1789: *3* guessform
def guessform(self):
# Print the hypertext form for user input
outstring = str(cherrypy.session.get('answer')) + "/" + str(cherrypy.session.get('count')) + '''<form method=POST action=doCheck>
Please enter the integer you are guessing:<input type=text name=guess><br />
<input type=submit value=send>
</form>'''
return outstring
#@+node:2015.20150408193313.1717: *3* drawspur
@cherrypy.expose
def drawspur(self):
# Print the hypertext form for user input
outstring = '''<form method=POST action=BEN>
M:<input type=text name="a"><br />
T:<input type=text name="b"><br />
P:<input type=text name="c"><br />
<input type=submit value=send>
</form>'''
return outstring
#@+node:2015.20150408193313.1714: *3* BEN
@cherrypy.expose
def BEN(self,a, b, c):
# Print the hypertext form for user input
a = float(a)
b = float(b)
c = float(c)
total = a*b/2
# return str(total)
return '''<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- load brython.js -->
<script type="text/javascript" src="/static/Brython3.1.0-20150301-090019/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- start brython() -->
<body onload="brython()">
<hr>
<!-- the Brython program embedded in the page follows -->
<script type="text/python">
from browser import document
from math import *
# 準備在 id="plotarea" 的 canvas 中繪圖
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
def create_line(x1, y1, x2, y2, width=3, fill="red"):
ctx.beginPath()
ctx.lineWidth = width
ctx.moveTo(x1, y1)
ctx.lineTo(x2, y2)
ctx.strokeStyle = fill
ctx.stroke()
# With the math functions imported, pi is the circle constant
# deg is the conversion factor from degrees to radians
deg = pi/180.
#
# Below are the spur gear drawing and the main tkinter canvas drawing, respectively
#
# Define a drawing function that draws a spur gear
# midx is the x coordinate of the gear center
# midy is the y coordinate of the gear center
# rp is the pitch circle radius, n the number of teeth
def gear(midx, midy, rp, n, 顏色):
# Make the angle conversion factor a global variable
global deg
# The gear involute is drawn as 15 line segments
imax = 15
# Draw a straight line on the given canvas, from the center to the top of the pitch circle on the y axis
create_line(midx, midy, midx, midy-rp)
# Draw the rp circle (the circle-drawing function is not defined yet)
#create_oval(midx-rp, midy-rp, midx+rp, midy+rp, width=2)
# a is the module (the metric measure of tooth size); the module is the pitch diameter divided by the number of teeth
# the module is also the addendum size
a=2*rp/n
# d is the dedendum, 1.157 or 1.25 times the module; 1.25 is used here
d=2.5*rp/n
# ra is the outside radius of the gear
ra=rp+a
print("ra:", ra)
# Draw the ra circle (the circle-drawing function is not defined yet)
#create_oval(midx-ra, midy-ra, midx+ra, midy+ra, width=1)
# rb is the base circle radius of the gear
# the base circle is the reference circle from which the involute teeth grow
rb=rp*cos(20*deg)
print("rp:", rp)
print("rb:", rb)
# Draw the rb (base) circle (the circle-drawing function is not defined yet)
#create_oval(midx-rb, midy-rb, midx+rb, midy+rb, width=1)
# rd is the root circle radius
rd=rp-d
# when rd is greater than rb
print("rd:", rd)
# Draw the rd (root) circle (the circle-drawing function is not defined yet)
#create_oval(midx-rd, midy-rd, midx+rd, midy+rd, width=1)
# dr is the radial increment per segment when the span from base circle to addendum circle is divided into imax segments
# The arc is split into imax segments to draw the involute
dr=(ra-rb)/imax
# tan(20*deg)-20*deg is the involute function
sigma=pi/(2*n)+tan(20*deg)-20*deg
for j in range(n):
ang=-2.*j*pi/n+sigma
ang2=2.*j*pi/n+sigma
lxd=midx+rd*sin(ang2-2.*pi/n)
lyd=midy-rd*cos(ang2-2.*pi/n)
#for(i=0;i<=imax;i++):
for i in range(imax+1):
r=rb+i*dr
theta=sqrt((r*r)/(rb*rb)-1.)
alpha=theta-atan(theta)
xpt=r*sin(alpha-ang)
ypt=r*cos(alpha-ang)
xd=rd*sin(-ang)
yd=rd*cos(-ang)
# When i=0, the line starts from the point on the root circle
if(i==0):
last_x = midx+xd
last_y = midy-yd
# Starting from the left root circle: except for the first point (xd,yd) on the root circle, the (xpt,ypt) points are the segment points on the involute
create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色)
# The last point lies on the addendum circle
if(i==imax):
lfx=midx+xpt
lfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
# lxd is the left-side x coordinate on the root circle, lyd the y coordinate
# The following line approximates the arc on the root circle
create_line((lxd),(lyd),(midx+xd),(midy-yd),fill=顏色)
#for(i=0;i<=imax;i++):
for i in range(imax+1):
r=rb+i*dr
theta=sqrt((r*r)/(rb*rb)-1.)
alpha=theta-atan(theta)
xpt=r*sin(ang2-alpha)
ypt=r*cos(ang2-alpha)
xd=rd*sin(ang2)
yd=rd*cos(ang2)
# When i=0, the line starts from the point on the root circle
if(i==0):
last_x = midx+xd
last_y = midy-yd
# Starting from the right root circle: except for the first point (xd,yd) on the root circle, the (xpt,ypt) points are the segment points on the involute
create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色)
# The last point lies on the addendum circle
if(i==imax):
rfx=midx+xpt
rfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
# lfx is the left-side x coordinate on the addendum circle, lfy the y coordinate
# The following line approximates the arc on the addendum circle
create_line(lfx,lfy,rfx,rfy,fill=顏色)
gear(400,400,''' + str(total) + ',' + str(b) + ''',"blue")
</script>
<canvas id="plotarea" width="800" height="600"></canvas>
</body>
</html>
'''
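# A quick numeric check of the involute function used in gear() above
# (illustrative, not from the original author): inv(a) = tan(a) - a, so at
# the 20-degree pressure angle inv(20*deg) = tan(0.349066) - 0.349066,
# roughly 0.014904 radians.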
#@+node:2015.20150427142146.1645: *3* spur
@cherrypy.expose
def spur(self):
# Print the hypertext form for user input
outstring = '''<form method=POST action=lit>
M:<input type=text name="a"><br />
T:<input type=text name="b"><br />
P:<input type=text name="c"><br />
<input type=submit value=send>
</form>'''
return outstring
#@+node:2015.20150427142146.1646: *3* lit
@cherrypy.expose
def lit(self,a, b, c):
# Return the entered values to the user
return 'module ' + a + ' teeth ' + b + ' pressure angle ' + c
#@+node:2015.20150422191956.1844: *3* three
@cherrypy.expose
def three(self):
# Print the hypertext form for user input
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- load brython.js -->
<script type="text/javascript" src="/static/Brython3.1.0-20150301-090019/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- start brython() -->
<body onload="brython()">
<!-- canvas drawing program follows -->
<script type="text/python">
# Import document from browser
from browser import document
from math import *
# Note: this imports the spur.py file located under the Lib/site-packages directory
import spur
# 準備在 id="plotarea" 的 canvas 中繪圖
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
# The drawing below uses the spur.py program; the collaborative-design computation that follows must match the user's requirements for design calculation and drawing
# The work is split among the other team members to build part-drawing modules similar to spur.py
# midx, midy are the gear center coordinates, rp the pitch circle radius, n the number of teeth, pa the pressure angle, color the line color
# Gear(midx, midy, rp, n=20, pa=20, color="black"):
# The module determines tooth size; meshing gears must share the same module and pressure angle
# The pressure angle pa is in degrees
pa = 20
# m is the module
m = 20
# Teeth count of gear 1
n_g1 = 17
# Teeth count of gear 2
n_g2 = 11
# Teeth count of gear 3
n_g3 = 13
# Compute the pitch circle radii of the gears
rp_g1 = m*n_g1/2
rp_g2 = m*n_g2/2
rp_g3 = m*n_g3/2
# Center coordinates for drawing gear 1
x_g1 = 400
y_g1 = 400
# Center coordinates of gear 2; the gears are assumed to be laid out horizontally, i.e. all centers share the same y coordinate
x_g2 = x_g1 + rp_g1 + rp_g2
y_g2 = y_g1
# Center coordinates of gear 3
x_g3 = x_g1 + rp_g1 + 2*rp_g2 + rp_g3
y_g3 = y_g1
# Rotate gear 1 clockwise by 90 degrees
# Use ctx.save() and ctx.restore() so that each gear is rotated and drawn in its own relative coordinates
ctx.save()
# translate to the origin of second gear
ctx.translate(x_g1, y_g1)
# rotate to engage
ctx.rotate(pi/2)
# put it back
ctx.translate(-x_g1, -y_g1)
spur.Spur(ctx).Gear(x_g1, y_g1, rp_g1, n_g1, pa, "blue")
ctx.restore()
# Rotate gear 2 counterclockwise by 90 degrees and then one extra tooth so it meshes with gear 1
ctx.save()
# translate to the origin of second gear
ctx.translate(x_g2, y_g2)
# rotate to engage
ctx.rotate(-pi/2-pi/n_g2)
# put it back
ctx.translate(-x_g2, -y_g2)
spur.Spur(ctx).Gear(x_g2, y_g2, rp_g2, n_g2, pa, "black")
ctx.restore()
# Rotate gear 3 counterclockwise by 90 degrees, turn back by the angle gear 2's positioning drives it, then one more tooth counterclockwise so it meshes with gear 2
ctx.save()
# translate to the origin of second gear
ctx.translate(x_g3, y_g3)
# rotate to engage
# pi+pi/n_g2 is the angle gear 2 has turned, relative to the current marker line, after gear 1 turned 90 degrees clockwise; to convert it to gear 3's rotation
# it must be multiplied by the ratio of the tooth counts -- if gear 2 is larger, gear 3 turns faster
# The first -pi/2 rotates gear 3's originally vertical positioning line 90 degrees counterclockwise
# -pi/n_g3 is the extra one-tooth counterclockwise turn needed, after the positioning lines of gears 3 and 2 coincide, for them to mesh
# (pi+pi/n_g2)*n_g2/n_g3: gear 2's original positioning line was turned 90 degrees clockwise,
# but to mesh with gear 1, gear 2 has moved past its positioning line by 180 degrees plus one tooth angle of gear 2; since it must drive gear 3's positioning,
# this correction angle has to be converted to gear 3's rotation using the speed ratio of gears 2 and 3, hence the factor n_g2/n_g3
ctx.rotate(-pi/2-pi/n_g3+(pi+pi/n_g2)*n_g2/n_g3)
# put it back
ctx.translate(-x_g3, -y_g3)
spur.Spur(ctx).Gear(x_g3, y_g3, rp_g3, n_g3, pa, "red")
ctx.restore()
# Following the meshing-angle computation of the three spur gears above, the rotation of subsequent gears follows analogously to complete a six-gear meshing drawing
</script>
<canvas id="plotarea" width="1200" height="1200"></canvas>
</body>
</html>
'''
return outstring
#@+node:2015.20150422191956.1845: *3* four
@cherrypy.expose
def four(self):
# Print the hypertext form for user input
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- load brython.js -->
<script type="text/javascript" src="/static/Brython3.1.0-20150301-090019/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- start brython() -->
<body onload="brython()">
<!-- canvas drawing program follows -->
<script type="text/python">
# Import document from browser
from browser import document
from math import *
# Note: this imports the spur.py file located under the Lib/site-packages directory
import spur
# 準備在 id="plotarea" 的 canvas 中繪圖
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
# The drawing below uses the spur.py program; the collaborative-design computation that follows must match the user's requirements for design calculation and drawing
# The work is split among the other team members to build part-drawing modules similar to spur.py
# midx, midy are the gear center coordinates, rp the pitch circle radius, n the number of teeth, pa the pressure angle, color the line color
# Gear(midx, midy, rp, n=20, pa=20, color="black"):
# The module determines tooth size; meshing gears must share the same module and pressure angle
# The pressure angle pa is in degrees
pa = 20
# m is the module
m = 20
# Teeth count of gear 1
n_g1 = 17
# Teeth count of gear 2
n_g2 = 11
# Teeth count of gear 3
n_g3 = 13
# Teeth count of gear 4
n_g4 = 11
# Teeth count of gear 5
n_g5 = 11
# Teeth count of gear 6
n_g6 = 13
# Compute the pitch circle radii of the gears
rp_g1 = m*n_g1/2
rp_g2 = m*n_g2/2
rp_g3 = m*n_g3/2
rp_g4 = m*n_g4/2
rp_g5 = m*n_g5/2
rp_g6 = m*n_g6/2
# Center coordinates for drawing gear 1
x_g1 = 200
y_g1 = 400
# Center coordinates of gear 2; the gears are assumed to be laid out horizontally, i.e. all centers share the same y coordinate
x_g2 = x_g1 + rp_g1 + rp_g2
y_g2 = y_g1
# Center coordinates of gear 3
x_g3 = x_g1 + rp_g1 + 2*rp_g2 + rp_g3
y_g3 = y_g1
# Center coordinates of gear 4
x_g4 = x_g1 + rp_g1 + 2*rp_g2 + 2*rp_g3+rp_g4
y_g4 = y_g1
# Center coordinates of gear 5; the gears are assumed to be laid out horizontally, i.e. all centers share the same y coordinate
x_g5 = x_g1
y_g5 = y_g1+rp_g1+rp_g5
# Center coordinates of gear 6; the gears are assumed to be laid out horizontally, i.e. all centers share the same y coordinate
x_g6 = x_g1+rp_g2+rp_g6
y_g6 = y_g1+rp_g1+rp_g5
# Rotate gear 1 clockwise by 90 degrees
# Use ctx.save() and ctx.restore() so that each gear is rotated and drawn in its own relative coordinates
ctx.save()
# translate to the origin of second gear
ctx.translate(x_g1, y_g1)
# rotate to engage
ctx.rotate(pi/2)
# put it back
ctx.translate(-x_g1, -y_g1)
spur.Spur(ctx).Gear(x_g1, y_g1, rp_g1, n_g1, pa, "blue")
ctx.restore()
# Rotate gear 2 counterclockwise by 90 degrees and then one extra tooth so it meshes with gear 1
ctx.save()
# translate to the origin of second gear
ctx.translate(x_g2, y_g2)
# rotate to engage
ctx.rotate(-pi/2-pi/n_g2)
# put it back
ctx.translate(-x_g2, -y_g2)
spur.Spur(ctx).Gear(x_g2, y_g2, rp_g2, n_g2, pa, "black")
ctx.restore()
# Rotate gear 3 counterclockwise by 90 degrees, turn back by the angle gear 2's positioning drives it, then one more tooth counterclockwise so it meshes with gear 2
ctx.save()
# translate to the origin of second gear
ctx.translate(x_g3, y_g3)
# rotate to engage
# pi+pi/n_g2 is the angle gear 2 has turned, relative to the current marker line, after gear 1 turned 90 degrees clockwise; to convert it to gear 3's rotation
# it must be multiplied by the ratio of the tooth counts -- if gear 2 is larger, gear 3 turns faster
# The first -pi/2 rotates gear 3's originally vertical positioning line 90 degrees counterclockwise
# -pi/n_g3 is the extra one-tooth counterclockwise turn needed, after the positioning lines of gears 3 and 2 coincide, for them to mesh
# (pi+pi/n_g2)*n_g2/n_g3: gear 2's original positioning line was turned 90 degrees clockwise,
# but to mesh with gear 1, gear 2 has moved past its positioning line by 180 degrees plus one tooth angle of gear 2; since it must drive gear 3's positioning,
# this correction angle has to be converted to gear 3's rotation using the speed ratio of gears 2 and 3, hence the factor n_g2/n_g3
ctx.rotate(-pi/2-pi/n_g3+(pi+pi/n_g2)*n_g2/n_g3)
# put it back
ctx.translate(-x_g3, -y_g3)
spur.Spur(ctx).Gear(x_g3, y_g3, rp_g3, n_g3, pa, "red")
ctx.restore()
# Rotate gear 4 counterclockwise by 90 degrees and then one extra tooth so it meshes with gear 1
ctx.save()
# translate to the origin of second gear
ctx.translate(x_g4, y_g4)
# rotate to engage
ctx.rotate(-pi/2-pi/n_g4)
# put it back
ctx.translate(-x_g4, -y_g4)
spur.Spur(ctx).Gear(x_g4, y_g4, rp_g4, n_g4, pa, "black")
ctx.restore()
# Rotate gear 5 counterclockwise by 90 degrees and then one extra tooth so it meshes with gear 1
ctx.save()
# translate to the origin of second gear
ctx.translate(x_g5, y_g5)
# rotate to engage
ctx.rotate(-pi/2-pi/n_g5)
# put it back
ctx.translate(-x_g5, -y_g5)
spur.Spur(ctx).Gear(x_g5, y_g5, rp_g5, n_g5, pa, "black")
ctx.restore()
# Rotate gear 6 counterclockwise by 90 degrees, turn back by the angle gear 5's positioning drives it, then one more tooth counterclockwise so it meshes with gear 2
ctx.save()
# translate to the origin of second gear
ctx.translate(x_g6, y_g6)
# rotate to engage
# pi+pi/n_g5 is the angle gear 5 has turned, relative to the current marker line, after turning 90 degrees clockwise; to convert it to gear 6's rotation
# it must be multiplied by the ratio of the tooth counts -- if gear 2 is larger, gear 3 turns faster
# The first -pi/2 rotates gear 3's originally vertical positioning line 90 degrees counterclockwise
# -pi/n_g6 is the extra one-tooth counterclockwise turn needed, after the positioning lines of gears 3 and 2 coincide, for them to mesh
# (pi+pi/n_g5)*n_g5/n_g6: gear 5's original positioning line was turned 90 degrees clockwise,
# but to mesh with gear 1, gear 5 has moved past its positioning line by 180 degrees plus one tooth angle of gear 5; since it must drive gear 6's positioning,
# this correction angle has to be converted to gear 6's rotation using the speed ratio of gears 2 and 6, hence the factor n_g5/n_g6
ctx.rotate(-pi/2-pi/n_g6+(pi+pi/n_g5)*n_g5/n_g6)
# put it back
ctx.translate(-x_g6, -y_g6)
spur.Spur(ctx).Gear(x_g6, y_g6, rp_g6, n_g6, pa, "red")
ctx.restore()
# Following the meshing-angle computation of the three spur gears above, the rotation of subsequent gears follows analogously to complete a six-gear meshing drawing
</script>
<canvas id="plotarea" width="1200" height="1200"></canvas>
</body>
</html>
'''
return outstring
#@-others
#@-others
################# (4) Application startup
# Configure static directories or static files relative to the program's directory
application_conf = {'/static':{
'tools.staticdir.on': True,
# The static directory must be created manually under the program's execution directory
'tools.staticdir.dir': _curdir+"/static"},
'/downloads':{
'tools.staticdir.on': True,
'tools.staticdir.dir': data_dir+"/downloads"},
'/images':{
'tools.staticdir.on': True,
'tools.staticdir.dir': data_dir+"/images"}
}
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
# Running on OpenShift
application = cherrypy.Application(Hello(), config=application_conf)
else:
# Running locally
cherrypy.config.update({'server.socket_port': 8099})
cherrypy.quickstart(Hello(), config=application_conf)
#@-leo
|
40223137/2015cd_midterm
|
wsgi.py
|
Python
|
gpl-3.0
| 25,645
|
# -*- coding: utf-8 -*-
# Copyright (C) 2008 Osmo Salomaa
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import nfoview
from gi.repository import Gtk
class TestAboutDialog(nfoview.TestCase):
def run_dialog(self):
self.dialog.run()
def setup_method(self, method):
self.dialog = nfoview.AboutDialog(Gtk.Window())
|
otsaloma/nfoview
|
nfoview/test/test_about.py
|
Python
|
gpl-3.0
| 929
|
# -*- coding: utf-8 -*-
'''
Created on 18 Nov 2012
@author: tedlaz
'''
from PyQt4 import QtGui,Qt
from utils import dbutils, widgets
isValSQL = '''
SELECT m12_par.id FROM m12_par
INNER JOIN m12_xrisi ON m12_xrisi.id=m12_par.xrisi_id
INNER JOIN m12_period ON m12_period.id=m12_par.period_id
WHERE m12_xrisi.xrisi='%s' AND m12_period.period='%s'
'''
insParSQL = "INSERT INTO m12_par (xrisi_id, period_id) VALUES (%s, %s)"
insPardSQL= "INSERT INTO m12_pard (par_id, pro_id, ptyp_id, pos) VALUES (%s, %s, %s, %s)"
def automaticInsert(xrisi_id,period_id,db):
from utils import variusSQL
xrisi = dbutils.getDbSingleVal("SELECT xrisi FROM m12_xrisi WHERE id='%s'" % xrisi_id, db)
period = dbutils.getDbSingleVal("SELECT period FROM m12_period WHERE id='%s'" % period_id, db)
print xrisi_id, period_id
par = dbutils.getDbOneRow(isValSQL %(xrisi,period), db)
if par:
print u'Entries already exist for period %s %s' % (xrisi,period)
return False
arr = dbutils.getDbRows(variusSQL.InsertParousiesSQL % (xrisi,period,xrisi,period), db)
if not arr:
print u'There are no employees in period %s %s' % (xrisi,period)
return False
for el in arr:
for c in el:
print c,
print ''
par_id = dbutils.commitToDb(insParSQL % (xrisi_id, period_id), db)
insArr = []
for el in arr:
insArr.append(dbutils.commitToDb(insPardSQL % (par_id,el[0],1,0),db))
print insArr
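# Example (assumed ids -- the tables above map xrisi/period names to ids):
# automaticInsert(1, 3, db) inserts one m12_par row for (xrisi 1, period 3)
# and one m12_pard row per employee returned by InsertParousiesSQL.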
class dlg(QtGui.QDialog):
def __init__(self, args=None, parent=None):
super(dlg, self).__init__(parent)
self.setAttribute(Qt.Qt.WA_DeleteOnClose)
if parent:
self.db = parent.db
else:
self.db = None #'c:/ted/mis.sql3'
xrisiLabel = QtGui.QLabel(u"Χρήση:")
xrisi = widgets.DbComboBox(dbutils.getDbRows("SELECT id, xrisi FROM m12_xrisi", self.db))
xrisiLabel.setBuddy(xrisi)
perLabel = QtGui.QLabel(u"Περίοδος Παρουσιών:")
per = widgets.DbComboBox(dbutils.getDbRows("SELECT id, periodp FROM m12_period", self.db))
perLabel.setBuddy(per)
bcalc = QtGui.QPushButton(u'Υπολογισμός')
def calcmis():
if not self.db:
return
xrid = xrisi.getValue()
perid = per.getValue()
automaticInsert(xrid,perid, self.db)
self.accept()
bcalc.clicked.connect(calcmis)
glayout = QtGui.QGridLayout()
glayout.addWidget(xrisiLabel,0,0)
glayout.addWidget(xrisi,0,1)
glayout.addWidget(perLabel,1,0)
glayout.addWidget(per,1,1)
vlayout = QtGui.QVBoxLayout()
vlayout.addLayout(glayout)
vlayout.addWidget(bcalc)
self.setLayout(vlayout)
self.setWindowTitle(u'Αυτόματη εισαγωγή παρουσιών')
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
form = dlg(sys.argv)
form.show()
app.exec_()
|
tedlaz/pyted
|
misthodosia/m13/f_calcpar.py
|
Python
|
gpl-3.0
| 3,290
|
# Copyright (C) MetaCarta, Incorporated.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
# Port of nilsimsa-20050414.rb from Ruby to Python
#
# Ported by Michael Itz at MetaCarta
#
# Original comments from Ruby version:
# ---------------------------------------------------------
# Nilsimsa hash (build 20050414)
# Ruby port (C) 2005 Martin Pirker
# released under GNU GPL V2 license
#
# inspired by Digest::Nilsimsa-0.06 from Perl CPAN and
# the original C nilsimsa-0.2.4 implementation by cmeclax
# http://ixazon.dynip.com/~cmeclax/nilsimsa.html
# ---------------------------------------------------------
"""
Computes and compares nilsimsa codes.
A nilsimsa code is something like a hash, but unlike hashes, a small
change in the message results in a small change in the nilsimsa
code. Such a function is called a locality-sensitive hash.
Python port of ruby version that was inspired by a perl version:
http://ixazon.dynip.com/~cmeclax/nilsimsa.html
"""
# $ Id: $
# table used in computing trigram statistics
# TRAN[x] is the accumulator that should be incremented when x
# is the value observed from hashing a triplet of recently
# seen characters (done in Nilsimsa.tran3(a, b, c, n))
TRAN = [ord(x) for x in
"\x02\xD6\x9E\x6F\xF9\x1D\x04\xAB\xD0\x22\x16\x1F\xD8\x73\xA1\xAC"\
"\x3B\x70\x62\x96\x1E\x6E\x8F\x39\x9D\x05\x14\x4A\xA6\xBE\xAE\x0E"\
"\xCF\xB9\x9C\x9A\xC7\x68\x13\xE1\x2D\xA4\xEB\x51\x8D\x64\x6B\x50"\
"\x23\x80\x03\x41\xEC\xBB\x71\xCC\x7A\x86\x7F\x98\xF2\x36\x5E\xEE"\
"\x8E\xCE\x4F\xB8\x32\xB6\x5F\x59\xDC\x1B\x31\x4C\x7B\xF0\x63\x01"\
"\x6C\xBA\x07\xE8\x12\x77\x49\x3C\xDA\x46\xFE\x2F\x79\x1C\x9B\x30"\
"\xE3\x00\x06\x7E\x2E\x0F\x38\x33\x21\xAD\xA5\x54\xCA\xA7\x29\xFC"\
"\x5A\x47\x69\x7D\xC5\x95\xB5\xF4\x0B\x90\xA3\x81\x6D\x25\x55\x35"\
"\xF5\x75\x74\x0A\x26\xBF\x19\x5C\x1A\xC6\xFF\x99\x5D\x84\xAA\x66"\
"\x3E\xAF\x78\xB3\x20\x43\xC1\xED\x24\xEA\xE6\x3F\x18\xF3\xA0\x42"\
"\x57\x08\x53\x60\xC3\xC0\x83\x40\x82\xD7\x09\xBD\x44\x2A\x67\xA8"\
"\x93\xE0\xC2\x56\x9F\xD9\xDD\x85\x15\xB4\x8A\x27\x28\x92\x76\xDE"\
"\xEF\xF8\xB2\xB7\xC9\x3D\x45\x94\x4B\x11\x0D\x65\xD5\x34\x8B\x91"\
"\x0C\xFA\x87\xE9\x7C\x5B\xB1\x4D\xE5\xD4\xCB\x10\xA2\x17\x89\xBC"\
"\xDB\xB0\xE2\x97\x88\x52\xF7\x48\xD3\x61\x2C\x3A\x2B\xD1\x8C\xFB"\
"\xF1\xCD\xE4\x6A\xE7\xA9\xFD\xC4\x37\xC8\xD2\xF6\xDF\x58\x72\x4E"]
# table used in comparing bit differences between digests
# POPC[x] = <number of 1 bits in x>
# so...
# POPC[a^b] = <number of bits different between a and b>
POPC = [ord(x) for x in
"\x00\x01\x01\x02\x01\x02\x02\x03\x01\x02\x02\x03\x02\x03\x03\x04"\
"\x01\x02\x02\x03\x02\x03\x03\x04\x02\x03\x03\x04\x03\x04\x04\x05"\
"\x01\x02\x02\x03\x02\x03\x03\x04\x02\x03\x03\x04\x03\x04\x04\x05"\
"\x02\x03\x03\x04\x03\x04\x04\x05\x03\x04\x04\x05\x04\x05\x05\x06"\
"\x01\x02\x02\x03\x02\x03\x03\x04\x02\x03\x03\x04\x03\x04\x04\x05"\
"\x02\x03\x03\x04\x03\x04\x04\x05\x03\x04\x04\x05\x04\x05\x05\x06"\
"\x02\x03\x03\x04\x03\x04\x04\x05\x03\x04\x04\x05\x04\x05\x05\x06"\
"\x03\x04\x04\x05\x04\x05\x05\x06\x04\x05\x05\x06\x05\x06\x06\x07"\
"\x01\x02\x02\x03\x02\x03\x03\x04\x02\x03\x03\x04\x03\x04\x04\x05"\
"\x02\x03\x03\x04\x03\x04\x04\x05\x03\x04\x04\x05\x04\x05\x05\x06"\
"\x02\x03\x03\x04\x03\x04\x04\x05\x03\x04\x04\x05\x04\x05\x05\x06"\
"\x03\x04\x04\x05\x04\x05\x05\x06\x04\x05\x05\x06\x05\x06\x06\x07"\
"\x02\x03\x03\x04\x03\x04\x04\x05\x03\x04\x04\x05\x04\x05\x05\x06"\
"\x03\x04\x04\x05\x04\x05\x05\x06\x04\x05\x05\x06\x05\x06\x06\x07"\
"\x03\x04\x04\x05\x04\x05\x05\x06\x04\x05\x05\x06\x05\x06\x06\x07"\
"\x04\x05\x05\x06\x05\x06\x06\x07\x05\x06\x06\x07\x06\x07\x07\x08"]
class Nilsimsa(object):
"""Nilsimsa code calculator."""
def __init__(self, data=None):
"""Nilsimsa calculator, w/optional list of initial data chunks."""
self.count = 0 # num characters seen
self.acc = [0]*256 # accumulators for computing digest
self.lastch = [-1]*4 # last four seen characters (-1 until set)
if data:
for chunk in data:
self.update(chunk)
def tran3(self, a, b, c, n):
"""Get accumulator for a transition n between chars a, b, c."""
return (((TRAN[(a+n)&255]^TRAN[b]*(n+n+1))+TRAN[(c)^TRAN[n]])&255)
def update(self, data):
"""Add data to running digest, increasing the accumulators for 0-8
triplets formed by this char and the previous 0-3 chars."""
for character in data:
ch = ord(character)
self.count += 1
# incr accumulators for triplets
if self.lastch[1] > -1:
self.acc[self.tran3(ch, self.lastch[0], self.lastch[1], 0)] +=1
if self.lastch[2] > -1:
self.acc[self.tran3(ch, self.lastch[0], self.lastch[2], 1)] +=1
self.acc[self.tran3(ch, self.lastch[1], self.lastch[2], 2)] +=1
if self.lastch[3] > -1:
self.acc[self.tran3(ch, self.lastch[0], self.lastch[3], 3)] +=1
self.acc[self.tran3(ch, self.lastch[1], self.lastch[3], 4)] +=1
self.acc[self.tran3(ch, self.lastch[2], self.lastch[3], 5)] +=1
self.acc[self.tran3(self.lastch[3], self.lastch[0], ch, 6)] +=1
self.acc[self.tran3(self.lastch[3], self.lastch[2], ch, 7)] +=1
# adjust last seen chars
self.lastch = [ch] + self.lastch[:3]
def digest(self):
"""Get digest of data seen thus far as a list of bytes."""
total = 0 # number of triplets seen
if self.count == 3: # 3 chars = 1 triplet
total = 1
elif self.count == 4: # 4 chars = 4 triplets
total = 4
        elif self.count > 4:            # otherwise 8 triplets per char,
            total = 8 * self.count - 28 # minus the 28 'missed' during 'ramp-up'
threshold = total / 256 # threshold for accumulators
code = [0]*32 # start with all zero bits
for i in range(256): # for all 256 accumulators
if self.acc[i] > threshold: # if it meets the threshold
code[i >> 3] += 1 << (i&7) # set corresponding digest bit
return code[::-1] # reverse the byte order in result
def hexdigest(self):
"""Get digest of data seen this far as a 64-char hex string."""
return ("%02x" * 32) % tuple(self.digest())
def __str__(self):
"""Show digest for convenience."""
return self.hexdigest()
def from_file(self, filename):
"""Update running digest with content of named file."""
f = open(filename, 'rb')
while True:
data = f.read(10480)
if not data:
break
self.update(data)
f.close()
def compare(self, otherdigest, ishex=False):
"""Compute difference in bits between own digest and another.
returns -127 to 128; 128 is the same, -127 is different"""
bits = 0
myd = self.digest()
if ishex:
            # convert the hex digest to a 32-tuple of unsigned byte-valued ints
otherdigest = tuple([int(otherdigest[i:i+2],16) for i in range(0,63,2)])
for i in range(32):
bits += POPC[255 & myd[i] ^ otherdigest[i]]
return 128 - bits
def compare_hexdigests( digest1, digest2 ):
"""Compute difference in bits between digest1 and digest2
returns -127 to 128; 128 is the same, -127 is different"""
    # convert each hex digest to a 32-tuple of unsigned byte-valued ints
digest1 = tuple([int(digest1[i:i+2],16) for i in range(0,63,2)])
digest2 = tuple([int(digest2[i:i+2],16) for i in range(0,63,2)])
bits = 0
for i in range(32):
bits += POPC[255 & digest1[i] ^ digest2[i]]
return 128 - bits
def selftest( name=None, opt=None, value=None, parser=None ):
print "running selftest..."
n1 = Nilsimsa()
n1.update("abcdefgh")
n2 = Nilsimsa(["abcd", "efgh"])
print "abcdefgh:\t%s" % str(n1.hexdigest()==\
'14c8118000000000030800000004042004189020001308014088003280000078')
print "abcd efgh:\t%s" % str(n2.hexdigest()==\
'14c8118000000000030800000004042004189020001308014088003280000078')
print "digest:\t\t%s" % str(n1.digest() == n2.digest())
n1.update("ijk")
print "update(ijk):\t%s" % str(n1.hexdigest()==\
'14c811840010000c0328200108040630041890200217582d4098103280000078')
print "compare:\t%s" % str(n1.compare(n2.digest())==109)
print "compare:\t%s" % str(n1.compare(n2.hexdigest(), ishex=True)==109)
|
hohum/imap_autosort_nilsimsa
|
nilsimsa/__init__.py
|
Python
|
gpl-3.0
| 9,317
|
#! /usr/bin/env python
import sys, getopt
sys.path.append('/usr/local/nagios/share')
from pygraylog.monitoring import StreamCheck
hostname = ""
login = ""
password = ""
port = 0
options = ""
#############
try:
options, remainder = getopt.gnu_getopt(sys.argv[1:], 'h:p:u:P:', ['host=', 'port=', 'user=', 'password=' ])
except getopt.GetoptError as err:
print "UNKNOWN - %s" % str(err)
exit(3)
if len(options) < 4:
print "UNKOWN - args error"
exit(3)
for opt, arg in options:
if opt in ('-h', '--host'):
hostname = arg
elif opt in ('-p', '--port'):
port = int(arg)
    elif opt in ('-u', '--user'):
        login = arg  # fix: was assigned to an unused 'user' variable, leaving 'login' empty
elif opt in ('-P', '--password'):
password = arg
if port == 0:
    print "UNKNOWN - bad port given"
    exit(3)
if len(login) == 0:
    print "UNKNOWN - bad user given"
    exit(3)
if len(password) == 0:
    print "UNKNOWN - bad password given"
    exit(3)
if len(hostname) == 0:
    print "UNKNOWN - bad hostname given"
    exit(3)
#############
check = StreamCheck(hostname, port, login, password)
try:
check.perform()
except Exception:
    print "UNKNOWN - failed to retrieve data: %s" % ( check.error_msg )
exit(3)
if len(check.failed_stuff) > 0:
print "CRITICAL -", check.get_failed_stuff_as_string()
exit(2)
print "OK - all streams processing"
exit(0)
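# Example invocation (illustrative; substitute a real host and credentials):
#   ./check_streams.py --host graylog.example.org --port 12900 --user nagios --password secret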
|
mblakele/pygraylog
|
examples/check_streams.py
|
Python
|
gpl-3.0
| 1,258
|
import logging
import sys
# Logging
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)
# Gracefully load numpy
try:
import numpy as np
except (ImportError):
logging.critical("Cannot import numpy. Skipping Test (%s)", __name__)
# Gracefully load BaseTest
try:
sys.path.append('../')
import BaseTest
except (ImportError):
logging.critical("Cannot import BaseTest. Dying gracefully (%s)", __name__)
sys.exit()
class Test_MeanPE(BaseTest.Tester):
"""
    Instance of the Tester class for testing the mean PE.
    Pass if meanPE (from output_files) is within +/- 5*SEM of expected_result.
"""
def do_test(self):
"""
        Extend do_test: pass when the mean PE from the energy file lies within
        5*SEM of the expected value.
"""
energy_filepath = self.obj_runner.run_input_options['energy_file']
if BaseTest.check_file(energy_filepath, sys_exit=False):
energy = np.loadtxt(energy_filepath)
else:
self.test_pass = False
self.test_pass_info = "Test Failed. File not found: " + energy_filepath
return self.test_pass
# PE: Potential Energy
PE = energy[:,1]
meanPE = np.mean(PE)
stdPE = np.std(PE)
semPE = stdPE/np.sqrt(np.size(PE))
self.test_results_dict.update({'meanPE':meanPE, 'stdPE':stdPE, 'semPE':semPE})
if not self.expected_results_dict:
self.test_pass = False
self.test_pass_info = "Test Failed. Empty comparison file: " + self.expected_results_filepath
return self.test_pass
if meanPE < float(self.expected_results_dict['meanPE']) + 5*float(self.expected_results_dict['semPE']) and \
meanPE > float(self.expected_results_dict['meanPE']) - 5*float(self.expected_results_dict['semPE']):
self.test_pass = True
self.test_pass_info = "Test Passed"
return self.test_pass
|
rgatkinson/oxdna
|
TEST/UserTest/Test_MeanPE.py
|
Python
|
gpl-3.0
| 1,880
|
from pypers.core.step import CmdLineStep
class CreateIntervalList(CmdLineStep):
spec = {
"version": "0.1.19",
"descr": [
"Creates an interval list file suitable for use by picard tools, e.g., CalculateHsMetrics.",
"A SAM style header must be present in the file which lists the sequence records against which the intervals are described.",
"The SAM header is generated from the bam file provided as input.",
"The intervals input must be an agilent bed file to which a strand col was added"
],
"args":
{
"inputs": [
{
"name" : "input_files",
"type" : "file",
"iterable": True,
"descr" : "the input bam file names",
}
],
"outputs": [
{
"name" : "output_files",
"type" : "file",
"value" : "{{input_files}}.intervals.txt",
"descr" : "text file containing the intervals",
}
],
"params": [
{
'name' : 'baits_file',
'type' : 'str',
'value': '',
                'descr': 'Agilent baits file containing the regions to plot (format: chr start end label)'
},
]
},
"cmd": [
"( /software/pypers/samtools/samtools-0.1.18/bin/samtools view -H {{input_files}}",
" && /bin/cat {{baits_file}}",
" | /bin/awk 'BEGIN {OFS=\"\t\"};{print $1,$2,$3,\"+\",$4}'",
" ) > {{output_files}}"
]
}
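# Illustrative shape of the resulting interval list (added note): the SAM
# header lines from `samtools view -H`, followed by one tab-separated line
# per bait region, e.g.
#   chr1    14362   14829   +       target_1
# i.e. the (chr, start, end, strand, label) layout picard expects.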
|
frankosan/pypers
|
pypers/steps/qc/createintervallist.py
|
Python
|
gpl-3.0
| 1,745
|
#!/bin/env python
#
# AutoPyfactory batch plugin for Condor
#
from CondorBaseBatchSubmitPlugin import CondorBaseBatchSubmitPlugin
from autopyfactory import jsd
class CondorGridBatchSubmitPlugin(CondorBaseBatchSubmitPlugin):
def __init__(self, apfqueue, config=None):
if not config:
qcl = apfqueue.factory.qcl
else:
qcl = config
newqcl = qcl.clone().filterkeys('batchsubmit.condorgrid', 'batchsubmit.condorbase')
super(CondorGridBatchSubmitPlugin, self).__init__(apfqueue, newqcl)
self.log.info('CondorGridBatchSubmitPlugin: Object initialized.')
def _addJSD(self):
'''
add things to the JSD object
'''
self.log.debug('CondorGridBatchSubmitPlugin.addJSD: Starting.')
self.JSD.add("universe", "grid")
super(CondorGridBatchSubmitPlugin, self)._addJSD()
self.log.debug('CondorGridBatchSubmitPlugin.addJSD: Leaving.')
|
edquist/autopyfactory
|
autopyfactory/plugins/batchsubmit/CondorGridBatchSubmitPlugin.py
|
Python
|
gpl-3.0
| 1,010
|
# encoding=utf-8
# @modified 2021/10/24 16:36:19
import web
import time
import hashlib
import xutils
import xauth
import xtemplate
from xutils import dateutil, cacheutil, dbutil
from xutils import Storage
from xutils import webutil
RETRY_LIMIT = 3
dbutil.register_table("record", "记录表")
dbutil.register_table("user_op_log", "用户操作日志表")
USER_LOG_TABLE = dbutil.get_table("user_op_log")
def get_real_ip():
return webutil.get_real_ip()
# note: the 'value' argument is passed by callers but not used below
def save_login_info(name, value, error = None):
if name != "":
real_ip = get_real_ip()
now = xutils.format_datetime()
detail = "登录IP: %s" % real_ip
if error != None:
detail += ",登录失败:%s" % error
login_log = Storage(type = "login", user_name = name, ip = real_ip, ctime = now, detail = detail)
USER_LOG_TABLE.insert_by_user(name, login_log)
def save_login_error_count(name, count):
    # the counter expires after 60 seconds, so RETRY_LIMIT is effectively per-minute
    cacheutil.set("login.fail.count#%s" % name, count, 60)
class LoginHandler:
def POST(self):
name = xutils.get_argument("username", "")
pswd = xutils.get_argument("password", "")
target = xutils.get_argument("target")
users = xauth.get_users()
error = ""
count = cacheutil.get("login.fail.count#%s" % name, 0)
name = name.strip()
pswd = pswd.strip()
if count >= RETRY_LIMIT:
error = "重试次数过多"
elif name in users:
user = users[name]
if pswd == user["password"]:
save_login_info(name, "success")
xauth.login_user_by_name(name, login_ip = get_real_ip())
if target is None:
raise web.seeother("/")
raise web.seeother(target)
else:
error = "用户名或密码错误"
save_login_info(name, pswd, error)
save_login_error_count(name, count + 1)
else:
error = "用户名或密码错误"
save_login_info(name, pswd, error)
save_login_error_count(name, count + 1)
return xtemplate.render("user/page/login.html",
show_aside = False,
username = name,
password = pswd,
error = error)
def GET(self):
return xtemplate.render("user/page/login.html",
show_aside=False,
username="",
password="",
error="")
xurls = (
r"/login", LoginHandler
)
|
xupingmao/xnote
|
handlers/user/login.py
|
Python
|
gpl-3.0
| 2,520
|
#!/usr/bin/env python
'''Download multipage PCS ranking data'''
import os
import csv
import json
import argparse
import pathlib
from bs4 import BeautifulSoup
from tidylib import tidy_document
import requests
BASE_URL = 'https://www.procyclingstats.com/race/{race}/{year}/stage-{stage}'
def get_soup_for(filename):
with open(filename) as handle:
html_data = handle.read()
return BeautifulSoup(html_data, 'html.parser')
def save_records_to_csv(records, filename):
with open(filename, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=records[0]._fields)
writer.writeheader()
for record in records:
writer.writerow(record._asdict())
def download(url, filename):
'''Downloads url to filename'''
r = requests.get(url, headers={'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0'})
r.raise_for_status()
data, _errors = tidy_document(r.text)
with open(filename, 'w', encoding='utf-8') as output:
output.write(data)
def get(url, filename, source):
if source == 'download' or not os.path.isfile(filename):
download(url, filename)
def cleanup(key, value):
if key in ('BIB', 'Age', 'UCI', 'Pnt', 'GC'):
if value.endswith('*'):
value = value[:-1]
return int(value) if value else 0
if key in ('Avg',):
return float(value) if value else 0.0
if key in ('GC-Time',):
if '-' in value:
value = value[3:] # Skip the prefix '+ -'
else:
value = value[1:]
tokens = [int(x) for x in value.split(':')]
acc = tokens[-1]
acc += tokens[-2] * 60
try:
acc += tokens[-3] * 3600
except IndexError:
pass # No hours yet
return acc
return value
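# Worked example of the 'GC-Time' branch above (added note):
#   cleanup('GC-Time', '+ 1:23:45') -> 45 + 23*60 + 1*3600 == 5025 seconds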
def process(filename, stage):
soup = get_soup_for(filename)
table = soup.select('table.basic')[0]
headers = [x.text.strip() for x in table.select('thead > tr > th')]
print(headers)
riders = []
for row in table.select('tbody > tr'):
cells = [cell.text.strip() for cell in row.select('td')]
rider_data = {key: cleanup(key, value) for key, value in zip(headers, cells)}
rider_data['Stage'] = stage
riders.append(rider_data)
return riders
def jsonify(filename, riders_results):
riders = {}
for result in sorted(riders_results, key=lambda res: res['Stage']):
print(result)
name = result['Rider']
if name not in riders:
riders[name] = {
'name': name,
'pos': result['GC'],
'time': [result['GC-Time']],
'team': result['Team']
}
else:
riders[name]['time'].append(result['GC-Time'])
riders[name]['pos'] = result['GC']
    # To construct a Map in JS later, convert the dict to a list of (key, value) pairs.
riders_list = list(riders.items())
with open(filename, 'w') as handle:
json.dump(riders_list, handle)
def main():
parser = argparse.ArgumentParser(description='Download pcs data.')
parser.add_argument('source', help='download or try local data', nargs='?',
choices=('download', 'local'), default='local')
parser.add_argument('--max-stage', help='last stage to download',
action='store', type=int, default=21)
parser.add_argument('--race', help='race to get',
action='store', default='giro-d-italia')
parser.add_argument('--year', help='year to get',
action='store', type=int, default=2018)
parser.add_argument('--directory', help='directory to store the files',
action='store', default='.')
args = parser.parse_args()
pathlib.Path(args.directory).mkdir(parents=True, exist_ok=True)
print(args)
riders_results = []
for stage in range(1, args.max_stage+1):
filename = '{a.race}-{a.year}-stage-{stage:02d}.html'.format(a=args, stage=stage)
path = os.path.join(args.directory, filename)
url = BASE_URL.format(race=args.race, year=args.year, stage=stage)
print(url, path, args.source)
get(url, path, args.source)
riders_results += process(path, stage)
print(riders_results)
jsonify('{a.race}-{a.year}.json'.format(a=args), riders_results)
for team in set(x['Team'] for x in riders_results):
print(team)
if __name__ == '__main__':
main()
|
lauromoura/cyclinggctracker
|
src/pcs/downloader.py
|
Python
|
gpl-3.0
| 4,542
|
# good question
class Solution(object):
def removeDuplicateLetters(self, s):
"""
:type s: str
:rtype: str
"""
n = len(s)
last_index = {}
for i, elem in enumerate(s):
last_index[elem] = i
seq = sorted(((idx, k) for k, idx in last_index.items()))
select = {}
start = 0
for idx, k in seq:
if k not in select:
for p, i in sorted([(s[i], i) for i in range(start, idx + 1)
if s[i] <= k and s[i] not in select]):
if p not in select and i >= start:
select[p] = i
start = i + 1
return ''.join(sorted(select.keys(), key=lambda x: select[x]))
############################################################
        # try2 **requiring only that no smaller element appears before is still not enough**
############################################################
# n = len(s)
# last_index = {}
# for i, elem in enumerate(s):
# last_index[elem] = i
# seq = sorted(((idx, k) for k, idx in last_index.items()))
# select = {}
# start = 0
# for idx, k in seq:
# if k not in select:
# before = sorted([(s[i], i) for i in range(start, idx)
# if s[i] < k and s[i] not in select])
# if before:
# select[k] = idx
# i_left = start
# for p, i in before:
# if p not in select and i >= i_left:
# select[p] = i_left = i
# start = idx + 1
# else:
# while s[start] != k:
# start += 1
# select[k] = start
# start += 1
# return ''.join(sorted(select.keys(), key=lambda x: select[x]))
############################################################
        # try1 **keeping only the minimum among the remaining letters is not enough**
############################################################
# n = len(s)
# last_index = {}
# for i, elem in enumerate(s):
# last_index[elem] = i
# seq = sorted(((idx, k) for k, idx in last_index.items()))
# left = sorted(last_index.keys(), reverse=True)
# select = {}
# start = 0
# for idx, k in seq:
# if k not in select:
# while left[-1] in select:
# left.pop()
        # if k == left[-1]: # if k is minimum among left, choose the first
# while s[start] != k:
# start += 1
# select[k] = start
# start += 1
# else:
# select[k] = idx
# i_left = start
# for p, i in sorted([(s[i], i) for i in range(start, idx)
# if s[i] < k and s[i] not in select]):
# if p not in select and i >= i_left:
# select[p] = i_left = i
# start = idx + 1
# return ''.join(sorted(select.keys(), key=lambda x: select[x]))
assert Solution().removeDuplicateLetters("bcabc") == "abc"
assert Solution().removeDuplicateLetters("cbacdcbc") == "acdb"
assert Solution().removeDuplicateLetters("bbcaac") == 'bac'
assert Solution().removeDuplicateLetters("abacb") == 'abc'
assert Solution().removeDuplicateLetters("mitnlruhznjfyzmtmfnstsxwktxlboxutbic") == "ilrhjfyzmnstwkboxuc"
assert Solution().removeDuplicateLetters("wmxkuuoordmnpnebikzzujdpscpedcrsjphcaykjsmobturjjxxpoxvvrynmapegvtlasmyuddgxygkaztmbpkrnukbxityz") == "wbcdhajmoegvlskprnuxityz"
|
wufangjie/leetcode
|
316. Remove Duplicate Letters.py
|
Python
|
gpl-3.0
| 3,854
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 16 21:51:00 2016
@author: Rrich
"""
# Tic- Tac- Toe
import random
# print the board on screen
def showBoard(Board):
'''The function prints the board on screen.
Board is a list of 10 strings representing the board.
'''
print ' | | '
print str(Board[7]) + ' | ' + str(Board[8]) + ' | '+ str(Board[9])
print ' | | '
print '_ _ _ _ _ _ '
print ' | | '
print str(Board[4]) + ' | ' + str(Board[5]) + ' | '+ str(Board[6])
print ' | | '
print '_ _ _ _ _ _ '
print ' | | '
print str(Board[1]) + ' | ' + str(Board[2]) + ' | '+ str(Board[3])
print ' | | '
print
#Board= [' ' , 'x', ' ', ' ', 'O' , ' ' , ' ', ' ', 'x' , 'O']
#showBoard(Board)
def inputPlayerLetter():
''' Ask for player's letter.
Returns a list with the player’s letter as the first item,
and the computer's letter as the second.
'''
while (True):
letter = raw_input('Do you want to be X or O? ')
        letter = letter.upper()
        if letter in ('X', 'O'):
            if letter == 'X':
                return ['X', 'O']
else:
return ['O', 'X']
else:
print 'Invalid input.'
#print inputPlayerLetter()
def decide():
''' Randomly choose the player who goes first.'''
if random.randint(0,1)==0:
return 'Computer'
else:
return 'Player'
def playAgain():
'''This function returns True if the player wants to
play again, otherwise it returns False.
'''
while (True):
again= raw_input('Do you want to play again? (yes/no): ')
again= again.lower()
if again== 'yes':
return True
elif again == 'no':
return False
else:
print 'Invalid Input.'
def makeMove(Board, letter, move):
'''place the move on the board'''
Board[move]= letter
def isWinner(Board, letter):
'''Given a board and a player’s letter, this function returns True
if that player has won.
'''
if Board[7]== letter and Board[8]== letter and Board[9]== letter:
return True
elif Board[4]== letter and Board[5]== letter and Board[6]== letter:
return True
elif Board[1]== letter and Board[2]== letter and Board[3]== letter:
return True
elif Board[1]== letter and Board[5]== letter and Board[9]== letter:
return True
elif Board[1]== letter and Board[4]== letter and Board[7]== letter:
return True
elif Board[2]== letter and Board[5]== letter and Board[8]== letter:
return True
elif Board[3]== letter and Board[6]== letter and Board[9]== letter:
return True
elif Board[7]== letter and Board[5]== letter and Board[3]== letter:
return True
else:
return False
def getBoardCopy(Board):
    '''Make a duplicate of the board list and return the duplicate.'''
dupBoard=[]
for i in Board:
dupBoard.append(i)
return dupBoard
def isSpaceFree(Board, move):
'''Return true if the passed move is free on the passed board.'''
return Board[move]== ' '
def getPlayerMove(Board,movesList):
'''Let the player type in their move.'''
while (True):
available= availableMoves(Board,movesList)
print "Available moves " + str(available)
move= raw_input('What is your next move? (1-9): ')
if move in '1 2 3 4 5 6 7 8 9'.split() and isSpaceFree(Board, int(move)):
return int(move)
else:
print 'Invalid Input.'
def availableMoves(Board,movesList):
''' returns a list of available moves'''
available=[]
for move in movesList:
if isSpaceFree(Board, move)== True:
available.append(move)
return available
#Board= [' ' , 'x', ' ', ' ', 'O' , ' ' , ' ', ' ', 'x' , 'O']
#movesList= [1,2,3,4,5,6,7,8,9]
#print availableMoves(Board, movesList)
def ChooseRandomMove(Board,movesList):
'''Computer chooses a move from the available moves randomly'''
available= availableMoves(Board,movesList)
if len(available)!= 0:
return random.choice(available) # computer's move
else:
return None
def getComputerMove(Board, computerLetter):
''' Given a board and the computer's letter, determine where
to move and return that move.
'''
if computerLetter== 'X':
playerLetter ='O'
else:
playerLetter = 'X'
for i in range(1,10):
dupBoard= getBoardCopy(Board)
if isSpaceFree(Board,i)== True:
makeMove(dupBoard, computerLetter, i)
if isWinner(dupBoard, computerLetter)== True:
return int(i)
# Check if the player could win on their next move, and block them.
for i in range(1,10):
dupBoard= getBoardCopy(Board)
if isSpaceFree(Board,i)== True:
makeMove(dupBoard, playerLetter, i)
if isWinner(dupBoard, playerLetter)==True:
return int(i)
# Try to take one of the corners, if they are free.
    move = ChooseRandomMove(Board, [1, 3, 7, 9])
    if move is not None:
        return int(move)
#Try to take the center, if it is free.
if isSpaceFree(Board,5):
return 5
#Try to take the sides, if they are free.
return ChooseRandomMove(Board,[2,4,6,8])
def isBoardFull(Board):
''' Return True if every space on the board has been taken.
Otherwise return False.
'''
for i in range(1,10):
if isSpaceFree(Board,i)==True:
return False
return True
def sampleBoard(Board):
'''Returns a sample of the board with position numbers'''
dupBoard=getBoardCopy(Board)
i= 1
while(i < 10):
dupBoard[i]= i
i += 1
showBoard(dupBoard)
#Start of the game
print('Welcome to Tic Tac Toe!')
while True:
#Reset the board
Board= [' ']*10
movesList= [1, 2, 3, 4, 5, 6, 7, 8, 9]
playerLetter, computerLetter= inputPlayerLetter()
turn= decide()
print 'The '+ str(turn) + ' will go first'
print 'This is what the board looks like -> '
sampleBoard(Board)
gameplaying= True
while gameplaying:
if turn == 'Player':
showBoard(Board)
move= getPlayerMove(Board, movesList)
makeMove(Board, playerLetter, move)
if isWinner(Board, playerLetter):
showBoard(Board)
print 'Wuhooooo! You have won the game! :D '
gameplaying= False
else:
if isBoardFull(Board):
showBoard(Board)
print 'The game is a tie!'
gameplaying= False
else:
turn = 'Computer'
else:
#computer's turn
move= getComputerMove(Board, computerLetter)
makeMove(Board, computerLetter, move)
if isWinner(Board, computerLetter):
showBoard(Board)
print 'Sorry! You lost the game! :('
gameplaying= False
else:
if isBoardFull(Board):
showBoard(Board)
print 'The game is a tie!'
gameplaying= False
else:
turn = 'Player'
again= playAgain()
if not again:
break
|
rrichajalota/The-Python-Project
|
Tic Tac Toe/tictactoe.py
|
Python
|
gpl-3.0
| 7,281
|
#!/home/dante/Projects/free-art/venv/bin/python3
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
DanteOnline/free-art
|
venv/bin/django-admin.py
|
Python
|
gpl-3.0
| 155
|
class Solution:
def productExceptSelf(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
        left, right = 1, 1
        num = len(nums)
        ans = [1 for i in range(num)]
        for i in range(num):
            # ans[i] accumulates the product of everything left of i;
            # ans[num-1-i] accumulates the product of everything right of it
            ans[i] *= left
            ans[num-1-i] *= right
            left *= nums[i]
            right *= nums[num-1-i]
return ans
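# Quick sanity check (added): each output entry is the product of all the
# other elements, e.g. for [1, 2, 3, 4]:
assert Solution().productExceptSelf([1, 2, 3, 4]) == [24, 12, 8, 6]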
|
YiqunPeng/Leetcode-pyq
|
solutions/238ProductOfArrayExceptSelf.py
|
Python
|
gpl-3.0
| 400
|
from __future__ import print_function, absolute_import
import time, math, numpy
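# Added note on the control flow below: when a Z-axis gyroscope is available,
# the robot turns under closed-loop control (capped angular speed, re-checking
# the angle after stopping); otherwise it falls back to an open-loop timed
# rotation at up to 20 deg/s.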
def turn(lbot, angle=0):
if lbot.getGyroscope("Z_AXIS") is not None:
lbot.resetGyroscope("Z_AXIS")
curAngle = lbot.getGyroscope("Z_AXIS")
while curAngle>2:
curAngle = lbot.getGyroscope("Z_AXIS")
curAngle = lbot.getGyroscope("Z_AXIS")
diff = math.fabs(angle)
s = numpy.sign(angle)
curVel = 0
keep_turning = True
while keep_turning:
while math.fabs(curAngle-angle)>1:
if diff>30:
angleVel = 30
else:
angleVel = diff
if angleVel < 10:
angleVel = 10
finalVel = (angleVel*s)
if curVel!=finalVel:
lbot.setBaseSpeed(0, finalVel)
curVel = finalVel
time.sleep(0.1)
curAngle = lbot.getGyroscope("Z_AXIS")
diff = math.fabs(curAngle-angle)
s = numpy.sign(angle-curAngle)
print("angle", curAngle)
lbot.setBaseSpeed(0, 0)
time.sleep(1)
curAngle = lbot.getGyroscope("Z_AXIS")
diff = math.fabs(curAngle-angle)
if diff>1:
s = numpy.sign(angle-curAngle)
curVel = 0
else:
keep_turning = False
print("angle after stopping", curAngle)
else:
angleVel = angle
if angle > 0:
if angle > 20:
angleVel = 20
else:
if angle < -20:
angleVel = -20
if angleVel!=0:
movingTime = angle/angleVel
lbot.setBaseSpeed(0, angleVel)
time.sleep(movingTime)
lbot.setBaseSpeed(0, 0)
|
robocomp/learnbot
|
learnbot_dsl/functions/motor/base/turn.py
|
Python
|
gpl-3.0
| 1,875
|
#!/usr/bin/python
'''
Copyright (C) 2015 Luxembourg Institute of Science and Technology
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
@author Matteo De Stefano [matteo.destefano@list.lu]
Abstract
========
Script to insert a catalogue record whenever a new dataset is registered.
The variables added to the pycsw repository are received from the create action
of dataset_controller. The XML template is populated using the jinja2 Template Class.
owslib used to manage the transaction with pycsw.
'''
import os
import sys
import datetime
from owslib.csw import CatalogueServiceWeb
from jinja2 import Template
try:
from lxml import etree
except ImportError:
import xml.etree.ElementTree as etree
pycsw_url = "http://meta.iguess.list.lu/"
scriptname, service, identifier, city_id, abstract, server_url, title = sys.argv
id = "meta-" + str(identifier)
csw = CatalogueServiceWeb(pycsw_url)
def serialize_metadata(**kwargs):
text = ""
with open(os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "iguess", "csw_template.xml")), "r") as r:
text = r.read()
        print r  # debug leftover: this prints the file object itself, not its contents
template = Template(text)
result = template.render(**kwargs)
return result
now = datetime.datetime.now()
organisation = "List"
language="eng"
a = serialize_metadata(id=id, abstract=abstract, title=title, datestamp=now, organisation=organisation, language=language)
csw.transaction(ttype='insert', typename='gmd:MD_Metadata', record=a)
print csw.results
|
ERIN-LIST/iguess
|
transaction_insert_register.py
|
Python
|
gpl-3.0
| 2,052
|
# coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import threading
import time
import traceback
import sickbeard
from sickbeard import common, failed_history, generic_queue, history, logger, search, ui
BACKLOG_SEARCH = 10
DAILY_SEARCH = 20
FAILED_SEARCH = 30
MANUAL_SEARCH = 40
MANUAL_SEARCH_HISTORY = []
MANUAL_SEARCH_HISTORY_SIZE = 100
class SearchQueue(generic_queue.GenericQueue):
def __init__(self):
generic_queue.GenericQueue.__init__(self)
self.queue_name = "SEARCHQUEUE"
def is_in_queue(self, show, segment):
for cur_item in self.queue:
if isinstance(cur_item, BacklogQueueItem) and cur_item.show == show and cur_item.segment == segment:
return True
return False
def is_ep_in_queue(self, segment):
for cur_item in self.queue:
if isinstance(cur_item, (ManualSearchQueueItem, FailedQueueItem)) and cur_item.segment == segment:
return True
return False
def is_show_in_queue(self, show):
for cur_item in self.queue:
if isinstance(cur_item, (ManualSearchQueueItem, FailedQueueItem)) and cur_item.show.indexerid == show:
return True
return False
def get_all_ep_from_queue(self, show):
ep_obj_list = []
for cur_item in self.queue:
if isinstance(cur_item, (ManualSearchQueueItem, FailedQueueItem)) and str(cur_item.show.indexerid) == show:
ep_obj_list.append(cur_item)
return ep_obj_list
def pause_backlog(self):
self.min_priority = generic_queue.QueuePriorities.HIGH
def unpause_backlog(self):
self.min_priority = 0
def is_backlog_paused(self):
# backlog priorities are NORMAL, this should be done properly somewhere
return self.min_priority >= generic_queue.QueuePriorities.NORMAL
def is_manualsearch_in_progress(self):
# Only referenced in webserve.py, only current running manualsearch or failedsearch is needed!!
if isinstance(self.currentItem, (ManualSearchQueueItem, FailedQueueItem)):
return True
return False
def is_backlog_in_progress(self):
for cur_item in self.queue + [self.currentItem]:
if isinstance(cur_item, BacklogQueueItem):
return True
return False
def is_dailysearch_in_progress(self):
for cur_item in self.queue + [self.currentItem]:
if isinstance(cur_item, DailySearchQueueItem):
return True
return False
def queue_length(self):
length = {'backlog': 0, 'daily': 0, 'manual': 0, 'failed': 0}
for cur_item in self.queue + [self.currentItem]:
if isinstance(cur_item, DailySearchQueueItem):
length['daily'] += 1
elif isinstance(cur_item, BacklogQueueItem):
length['backlog'] += 1
elif isinstance(cur_item, ManualSearchQueueItem):
length['manual'] += 1
elif isinstance(cur_item, FailedQueueItem):
length['failed'] += 1
return length
def add_item(self, item):
add_item = False
if isinstance(item, DailySearchQueueItem):
# daily searches
add_item = True
elif isinstance(item, BacklogQueueItem):
# backlog searches
add_item = not self.is_in_queue(item.show, item.segment)
elif isinstance(item, (ManualSearchQueueItem, FailedQueueItem)):
# manual and failed searches
add_item = not self.is_ep_in_queue(item.segment)
else:
logger.log(u"Not adding item, it's already in the queue", logger.DEBUG)
if add_item:
super(SearchQueue, self).add_item(item)
class DailySearchQueueItem(generic_queue.QueueItem):
def __init__(self):
self.success = None
generic_queue.QueueItem.__init__(self, u'Daily Search', DAILY_SEARCH)
def run(self):
super(DailySearchQueueItem, self).run()
try:
logger.log(u"Beginning daily search for new episodes")
found_results = search.searchForNeededEpisodes()
if not found_results:
logger.log(u"No needed episodes found")
else:
for result in found_results:
# just use the first result for now
logger.log(u"Downloading " + result.name + " from " + result.provider.name)
self.success = search.snatchEpisode(result)
# give the CPU a break
time.sleep(common.cpu_presets[sickbeard.CPU_PRESET])
except Exception:
logger.log(traceback.format_exc(), logger.DEBUG)
if self.success is None:
self.success = False
super(DailySearchQueueItem, self).finish()
self.finish()
class ManualSearchQueueItem(generic_queue.QueueItem):
def __init__(self, show, segment, downCurQuality=False):
generic_queue.QueueItem.__init__(self, u'Manual Search', MANUAL_SEARCH)
self.priority = generic_queue.QueuePriorities.HIGH
self.name = 'MANUAL-' + str(show.indexerid)
self.success = None
self.show = show
self.segment = segment
self.started = None
self.downCurQuality = downCurQuality
def run(self):
super(ManualSearchQueueItem, self).run()
try:
logger.log(u"Beginning manual search for: [" + self.segment.prettyName() + "]")
self.started = True
searchResult = search.searchProviders(self.show, [self.segment], True, self.downCurQuality)
if searchResult:
# just use the first result for now
logger.log(u"Downloading " + searchResult[0].name + " from " + searchResult[0].provider.name)
self.success = search.snatchEpisode(searchResult[0])
# give the CPU a break
time.sleep(common.cpu_presets[sickbeard.CPU_PRESET])
else:
ui.notifications.message('No downloads were found',
"Couldn't find a download for <i>{0}</i>".format(self.segment.prettyName()))
logger.log(u"Unable to find a download for: [" + self.segment.prettyName() + "]")
except Exception:
logger.log(traceback.format_exc(), logger.DEBUG)
# ## Keep a list with the 100 last executed searches
fifo(MANUAL_SEARCH_HISTORY, self, MANUAL_SEARCH_HISTORY_SIZE)
if self.success is None:
self.success = False
super(ManualSearchQueueItem, self).finish()
self.finish()
class BacklogQueueItem(generic_queue.QueueItem):
def __init__(self, show, segment):
generic_queue.QueueItem.__init__(self, u'Backlog', BACKLOG_SEARCH)
self.priority = generic_queue.QueuePriorities.LOW
self.name = 'BACKLOG-' + str(show.indexerid)
self.success = None
self.show = show
self.segment = segment
def run(self):
super(BacklogQueueItem, self).run()
if not self.show.paused:
try:
logger.log(u"Beginning backlog search for: [" + self.show.name + "]")
searchResult = search.searchProviders(self.show, self.segment, False)
if searchResult:
for result in searchResult:
# just use the first result for now
logger.log(u"Downloading " + result.name + " from " + result.provider.name)
search.snatchEpisode(result)
# give the CPU a break
time.sleep(common.cpu_presets[sickbeard.CPU_PRESET])
else:
logger.log(u"No needed episodes found during backlog search for: [" + self.show.name + "]")
except Exception:
logger.log(traceback.format_exc(), logger.DEBUG)
super(BacklogQueueItem, self).finish()
self.finish()
class FailedQueueItem(generic_queue.QueueItem):
def __init__(self, show, segment, downCurQuality=False):
generic_queue.QueueItem.__init__(self, u'Retry', FAILED_SEARCH)
self.priority = generic_queue.QueuePriorities.HIGH
self.name = 'RETRY-' + str(show.indexerid)
self.show = show
self.segment = segment
self.success = None
self.started = None
self.downCurQuality = downCurQuality
def run(self):
super(FailedQueueItem, self).run()
self.started = True
try:
for epObj in self.segment:
logger.log(u"Marking episode as bad: [" + epObj.prettyName() + "]")
failed_history.markFailed(epObj)
(release, provider) = failed_history.findRelease(epObj)
if release:
failed_history.logFailed(release)
history.logFailed(epObj, release, provider)
failed_history.revertEpisode(epObj)
logger.log(u"Beginning failed download search for: [" + epObj.prettyName() + "]")
                    # If it is wanted, self.downCurQuality doesn't matter;
                    # if it isn't wanted, make sure not to overwrite the existing ep that we reverted to!
searchResult = search.searchProviders(self.show, self.segment, True, False)
if searchResult:
for result in searchResult:
# just use the first result for now
logger.log(u"Downloading " + result.name + " from " + result.provider.name)
search.snatchEpisode(result)
# give the CPU a break
time.sleep(common.cpu_presets[sickbeard.CPU_PRESET])
else:
pass
# logger.log(u"No valid episode found to retry for: [" + self.segment.prettyName() + "]")
except Exception:
logger.log(traceback.format_exc(), logger.DEBUG)
# ## Keep a list with the 100 last executed searches
fifo(MANUAL_SEARCH_HISTORY, self, MANUAL_SEARCH_HISTORY_SIZE)
if self.success is None:
self.success = False
super(FailedQueueItem, self).finish()
self.finish()
def fifo(myList, item, maxSize=100):
if len(myList) >= maxSize:
myList.pop(0)
myList.append(item)
|
Maximilian-Reuter/SickRage-1
|
sickbeard/search_queue.py
|
Python
|
gpl-3.0
| 11,131
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2021 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Tests for qutebrowser.commands.runners."""
import pytest
from qutebrowser.misc import objects
from qutebrowser.commands import runners, cmdexc
class TestCommandParser:
def test_parse_all(self, cmdline_test):
"""Test parsing of commands.
See https://github.com/qutebrowser/qutebrowser/issues/615
Args:
cmdline_test: A pytest fixture which provides testcases.
"""
parser = runners.CommandParser()
if cmdline_test.valid:
parser.parse_all(cmdline_test.cmd, aliases=False)
else:
with pytest.raises(cmdexc.NoSuchCommandError):
parser.parse_all(cmdline_test.cmd, aliases=False)
def test_parse_all_with_alias(self, cmdline_test, monkeypatch,
config_stub):
if not cmdline_test.cmd:
pytest.skip("Empty command")
config_stub.val.aliases = {'alias_name': cmdline_test.cmd}
parser = runners.CommandParser()
if cmdline_test.valid:
assert len(parser.parse_all("alias_name")) > 0
else:
with pytest.raises(cmdexc.NoSuchCommandError):
parser.parse_all("alias_name")
@pytest.mark.parametrize('command', ['', ' '])
def test_parse_empty_with_alias(self, command):
"""An empty command should not crash.
See https://github.com/qutebrowser/qutebrowser/issues/1690
and https://github.com/qutebrowser/qutebrowser/issues/1773
"""
parser = runners.CommandParser()
with pytest.raises(cmdexc.NoSuchCommandError):
parser.parse_all(command)
class TestCompletions:
"""Tests for completions.use_best_match."""
@pytest.fixture(autouse=True)
def cmdutils_stub(self, monkeypatch, stubs):
"""Patch the cmdutils module to provide fake commands."""
monkeypatch.setattr(objects, 'commands', {
'one': stubs.FakeCommand(name='one'),
'two': stubs.FakeCommand(name='two'),
'two-foo': stubs.FakeCommand(name='two-foo'),
})
def test_partial_parsing(self, config_stub):
"""Test partial parsing with a runner where it's enabled.
The same with it being disabled is tested by test_parse_all.
"""
parser = runners.CommandParser(partial_match=True)
result = parser.parse('on')
assert result.cmd.name == 'one'
def test_dont_use_best_match(self, config_stub):
"""Test multiple completion options with use_best_match set to false.
Should raise NoSuchCommandError
"""
config_stub.val.completion.use_best_match = False
parser = runners.CommandParser(partial_match=True)
with pytest.raises(cmdexc.NoSuchCommandError):
parser.parse('tw')
def test_use_best_match(self, config_stub):
"""Test multiple completion options with use_best_match set to true.
The resulting command should be the best match
"""
config_stub.val.completion.use_best_match = True
parser = runners.CommandParser(partial_match=True)
result = parser.parse('tw')
assert result.cmd.name == 'two'
|
forkbong/qutebrowser
|
tests/unit/commands/test_runners.py
|
Python
|
gpl-3.0
| 3,986
|
import expyriment
from pydoc import getdoc
x = None
y = None
def _get_doc_and_function(obj):
rtn = []
for var in dir(obj):
if not var.startswith("_"):
rtn.append(var)
return getdoc(obj), rtn
def _read_module(mod, doc_dict):
doc_dict[mod.__name__], classes = _get_doc_and_function(mod)
for cl in classes:
cl = "{0}.{1}".format(mod.__name__, cl)
exec("x =" + cl)
doc_dict[cl], functions = _get_doc_and_function(x)
for fnc in functions:
fnc = "{0}.{1}".format(cl, fnc)
exec("y =" + fnc)
doc_dict[fnc], _tmp = _get_doc_and_function(y)
def search_doc(search_str, doc_dict):
for k in doc_dict.keys():
        # use != -1 so that matches at the very start (index 0) also count
        if k.lower().find(search_str.lower()) != -1 \
                or doc_dict[k].lower().find(search_str.lower()) != -1:
print("\n-------------------------------------------------------------------------------")
print("[ {0} ]\n".format(k))
print("{0}".format(doc_dict[k]))
doc_dict = {}
_read_module(expyriment.io, doc_dict)
_read_module(expyriment.stimuli, doc_dict)
_read_module(expyriment.design, doc_dict)
_read_module(expyriment.misc, doc_dict)
while True:
search = raw_input("New search (q=quit): ")
if search == "q":
break
else:
search_doc(search, doc_dict)
print("\n")
|
expyriment/expyriment
|
documentation/api/expyriment_doc_search.py
|
Python
|
gpl-3.0
| 1,367
|
# -*- coding: utf-8 -*-
import numpy as np
from pandas import Series, DataFrame
print("## Series index and default ordinal number index")
obj = Series(np.arange(4.0), index = ['a', 'b', 'c', 'd'])
print(obj)
print(obj['b'])
print(obj[3])
print(obj[[1, 3]])
print(obj[obj < 2])
print()
print("## Series slice:")
print(obj['b':'c']) ## closed interval
obj['b':'c'] = 5
print(obj)
print()
print("## DataFrame index:")
data = DataFrame(np.arange(16).reshape((4, 4)),
index = ['Ohio', 'Colorado', 'Utah', 'New York'],
columns = ['one', 'two', 'three', 'four'])
print(data)
print(data['two'])
print(data[['three', 'one']])
print(data[:2]) # a slice selects rows by position
# print(data[2]) # Error: a single value is treated as a column label, not a row
print(data[2:3])
print(data.ix['Colorado', ['two', 'three']])
print(data.ix[['Colorado', 'Utah'], [3, 0, 1]])
print(data.ix[2])
print(data.ix[:'Utah', 'two'])
print()
print("## Setect based on condition:")
print(data[data.three > 5])
print(data < 5)
data[data < 5] = 0
print(data)
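# Added note: DataFrame.ix (used above) was deprecated in pandas 0.20 and later
# removed; the equivalent label/position selections today use .loc and .iloc:
print(data.loc['Colorado', ['two', 'three']])
print(data.iloc[2])
print(data.loc[:'Utah', 'two'])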
|
lamontu/data-analysis
|
pandas/indexing_selection_and_filtering.py
|
Python
|
gpl-3.0
| 1,034
|
#!/usr/bin/env python3
import os, sys, signal
euid = os.geteuid()
if euid != 0:
print("Script not started as root. Running sudo..")
    # re-exec this script under sudo, passing argv through and os.environ as the env
    args = ['sudo', sys.executable] + sys.argv + [os.environ]
    os.execlpe('sudo', *args)
print('Running. Your euid is', euid)
sys.path.insert(0, './Network')
from NetworkMgr import Client
from ArpDetector import ArpDetector
from HostMgr import HostMgr
from NetworkScanner import NetworkScanner
from AttackProcess import AttackProcess
import nmap
class Main():
def __init__(self):
self.nm = nmap.PortScanner()
self.HostMgr = HostMgr(self)
self.HostMgr.start()
self.ArpDetector = ArpDetector(self)
self.ArpDetector.start()
self.Client = Client(self)
self.Client.start()
self.NetworkScanner = NetworkScanner(self)
self.NetworkScanner.start()
self.AttackProcess = AttackProcess(self)
self.AttackProcess.start()
signal.signal(signal.SIGINT, signal.SIG_DFL)
main = Main()
|
GadgeurX/NetworkLiberator
|
Daemon/main.py
|
Python
|
gpl-3.0
| 1,012
|
#!/usr/bin/env python3
import gi
gi.require_version('Gtk','3.0')
from gi.repository import Gtk
from gi.repository import GLib
import logging
import os
import configparser
import signal
class Co2MonitorGui():
def __init__(self):
# initially set the standard logger
self.set_logger(logging.getLogger(__name__))
# initially set an empty configuration
self.set_config(configparser.ConfigParser())
# set up the quit signals
self.setup_signals(
signals = [signal.SIGINT, signal.SIGTERM, signal.SIGHUP],
handler = self.quit
)
def setup_signals(self, signals, handler):
"""
This is a workaround to signal.signal(signal, handler)
which does not work with a GLib.MainLoop() for some reason.
Thanks to: http://stackoverflow.com/a/26457317/5433146
args:
signals (list of signal.SIG... signals): the signals to connect to
handler (function): function to be executed on these signals
"""
def install_glib_handler(sig): # add a unix signal handler
GLib.unix_signal_add( GLib.PRIORITY_HIGH,
sig, # for the given signal
handler, # on this signal, run this function
sig # with this argument
)
for sig in signals: # loop over all signals
GLib.idle_add( # 'execute'
install_glib_handler, sig, # add a handler for this signal
priority = GLib.PRIORITY_HIGH )
# build the gui
def load_builder(self):
# get a GTK builder
self.builder = Gtk.Builder()
# load the gladefile
self.builder.add_from_file(self.config.get('gui-general','gladefile'))
# set the config
def set_config(self, config):
self.config = config
# set the logger
def set_logger(self, logger):
self.logger = logger
# set up the gui
def setup_gui(self):
# load the builder
self.load_builder()
# define handlers
self.handlers = {
"CloseWindow": self.quit,
}
self.builder.connect_signals(self.handlers)
window = self.builder.get_object("window1")
window.show_all()
label = self.builder.get_object("label1")
label.set_text(_("Welcome to Co2monitor!"))
# run the gui
def run(self):
# can't use Gtk.main() because of a bug that prevents proper SIGINT
# handling. use Glib.MainLoop() directly instead.
self.mainloop = GLib.MainLoop() # main loop
# signal.signal(signal.SIGINT, signal.SIG_DFL)
self.logger.debug(_("Starting GLib main loop..."))
self.mainloop.run()
self.logger.debug(_("GLib main loop ended."))
# quit the gui
def quit(self, *args):
self.logger.debug(_("Received quitting signal."))
self.mainloop.quit()
|
nobodyinperson/co2monitor
|
usr/lib/co2monitor/python/co2monitor/gui.py
|
Python
|
gpl-3.0
| 2,936
|
"""Define PVs, constants and properties of LIEmitMeas IOC."""
from .. import csdev as _csdev
# --- Enumeration Types ---
class ETypes(_csdev.ETypes):
"""Local enumerate types."""
MEASURESTATE = ('Stopped', 'Measuring')
PLANE = ('Y', 'X')
_et = ETypes # syntactic sugar
# --- Const class ---
class Const(_csdev.Const):
"""Const class defining constants."""
DEFAULT_PROFILE = 'LA-BI:PRF5'
MeasureState = _csdev.Const.register('MeasureState', _et.MEASURESTATE)
Plane = _csdev.Const.register('Plane', _et.PLANE)
@classmethod
def get_database(cls, prefix=''):
"""Return IOC database."""
dbase = {
'MeasureCtrl-Sel': {
'type': 'enum', 'value': cls.MeasureState.Stopped,
'enums': cls.MeasureState._fields},
'MeasureCtrl-Sts': {
'type': 'enum', 'value': cls.MeasureState.Stopped,
'enums': cls.MeasureState._fields},
}
for val in dbase.values():
low = val.get('lolim', None)
hig = val.get('hilim', None)
if low is not None:
val['lolo'] = low
val['low'] = low
if hig is not None:
val['hihi'] = hig
val['high'] = hig
if prefix:
dbase = {prefix + k: v for k, v in dbase.items()}
dbase.update(_csdev.Const.get_database(prefix=prefix))
return dbase
|
lnls-sirius/dev-packages
|
siriuspy/siriuspy/meas/liemit/csdev.py
|
Python
|
gpl-3.0
| 1,455
|
#!/usr/bin/env python
# Fetch and build keywords from the "subject" field of federal register data
from datetime import date, timedelta
from glob import glob
import itertools
import logging
import re
import os
import string
import requests
from requests_cache.core import CachedSession
import yaml
FR_BASE = "https://www.federalregister.gov"
API_BASE = FR_BASE + "/api/v1/"
FR_ARTICLES = API_BASE + "articles"
def fetch_page(year, month, page_num, client=requests):
"""Download a single page of 1000 results; return the results dict"""
# Don't use a dict as we need the same order with each request (for
# caching)
params = [
("conditions[publication_date][gte]", "%d-%02d-01" % (year, month)),
("conditions[publication_date][lte]",
"%d-%02d-%02d" % (year, month, last_day_in_month(year, month))),
("fields[]", ["agency_names", "topics"]),
("order", "oldest"),
("page", page_num),
("per_page", 1000),
]
result = client.get(FR_ARTICLES, params=params)
if result.status_code != 200:
logging.warning("Received %s on %s-%s (%s)", result.status_code, year,
month, page_num)
return {'results': []}
try:
return result.json()
except ValueError:
logging.warning("Error converting to json on %s-%s (%s)",
year, month, page_num)
return {'results': []}
def results_from_month(year, month, client=requests):
"""Download a month of documents and emit any agency-topic pairs via a
generator"""
page_num = 1
finished = False
while not finished:
finished = True
results = fetch_page(year, month, page_num, client)
for result in results['results']:
agencies = result['agency_names'] or []
topics = result['topics'] or []
for pair in itertools.product(agencies, topics):
yield pair
if 'next_page_url' in results:
finished = False
page_num += 1
def normalize_name(name):
"""The agency names used in the federal register don't always match those
in the FOIA data. Uppercase everything and strip off any references to the
US"""
name = name.split(' - ')[0]
name = name.upper().strip()
name = "".join(filter(lambda x: x in (string.ascii_uppercase + " "), name))
replacements = (('CENTERS', 'CENTER'), ('SERVICES', 'SERVICE'))
remove = ('UNITED STATES', 'DEPARTMENT', 'OFFICE', 'COMMISSION', 'BUREAU',
'BOARD', 'AGENCY', 'ADMINISTRATION', 'SERVICE', 'FEDERAL', 'US',
'AND', 'OF', 'THE', 'FOR', 'ON', 'CFR')
for old, new in replacements:
name = re.sub(r'\b' + old + r'\b', new, name)
for old in remove:
name = re.sub(r'\b' + old + r'\b', ' ', name)
while ' ' in name:
name = name.replace(' ', ' ')
return name.strip()
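# Illustrative outcomes of the rules above (added note):
#   normalize_name("Department of the Interior")               -> "INTERIOR"
#   normalize_name("Centers for Medicare & Medicaid Services") -> "CENTER MEDICARE MEDICAID"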
def normalize_and_map(keywords):
"""Maps old dictionary to dictionary with new keys without loosing
keys in the process """
new_dictionary = dict()
for key in keywords.keys():
normal_key = normalize_name(key)
new_dictionary[normal_key] = \
keywords.get(key, []) | set(new_dictionary.get(normal_key, []))
return new_dictionary
def add_results(results, keywords):
"""Add entries found in the results to the dictionary of keywords"""
for result in results['results']:
agencies = result['agency_names'] or []
topics = result['topics'] or []
for agency, topic in itertools.product(agencies, topics):
if agency not in keywords:
keywords[agency] = set()
keywords[agency].add(topic)
def subtract_month(cursor):
"""Timedeltas don't encompass months, so just subtract a day until we hit
the previous month"""
original_month = cursor.month
while cursor.month == original_month:
cursor = cursor - timedelta(days=1)
return cursor
def last_day_in_month(year, month):
"""Find the last day in this cursor's month"""
cursor = date(year, month, 28)
while cursor.month == month:
cursor = cursor + timedelta(days=1)
cursor = cursor - timedelta(days=1)
return cursor.day
def build_keywords():
"""Hit page after page of FR search results (if not cached). Return a
dictionary of agency-name mapped to the set of applicable topics."""
keywords = {}
today = date.today()
# this_year, this_month = today.year, today.month
# Do not cache this month as it'll change with each run
# for agency, topic in results_from_month(this_year, this_month):
# if agency not in keywords:
# keywords[agency] = set()
# keywords[agency].add(topic)
# Now, step back until 1999 - there are no topics before 2000
client = CachedSession('fr')
cursor = subtract_month(today)
while cursor.year > 1999:
num_distinct = sum(len(words) for words in keywords.values())
logging.info("Processing %d-%02d. Num distinct keywords: %d",
cursor.year, cursor.month, num_distinct)
for agency, topic in results_from_month(
cursor.year, cursor.month, client):
if agency not in keywords:
keywords[agency] = set()
keywords[agency].add(topic)
cursor = subtract_month(cursor)
return keywords
def new_keywords(agency_data, fr_keywords):
"""Return the number of new keywords and the (potentially modified) agency
data"""
name = normalize_name(agency_data['name'])
if name in fr_keywords:
original_keywords = set(agency_data.get('keywords', []))
keywords = original_keywords | fr_keywords[name]
return len(keywords), dict(agency_data,
keywords=list(sorted(keywords)))
return 0, agency_data
def patch_yaml():
"""Go through the YAML files; for all agencies, check if we have some new
keywords based on FR data. If so, update the YAML"""
fr_keywords = normalize_and_map(build_keywords())
for filename in glob("data" + os.sep + "*.yaml"):
num_new_keywords = 0
with open(filename) as f:
yaml_data = yaml.load(f.read())
# First, check if keywords need to be added to the root
num_new, modified = new_keywords(yaml_data, fr_keywords)
if num_new:
del fr_keywords[normalize_name(yaml_data['name'])]
yaml_data = modified
num_new_keywords += num_new
# Next, check the children
departments = []
for yaml_office in yaml_data['departments']:
num_new, modified = new_keywords(yaml_office, fr_keywords)
if num_new:
del fr_keywords[normalize_name(yaml_office['name'])]
departments.append(modified)
num_new_keywords += num_new
else:
departments.append(yaml_office)
if num_new_keywords:
yaml_data = dict(yaml_data, departments=departments)
with open(filename, 'w') as f:
f.write(yaml.dump(yaml_data, default_flow_style=False,
allow_unicode=True))
logging.info('Rewrote %s with %d new keywords', filename,
num_new_keywords)
for name in fr_keywords:
logging.warning('Could not find this agency: %s', name)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
patch_yaml()
|
sunlightlabs/foia-data
|
new/new-more/foia-master/contacts/keywords_from_fr.py
|
Python
|
gpl-3.0
| 7,540
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 4 11:23:34 2017
@author: shiweisun
"""
from .datafile import NC
class OUT(NC):
    def __init__(self, fpath):
        super(OUT, self).__init__(fpath)
    def getvar(self, vrbl, ztype=None, zlevels=None):
        '''
        Get a variable, optionally interpolated onto the vertical levels
        given by ztype/zlevels.
        '''
        from .originvar import originvar
        temp = originvar(self, vrbl)
        if ztype is None:
            var = temp
        else:
            from .interpvar import interp_z
            var = interp_z(self, temp, ztype, zlevels)
        return var
def open(fpath):
'''
    Open the file and return an OUT instance (a subclass of NC)
'''
return OUT(fpath)
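# Hedged usage sketch (not part of the original module; the file name and the
# 'p'/zlevels arguments are assumptions about what interp_z accepts):
#   out = open('wrfout_d01.nc')                        # returns an OUT instance
#   t2 = out.getvar('T2')                              # raw model variable
#   u850 = out.getvar('U', ztype='p', zlevels=[850])   # vertically interpolated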
|
weinihou/pyWRFPost
|
getvar/main.py
|
Python
|
gpl-3.0
| 744
|
#!/usr/bin/env python
'''Pixbufs
A GdkPixbuf represents an image, normally in RGB or RGBA format.
Pixbufs are normally used to load files from disk and perform image scaling.
This demo is not all that educational, but looks cool. It was written by
Extreme Pixbuf Hacker Federico Mena Quintero. It also shows off how to use
GtkDrawingArea to do a simple animation.
Look at the Image demo for additional pixbuf usage examples.'''
# pygtk version: Maik Hertha <maik.hertha@berlin.de>
import os
import math
import gobject
import gtk
FRAME_DELAY = 50
CYCLE_LEN = 60
IMAGE_DIR = os.path.join(os.path.dirname(__file__), 'images')
BACKGROUND_NAME = "background.jpg"
image_names = [
"apple-red.png",
"gnome-applets.png",
"gnome-calendar.png",
"gnome-foot.png",
"gnome-gmush.png",
"gnome-gimp.png",
"gnome-gsame.png",
"gnu-keys.png"
]
class PixbufsDemo(gtk.Window):
frame = None # frame of the background image
background = None # background-pixbuf
images = [] # list of pixbufs
back_width = 0 # width of background image
back_height = 0 # height of background image
    frame_num = 0         # number of the current frame
    timeout_id = None     # timeout id
def __init__(self, parent=None):
gtk.Window.__init__(self)
try:
self.set_screen(parent.get_screen())
except AttributeError:
self.connect("destroy", lambda *w: gtk.main_quit())
self.connect("destroy", self.cleanup_callback)
self.set_title(self.__class__.__name__)
self.set_resizable(False)
if not self.load_pixbufs():
dialog = gtk.MessageDialog(self,
gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_ERROR,
gtk.BUTTONS_CLOSE,
"Failed to load an image")
dialog.connect("response", lambda d, r: d.destroy())
dialog.show()
else:
self.set_size_request(self.back_width, self.back_height)
self.frame = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8,
self.back_width, self.back_height)
da = gtk.DrawingArea()
da.connect("expose_event", self.expose_cb)
self.add(da)
self.timeout_id = gobject.timeout_add(FRAME_DELAY, self.timeout)
self.show_all()
def load_pixbufs(self):
''' Loads the images for the demo and returns whether the
operation succeeded.
'''
if self.background is not None:
return True # already loaded earlier
        # look in the current directory where the file is installed
try:
self.background = gtk.gdk.pixbuf_new_from_file(
os.path.join(IMAGE_DIR, BACKGROUND_NAME))
except gobject.GError, error:
return False
self.back_width = self.background.get_width()
self.back_height = self.background.get_height()
for filename in image_names:
try:
self.images.append(gtk.gdk.pixbuf_new_from_file(
os.path.join(IMAGE_DIR, filename)))
except gobject.GError, error:
return False
return True
def expose_cb(self, draw_area, event):
''' Expose callback for the drawing area. '''
rowstride = self.frame.get_rowstride()
        # FIXME: what should the result be: a string, a guchar buffer, or an integer offset?
#pixels = frame.get_pixels() + rowstride * event.area.y + event.area.x * 3
#pixels = frame.get_pixels()[len(frame.get_pixels()) + rowstride * event.area.y + event.area.x * 3]
pixels = self.frame.get_pixels()
draw_area.window.draw_rgb_image(
draw_area.style.black_gc,
event.area.x, event.area.y,
event.area.width, event.area.height,
'normal',
pixels, rowstride,
event.area.x, event.area.y)
return True
def cleanup_callback(self, win):
if self.timeout_id is not None:
gobject.source_remove(self.timeout_id)
self.timeout_id = None
def timeout(self):
''' Timeout handler to regenerate the frame. '''
self.background.copy_area(0, 0, self.back_width, self.back_height,
self.frame, 0, 0)
f = float(self.frame_num % CYCLE_LEN) / float(CYCLE_LEN)
xmid = self.back_width / 2.0
ymid = self.back_height / 2.0
radius = min(xmid, ymid) / 2.0
N_IMAGES = len(image_names)
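        # Descriptive note (added): each image orbits the window centre; its
        # base angle is spaced evenly around the circle (2*pi*i/N_IMAGES) and
        # rotates with the cycle fraction f, the orbit radius breathes with
        # sin(f*2*pi), and the scale k and alpha pulse on alternating cos/sin.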
        for i in range(N_IMAGES):
ang = 2.0 * math.pi * i / N_IMAGES - f * 2.0 * math.pi
iw = self.images[i].get_width()
ih = self.images[i].get_height()
r = radius +(radius / 3.0) * math.sin(f * 2.0 * math.pi)
xpos = math.floor(xmid + r * math.cos(ang) - iw / 2.0 + 0.5)
ypos = math.floor(ymid + r * math.sin(ang) - ih / 2.0 + 0.5)
if i % 2 == 0:
k = math.cos(f * 2.0 * math.pi)
else:
k = math.sin(f * 2.0 * math.pi)
k = 2.0 * k * k
k = max(0.25, k)
# satisfy the c-source
r1 = gtk.gdk.Rectangle()
r1.x = int(xpos)
r1.y = int(ypos)
            r1.width = int(iw * k)
            r1.height = int(ih * k)
r2 = gtk.gdk.Rectangle()
r2.x = 0
r2.y = 0
r2.width = self.back_width
r2.height = self.back_height
dest = r1.intersect(r2)
            # intersect() returns a zero-sized rectangle (not None) when there is no overlap
            if dest.width > 0 and dest.height > 0:
if i % 2 == 0:
alpha = int(
max(127, math.fabs(255 * math.cos(f * 2.0 * math.pi))))
else:
alpha = int(
max(127, math.fabs(255 * math.sin(f * 2.0 * math.pi))))
self.images[i].composite(
self.frame,
dest.x, dest.y,
dest.width, dest.height,
xpos, ypos,
k, k,
gtk.gdk.INTERP_NEAREST,
alpha)
        self.queue_draw()
self.frame_num += 1
return True
def main():
PixbufsDemo()
gtk.main()
if __name__ == '__main__':
main()
|
chriskmanx/qmole
|
QMOLEDEV/pygtk-2.16.0/examples/pygtk-demo/demos/pixbufs.py
|
Python
|
gpl-3.0
| 6,410
|
# -*- coding: utf-8 -*-
"""
Code relating to controlling 200^2, BiStim^2, and Rapid^2 Magstim TMS units
@author: Nicolas McNair
"""
from __future__ import division
import serial
from sys import version_info, platform
from os.path import realpath, join, dirname
from os import getcwd
from math import floor
from time import sleep
from multiprocessing import Queue, Process
from functools import partial
from yaml import load
from ast import literal_eval
# Switch timer based on python version and platform
if version_info >= (3,3):
    # In Python 3.3+, use the high-resolution performance counter
from time import perf_counter
defaultTimer = perf_counter
else:
if platform == 'win32':
# On Windows, use time.clock
from time import clock
defaultTimer = clock
else:
# On other platforms use time.time
from time import time
defaultTimer = time
# Calculate checksum for command
if version_info >= (3,):
def calcCRC(command):
"""Return the CRC checksum for the command string."""
# Convert command string to sum of ASCII/byte values
commandSum = sum(command)
# Convert command sum to binary, then invert and return 8-bit character value
return bytearray(chr(~commandSum & 0xff),encoding='latin_1')
else:
def calcCRC(command):
"""Return the CRC checksum for the command string."""
# Convert command string to sum of ASCII/byte values
commandSum = sum(command)
# Convert command sum to binary, then invert and return 8-bit character value
return chr(~commandSum & 0xff)
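# Worked example (added for illustration): for the command b'Q@' the byte sum
# is 81 + 64 = 145, and ~145 & 0xff = 110, i.e. the character 'n', which is
# why the precomputed connection command further below is b'Q@n'.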
class MagstimError(Exception):
pass
class serialPortController(Process):
"""
The class creates a Python process which has direct control of the serial port. Commands for relaying via the serial port are received from separate Python processes via Queues.
N.B. To start the process you must call start() from the parent Python process.
Args:
serialWriteQueue (multiprocessing.Queue): a Queue for receiving commands to be written to the Magstim unit via the serial port
serialReadQueue (multiprocessing.Queue): a Queue for returning automated replies from the Magstim unit when requested
"""
# Error codes
SERIAL_WRITE_ERR = (1, 'SERIAL_WRITE_ERR: Could not send the command.')
SERIAL_READ_ERR = (2, 'SERIAL_READ_ERR: Could not read the magstim response.')
def __init__(self, serialConnection, serialWriteQueue, serialReadQueue):
Process.__init__(self)
self._serialWriteQueue = serialWriteQueue
self._serialReadQueue = serialReadQueue
self._address = serialConnection
def run(self):
"""
Continuously monitor the serialWriteQueue for commands from other Python processes to be sent to the Magstim.
When requested, will return the automated reply from the Magstim unit to the calling process via the serialReadQueue.
N.B. This should be called via start() from the parent Python process.
"""
# N.B. most of these settings are actually the default in PySerial, but just being careful.
self._port = serial.Serial(port=self._address,
baudrate=9600,
bytesize=serial.EIGHTBITS,
stopbits=serial.STOPBITS_ONE,
parity=serial.PARITY_NONE,
xonxoff=False)
# Make sure the RTS pin is set to off
self._port.setRTS(False)
# Set up version compatibility
if int(serial.VERSION.split('.')[0]) >= 3:
self._port.write_timeout = 0.3
self._port.portFlush = self._port.reset_input_buffer
self._port.anyWaiting = lambda:self._port.in_waiting
else:
self._port.writeTimeout = 0.3
self._port.portFlush = self._port.flushInput
self._port.anyWaiting = self._port.inWaiting
# This continually monitors the serialWriteQueue for write requests
while True:
message, reply, readBytes = self._serialWriteQueue.get()
try:
# If the first part of the message is None this signals the process to close the port and stop
if message is None:
break
# If the first part of the message is a 1 this signals the process to trigger a quick fire using the RTS pin
elif message == 1:
self._port.setRTS(True)
# If the first part of the message is a -1 this signals the process to reset the RTS pin
elif message == -1:
self._port.setRTS(False)
# Otherwise, the message is a command string
else:
# There shouldn't be any rubbish in the input buffer, but check and clear it just in case
if self._port.anyWaiting():
self._port.portFlush()
try:
# Try writing to the port
self._port.write(message)
# Read response (this gets a little confusing, as I don't want to rely on timeout to know if there's an error)
try:
# Read the first byte
message = bytearray(self._port.read(1))
# If the first returned byte is a 'N', we need to read the version number in one byte at a time to catch the string terminator.
if message == b'N':
while message[-1] > 0:
message += self._port.read(1)
# After the end of the version number, read one more byte to grab the CRC
message += self._port.read(1)
# If the first byte is not '?', then the message was understood so carry on reading in the response (if it was a '?', then this will be the only returned byte).
elif message != b'?':
# Read the second byte
message += self._port.read(1)
# If the second returned byte is a '?' or 'S', then the data value supplied either wasn't acceptable ('?') or the command conflicted with the current settings ('S'),
# In these cases, just grab the CRC - otherwise, everything is ok so carry on reading the rest of the message
message += self._port.read(readBytes - 2) if message[-1] not in {83, 63} else self._port.read(1)
# Return the reply if we want it
if reply:
self._serialReadQueue.put([0, message])
except Exception: #serial.SerialException:
self._serialReadQueue.put(serialPortController.SERIAL_READ_ERR)
except Exception: #serial.SerialException:
self._serialReadQueue.put(serialPortController.SERIAL_WRITE_ERR)
except IOError:
break
#If we get here, it's time to shutdown the serial port controller
self._port.close()
return
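# Illustrative note (not in the original module): items placed on the write
# queue are (message, replyType, readBytes) triples, e.g. the keep-alive
# command defined elsewhere in this module:
#   serialWriteQueue.put((b'Q@n', None, 3))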
class connectionRobot(Process):
"""
The class creates a Python process which sends an 'enable remote control' command to the Magstim via the serialPortController process every 500ms.
N.B. To start the process you must call start() from the parent Python process.
Args:
serialWriteQueue (multiprocessing.Queue): a Queue for sending commands to be written to the Magstim unit via the serialPortController process
        updateRobotQueue (multiprocessing.Queue): a Queue for receiving requests from the parent Python process to delay sending its next command
"""
def __init__(self, serialWriteQueue, updateRobotQueue):
Process.__init__(self)
self._serialWriteQueue = serialWriteQueue
self._updateRobotQueue = updateRobotQueue
self._stopped = False
self._paused = True
self._nextPokeTime = None
self._connectionCommand = None
def _setCommand(self, connectionCommand):
self._connectionCommand = connectionCommand
def run(self):
"""
        Continuously send commands to the serialPortController process at regular intervals, while also monitoring the updateRobotQueue for commands from the parent Python process if this should be delayed, paused, or stopped.
N.B. This should be called via start() from the parent Python process.
"""
        # This sends an "enable remote control" command to the serial port controller every 500 ms (if armed) or 5000 ms (if disarmed)
        pokeLatency = 5
while True:
# If the robot is currently paused, wait until we get a None (stop) or a non-negative number (start/resume) in the queue
while self._paused:
message = self._updateRobotQueue.get()
if message is None:
self._stopped = True
self._paused = False
elif message >= 0:
# If message is a 2, that means we've just armed so speed up the poke latency (not sure that's possible while paused, but just in case)
if message == 2:
pokeLatency = 0.5
# If message is a 1, that means we've just disarmed so slow down the poke latency
elif message == 1:
pokeLatency = 5
self._paused = False
# Check if we're stopping the robot
if self._stopped:
break
# Update next poll time to the next poke latency
self._nextPokeTime = defaultTimer() + pokeLatency
# While waiting for next poll...
while defaultTimer() < self._nextPokeTime:
                # ...check to see if there has been an update sent from the parent magstim object
if not self._updateRobotQueue.empty():
message = self._updateRobotQueue.get()
# If the message is None this signals the process to stop
if message is None:
self._stopped = True
# If the message is -1, we've relinquished remote control so signal the process to pause
elif message == -1:
pokeLatency = 5
self._paused = True
# Any other message signals a command has been sent to the serial port controller
else:
# If message is a 2, that means we've just armed so speed up the poke latency (not sure that's possible while paused, but just in case)
if message == 2:
pokeLatency = 0.5
# If message is a 1, that means we've just disarmed so slow down the poke latency
elif message == 1:
pokeLatency = 5
self._nextPokeTime = defaultTimer() + pokeLatency
break
# If we made it all the way to the next poll time, send a poll to the port controller
else:
self._serialWriteQueue.put(self._connectionCommand)
# If we get here, it's time to shutdown the robot
return
class Magstim(object):
"""
The base Magstim class. This is used for controlling 200^2 Magstim units, and acts as a parent class for the BiStim^2 and Rapid^2 sub-classes.
It also creates two additional Python processes; one for the purposes of directly controlling the serial port and another for maintaining constant contact with the Magstim.
N.B. This class can effect limited control over BiStim^2 and Rapid^2 units, however some functionality will not be able to be accessed and return values (including confirmation of commands) may be invalid.
To begin sending commands to the Magstim, and start the additional Python processes, you must first call connect().
Args:
serialConnection (str): The address of the serial port. On Windows this is typically 'COM1' or similar. To create a virtual magstim, set the address to 'virtual'
"""
# Hardware error codes (for all types of stimulators)
INVALID_COMMAND_ERR = (3, 'INVALID_COMMAND_ERR: Invalid command sent.')
INVALID_DATA_ERR = (4, 'INVALID_DATA_ERR: Invalid data provided.')
COMMAND_CONFLICT_ERR = (5, 'COMMAND_CONFLICT_ERR: Command conflicts with current system configuration.')
INVALID_CONFIRMATION_ERR = (6, 'INVALID_CONFIRMATION_ERR: Unexpected command confirmation received.')
CRC_MISMATCH_ERR = (7, 'CRC_MISMATCH_ERR: Message contents and CRC value do not match.')
NO_REMOTE_CONTROL_ERR = (8, 'NO_REMOTE_CONTROL_ERR: You have not established control of the Magstim unit.')
PARAMETER_ACQUISTION_ERR = (9, 'PARAMETER_ACQUISTION_ERR: Could not obtain prior parameter settings.')
PARAMETER_UPDATE_ERR = (10, 'PARAMETER_UPDATE_ERR: Could not update secondary parameter to accommodate primary parameter change.')
PARAMETER_FLOAT_ERR = (11, 'PARAMETER_FLOAT_ERR: A float value is not allowed for this parameter.')
    PARAMETER_PRECISION_ERR = (12, 'PARAMETER_PRECISION_ERR: Only one decimal place allowed for this parameter.')
PARAMETER_RANGE_ERR = (13, 'PARAMETER_RANGE_ERR: Parameter value is outside the allowed range.')
GET_SYSTEM_STATUS_ERR = (14, 'GET_SYSTEM_STATUS_ERR: Cannot call getSystemStatus() until software version has been established.')
SYSTEM_STATUS_VERSION_ERR = (15, 'SYSTEM_STATUS_VERSION_ERR: Method getSystemStatus() is not compatible with your software version.')
SEQUENCE_VALIDATION_ERR = (16, 'SEQUENCE_VALIDATION_ERR: You must call validateSequence() before you can run a rTMS train.')
MIN_WAIT_TIME_ERR = (17, 'MIN_WAIT_TIME_ERR: Minimum wait time between trains violated. Call isReadyToFire() to check.')
MAX_ON_TIME_ERR = (18, 'MAX_ON_TIME_ERR: Maximum on time exceeded for current train.')
@staticmethod
def parseMagstimResponse(responseString, responseType):
"""Interprets responses sent from the Magstim unit."""
if responseType == 'version':
#magstimResponse = tuple(int(x) for x in bytes(responseString[1:-1]).strip().split(b'.') if x.isdigit())
magstimResponse = tuple(int(x) for x in ''.join([chr(x) for x in responseString[1:-1]]).strip().split('.') if x.isdigit())
else:
# Get ASCII code of first data character
temp = responseString.pop(0)
# Interpret bits
magstimResponse = {'instr':{'standby': temp & 1,
'armed': (temp >> 1) & 1,
'ready': (temp >> 2) & 1,
'coilPresent': (temp >> 3) & 1,
'replaceCoil': (temp >> 4) & 1,
'errorPresent': (temp >> 5) & 1,
'errorType': (temp >> 6) & 1,
'remoteStatus': (temp >> 7) & 1}}
# If a Rapid system and response includes rTMS status
if responseType in {'instrRapid','rapidParam','systemRapid'}:
# Get ASCII code of second data character
temp = responseString.pop(0)
# Interpret bits
magstimResponse['rapid'] = {'enhancedPowerMode': temp & 1,
'train': (temp >> 1) & 1,
'wait': (temp >> 2) & 1,
'singlePulseMode': (temp >> 3) & 1,
'hvpsuConnected': (temp >> 4) & 1,
'coilReady': (temp >> 5) & 1,
'thetaPSUDetected': (temp >> 6) & 1,
'modifiedCoilAlgorithm': (temp >> 7) & 1}
# If requesting parameter settings or coil temperature
if responseType == 'bistimParam':
magstimResponse['bistimParam'] = {'powerA': int(''.join(chr(x) for x in responseString[0:3])),
'powerB': int(''.join(chr(x) for x in responseString[3:6])),
'ppOffset': int(''.join(chr(x) for x in responseString[6:9]))}
elif responseType == 'magstimParam':
magstimResponse['magstimParam'] = {'power': int(''.join(chr(x) for x in responseString[:3]))}
        elif responseType == 'rapidParam':
# This is a bit of a hack to determine which software version we're dealing with
if len(responseString) == 20:
magstimResponse['rapidParam'] = {'power': int(''.join(chr(x) for x in responseString[0:3])),
'frequency': int(''.join(chr(x) for x in responseString[3:7])) / 10.0,
'nPulses': int(''.join(chr(x) for x in responseString[7:12])),
'duration': int(''.join(chr(x) for x in responseString[12:16])) / 10.0,
'wait': int(''.join(chr(x) for x in responseString[16:])) / 10.0}
else:
magstimResponse['rapidParam'] = {'power': int(''.join(chr(x) for x in responseString[0:3])),
'frequency': int(''.join(chr(x) for x in responseString[3:7])) / 10.0,
'nPulses': int(''.join(chr(x) for x in responseString[7:11])),
'duration': int(''.join(chr(x) for x in responseString[11:14])) / 10.0,
'wait': int(''.join(chr(x) for x in responseString[14:])) / 10.0}
elif responseType == 'magstimTemp':
magstimResponse['magstimTemp'] = {'coil1Temp': int(''.join(chr(x) for x in responseString[0:3])) / 10.0,
'coil2Temp': int(''.join(chr(x) for x in responseString[3:6])) / 10.0}
elif responseType == 'systemRapid':
temp = responseString.pop(0)
magstimResponse['extInstr'] = {'plus1ModuleDetected': temp & 1,
'specialTriggerModeActive': (temp >> 1) & 1,
'chargeDelaySet': (temp >> 2) & 1}
elif responseType == 'error':
magstimResponse['currentErrorCode'] = ''.join(chr(x) for x in responseString[:-1])
elif responseType == 'instrCharge':
magstimResponse['chargeDelay'] = int(''.join(chr(x) for x in responseString))
return magstimResponse
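    # Worked example (added for illustration): an 'instr' status byte of
    # 0b00000101 (decimal 5) parses as standby=1, armed=0, ready=1, with the
    # remaining flags 0, since each field is a single bit masked out above.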
def __init__(self, serialConnection):
self._sendQueue = Queue()
self._receiveQueue = Queue()
self._setupSerialPort(serialConnection)
self._robotQueue = Queue()
self._connection.daemon = True
self._robot = connectionRobot(self._sendQueue, self._robotQueue)
self._robot.daemon = True
self._connected = False
self._connectionCommand = (b'Q@n', None, 3)
self._queryCommand = partial(self.remoteControl, enable=True, receipt=True)
def _setupSerialPort(self, serialConnection):
if serialConnection.lower() == 'virtual':
            from ._virtual import virtualPortController
self._connection = virtualPortController(self.__class__.__name__,self._sendQueue,self._receiveQueue)
else:
self._connection = serialPortController(serialConnection, self._sendQueue, self._receiveQueue)
def connect(self):
"""
Connect to the Magstim.
This starts the serial port controller, as well as a process that constantly keeps in contact with the Magstim so as not to lose control.
"""
if not self._connected:
self._connection.start()
if not self.remoteControl(enable=True, receipt=True)[0]:
self._connected = True
self._robot._setCommand(self._connectionCommand)
self._robot.start()
else:
self._sendQueue.put((None, None, None))
if self._connection.is_alive():
self._connection.join()
raise MagstimError('Could not establish remote control over the Magstim.')
def disconnect(self):
"""
Disconnect from the Magstim.
This stops maintaining contact with the Magstim and turns the serial port controller off.
"""
if self._connected:
self.disarm()
self._robotQueue.put(-1)
self.remoteControl(enable=False, receipt=True)
self._robotQueue.put(None)
if self._robot.is_alive():
self._robot.join(timeout=2.0)
self._sendQueue.put((None, None, None))
if self._connection.is_alive():
self._connection.join(timeout=2.0)
self._connected = False
def _processCommand(self, commandString, receiptType, readBytes):
"""
Process Magstim command.
Args:
commandString (str): command and data characters making up the command string (N.B. do not include CRC character)
            receiptType (str): the type of reply expected from the Magstim, used to parse the response (None if no reply is requested)
readBytes (int): number of bytes in the response
Returns:
If receiptType argument is not None:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing one or more Magstim parameter dicts, otherwise returns an error string
If receiptType argument is None:
None
"""
# Unify Python 2 and 3 strings
commandString = bytearray(commandString)
# Only process command if toggling remote control, querying parameters, or disarming, or otherwise only if connected to the Magstim
# N.B. For Rapid stimulators, we first need to have established what version number we are (which sets _parameterReturnBytes) before we can query parameters
if self._connected or (commandString[0] in {81, 82, 74, 70}) or commandString == b'EA' or (commandString[0] == 92 and self._parameterReturnBytes is not None):
# Put command in the send queue to the serial port controller along with what kind of reply is requested and how many bytes to read back from the Magstim
self._sendQueue.put((bytes(commandString + calcCRC(commandString)), receiptType, readBytes))
# If expecting a response, start inspecting the receive queue back from the serial port controller
if receiptType is not None:
error, reply = self._receiveQueue.get()
# If error is true, that means we either couldn't send the command or didn't get anything back from the Magstim
if error:
return (error, reply)
# If we did get something back from the Magstim, parse the message and the return it
else:
# Check for error messages
if reply[0] == 63:
return Magstim.INVALID_COMMAND_ERR
elif reply[1] == 63:
return Magstim.INVALID_DATA_ERR
elif reply[1] == 83:
return Magstim.COMMAND_CONFLICT_ERR
elif reply[0] != commandString[0]:
return Magstim.INVALID_CONFIRMATION_ERR
elif ord(calcCRC(reply[:-1])) != reply[-1]:
return Magstim.CRC_MISMATCH_ERR
# If we haven't returned yet, we got a valid message; so update the connection robot if we're connected
if self._connected:
if commandString[:2] == b'EA':
self._robotQueue.put(1)
elif commandString[:2] == b'EB':
self._robotQueue.put(2)
else:
self._robotQueue.put(0)
# Then return the parsed response if requested
return (0, Magstim.parseMagstimResponse(list(reply[1:-1]), receiptType) if receiptType is not None else None)
else:
return Magstim.NO_REMOTE_CONTROL_ERR
def remoteControl(self, enable, receipt=False):
"""
Enable/Disable remote control of stimulator. Disabling remote control will first disarm the Magstim unit.
Args:
enable (bool): whether to enable (True) or disable (False) control
receipt (bool): whether to return occurrence of an error and the automated response from the Magstim unit (defaults to False)
Returns:
If receipt argument is True:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing a Magstim instrument status ['instr'] dict, otherwise returns an error string
If receipt argument is False:
None
"""
return self._processCommand(b'Q@' if enable else b'R@', 'instr' if receipt else None, 3)
def getParameters(self):
"""
Request current parameter settings from the Magstim.
Returns:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing Magstim instrument status ['instr'] and parameter setting ['magstimParam'] dicts, otherwise returns an error string
"""
return self._processCommand(b'J@', 'magstimParam', 12)
def setPower(self, newPower, receipt=False, delay=False, _commandByte=b'@'):
"""
Set power level for Magstim.
N.B. Allow 100 ms per unit drop in power, or 10 ms per unit increase in power.
Args:
newPower (int): new power level (0-100)
receipt (bool): whether to return occurrence of an error and the automated response from the Magstim unit (defaults to False)
delay (bool): enforce delay to allow Magstim time to change Power (defaults to False)
_commandByte should not be changed by the user
Returns:
If receipt argument is True:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing a Magstim instrument status ['instr'] dict, otherwise returns an error string
If receipt argument is False:
None
"""
# Make sure we have a valid power value
if newPower % 1:
return Magstim.PARAMETER_FLOAT_ERR
elif not 0 <= newPower <= 100:
return Magstim.PARAMETER_RANGE_ERR
#If enforcing power change delay, grab current parameters
if delay:
error, priorPower = self.getParameters()
if error:
return Magstim.PARAMETER_ACQUISTION_ERR
else:
# Switch keys depending on whether we're returning for a BiStim
if type(self).__name__ == 'BiStim':
priorPower = priorPower['bistimParam']['powerA'] if _commandByte == b'@' else priorPower['bistimParam']['powerB']
elif type(self).__name__ == 'Rapid':
priorPower = priorPower['rapidParam']['power']
else:
priorPower = priorPower['magstimParam']['power']
error, message = self._processCommand(_commandByte + bytearray(str(int(newPower)).zfill(3),encoding='ascii'), 'instr' if (receipt or delay) else None, 3)
        # If we're meant to delay (and the power change succeeded), enforce a wait proportional to the size of the change
        if delay and not error:
            if newPower > priorPower:
                sleep((newPower - priorPower) * 0.01)
            else:
                sleep((priorPower - newPower) * 0.1)
return (error, message) if receipt else None
def getTemperature(self):
"""
Request current coil temperature from the Magstim.
N.B. Coil1 and Coil2 refer to the separate windings in a single figure-8 coil connected to the Magstim.
Magstim units will automatically disarm (and cannot be armed) if the coil temperature exceeds 40 degrees celsius.
Returns:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing Magstim instrument status ['instr'] and coil temperature ['magstimTemp'] dicts, otherwise returns an error string
"""
return self._processCommand(b'F@', 'magstimTemp', 9)
def poke(self, silent=False):
"""
'Poke' the stimulator with an enable remote control command (only if currently connected).
        This should be used prior to any time-sensitive commands, such as triggering the magstim to coincide with stimulus presentation. Conservatively, around 40-50ms should
be enough time to allow for (~20ms if 'silently' poking). This needs to be done to ensure that the ongoing communication with the magstim to maintain remote control
does not interfere with the sent command. Note that this simply resets the timer controlling this ongoing communication (i.e., incrementing it a further 500 ms).
Args:
silent (bool): whether to bump polling robot but without sending enable remote control command (defaults to False)
"""
if silent and self._connected:
self._robotQueue.put(0)
else:
self._processCommand(*self._connectionCommand)
def arm(self, receipt=False, delay=False):
"""
Arm the stimulator.
        N.B. You must allow around 1 s for the stimulator to arm.
        If you send an arm() command when the Magstim is already armed, you will receive a non-fatal error reply from the Magstim that the command conflicts with the current settings.
        If the unit does not fire for more than 1 min while armed, it will disarm.
Args:
receipt (bool): whether to return occurrence of an error and the automated response from the Magstim unit (defaults to False)
delay (bool): enforce delay to allow Magstim time to arm (defaults to False)
Returns:
If receipt argument is True:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing a Magstim instrument status ['instr'] dict, otherwise returns an error string
If receipt argument is False:
None
"""
error, message = self._processCommand(b'EB', 'instr' if receipt else None, 3)
#Enforcing arming delay if requested
if delay:
sleep(1.1)
return (error, message)
def disarm(self, receipt=False):
"""
Disarm the stimulator.
Args:
receipt (bool): whether to return occurrence of an error and the automated response from the Magstim unit (defaults to False)
Returns:
If receipt argument is True:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing a Magstim instrument status ['instr'] dict, otherwise returns an error string
If receipt argument is False:
None
"""
return self._processCommand(b'EA', 'instr' if receipt else None, 3)
def isArmed(self):
"""
Helper function that returns True if the Magstim is armed or ready, False if not or if it could not be determined.
"""
error,parameters = self._queryCommand()
return (bool(parameters['instr']['armed']) or bool(parameters['instr']['remoteStatus'])) if not error else False
def isUnderControl(self):
"""
Helper function that returns True if the Magstim is under remote control, False if not or if it could not be determined.
"""
error,parameters = self._queryCommand()
return bool(parameters['instr']['remoteStatus']) if not error else False
def isReadyToFire(self):
"""
Helper function that returns True if the Magstim is ready to fire, False if not or if it could not be determined.
"""
error,parameters = self._queryCommand()
return bool(parameters['instr']['ready']) if not error else False
def fire(self, receipt=False):
"""
Fire the stimulator.
N.B. Will only succeed if previously armed.
Args:
receipt (bool): whether to return occurrence of an error and the automated response from the Magstim unit (defaults to False)
Returns:
If receipt argument is True:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing a Magstim instrument status ['instr'] dict, otherwise returns an error string
If receipt argument is False:
None
"""
return self._processCommand(b'EH', 'instr' if receipt else None, 3)
def resetQuickFire(self):
"""
Reset the RTS pin used for quick firing.
        N.B. There must be a few ms between triggering QuickFire and resetting the pin.
"""
self._sendQueue.put((-1, None, 0))
def quickFire(self):
"""
Trigger the stimulator to fire with very low latency using the RTS pin and a custom serial connection.
"""
self._sendQueue.put((1, None, 0))
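# Hedged usage sketch (not part of the original module; 'COM1' is a
# placeholder address) showing the intended call sequence for a 200^2 unit:
#   magstim = Magstim('COM1')
#   magstim.connect()
#   magstim.setPower(50, receipt=True, delay=True)
#   magstim.arm(delay=True)
#   if magstim.isReadyToFire():
#       magstim.fire()
#   magstim.disconnect()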
class BiStim(Magstim):
"""
This is a sub-class of the parent Magstim class used for controlling BiStim^2 Magstim units. It allows firing in either BiStim mode or Simultaneous Discharge mode.
To enable Simultaneous Discharge mode, you must change the pulseInterval parameter to 0 s (i.e., by calling: setPulseInterval(0)).
    N.B. In BiStim mode, the maximum firing frequency is 0.25 Hz. In Simultaneous Discharge mode, the maximum frequency depends on the power level (0.25 - 0.5 Hz)
"""
def __init__(self, serialConnection):
super(BiStim, self).__init__(serialConnection)
self._highResolutionMode = False
def highResolutionMode(self, enable, receipt=False):
"""
Enable/Disable high resolution timing of interpulse interval.
When enabling high-resolution mode, the system will default to a 1ms interval.
When disabling high-resolution mode, the system will default to a 10ms interval.
N.B. This cannot be changed while the system is armed.
Args:
enable (bool): whether to enable (True) or disable (False) high-resolution mode
receipt (bool): whether to return occurrence of an error and the automated response from the BiStim unit (defaults to False)
Returns:
If receipt argument is True:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
                message (dict,str): if error is 0 (False) returns a dict containing a BiStim instrument status ['instr'] dict, otherwise returns an error string
If receipt argument is False:
None
"""
error,message = self._processCommand(b'Y@' if enable else b'Z@', 'instr' if receipt else None, 3)
if not error:
self._highResolutionMode = enable
return (error,message)
def getParameters(self):
"""
        Request current parameter settings from the BiStim.
Returns:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing BiStim instrument status ['instr'] and parameter setting ['bistimParam'] dicts, otherwise returns an error string
"""
(error,message) = self._processCommand(b'J@', 'bistimParam', 12)
if not error and self._highResolutionMode:
message['bistimParam']['ppOffset'] /= 10.0
return (error,message)
def setPowerA(self, newPower, receipt=False, delay=False):
"""
Set power level for BiStim A.
N.B. Allow 100ms per unit drop in power, or 10ms per unit increase in power.
        In BiStim mode, power output is actually 90% of a 200^2 unit's power output. In Simultaneous Discharge mode (pulseInterval = 0), power output is actually 113% of a 200^2 unit's power output
Args:
newPower (int): new power level (0-100)
receipt (bool): whether to return occurrence of an error and the automated response from the BiStim unit (defaults to False)
delay (bool): enforce delay to allow BiStim time to change Power (defaults to False)
Returns:
If receipt argument is True:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing a BiStim instrument status ['instr'] dict, otherwise returns an error string
If receipt argument is False:
None
"""
#This is just an alias for the base magstim class method setPower
return super(BiStim, self).setPower(newPower, receipt=receipt, delay=delay, _commandByte=b'@')
def setPowerB(self, newPower, receipt=False, delay=False):
"""
Set power level for BiStim B.
N.B. Allow 100ms per unit drop in power, or 10ms per unit increase in power.
Power output is actually 90% of a 200^2 unit's power output.
Args:
newPower (int): new power level (0-100)
receipt (bool): whether to return occurrence of an error and the automated response from the BiStim unit (defaults to False)
delay (bool): enforce delay to allow BiStim time to change Power (defaults to False)
Returns:
If receipt argument is True:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing a BiStim instrument status ['instr'] dict, otherwise returns an error string
If receipt argument is False:
None
"""
#This is just an alias for the base magstim class method setPower
return super(BiStim, self).setPower(newPower, receipt=receipt, delay=delay, _commandByte=b'A')
def setPulseInterval(self, newInterval, receipt=False):
"""
Set interpulse interval.
Args:
newInterval (int/float): new interpulse interval in milliseconds (Range low-resolution mode: 0-999; Range high-resolution mode: 0-99.9)
receipt (bool): whether to return occurrence of an error and the automated response from the BiStim unit (defaults to False)
Returns:
If receipt argument is True:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing a BiStim instrument status ['instr'] dict, otherwise returns an error string
If receipt argument is False:
None
"""
# If we're in high resolution mode, then convert to tenths of a millisecond
if self._highResolutionMode:
newInterval = newInterval * 10
# Make sure we have a valid ipi value
if newInterval % 1:
return Magstim.PARAMETER_PRECISION_ERR if self._highResolutionMode else Magstim.PARAMETER_FLOAT_ERR
elif not (0 <= newInterval <= 999):
return Magstim.PARAMETER_RANGE_ERR
return self._processCommand(b'C' + bytearray(str(int(newInterval)).zfill(3),encoding='ascii'), 'instr' if receipt else None, 3)
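# Hedged example (not in the original module): a paired-pulse setup on a
# BiStim, assuming 'COM1' is the correct serial port:
#   bistim = BiStim('COM1')
#   bistim.connect()
#   bistim.setPowerA(40, delay=True)
#   bistim.setPowerB(40, delay=True)
#   bistim.setPulseInterval(0)   # 0 ms interval = Simultaneous Discharge mode
#   bistim.disconnect()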
class Rapid(Magstim):
"""
This is a sub-class of the parent Magstim class used for controlling Rapid^2 Magstim units. It allows firing in either single-pulse mode or rTMS mode.
In single-pulse mode, the maximum firing frequency is 1 Hz (0.5 Hz if enhanced-power mode is enabled and power is 100 - 110%).
To enable rTMS mode, you must first call rTMSMode(True). To disable rTMS mode, call rTMSMode(False).
N.B. In rTMS mode the maximum frequency allowed is dependent on the power level. Also, there is a dependent relationship between the Duration, NPulses, and Frequency parameter settings.
Therefore it is recommended either to seek confirmation of any change in settings or to evaluate allowable changes beforehand.
In addition, after each rTMS train there is an enforced delay (minimum 500 ms) before any subsequent train can be initiated or before any rTMS parameter settings can be altered.
"""
# Load settings file (resort to default values if not found)
__location__ = realpath(join(getcwd(), dirname(__file__)))
try:
with open(join(__location__, 'rapid_config.yaml')) as yaml_file:
config_data = load(yaml_file)
    except Exception:
DEFAULT_RAPID_TYPE = 0
DEFAULT_VOLTAGE = 240
DEFAULT_UNLOCK_CODE = ''
ENFORCE_ENERGY_SAFETY = True
DEFAULT_VIRTUAL_VERSION = (5,0,0)
else:
DEFAULT_RAPID_TYPE = config_data['defaultRapidType']
DEFAULT_VOLTAGE = config_data['defaultVoltage']
DEFAULT_UNLOCK_CODE = config_data['unlockCode']
ENFORCE_ENERGY_SAFETY = config_data['enforceEnergySafety']
DEFAULT_VIRTUAL_VERSION = literal_eval(config_data['virtualVersionNumber'])
# Load system info file
with open(join(__location__, 'rapid_system_info.yaml')) as yaml_file:
system_info = load(yaml_file)
# Maximum allowed rTMS frequency based on voltage and current power setting
MAX_FREQUENCY = system_info['maxFrequency']
# Minimum wait time (s) required for rTMS train. Power:Joules per pulse
JOULES = system_info['joules']
    @staticmethod
    def getRapidMinWaitTime(power, nPulses, frequency):
""" Calculate minimum wait time between trains for given power, frequency, and number of pulses."""
return max(0.5, (nPulses * ((frequency * Rapid.JOULES[power]) - 1050.0)) / (1050.0 * frequency))
    @staticmethod
    def getRapidMaxOnTime(power, frequency):
""" Calculate maximum train duration per minute for given power and frequency. If greater than 60 seconds, will allow for continuous operation for up to 6000 pulses."""
return 63000.0 / (frequency * Rapid.JOULES[power])
    @staticmethod
    def getRapidMaxContinuousOperationFrequency(power):
""" Calculate maximum frequency that will allow for continuous operation (up to 6000 pulses)."""
return 1050.0 / Rapid.JOULES[power]
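    # Worked example (illustrative only; 30.0 J is an assumed JOULES entry,
    # the real values come from rapid_system_info.yaml): at that energy,
    # 100 pulses at 10 Hz give max(0.5, 100 * ((10 * 30.0) - 1050.0) / (1050.0 * 10));
    # the numerator is negative, so the minimum wait time clamps to 0.5 s.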
def __init__(self, serialConnection, superRapid=DEFAULT_RAPID_TYPE, unlockCode=DEFAULT_UNLOCK_CODE, voltage=DEFAULT_VOLTAGE, version=DEFAULT_VIRTUAL_VERSION):
self._super = superRapid
self._unlockCode = unlockCode
self._voltage = voltage
self._version = version if serialConnection.lower() == 'virtual' else (0,0,0)
super(Rapid, self).__init__(serialConnection)
# If an unlock code has been supplied, then the Rapid requires a different command to stay in contact with it.
if self._unlockCode:
self._connectionCommand = (b'x@G', None, 6)
self._queryCommand = self.getSystemStatus
self._parameterReturnBytes = None
self._sequenceValidated = False
self._repetitiveMode = False
def _setupSerialPort(self, serialConnection):
if serialConnection.lower() == 'virtual':
            from ._virtual import virtualPortController
self._connection = virtualPortController(self.__class__.__name__,self._sendQueue,self._receiveQueue,superRapid=self._super,unlockCode=self._unlockCode,voltage=self._voltage,version=self._version)
else:
self._connection = serialPortController(serialConnection, self._sendQueue, self._receiveQueue)
def getVersion(self):
"""
Get Magstim software version number. This is needed when obtaining parameters from the Magstim.
Returns:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (tuple): if error is 0 (False) returns a tuple containing the version number (in (Major,Minor,Patch) format), otherwise returns an error string
"""
error, message = self._processCommand(b'ND', 'version', None)
#If we didn't receive an error, update the version number and the number of bytes that will be returned by a getParameters() command
if not error:
self._version = message
if self._version >= (9,):
self._parameterReturnBytes = 24
elif self._version >= (7,):
self._parameterReturnBytes = 22
else:
self._parameterReturnBytes = 21
return (error,message)
def getErrorCode(self):
"""
Get current error code from Rapid.
Returns:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing Rapid instrument status ['instr'] and current error code ['errorCode'] dicts, otherwise returns an error string
"""
return self._processCommand(b'I@', 'error', 6)
def connect(self, receipt=False):
"""
Connect to the Rapid.
This starts the serial port controller, as well as a process that constantly keeps in contact with the Rapid so as not to lose control.
It also collects the software version number of the Rapid in order to send the correct command for obtaining parameter settings.
        Args:
            receipt (bool): whether to return occurrence of an error and the automated response from the Rapid unit (defaults to False; currently unused)
        Raises:
            MagstimError: if the software version of the Rapid could not be determined (the connection is closed before raising)
        """
super(Rapid,self).connect()
# We have to be able to determine the software version of the Rapid, otherwise we won't be able to communicate properly
error, message = self.getVersion()
if error:
self.disconnect()
raise MagstimError('Could not determine software version of Rapid. Disconnecting.')
def disconnect(self):
"""
Disconnect from the Magstim.
This stops maintaining contact with the Magstim and turns the serial port controller off.
"""
#Just some housekeeping before we call the base magstim class method disconnect
self._sequenceValidated = False
self._repetitiveMode = False
return super(Rapid, self).disconnect()
def rTMSMode(self, enable, receipt=False):
"""
This is a helper function to enable/disable rTMS mode.
Args:
enable (bool): whether to enable (True) or disable (False) control
receipt (bool): whether to return occurrence of an error and the automated response from the Rapid unit (defaults to False)
Returns:
If receipt argument is True:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
                message (dict,str): if error is 0 (False) returns a dict containing Rapid instrument status ['instr'] and rTMS setting ['rapid'] dicts, otherwise returns an error string
If receipt argument is False:
None
"""
self._sequenceValidated = False
# Get current parameters
updateError,currentParameters = self.getParameters()
if updateError:
return Magstim.PARAMETER_ACQUISTION_ERR
else:
# See if Rapid already in rTMS mode (if enabling) or already in single-pulse mode (if disabling)
if (not currentParameters['rapid']['singlePulseMode'] and enable) or (currentParameters['rapid']['singlePulseMode'] and not enable):
del currentParameters['rapidParam']
return (0,currentParameters) if receipt else None
# Durations of 1 or 0 are used to toggle repetitive mode on and off
if self._version >= (9,):
commandString = b'[0010' if enable else b'[0000'
else:
commandString = b'[010' if enable else b'[000'
error,message = self._processCommand(commandString, 'instrRapid', 4)
if not error:
if enable:
self._repetitiveMode = True
updateError,currentParameters = self.getParameters()
if not updateError:
if currentParameters['rapidParam']['frequency'] == 0:
updateError,currentParameters = self._processCommand(b'B0010', 'instrRapid', 4)
if updateError:
return Magstim.PARAMETER_UPDATE_ERR
else:
return Magstim.PARAMETER_ACQUISTION_ERR
else:
self._repetitiveMode = False
return (error,message) if receipt else None
def ignoreCoilSafetySwitch(self, receipt=False):
"""
This allows the stimulator to ignore the state of coil safety interlock switch.
Args:
receipt (bool): whether to return occurrence of an error and the automated response from the Rapid unit (defaults to False)
Returns:
If receipt argument is True:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing Rapid instrument status ['instr'] dict, otherwise returns an error string
If receipt argument is False:
None
"""
return self._processCommand(b'b@', 'instr' if receipt else None, 3)
def remoteControl(self, enable, receipt=False):
"""
Enable/Disable remote control of stimulator. Disabling remote control will first disarm the Magstim unit.
Args:
enable (bool): whether to enable (True) or disable (False) control
receipt (bool): whether to return occurrence of an error and the automated response from the Magstim unit (defaults to False)
Returns:
If receipt argument is True:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing a Magstim instrument status ['instr'] dict, otherwise returns an error string
If receipt argument is False:
None
"""
self._sequenceValidated = False
if self._unlockCode:
return self._processCommand(b'Q' + bytearray(self._unlockCode,encoding='latin_1') if enable else b'R@', 'instr' if receipt else None, 3)
else:
return self._processCommand(b'Q@' if enable else b'R@', 'instr' if receipt else None, 3)
def enhancedPowerMode(self, enable, receipt=False):
"""
Enable/Disable enhanced power mode; allowing intensity to be set to 110%.
N.B. This can only be enabled in single-pulse mode, and lowers the maximum firing frequency to 0.5 Hz.
        Disabling will automatically reduce intensity to 100% if it is currently set higher.
Args:
enable (bool): whether to enable (True) or disable (False) enhanced-power mode
receipt (bool): whether to return occurrence of an error and the automated response from the Rapid unit (defaults to False)
Returns:
If receipt argument is True:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
                message (dict,str): if error is 0 (False) returns a dict containing Rapid instrument status ['instr'] and rTMS setting ['rapid'] dicts, otherwise returns an error string
If receipt argument is False:
None
"""
return self._processCommand(b'^@' if enable else b'_@', 'instrRapid' if receipt else None, 4)
def isEnhanced(self):
"""
        Helper function that returns True if the Rapid is in enhanced power mode, False if not or if it could not be determined.
"""
error,parameters = self._queryCommand()
return bool(parameters['rapid']['enhancedPowerMode']) if not error else False
def setFrequency(self, newFrequency, receipt=False):
"""
Set frequency of rTMS pulse train.
N.B. Changing the Frequency will automatically update the NPulses parameter based on the current Duration parameter setting.
The maximum frequency allowed depends on the current Power level and the regional power settings (i.e., 115V vs. 240V)
Args:
newFrequency (int/float): new frequency of pulse train in Hertz (0-100 for 240V systems, 0-60 for 115V systems); decimal values are allowed for frequencies up to 30Hz
receipt (bool): whether to return occurrence of an error and the automated response from the Rapid unit (defaults to False)
Returns:
If receipt argument is True:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
                message (dict,str): if error is 0 (False) returns a dict containing Rapid instrument status ['instr'] and rTMS setting ['rapid'] dicts, otherwise returns an error string
If receipt argument is False:
None
"""
self._sequenceValidated = False
# Convert to tenths of a Hz
newFrequency = newFrequency * 10
# Make sure we have a valid frequency value
if newFrequency % 1:
return Magstim.PARAMETER_PRECISION_ERR
updateError,currentParameters = self.getParameters()
if updateError:
return Magstim.PARAMETER_ACQUISTION_ERR
else:
maxFrequency = Rapid.MAX_FREQUENCY[self._voltage][self._super][currentParameters['rapidParam']['power']] * 10
if not (0 <= newFrequency <= maxFrequency):
return Magstim.PARAMETER_RANGE_ERR
#Send command
error, message = self._processCommand(b'B' + bytearray(str(int(newFrequency)).zfill(4),encoding='ascii'), 'instrRapid', 4)
#If we didn't get an error, update the other parameters accordingly
if not error:
updateError,currentParameters = self.getParameters()
if not updateError:
updateError,currentParameters = self._processCommand(b'D' + bytearray(str(int(currentParameters['rapidParam']['duration'] * currentParameters['rapidParam']['frequency'])).zfill(5 if self._version >= (9,) else 4),encoding='ascii'), 'instrRapid', 4)
if updateError:
return Magstim.PARAMETER_UPDATE_ERR
else:
return Magstim.PARAMETER_ACQUISTION_ERR
return (error, message) if receipt else None
def setNPulses(self, newNPulses, receipt=False):
"""
Set number of pulses in rTMS pulse train.
N.B. Changing the NPulses parameter will automatically update the Duration parameter (this cannot exceed 10 s) based on the current Frequency parameter setting.
Args:
newNPulses (int): new number of pulses (Version 9+: 1-6000; Version 7+: ?; Version 5+: 1-1000?)
receipt (bool): whether to return occurrence of an error and the automated response from the Rapid unit (defaults to False)
Returns:
If receipt argument is True:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
                message (dict,str): if error is 0 (False) returns a dict containing Rapid instrument status ['instr'] and rTMS setting ['rapid'] dicts, otherwise returns an error string
If receipt argument is False:
None
"""
self._sequenceValidated = False
# Make sure we have a valid number of pulses value
if newNPulses % 1:
return Magstim.PARAMETER_FLOAT_ERR
if not (0 <= newNPulses <= 6000):
return Magstim.PARAMETER_RANGE_ERR
#Send command
error, message = self._processCommand(b'D' + bytearray(str(int(newNPulses)).zfill(5 if self._version >= (9,) else 4),encoding='ascii'), 'instrRapid', 4)
#If we didn't get an error, update the other parameters accordingly
if not error:
updateError, currentParameters = self.getParameters()
if not updateError:
updateError, currentParameters = self._processCommand(b'[' + bytearray(str(int(currentParameters['rapidParam']['nPulses'] / currentParameters['rapidParam']['frequency'])).zfill(4 if self._version >= (9,) else 3),encoding='ascii'), 'instrRapid' if receipt else None, 4)
if updateError:
return Magstim.PARAMETER_UPDATE_ERR
else:
return Magstim.PARAMETER_ACQUISTION_ERR
return (error, message) if receipt else None
def setDuration(self, newDuration, receipt=False):
"""
Set duration of rTMS pulse train.
N.B. Changing the Duration parameter will automatically update the NPulses parameter based on the current Frequency parameter setting.
Args:
newDuration (int/float): new duration of pulse train in seconds (Version 9+: 1-600; Version 7+: ?; Version 5+: 1-10?); decimal values are allowed for durations up to 30s
receipt (bool): whether to return occurrence of an error and the automated response from the Rapid unit (defaults to False)
Returns:
If receipt argument is True:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
                message (dict,str): if error is 0 (False) returns a dict containing Rapid instrument status ['instr'] and rTMS setting ['rapid'] dicts, otherwise returns an error string
If receipt argument is False:
None
"""
self._sequenceValidated = False
# Convert to tenths of a second
newDuration = newDuration * 10
# Make sure we have a valid duration value
if newDuration % 1:
return Magstim.PARAMETER_PRECISION_ERR
elif not (0 <= newDuration <= (999 if self._version < (9,) else 9999)):
return Magstim.PARAMETER_RANGE_ERR
error, message = self._processCommand(b'[' + bytearray(str(int(newDuration)).zfill(4 if self._version >= (9,) else 3),encoding='ascii'), 'instrRapid', 4)
if not error:
updateError, currentParameters = self.getParameters()
if not updateError:
updateError, currentParameters = self._processCommand(b'D' + bytearray(str(int(currentParameters['rapidParam']['duration'] * currentParameters['rapidParam']['frequency'])).zfill(5 if self._version >= (9,) else 4),encoding='ascii'), 'instrRapid', 4)
if updateError:
return Magstim.PARAMETER_UPDATE_ERR
else:
return Magstim.PARAMETER_ACQUISTION_ERR
return (error, message) if receipt else None
def getParameters(self):
"""
Request current parameter settings from the Rapid.
Returns:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
            message (dict,str): if error is 0 (False) returns a dict containing Rapid instrument status ['instr'], rTMS setting ['rapid'], and parameter setting ['rapidParam'] dicts, otherwise returns an error string
"""
return self._processCommand(b'\\@', 'rapidParam', self._parameterReturnBytes)
def setPower(self, newPower, receipt=False, delay=False):
"""
Set power level for the Rapid.
N.B. Allow 100 ms per unit drop in power, or 10 ms per unit increase in power.
Changing the power level can result in automatic updating of the Frequency parameter (if in rTMS mode)
Args:
newPower (int): new power level (0-100; or 0-110 if enhanced-power mode is enabled)
receipt (bool): whether to return occurrence of an error and the automated response from the Rapid unit (defaults to False)
delay (bool): enforce delay to allow Rapid time to change Power (defaults to False)
Returns:
If receipt argument is True:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing a Rapid instrument status ['instr'] dict, otherwise returns an error string
If receipt argument is False:
None
"""
self._sequenceValidated = False
# Check current enhanced power status
if self.isEnhanced():
maxPower = 110
else:
maxPower = 100
# Make sure we have a valid power value
if newPower % 1:
return Magstim.PARAMETER_FLOAT_ERR
elif not 0 <= newPower <= maxPower:
return Magstim.PARAMETER_RANGE_ERR
error, message = super(Rapid,self).setPower(newPower,True,delay,b'@')
if not error:
updateError, currentParameters = self.getParameters()
if not updateError:
if not currentParameters['rapid']['singlePulseMode']:
maxFrequency = Rapid.MAX_FREQUENCY[self._voltage][self._super][currentParameters['rapidParam']['power']]
if currentParameters['rapidParam']['frequency'] > maxFrequency:
if not self.setFrequency(maxFrequency)[0]:
return Magstim.PARAMETER_UPDATE_ERR
else:
return Magstim.PARAMETER_ACQUISTION_ERR
return (error,message) if receipt else None
def setChargeDelay(self, newDelay, receipt=False):
"""
Set charge delay duration for the Rapid.
Args:
newDelay (int): new delay duration in seconds (Version 10+: 1-10000; Version 9: 1-2000)
receipt (bool): whether to return occurrence of an error and the automated response from the Rapid unit (defaults to False)
Returns:
If receipt argument is True:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing a Rapid instrument status ['instr'] dict, otherwise returns an error string
If receipt argument is False:
None
"""
if self._version is None:
return Magstim.GET_SYSTEM_STATUS_ERR
elif self._version < (9,):
return Magstim.SYSTEM_STATUS_VERSION_ERR
self._sequenceValidated = False
#Make sure we have a valid delay duration value
if newDelay % 1:
return Magstim.PARAMETER_FLOAT_ERR
error, message = self._processCommand(b'n' + bytearray(str(int(newDelay)).zfill(5 if self._version >= (10,) else 4),encoding='ascii'), 'systemRapid' if self._version >= (10,) else 'instrRapid', 6 if self._version >= (10,) else 4)
return (error,message) if receipt else None
def getChargeDelay(self):
"""
Get current charge delay duration for the Rapid.
Returns:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing a Rapid instrument status ['instr'] dict and charge delay duration ['chargeDelay'] value, otherwise returns an error string
"""
if self._version is None:
return Magstim.GET_SYSTEM_STATUS_ERR
elif self._version < (9,):
return Magstim.SYSTEM_STATUS_VERSION_ERR
return self._processCommand(b'o@', 'instrCharge', 8 if self._version > (9,) else 7)
def fire(self, receipt=False):
"""
Fire the stimulator. This overrides the base Magstim method in order to check whether rTMS mode is active and, if so, whether the sequence has been validated and the minimum wait time between trains has elapsed.
N.B. Will only succeed if previously armed.
Args:
receipt (bool): whether to return occurrence of an error and the automated response from the Magstim unit (defaults to False)
Returns:
If receipt argument is True:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing a Magstim instrument status ['instr'] dict, otherwise returns an error string
If receipt argument is False:
None
"""
if self._repetitiveMode and Rapid.ENFORCE_ENERGY_SAFETY and not self._sequenceValidated:
return Magstim.SEQUENCE_VALIDATION_ERR
else:
return super(Rapid,self).fire(receipt)
def quickFire(self):
"""
Trigger the stimulator to fire with very low latency using the RTS pin and a custom serial connection.
"""
if self._repetitiveMode and Rapid.ENFORCE_ENERGY_SAFETY and not self._sequenceValidated:
return Magstim.SEQUENCE_VALIDATION_ERR
else:
super(Rapid,self).quickFire()
def validateSequence(self):
"""
Validate the energy consumption for the current rTMS parameters for the Rapid.
This must be performed before running any new sequence, otherwise calling fire() will return an error.
Returns:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns current Rapid parameters, otherwise returns an error string
"""
self._sequenceValidated = False
error,parameters = self.getParameters()
if error:
return Magstim.PARAMETER_ACQUISTION_ERR
elif min(parameters['rapidParam']['duration'], 60) > Rapid.getRapidMaxOnTime(parameters['rapidParam']['power'], parameters['rapidParam']['frequency']):
return Magstim.MAX_ON_TIME_ERR
else:
self._sequenceValidated = True
return (0, parameters)
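#----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the class): the intended call order
# for an rTMS train is to set the parameters, validate the sequence, then arm
# and fire. 'COM1' is a hypothetical serial port, connect()/arm()/disconnect()
# come from the base Magstim class, and the stimulator is assumed to already
# be in rTMS (repetitive) mode.
#
# rapid = Rapid('COM1')
# rapid.connect()
# rapid.setPower(60, delay=True) # 60% of maximum stimulator output
# rapid.setFrequency(10) # 10 Hz pulse train
# rapid.setNPulses(50) # Duration is updated automatically
# error, params = rapid.validateSequence() # Required before fire() in rTMS mode
# if not error:
# rapid.arm(delay=True)
# rapid.fire()
# rapid.disconnect()
#----------------------------------------------------------------------------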
def getSystemStatus(self):
"""
Get system status from the Rapid. Only available on software version 9 or later.
Returns:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing Rapid instrument status ['instr'], rTMS setting ['rapid'], and extended instrument status ['extInstr'] dicts, otherwise returns an error string
"""
if self._version is None:
return Magstim.GET_SYSTEM_STATUS_ERR
elif self._version >= (9,):
return self._processCommand(b'x@', 'systemRapid', 6)
else:
return Magstim.SYSTEM_STATUS_VERSION_ERR
|
nicolasmcnair/magpy
|
magpy/magstim.py
|
Python
|
gpl-3.0
| 69,520
|
#Multiples of 3 and 5
#Problem 1
#
#If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.
#Find the sum of all the multiples of 3 or 5 below 1000.
total = 0
for i in range(1,1000):
if i%3 == 0 or i%5 == 0:
total += i
print total
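#A loop-free alternative (a sketch, using the arithmetic-series sum and
#inclusion-exclusion; sum_multiples_below is a hypothetical helper):
#def sum_multiples_below(k, n):
# m = (n - 1) // k
# return k * m * (m + 1) // 2
#print sum_multiples_below(3, 1000) + sum_multiples_below(5, 1000) - sum_multiples_below(15, 1000)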
|
paulmcquad/projecteuler
|
0-100/problem1.py
|
Python
|
gpl-3.0
| 321
|
# -*- coding: utf-8 -*-
"""
@file route.py
@author Michael Behrisch
@author Lena Kalleske
@date 2008-10-09
@version $Id: route.py 13811 2013-05-01 20:31:43Z behrisch $
Python implementation of the TraCI interface.
SUMO, Simulation of Urban MObility; see http://sumo.sourceforge.net/
Copyright (C) 2008-2013 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
import traci, struct
import traci.constants as tc
_RETURN_VALUE_FUNC = {tc.ID_LIST: traci.Storage.readStringList,
tc.VAR_EDGES: traci.Storage.readStringList}
subscriptionResults = traci.SubscriptionResults(_RETURN_VALUE_FUNC)
def _getUniversal(varID, routeID):
result = traci._sendReadOneStringCmd(tc.CMD_GET_ROUTE_VARIABLE, varID, routeID)
return _RETURN_VALUE_FUNC[varID](result)
def getIDList():
"""getIDList() -> list(string)
Returns a list of all routes in the network.
"""
return _getUniversal(tc.ID_LIST, "")
def getEdges(routeID):
"""getEdges(string) -> list(string)
Returns a list of all edges in the route.
"""
return _getUniversal(tc.VAR_EDGES, routeID)
def subscribe(routeID, varIDs=(tc.ID_LIST,), begin=0, end=2**31-1):
"""subscribe(string, list(integer), double, double) -> None
Subscribe to one or more route values for the given interval.
A call to this method clears all previous subscription results.
"""
subscriptionResults.reset()
traci._subscribe(tc.CMD_SUBSCRIBE_ROUTE_VARIABLE, begin, end, routeID, varIDs)
def getSubscriptionResults(routeID=None):
"""getSubscriptionResults(string) -> dict(integer: <value_type>)
Returns the subscription results for the last time step and the given route.
If no route id is given, all subscription results are returned in a dict.
If the route id is unknown or the subscription did for any reason return no data,
'None' is returned.
It is not possible to retrieve older subscription results than the ones
from the last time step.
"""
return subscriptionResults.get(routeID)
def subscribeContext(routeID, domain, dist, varIDs=(tc.ID_LIST,), begin=0, end=2**31-1):
subscriptionResults.reset()
traci._subscribeContext(tc.CMD_SUBSCRIBE_ROUTE_CONTEXT, begin, end, routeID, domain, dist, varIDs)
def getContextSubscriptionResults(routeID=None):
return subscriptionResults.getContext(routeID)
def add(routeID, edges):
traci._beginMessage(tc.CMD_SET_ROUTE_VARIABLE, tc.ADD, routeID,
1+4+sum(map(len, edges))+4*len(edges))
traci._message.string += struct.pack("!Bi", tc.TYPE_STRINGLIST, len(edges))
for e in edges:
traci._message.string += struct.pack("!i", len(e)) + e
traci._sendExact()
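# Illustrative sketch (assumes a SUMO simulation already started and connected
# via traci, and a network containing edges "e1" and "e2"):
#
# route.add("newRoute", ["e1", "e2"]) # register a route over two edges
# print route.getIDList() # all route IDs, now including "newRoute"
# print route.getEdges("newRoute") # -> ['e1', 'e2']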
|
rudhir-upretee/Sumo17_With_Netsim
|
tools/traci/route.py
|
Python
|
gpl-3.0
| 2,735
|
# -*- coding: utf-8 -*-
from docpool.config.utils import CHILDREN
from docpool.config.utils import createPloneObjects
from docpool.config.utils import ID
from docpool.config.utils import TITLE
from docpool.config.utils import TYPE
from plone.app.dexterity.behaviors.exclfromnav import IExcludeFromNavigation
from Products.CMFCore.utils import getToolByName
from Products.PortalTransforms.Transform import make_config_persistent
from Products.PythonScripts.PythonScript import PythonScript
import transaction
def install(self):
"""
"""
fresh = True
if self.hasObject("config"):
fresh = False # It's a reinstall
configUserFolders(self, fresh)
createStructure(self, fresh)
navSettings(self)
createGroups(self)
configureFiltering(self)
setFrontpage(self)
# Further base structures
ADMINSTRUCTURE = [
{
TYPE: 'DPConfig',
TITLE: u'Globale Konfiguration',
ID: 'config',
CHILDREN: [
{
TYPE: 'DocTypes',
TITLE: u'Globale Dokumenttypen',
ID: 'dtypes',
CHILDREN: [],
}
],
}
]
# Configuration methods
def configUserFolders(self, fresh):
"""
"""
# Turn creation of user folders on
# from plone.app.controlpanel.security import ISecuritySchema
# Fetch the adapter
from Products.CMFPlone.interfaces.controlpanel import ISecuritySchema
security_adapter = ISecuritySchema(self)
security_adapter.set_enable_user_folders(True)
# Set type for user folders
mtool = getToolByName(self, "portal_membership")
mtool.setMemberAreaType("UserFolder")
if fresh:
mtool.addMember(
'dpadmin',
'Docpool Administrator (global)',
['Site Administrator', 'Member'],
[],
)
dpadmin = mtool.getMemberById('dpadmin')
dpadmin.setMemberProperties({"fullname": 'Docpool Administrator'})
dpadmin.setSecurityProfile(password="admin")
mtool.addMember(
'dpmanager', 'Docpool Manager (global)', ['Manager', 'Member'], []
)
dpmanager = mtool.getMemberById('dpmanager')
dpmanager.setMemberProperties({"fullname": 'Docpool Manager'})
dpmanager.setSecurityProfile(password="admin")
def navSettings(self):
IExcludeFromNavigation(self.news).exclude_from_nav = True
self.news.reindexObject()
IExcludeFromNavigation(self.events).exclude_from_nav = True
self.events.reindexObject()
IExcludeFromNavigation(self.Members).exclude_from_nav = True
self.Members.reindexObject()
def createStructure(self, fresh):
transaction.commit()
createAdminstructure(self, fresh)
transaction.commit()
def createAdminstructure(plonesite, fresh):
"""
"""
createPloneObjects(plonesite, ADMINSTRUCTURE, fresh)
def setFrontpage(self):
"""
"""
script_name = 'redirect'
if script_name not in self.keys():
self._setObject(script_name, PythonScript(script_name))
ps = self._getOb(script_name)
ps.write(
"""
if not context.isAdmin():
container.REQUEST.RESPONSE.redirect(context.myFirstDocumentPool())
else:
container.REQUEST.RESPONSE.redirect(context.absolute_url() + "/folder_contents")
"""
)
self.setLayout(script_name)
def configureFiltering(self):
"""
"""
tid = 'safe_html'
pt = getToolByName(self, 'portal_transforms')
if tid not in pt.objectIds():
return
trans = pt[tid]
tconfig = trans._config
tconfig['class_blacklist'] = []
tconfig['nasty_tags'] = {'meta': '1'}
tconfig['remove_javascript'] = 0
tconfig['stripped_attributes'] = [
'lang',
'valign',
'halign',
'border',
'frame',
'rules',
'cellspacing',
'cellpadding',
'bgcolor',
]
tconfig['stripped_combinations'] = {}
tconfig['style_whitelist'] = [
'text-align',
'list-style-type',
'float',
'width',
'height',
'padding-left',
'padding-right',
] # allow specific styles for text and layout formatting
tconfig['valid_tags'] = {
'code': '1',
'meter': '1',
'tbody': '1',
'style': '1',
'img': '0',
'title': '1',
'tt': '1',
'tr': '1',
'param': '1',
'li': '1',
'source': '1',
'tfoot': '1',
'th': '1',
'td': '1',
'dl': '1',
'blockquote': '1',
'big': '1',
'dd': '1',
'kbd': '1',
'dt': '1',
'p': '1',
'small': '1',
'output': '1',
'div': '1',
'em': '1',
'datalist': '1',
'hgroup': '1',
'video': '1',
'rt': '1',
'canvas': '1',
'rp': '1',
'sub': '1',
'bdo': '1',
'sup': '1',
'progress': '1',
'body': '1',
'acronym': '1',
'base': '0',
'br': '0',
'address': '1',
'article': '1',
'strong': '1',
'ol': '1',
'script': '1',
'caption': '1',
'dialog': '1',
'col': '1',
'h2': '1',
'h3': '1',
'h1': '1',
'h6': '1',
'h4': '1',
'h5': '1',
'header': '1',
'table': '1',
'span': '1',
'area': '0',
'mark': '1',
'dfn': '1',
'var': '1',
'cite': '1',
'thead': '1',
'head': '1',
'hr': '0',
'link': '1',
'ruby': '1',
'b': '1',
'colgroup': '1',
'keygen': '1',
'ul': '1',
'del': '1',
'iframe': '1',
'embed': '1',
'pre': '1',
'figure': '1',
'ins': '1',
'aside': '1',
'html': '1',
'nav': '1',
'details': '1',
'u': '1',
'samp': '1',
'map': '1',
'object': '1',
'a': '1',
'footer': '1',
'i': '1',
'q': '1',
'command': '1',
'time': '1',
'audio': '1',
'section': '1',
'abbr': '1',
'strike': '1',
}
make_config_persistent(tconfig)
trans._p_changed = True
trans.reload()
def createGroups(self):
gdata = getToolByName(self, 'portal_groupdata')
try:
gdata.manage_addProperty(
"allowedDocTypes", "possibleDocTypes", "multiple selection"
)
except BaseException:
pass
configureGroups(self)
try:
gdata.manage_addProperty("dp", "possibleDocumentPools", "selection")
except BaseException:
pass
def configureGroups(self):
gdata = getToolByName(self, 'portal_groupdata')
prop = gdata.propdict().get("allowedDocTypes")
if prop is not None:
prop["label"] = u"Allowed document types"
gdata._p_changed = True
|
OpenBfS/dokpool-plone
|
Plone/src/docpool.config/docpool/config/general/base.py
|
Python
|
gpl-3.0
| 6,876
|
# the following line needed for unicode character in convert_anglestring
# -*- coding: latin-1 -*-
import ast
import constants as c
import copy
import datetime
import dateutil
import logging
import math
import meteorologicalfunctions as mf
import netCDF4
import numpy
import os
import platform
import pytz
import sys
import time
import Tkinter,tkSimpleDialog
import xlrd
import xlwt
log = logging.getLogger('qc.utils')
def bp(fx,tao):
"""
Function to calculate the b and p coefficients of the Massman frequency correction.
"""
bp = 2 * c.Pi * fx * tao
return bp
def cfkeycheck(cf,Base='Variables',ThisOne=[],key=[]):
if len(ThisOne) == 0:
return
if len(key) == 0:
if Base in cf.keys() and ThisOne in cf[Base].keys():
return ThisOne in cf[Base].keys()
else:
return
else:
if Base in cf.keys() and ThisOne in cf[Base].keys():
return key in cf[Base][ThisOne].keys()
else:
return
def cfoptionskeylogical(cf,Key='',default=False):
if 'Options' in cf:
if Key in cf['Options']:
returnValue = cf.get('Options').as_bool(Key)
#if str(cf['Options'][Key]).lower()=="true" or str(cf['Options'][Key]).lower()=="yes":
#returnValue = True
#else:
#returnValue = False
else:
returnValue = default
else:
returnValue = default
return returnValue
#def CheckQCFlags(ds):
#"""
#Purpose:
#Make sure that all values of -9999 in a data series have a non-zero QC flag value.
#Usage:
#qcutils.CheckQCFlags(ds)
#Author: PRI
#Date: August 2014
#"""
#for ThisOne in ds.series.keys():
#data = numpy.ma.masked_values(ds.series[ThisOne]["Data"],-9999)
#flag = numpy.ma.masked_equal(ds.series[ThisOne]["Flag"],0)
#mask = data.mask&flag.mask
#index = numpy.ma.where(mask==True)[0]
#ds.series[ThisOne]["Flag"][index] = numpy.int32(8)
def CheckQCFlags(ds):
"""
Purpose:
Make sure that all values of -9999 in a data series have a non-zero QC flag value.
Usage:
qcutils.CheckQCFlags(ds)
Author: PRI
Date: August 2014
"""
# force any values of -9999 with QC flags of 0 to have a QC flag of 8
for ThisOne in ds.series.keys():
data = numpy.ma.masked_values(ds.series[ThisOne]["Data"],-9999)
flag = numpy.ma.masked_equal(numpy.mod(ds.series[ThisOne]["Flag"],10),0)
mask = data.mask&flag.mask
index = numpy.ma.where(mask==True)[0]
ds.series[ThisOne]["Flag"][index] = numpy.int32(8)
# force all values != -9999 to have QC flag = 0, 10, 20 etc
for ThisOne in ds.series.keys():
index = numpy.where((abs(ds.series[ThisOne]['Data']-numpy.float64(c.missing_value))>c.eps)&
(numpy.mod(ds.series[ThisOne]["Flag"],10)!=0))
ds.series[ThisOne]["Flag"][index] = numpy.int32(0)
def CheckTimeStep(ds):
"""
Purpose:
Checks the datetime series in the data structure ds to see if there are
any missing time stamps.
This function returns a logical variable that is true if any gaps exist
in the time stamp.
Usage:
has_gaps = CheckTimeStep(ds)
if has_gaps:
<do something about missing time stamps>
Author: PRI
Date: April 2013
"""
# set the has_gaps logical
has_gaps = False
# get the number of records
nRecs = int(ds.globalattributes["nc_nrecs"])
# get the time step
ts = int(ds.globalattributes["time_step"])
# time step between records in seconds
dt = get_timestep(ds)
# indices of elements where time step not equal to default
index = numpy.where(dt!=ts*60)[0]
# check to see if we have any time step problems
if len(index)!=0:
has_gaps = True
log.warning(" CheckTimeStep: "+str(len(index))+" problems found with the time stamp")
return has_gaps
def CheckUnits(ds,label,units,convert_units=False):
"""
Purpose:
General units checking and conversion.
Usage:
qcutils.CheckUnits(ds,label,units,convert_units=True)
where ds is a data structure
label (string) is the label of the series for which the units
are to be checked
units (string) is the required units
convert_units (logical, optional) is True to force conversion to
required units
Author: PRI
Date: January 2016
"""
data,flag,attr = GetSeriesasMA(ds,label)
if attr["units"]==units: return
if convert_units:
msg = " Units for "+label+" converted from "+attr["units"]+" to "+units
log.info(msg)
new_data = convert_units_func(ds,data,attr["units"],units)
attr["units"] = units
CreateSeries(ds,label,new_data,Flag=flag,Attr=attr)
else:
msg = " Units mismatch but conversion disabled"
log.error(msg)
sys.exit()
def contiguous_regions(condition):
"""
Purpose:
Finds contiguous True regions of the boolean array "condition". Returns
a 2D array where the first column is the start index of the region and the
second column is the end index.
Author: Joe Kington (via StackOverflow)
Date: September 2014
"""
# Find the indices of changes in "condition"
d = numpy.diff(condition)
idx, = d.nonzero()
# We need to start things after the change in "condition". Therefore,
# we'll shift the index by 1 to the right.
idx += 1
if condition[0]:
# If the start of condition is True prepend a 0
idx = numpy.r_[0, idx]
if condition[-1]:
# If the end of condition is True, append the length of the array
idx = numpy.r_[idx, condition.size]
# Reshape the result into two columns
idx.shape = (-1,2)
return idx
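# A minimal worked example (illustrative only):
# condition = numpy.array([False, True, True, False, True])
# contiguous_regions(condition) # -> array([[1, 3],
# # [4, 5]])
# i.e. the True runs span indices [1,3) and [4,5); starts are inclusive,
# ends are exclusive.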
def ConvertCO2Units(cf,ds,Cc='Cc'):
Cc_units_out = "mg/m3" # default value
Cc_units_in = ds.series[Cc]['Attr']['units']
if 'Options' in cf:
if 'CO2Units' in cf['Options']:
Cc_units_out = str(cf['Options']['CO2Units'])
if Cc_units_out!=Cc_units_in:
log.info(' Converting CO2 concentration from '+Cc_units_in+' to '+Cc_units_out)
if Cc_units_out=="umol/mol" and Cc_units_in=="mg/m3":
c_mgpm3,flag,attr = GetSeriesasMA(ds,Cc)
T,f,a = GetSeriesasMA(ds,'Ta')
p,f,a = GetSeriesasMA(ds,'ps')
c_ppm = mf.co2_ppmfrommgpm3(c_mgpm3,T,p)
attr["long_name"] = attr["long_name"]+", converted to umol/mol"
attr["units"] = Cc_units_out
attr["standard_name"] = "mole_concentration_of_carbon_dioxide_in_air"
CreateSeries(ds,Cc,c_ppm,Flag=flag,Attr=attr)
elif Cc_units_out=="mg/m3" and Cc_units_in=="umol/mol":
c_ppm,flag,attr = GetSeriesasMA(ds,Cc)
T,f,a = GetSeriesasMA(ds,'Ta')
p,f,a = GetSeriesasMA(ds,'ps')
c_mgpm3 = mf.co2_mgpm3fromppm(c_ppm,T,p)
attr["long_name"] = attr["long_name"]+", converted to mg/m3"
attr["units"] = Cc_units_out
attr["standard_name"] = "mass_concentration_of_carbon_dioxide_in_air"
CreateSeries(ds,Cc,c_mgpm3,Flag=flag,Attr=attr)
else:
log.info(' ConvertCO2Units: input or output units for CO2 concentration not recognised')
else:
log.info(" CO2 concentration already in requested units")
def ConvertFcUnits(cf,ds,Fc='Fc',Fc_storage='Fc_storage'):
if 'Options' not in cf: return
if 'FcUnits' not in cf['Options']: return
# the user may want to change the units of Fc and Fc_storage
Fc_units_out = str(cf['Options']['FcUnits'])
# convert units of Fc if required
if Fc in ds.series.keys():
Fc_units_in = ds.series[Fc]['Attr']['units']
if Fc_units_out!=Fc_units_in:
log.info(' Converting CO2 flux from '+Fc_units_in+' to '+Fc_units_out)
if Fc_units_out=="umol/m2/s" and Fc_units_in=="mg/m2/s":
Fc_mgpm2ps,flag,attr = GetSeriesasMA(ds,Fc)
Fc_umolpm2ps = mf.Fc_umolpm2psfrommgpm2ps(Fc_mgpm2ps)
attr["long_name"] = attr["long_name"]+", converted to umol/m2/s"
attr["units"] = Fc_units_out
attr["standard_name"] = "surface_upward_mole_flux_of_carbon_dioxide"
CreateSeries(ds,Fc,Fc_umolpm2ps,Flag=flag,Attr=attr)
elif Fc_units_out=="mg/m2/s" and Fc_units_in=="umol/m2/s":
Fc_umolpm2ps,f,a = GetSeriesasMA(ds,Fc)
Fc_mgpm2ps = mf.Fc_mgpm2psfromumolpm2ps(Fc_umolpm2ps)
attr["long_name"] = attr["long_name"]+', converted to mg/m2/s'
attr["units"] = Fc_units_out
attr["standard_name"] = "not defined"
CreateSeries(ds,Fc,Fc_mgpm2ps,Flag=flag,Attr=attr)
else:
log.info(' ConvertFcUnits: input or output units for Fc unrecognised')
# convert units of Fc_storage if required; just go with boilerplate for now
if Fc_storage in ds.series.keys():
Fc_storage_units_in = ds.series[Fc_storage]['Attr']['units']
if Fc_units_out!=Fc_storage_units_in:
log.info(' Converting CO2 storage flux from '+Fc_storage_units_in+' to '+Fc_units_out)
if Fc_units_out=="umol/m2/s" and Fc_storage_units_in=="mg/m2/s":
Fc_storage_mgpm2ps,flag,attr = GetSeriesasMA(ds,Fc_storage)
Fc_storage_umolpm2ps = mf.Fc_umolpm2psfrommgpm2ps(Fc_storage_mgpm2ps)
attr["long_name"] = attr["long_name"]+", converted to umol/m2/s"
attr["units"] = Fc_units_out
CreateSeries(ds,Fc_storage,Fc_storage_umolpm2ps,Flag=flag,Attr=attr)
elif Fc_units_out=="mg/m2/s" and Fc_storage_units_in=="umol/m2/s":
Fc_storage_umolpm2ps,f,a = GetSeriesasMA(ds,Fc_storage)
Fc_storage_mgpm2ps = mf.Fc_mgpm2psfromumolpm2ps(Fc_storage_umolpm2ps)
attr["long_name"] = attr["long_name"]+", converted to mg/m2/s"
attr["units"] = Fc_units_out
CreateSeries(ds,Fc_storage,Fc_storage_mgpm2ps,Flag=flag,Attr=attr)
else:
log.info(' ConvertFcUnits: input or output units for Fc_storage unrecognised')
def convert_units_func(ds,old_data,old_units,new_units,mode="quiet"):
"""
Purpose:
Generic routine for changing units.
Nothing is done if the original units are the same as the requested units.
Usage:
new_data = qcutils.convert_units_func(ds,old_data,old_units,new_units)
where ds is the data structure
old_data is a 1D array of data in the original units
old_units are the units of the original data
new_units are the units of the new data
Author: PRI
Date: July 2015
"""
if old_units==new_units: return old_data
# check the units are something we understand
# add more lists here to cope with water etc
co2_list = ["umol/m2/s","gC/m2","mg/m3","mgCO2/m3","umol/mol","mg/m2/s","mgCO2/m2/s"]
h2o_list = ["g/m3","mmol/mol","%","frac"]
t_list = ["C","K"]
# h2o_list = ["%","frac","g/m3","kg/kg","mmol/mol"]
ok_list = co2_list+h2o_list+t_list
# parse the original units
#if old_units=="umol/m^2/s": old_units="umol/m2/s"
#if old_units.replace(" ","")=="umolm-2s-1": old_units="umol/m2/s"
if old_units not in ok_list:
msg = " Unrecognised units in quantity provided ("+old_units+")"
log.error(msg)
new_data = numpy.ma.array(old_data,copy=True,mask=True)
elif new_units not in ok_list:
msg = " Unrecognised units requested ("+new_units+")"
log.error(msg)
new_data = numpy.ma.array(old_data,copy=True,mask=True)
elif new_units in co2_list:
if old_units in co2_list:
new_data = convert_units_co2(ds,old_data,old_units,new_units)
else:
msg = " New units ("+new_units+") not compatible with old ("+old_units+")"
log.error(msg)
new_data = numpy.ma.array(old_data,copy=True,mask=True)
elif new_units in h2o_list:
if old_units in h2o_list:
new_data = convert_units_h2o(ds,old_data,old_units,new_units)
else:
msg = " New units ("+new_units+") not compatible with old ("+old_units+")"
log.error(msg)
new_data = numpy.ma.array(old_data,copy=True,mask=True)
elif new_units in t_list:
if old_units in t_list:
new_data = convert_units_t(ds,old_data,old_units,new_units)
else:
msg = " New units ("+new_units+") not compatible with old ("+old_units+")"
log.error(msg)
new_data = numpy.ma.array(old_data,copy=True,mask=True)
else:
msg = "Unrecognised units combination "+old_units+" and "+new_units
log.error(msg)
new_data = numpy.ma.array(old_data,copy=True,mask=True)
return new_data
def convert_units_co2(ds,old_data,old_units,new_units):
"""
Purpose:
General purpose routine to convert from one set of CO2 concentration units
to another.
Conversions supported are:
umol/m2/s to gC/m2 (per time step)
gC/m2 (per time step) to umol/m2/s
mg/m3 to umol/mol
mgCO2/m3 to umol/mol
umol/mol to mg/m3
mg/m2/s to umol/m2/s
mgCO2/m2/s to umol/m2/s
Usage:
new_data = qcutils.convert_units_co2(ds,old_data,old_units,new_units)
where ds is a data structure
old_data (numpy array) is the data to be converted
old_units (string) is the old units
new_units (string) is the new units
Author: PRI
Date: January 2016
"""
ts = int(ds.globalattributes["time_step"])
if old_units=="umol/m2/s" and new_units=="gC/m2":
new_data = old_data*12.01*ts*60/1E6
elif old_units=="gC/m2" and new_units=="umol/m2/s":
new_data = old_data*1E6/(12.01*ts*60)
elif old_units in ["mg/m3","mgCO2/m3"] and new_units=="umol/mol":
Ta,f,a = GetSeriesasMA(ds,"Ta")
ps,f,a = GetSeriesasMA(ds,"ps")
new_data = mf.co2_ppmfrommgpm3(old_data,Ta,ps)
elif old_units=="umol/mol" and new_units in ["mg/m3","mgCO2/m3"]:
Ta,f,a = GetSeriesasMA(ds,"Ta")
ps,f,a = GetSeriesasMA(ds,"ps")
new_data = mf.co2_mgpm3fromppm(old_data,Ta,ps)
elif old_units in ["mg/m2/s","mgCO2/m2/s"] and new_units=="umol/m2/s":
new_data = mf.Fc_umolpm2psfrommgpm2ps(old_data)
else:
msg = " Unrecognised conversion from "+old_units+" to "+new_units
log.error(msg)
new_data = numpy.ma.array(old_data,copy=True,mask=True)
return new_data
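# Worked example (a sketch) for the umol/m2/s to gC/m2 conversion above, with
# a 30 minute time step:
# 1 umol/m2/s * 12.01 g/mol * 30 min * 60 s/min / 1E6 umol/mol
# = 0.021618 gC/m2 per time step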
def convert_units_h2o(ds,old_data,old_units,new_units):
"""
Purpose:
General purpose routine to convert from one set of H2O concentration units
to another.
Conversions supported are:
g/m3 to mmol/mol
mmol/mol to g/m3
Usage:
new_data = qcutils.convert_units_h2o(ds,old_data,old_units,new_units)
where ds is a data structure
old_data (numpy array) is the data to be converted
old_units (string) is the old units
new_units (string) is the new units
Author: PRI
Date: January 2016
"""
ts = int(ds.globalattributes["time_step"])
if old_units=="mmol/mol" and new_units=="g/m3":
Ta,f,a = GetSeriesasMA(ds,"Ta")
ps,f,a = GetSeriesasMA(ds,"ps")
new_data = mf.h2o_gpm3frommmolpmol(old_data,Ta,ps)
elif old_units=="g/m3" and new_units=="mmol/mol":
Ta,f,a = GetSeriesasMA(ds,"Ta")
ps,f,a = GetSeriesasMA(ds,"ps")
new_data = mf.h2o_mmolpmolfromgpm3(old_data,Ta,ps)
elif old_units=="frac" and new_units=="%":
new_data = old_data*float(100)
elif old_units=="%" and new_units=="frac":
new_data = old_data/float(100)
else:
msg = " Unrecognised conversion from "+old_units+" to "+new_units
log.error(msg)
new_data = numpy.ma.array(old_data,copy=True,mask=True)
return new_data
def convert_units_t(ds,old_data,old_units,new_units):
"""
Purpose:
General purpose routine to convert from one set of temperature units
to another.
Conversions supported are:
C to K
K to C
Usage:
new_data = qcutils.convert_units_t(ds,old_data,old_units,new_units)
where ds is a data structure
old_data (numpy array) is the data to be converted
old_units (string) is the old units
new_units (string) is the new units
Author: PRI
Date: January 2016
"""
ts = int(ds.globalattributes["time_step"])
if old_units=="C" and new_units=="K":
new_data = old_data+c.C2K
elif old_units=="K" and new_units=="C":
new_data = old_data-c.C2K
else:
msg = " Unrecognised conversion from "+old_units+" to "+new_units
log.error(msg)
new_data = numpy.ma.array(old_data,copy=True,mask=True)
return new_data
def convert_anglestring(anglestring):
"""
Purpose:
Attempt to convert an angle string to a float.
Usage:
a = qcutils.convert_anglestring(astr)
Acceptable input formats:
astr = '''34 12' 24" S'''
astr = '''34 12 24S'''
astr = '''34 12'24.123"S'''
astr = '''34.123 S'''
astr = '''-34.123'''
"""
quadlist=["N","E","S","W"]
direction = {'N':1, 'S':-1, 'E': 1, 'W':-1}
try:
# simple casting may work, who knows?
return float(anglestring)
except ValueError:
# replace the degrees, minutes and seconds symbols with spaces
new = anglestring.replace(u'\xb0',' ').replace('\'',' ').replace('"',' ')
# check there is a space between the quadrant letter (assumed to be one of N, E, W or S)
# and the next character to the left
# find out which of N, E, S, or W is in the string
for item in quadlist:
if item in new: quadletter=item
# now get the index of this character in the string
i=new.index(quadletter)
# check that the next character to the left is a space character
if new[i-1] != " ": new = new[0:i]+" "+new[i:]
# now split the string on space characters
new = new.split()
# get the quadrant letter
new_dir = new.pop()
# make sure we have 3 parts
new.extend([0,0,0])
# return with the string converted to a float
return (float(new[0])+float(new[1])/60.0+float(new[2])/3600.0) * direction[new_dir]
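# Worked examples (illustrative; results rounded):
# convert_anglestring('''34 12' 24" S''') # -> -(34 + 12/60. + 24/3600.) = -34.2067
# convert_anglestring('-34.123') # -> -34.123 (the simple float cast succeeds)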
def convert_WsWdtoUV(Ws,Wd):
"""
Purpose:
Convert wind speed and direction to U and V components.
This routine follows the meteorological convention:
- wind direction is positive going clockwise from north
- U is positive towards east
- V is positive towards north
Usage:
u,v = qcutils.convert_WsWdtoUV(Ws,Wd)
Author: PRI
Date: February 2015
"""
u = -Ws*numpy.sin(numpy.radians(Wd))
v = -Ws*numpy.cos(numpy.radians(Wd))
return u,v
def convert_UVtoWsWd(u,v):
"""
Purpose:
Convert U and V components to wind speed and direction
This routine follows the meteorological convention:
- wind direction is positive going clockwise from north
- U is positive towards east
- V is positive towards north
Usage:
Ws,Wd = qcutils.convert_UVtoWsWd(U,V)
Author: PRI
Date: February 2015
"""
Wd = float(270) - (numpy.degrees(numpy.arctan2(v,u)))
Wd = numpy.mod(Wd,360)
Ws = numpy.sqrt(u*u + v*v)
return Ws,Wd
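# Round-trip sanity check for the wind conversions (illustrative):
# u, v = convert_WsWdtoUV(5.0, 90.0) # 5 m/s from the east -> u = -5.0, v ~ 0.0
# convert_UVtoWsWd(u, v) # -> (5.0, 90.0)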
def CreateSeries(ds,Label,Data,FList=None,Flag=None,Attr=None):
"""
Purpose:
Create a series (1d array) of data in the data structure.
If the series already exists in the data structure, data values and QC flags will be
overwritten. Attributes passed in via Attr override the corresponding existing
attributes; existing attributes not present in Attr are preserved.
This utility is the preferred method for creating or updating a data series because
it implements a consistent method for creating series in the data structure. Direct
writes to the contents of the data structure are discouraged (unless PRI wrote the code:=P).
Usage:
Fsd,flag,attr = qcutils.GetSeriesasMA(ds,"Fsd")
... do something to Fsd here ...
qcutils.CreateSeries(ds,"Fsd",Fsd,Flag=flag,Attr=attr)
Author: PRI
Date: Back in the day
"""
ds.series['_tmp_'] = {} # create a temporary series to avoid premature overwrites
# put the data into the temporary series
if numpy.ma.isMA(Data):
ds.series['_tmp_']['Data'] = numpy.ma.filled(Data,float(c.missing_value))
else:
ds.series['_tmp_']['Data'] = numpy.array(Data)
# copy or make the QC flag
if Flag is None:
ds.series['_tmp_']['Flag'] = MakeQCFlag(ds,FList)
else:
ds.series['_tmp_']['Flag'] = Flag.astype(numpy.int32)
# do the attributes
ds.series['_tmp_']['Attr'] = {}
if Label in ds.series.keys(): # check to see if the series already exists
for attr in ds.series[Label]['Attr']: # if it does, copy the existing attributes
if attr in Attr and ds.series[Label]['Attr'][attr]!=Attr[attr]:
ds.series['_tmp_']['Attr'][attr] = Attr[attr]
else:
ds.series['_tmp_']['Attr'][attr] = ds.series[Label]['Attr'][attr]
for attr in Attr:
if attr not in ds.series['_tmp_']['Attr'].keys():
ds.series['_tmp_']['Attr'][attr] = Attr[attr]
else:
for item in Attr:
ds.series['_tmp_']['Attr'][item] = Attr[item]
ds.series[unicode(Label)] = ds.series['_tmp_'] # copy temporary series to new series
del ds.series['_tmp_'] # delete the temporary series
def CreateDatetimeRange(start,stop,step=datetime.timedelta(minutes=30)):
'''
Purpose:
Create a series of datetimes between the "start" and "stop" datetimes
and with a time step of "step".
Usage:
dt = ds.series['DateTime']['Data']
ts = ds.globalattributes['time_step']
dt_evenlyspaced = CreateDatetimeRange(dt[0],dt[-1],step=datetime.timedelta(minutes=ts))
Author: PRI
Date: December 2013
'''
result = []
while start<stop:
result.append(start)
start = start + step
return result
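# Example (a sketch): half-hourly datetimes, end point excluded.
# start = datetime.datetime(2013, 1, 1, 0, 0)
# stop = datetime.datetime(2013, 1, 1, 1, 30)
# CreateDatetimeRange(start, stop) # -> [00:00, 00:30, 01:00] on 2013-01-01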
def CreateVariableFromDictionary(ds,variable):
"""
Purpose:
Create a variable in the data structure.
If the variable already exists in the data structure, data values, QC flags and
attributes will be overwritten.
This utility is the preferred method for creating or updating a data series because
it implements a consistent method for creating series in the data structure. Direct
writes to the contents of the data structure are discouraged (unless PRI wrote the code:=P).
Usage:
Fsd = qcutils.GetVariableAsDictionary(ds,"Fsd")
... do something to Fsd here ...
... and don't forget to update the QC flag ...
... and the attributes ...
qcutils.CreateVariableFromDictionary(ds,Fsd)
Author: PRI
Date: September 2016
"""
label = variable["Label"]
# create a temporary series to avoid premature overwrites
ds.series["_tmp_"] = {}
# put the data into the temporary series
if numpy.ma.isMA(variable["Data"]):
ds.series["_tmp_"]["Data"] = numpy.ma.filled(variable["Data"],
float(c.missing_value))
else:
ds.series["_tmp_"]["Data"] = numpy.array(variable["Data"])
# copy or make the QC flag
ds.series["_tmp_"]["Flag"] = numpy.array(variable["Flag"])
# do the attributes
ds.series["_tmp_"]["Attr"] = copy.deepcopy(variable["Attr"])
# and copy the temporary series back to the original label
ds.series[unicode(label)] = copy.deepcopy(ds.series['_tmp_'])
# delete the temporary series
del ds.series['_tmp_']
def file_exists(filename,mode="verbose"):
if not os.path.exists(filename):
if mode=="verbose":
log.error(' File '+filename+' not found')
return False
else:
return True
def FindIndicesOfBInA(a,b):
"""
Purpose:
Find the indices of elements in b that also occur in a.
The routine is intended for use only with lists of Python datetime
values. The input series should be monotonically increasing (though this
is not a requirement) and must contain no duplicates (duplicates are not
handled).
Limitations:
Argument a is converted to a set to greatly speed the comparison
of b elements with a. This means that duplicates in a will be
dropped and hence only 1 index will be returned for each value
in b.
Usage:
indices = qcutils.FindIndicesOfBInA(a,b)
where a is a list of Python datetime objects
b is a list of Python datetime objects
indices is a list of indices in b where the elements of b
also occur in a
Author: PRI
Date: July 2015
Comments: Replaces find_indices used up to V2.9.3.
"""
if len(set(a))!=len(a):
msg = " FindIndicesOfBInA: first argument contains duplicate values"
log.warning(msg)
tmpset = set(a)
indices = [i for i,item in enumerate(b) if item in tmpset]
return indices
def RemoveDuplicateRecords(ds):
""" Remove duplicate records."""
# the ds.series["DateTime"]["Data"] series is actually a list
for item in ["DateTime","DateTime_UTC"]:
if item in ds.series.keys():
ldt,ldt_flag,ldt_attr = GetSeries(ds,item)
# ldt_nodups is returned as an ndarray
ldt_nodups,idx_nodups = numpy.unique(numpy.array(ldt),return_index=True)
# now get ldt_nodups as a list
ldt_nodups = ldt_nodups.tolist()
# and put it back into the data structure
ds.series[item]["Data"] = ldt_nodups
ds.series[item]["Flag"] = ldt_flag[idx_nodups]
# get a list of the series in the data structure
series_list = [item for item in ds.series.keys() if '_QCFlag' not in item]
# remove the DateTime
for item in ["DateTime","DateTime_UTC"]:
if item in series_list: series_list.remove(item)
# loop over the series in the data structure
for ThisOne in series_list:
data_dups,flag_dups,attr = GetSeriesasMA(ds,ThisOne)
data_nodups = data_dups[idx_nodups]
flag_nodups = flag_dups[idx_nodups]
CreateSeries(ds,ThisOne,data_nodups,Flag=flag_nodups,Attr=attr)
ds.globalattributes['nc_nrecs'] = len(ds.series["DateTime"]["Data"])
def FixNonIntegralTimeSteps(ds,fixtimestepmethod=""):
"""
Purpose:
Fix time steps that are not an integral number of the default time step.
The default time step is read from the "time_step" global attribute which is read from
the L1 control file and written to the L1 netCDF file.
The most common cause of non-integral time steps is drift in logger time stamp or
rounding errors in Excel's treatment of datetimes.
Usage:
FixNonIntegralTimeSteps(ds)
Called By: CheckTimeStep
Author: PRI
Date: February 2015
To do:
Implement [I]nterpolate
"""
ts = int(ds.globalattributes["time_step"])
ldt = ds.series["DateTime"]["Data"]
dt_diffs = numpy.array([(ldt[i]-rounddttots(ldt[i],ts=ts)).total_seconds() for i in range(1,len(ldt))])
log.info(" Maximum drift is "+str(numpy.max(dt_diffs))+" seconds, minimum drift is "+str(numpy.min(dt_diffs))+" seconds")
ans = fixtimestepmethod
if ans=="": ans = raw_input("Do you want to [Q]uit, [I]nterpolate or [R]ound? ")
if ans.lower()[0]=="q":
print "Quiting ..."
sys.exit()
if ans.lower()[0]=="i":
print "Interpolation to regular time step not implemented yet ..."
sys.exit()
if ans.lower()[0]=="r":
log.info(" Rounding to the nearest time step")
ldt_rounded = [rounddttots(dt,ts=ts) for dt in ldt]
rdt = numpy.array([(ldt_rounded[i]-ldt_rounded[i-1]).total_seconds() for i in range(1,len(ldt))])
log.info(" Maximum time step is now "+str(numpy.max(rdt))+" seconds, minimum time step is now "+str(numpy.min(rdt)))
# replace the existing datetime series with the datetime series rounded to the nearest time step
ds.series["DateTime"]["Data"] = ldt_rounded
ds.globalattributes['nc_nrecs'] = len(ds.series["DateTime"]["Data"])
def FixTimeGaps(ds):
"""
Purpose:
Fix gaps in datetime series found by CheckTimeStep.
Usage:
has_gaps = CheckTimeStep(ds)
if has_gaps:
FixTimeGaps(ds)
Author: PRI
Date: April 2013
Modified:
September 2014 - rewrite for clarity and efficiency
February 2015 - and again ...
"""
ts = int(ds.globalattributes["time_step"])
#ldt_gaps,ldt_flag,ldt_attr = GetSeries(ds,"DateTime")
ldt_gaps = ds.series["DateTime"]["Data"]
# generate a datetime list from the start datetime to the end datetime
ldt_start = ldt_gaps[0]
ldt_end = ldt_gaps[-1]
ldt_nogaps = [result for result in perdelta(ldt_start,ldt_end,datetime.timedelta(minutes=ts))]
# update the global attribute containing the number of records
nRecs = len(ldt_nogaps)
ds.globalattributes['nc_nrecs'] = nRecs
# find the indices of the no-gap data in the original data
idx_gaps = FindIndicesOfBInA(ldt_gaps,ldt_nogaps)
# update the series of Python datetimes
ds.series['DateTime']['Data'] = ldt_nogaps
org_flag = ds.series['DateTime']['Flag'].astype(numpy.int32)
ds.series['DateTime']['Flag'] = numpy.ones(nRecs,dtype=numpy.int32)
ds.series['DateTime']['Flag'][idx_gaps] = org_flag
# get a list of series in the data structure
series_list = [item for item in ds.series.keys() if '_QCFlag' not in item]
# remove the datetime-related series from data structure
datetime_list = ["DateTime","DateTime_UTC"]
for item in datetime_list:
if item in series_list: series_list.remove(item)
# now loop over the rest of the series in the data structure
for ThisOne in series_list:
data_nogaps = numpy.ones(nRecs,dtype=numpy.float64)*float(-9999)
flag_nogaps = numpy.ones(nRecs,dtype=numpy.int32)
data_gaps,flag_gaps,attr = GetSeriesasMA(ds,ThisOne)
data_nogaps[idx_gaps] = data_gaps
flag_nogaps[idx_gaps] = flag_gaps
CreateSeries(ds,ThisOne,data_nogaps,Flag=flag_nogaps,Attr=attr)
def FixTimeStep(ds,fixtimestepmethod="round"):
"""
Purpose:
Fix problems with the time stamp.
Usage:
qcutils.FixTimeStep(ds,fixtimestepmethod=fixtimestepmethod)
Author: PRI
Date: April 2013
Modified:
February 2015 - split check and fix functions into different routines
"""
# get the number of records
nRecs = int(ds.globalattributes["nc_nrecs"])
# get the time step
ts = int(ds.globalattributes["time_step"])
# time step between records in seconds
dt = get_timestep(ds)
dtmin = numpy.min(dt)
dtmax = numpy.max(dt)
if dtmin < ts*60:
# duplicate or overlapping times found
log.info(' FixTimeStep: duplicate or overlapping times found, removing ...')
RemoveDuplicateRecords(ds)
dt = get_timestep(ds)
dtmin = numpy.min(dt)
dtmax = numpy.max(dt)
#log.info("After RemoveDuplicateRecords:"+str(dtmin)+" "+str(dtmax))
if numpy.min(numpy.mod(dt,ts*60))!=0 or numpy.max(numpy.mod(dt,ts*60))!=0:
# non-integral time steps found
# indices of elements where time step not equal to default
index = numpy.where(numpy.mod(dt,ts*60)!=0)[0]
log.info(" FixTimeStep: Non-integral time steps found "+str(len(index))+" times out of "+str(nRecs))
log.info(" FixTimeStep: Maximum time step was "+str(numpy.max(dt))+" seconds, minimum time step was "+str(numpy.min(dt)))
FixNonIntegralTimeSteps(ds,fixtimestepmethod=fixtimestepmethod)
dt = get_timestep(ds)
dtmin = numpy.min(dt)
dtmax = numpy.max(dt)
#log.info("After FixNonIntegralTimeSteps:"+str(dtmin)+" "+str(dtmax))
if dtmax > ts*60:
# time gaps found
log.info(' FixTimeStep: one or more time gaps found, inserting times ...')
FixTimeGaps(ds)
dt = get_timestep(ds)
dtmin = numpy.min(dt)
dtmax = numpy.max(dt)
#log.info("After FixTimeGaps: "+str(dtmin)+" "+str(dtmax))
def GetAverageSeriesKeys(cf,ThisOne):
if incf(cf,ThisOne) and haskey(cf,ThisOne,'AverageSeries'):
if 'Source' in cf['Variables'][ThisOne]['AverageSeries'].keys():
alist = ast.literal_eval(cf['Variables'][ThisOne]['AverageSeries']['Source'])
else:
log.error(' GetAverageSeriesKeys: key "Source" not in control file AverageSeries section for '+ThisOne)
alist = []
if 'standard_name' in cf['Variables'][ThisOne]['AverageSeries'].keys():
standardname = str(cf['Variables'][ThisOne]['AverageSeries']['standard_name'])
else:
standardname = "not defined"
else:
standardname = "not defined"
log.info(' GetAverageSeriesKeys: '+ThisOne+ ' not in control file or it does not have the "AverageSeries" key')
alist = []
return alist, standardname
def GetAltName(cf,ds,ThisOne):
'''
Check to see if the specified variable name is in the data structure (ds).
If it is, return the variable name unchanged.
If it isn't, check the control file to see if an alternate name has been specified
and return the alternate name if one exists.
'''
if ThisOne not in ds.series.keys():
if ThisOne in cf['Variables'].keys():
ThisOne = cf['Variables'][ThisOne]['AltVarName']
if ThisOne not in ds.series.keys():
log.error('GetAltName: alternate variable name not in ds')
else:
log.error('GetAltName: cannot find '+str(ThisOne)+' in ds or control file')
return ThisOne
def GetAltNameFromCF(cf,ThisOne):
'''
Get an alternate variable name from the control file.
'''
if ThisOne in cf['Variables'].keys():
if 'AltVarName' in cf['Variables'][ThisOne].keys():
ThisOne = str(cf['Variables'][ThisOne]['AltVarName'])
else:
print 'GetAltNameFromCF: AltVarName key not in control file for '+str(ThisOne)
else:
print 'GetAltNameFromCF: '+str(ThisOne)+' not in control file'
return ThisOne
def GetAttributeDictionary(ds,ThisOne):
attr = {}
# if series ThisOne is in the data structure
if ThisOne in ds.series.keys():
attr = ds.series[ThisOne]['Attr']
else:
attr = MakeAttributeDictionary()
return copy.deepcopy(attr)
def GetcbTicksFromCF(cf,ThisOne):
'''
Get colour bar tick labels from the control file.
'''
Ticks = None
if ThisOne in cf['Variables'].keys():
if 'Ticks' in cf['Variables'][ThisOne].keys():
Ticks = eval(cf['Variables'][ThisOne]['Ticks'])
else:
print 'GetcbTicksFromCF: Ticks key not in control file for '+str(ThisOne)
else:
print 'GetcbTicksFromCF: '+str(ThisOne)+' not in control file'
return Ticks
def GetRangesFromCF(cf,ThisOne,mode="verbose"):
'''
Get lower and upper range limits from the control file.
'''
if ThisOne in cf['Variables'].keys():
if 'Lower' in cf['Variables'][ThisOne].keys():
lower = float(cf['Variables'][ThisOne]['Lower'])
else:
if mode.lower()!="quiet":
msg = "GetRangesFromCF: Lower key not in control file for "+str(ThisOne)
log.info(msg)
lower = None
if 'Upper' in cf['Variables'][ThisOne].keys():
upper = float(cf['Variables'][ThisOne]['Upper'])
else:
if mode.lower()!="quiet":
msg = "GetRangesFromCF: Upper key not in control file for "+str(ThisOne)
log.info(msg)
upper = None
else:
if mode.lower()!="quiet":
msg = "GetRangesFromCF: "+str(ThisOne)+" not in control file"
log.info(msg)
lower = upper = None
return lower, upper
def GetDateIndex(dts,date,ts=30,default=0,match='exact'):
"""
Purpose:
Return the index of a date/datetime string in an array of datetime objects
Usage:
si = qcutils.GetDateIndex(datetimeseries,date_str,ts=30,default=0,match='exact')
where
dts - array of datetime objects
date_str - a date or date/time string in a format dateutil can parse
ts - time step for the data, optional (integer)
default - default value, optional (integer)
match - type of match (string) options are:
"exact" - finds the specified datetime and returns
the index
"startnextday" - returns the index of the first time period
in the next day
"endpreviousday" - returns the index of the last time period
in the previous day
"startnexthour" - returns the index of the first time period
in the next hour
"endprevioushour" - returns the index of the last time period
in the previous hour
"startnextmonth" - returns the index of the first time period
in the next month
"endpreviousmonth" - returns the index of the last time period
in the previous month
NOTE: "startnextday" and "endpreviousday" can be used to pick
out time periods with an integer number of days
Author: PRI
Date: Back in the day
"""
try:
if len(date)!=0:
i = dts.index(dateutil.parser.parse(date))
else:
if default==-1:
i = len(dts)-1
else:
i = default
except ValueError:
if default==-1:
i = len(dts)-1
else:
i = default
if match=="exact":
# if an exact match is required, do nothing
pass
elif match=="startnextmonth":
# get to the start of the next day, then advance to the first day of the next month
while abs(dts[i].hour+float(dts[i].minute)/60-float(ts)/60)>c.eps:
i = i + 1
while dts[i].day!=1:
i = i + int(float(24)/(float(ts)/60))
elif match=='startnextday':
while abs(dts[i].hour+float(dts[i].minute)/60-float(ts)/60)>c.eps:
i = i + 1
elif match=="startnexthour":
# check the time step value
if int(ts)!=60:
# if the time step is 60 then it is always the start of the next hour
# we assume here that the time period ends on the datetime stamp
while dts[i].minute!=ts:
# iterate until the minutes equal the time step
i = i + 1
elif match=='endpreviousmonth':
while abs(dts[i].hour+float(dts[i].minute)/60)>c.eps:
i = i - 1
while dts[i].day!=1:
i = i - int(float(24)/(float(ts)/60))
elif match=='endpreviousday':
while abs(dts[i].hour+float(dts[i].minute)/60)>c.eps:
i = i - 1
elif match=="endprevioushour":
# check the time step value
if int(ts)!=60:
# if the time step is 60 then it is always the end of the previous hour
# we assume here that the time period ends on the datetime stamp
while dts[i].minute!=0:
# iterate until the minutes equal 0
i = i - 1
else:
log.error("GetDateIndex: Unrecognised match option")
return i
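# Illustrative example (assumes dts is a list of half-hourly Python datetimes):
# si = GetDateIndex(dts, "2013-01-01 12:00", ts=30, match="startnextday")
# advances from midday to the first half-hourly period of the next day, i.e.
# the record time-stamped 2013-01-02 00:30 (periods end on their time stamp).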
def GetGlobalAttributeValue(cf,ds,ThisOne):
if ThisOne not in ds.globalattributes.keys():
if ThisOne in cf['General'].keys():
ds.globalattributes[ThisOne] = cf['General'][ThisOne]
else:
log.error(' GetGlobalAttributeValue: global attribute '+ThisOne+' was not found in the netCDF file or in the control file')
ds.globalattributes[ThisOne] = None
return ds.globalattributes[ThisOne]
def GetMergeSeriesKeys(cf,ThisOne,section=''):
if len(section)==0: section = 'Variables'
if 'Source' in cf[section][ThisOne]['MergeSeries'].keys():
mlist = ast.literal_eval(cf[section][ThisOne]['MergeSeries']['Source'])
else:
log.error(' GetMergeSeriesKeys: key "Source" not in control file MergeSeries section for '+ThisOne)
mlist = []
if 'standard_name' in cf[section][ThisOne]['MergeSeries'].keys():
standardname = str(cf[section][ThisOne]['MergeSeries']['standard_name'])
else:
standardname = 'not defined'
return mlist, standardname
def GetPlotTitleFromCF(cf, nFig):
Title = ''
if 'Plots' in cf:
if str(nFig) in cf['Plots']:
if 'Title' in cf['Plots'][str(nFig)]:
Title = str(cf['Plots'][str(nFig)]['Title'])
else:
print 'GetPlotTitleFromCF: Title key not in control file for plot '+str(nFig)
else:
print 'GetPlotTitleFromCF: '+str(nFig)+' key not in Plots section of control file'
else:
print 'GetPlotTitleFromCF: Plots key not in control file'
return Title
def GetPlotVariableNamesFromCF(cf, n):
SeriesList = []
if 'Plots' in cf:
if str(n) in cf['Plots']:
if 'Variables' in cf['Plots'][str(n)]:
SeriesList = eval(cf['Plots'][str(n)]['Variables'])
else:
print 'GetPlotVariableNamesFromCF: Variables key not in control file for plot '+str(n)
else:
print 'GetPlotVariableNamesFromCF: '+str(n)+' key not in Plots section of control file'
else:
print 'GetPlotVariableNamesFromCF: Plots key not in control file'
return SeriesList
def GetSeries(ds,ThisOne,si=0,ei=-1,mode="truncate"):
""" Returns the data, QC flag and attributes of a series from the data structure."""
# number of records
if "nc_nrecs" in ds.globalattributes:
nRecs = int(ds.globalattributes["nc_nrecs"])
else:
nRecs = len(ds.series[ThisOne]["Data"])
# check the series requested is in the data structure
if ThisOne in ds.series.keys():
# series is in the data structure
if isinstance(ds.series[ThisOne]['Data'],list):
# return a list if the series is a list
Series = list(ds.series[ThisOne]['Data'])
elif isinstance(ds.series[ThisOne]['Data'],numpy.ndarray):
# return a numpy array if series is an array
Series = ds.series[ThisOne]['Data'].copy()
# now get the QC flag
if 'Flag' in ds.series[ThisOne].keys():
# return the QC flag if it exists
Flag = ds.series[ThisOne]['Flag'].copy()
else:
# create a QC flag if one does not exist
Flag = numpy.zeros(nRecs,dtype=numpy.int32)
# now get the attribute dictionary
if "Attr" in ds.series[ThisOne].keys():
Attr = GetAttributeDictionary(ds,ThisOne)
else:
Attr = MakeAttributeDictionary()
else:
# make an empty series if the requested series does not exist in the data structure
Series,Flag,Attr = MakeEmptySeries(ds,ThisOne)
# tidy up
if ei==-1: ei = nRecs - 1
if mode=="truncate":
# truncate to the requested start and end indices
si = max(0,si) # clip start index at 0
ei = min(nRecs,ei) # clip end index to nRecs
Series = Series[si:ei+1] # truncate the data
Flag = Flag[si:ei+1] # truncate the QC flag
elif mode=="pad":
# pad with missing data at the start and/or the end of the series
if si<0 and ei>nRecs-1:
# pad at the start
Series = numpy.append(float(c.missing_value)*numpy.ones(abs(si),dtype=numpy.float64),Series)
Flag = numpy.append(numpy.ones(abs(si),dtype=numpy.int32),Flag)
# pad at the end
Series = numpy.append(Series,float(c.missing_value)*numpy.ones((ei-(nRecs-1)),dtype=numpy.float64))
Flag = numpy.append(Flag,numpy.ones((ei-(nRecs-1)),dtype=numpy.int32))
elif si<0 and ei<=nRecs-1:
# pad at the start, truncate the end
Series = numpy.append(float(c.missing_value)*numpy.ones(abs(si),dtype=numpy.float64),Series[:ei+1])
Flag = numpy.append(numpy.ones(abs(si),dtype=numpy.int32),Flag[:ei+1])
elif si>=0 and ei>nRecs-1:
# truncate at the start, pad at the end
Series = numpy.append(Series[si:],float(c.missing_value)*numpy.ones((ei-(nRecs-1)),numpy.float64))
Flag = numpy.append(Flag[si:],numpy.ones((ei-(nRecs-1)),dtype=numpy.int32))
elif si>=0 and ei<=nRecs-1:
# truncate at the start and end
Series = Series[si:ei+1]
Flag = Flag[si:ei+1]
else:
msg = 'GetSeries: unrecognised combination of si ('+str(si)+') and ei ('+str(ei)+')'
raise ValueError(msg)
elif mode=="mirror":
# reflect data about end boundaries if si or ei are out of bounds
if si<0 and ei>nRecs-1:
# mirror at the start
Series = numpy.append(numpy.fliplr([Series[1:abs(si)+1]])[0],Series)
Flag = numpy.append(numpy.fliplr([Flag[1:abs(si)+1]])[0],Flag)
# mirror at the end
sim = 2*nRecs-1-ei
eim = nRecs-1
Series = numpy.append(Series,numpy.fliplr([Series[sim:eim]])[0])
Flag = numpy.append(Flag,numpy.fliplr([Flag[sim:eim]])[0])
elif si<0 and ei<=nRecs-1:
# mirror at start, truncate at end
Series = numpy.append(numpy.fliplr([Series[1:abs(si)+1]])[0],Series[:ei+1])
Flag = numpy.append(numpy.fliplr([Flag[1:abs(si)+1]])[0],Flag[:ei+1])
elif si>=0 and ei>nRecs-1:
# truncate at start, mirror at end
sim = 2*nRecs-1-ei
eim = nRecs
Series = numpy.append(Series[si:],numpy.fliplr([Series[sim:eim]])[0])
Flag = numpy.append(Flag[si:],numpy.fliplr([Flag[sim:eim]])[0])
elif si>=0 and ei<=nRecs-1:
# truncate at the start and end
Series = Series[si:ei+1]
Flag = Flag[si:ei+1]
else:
msg = 'GetSeries: unrecognised combination of si ('+str(si)+') and ei ('+str(ei)+')'
raise ValueError(msg)
else:
raise ValueError("GetSeries: unrecognised mode option "+str(mode))
return Series,Flag,Attr
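# Illustrative sketch of the three modes (assumes a series of 10 records):
# GetSeries(ds, "Fsd", si=0, ei=9) # "truncate": records 0..9 inclusive
# GetSeries(ds, "Fsd", si=-2, ei=9, mode="pad") # 2 missing values prepended (flag=1)
# GetSeries(ds, "Fsd", si=-2, ei=9, mode="mirror") # records 1 and 2 reflected about
# # record 0 and prepended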
def MakeEmptySeries(ds,ThisOne):
nRecs = int(ds.globalattributes['nc_nrecs'])
Series = float(c.missing_value)*numpy.ones(nRecs,dtype=numpy.float64)
Flag = numpy.ones(nRecs,dtype=numpy.int32)
Attr = MakeAttributeDictionary()
return Series,Flag,Attr
def GetSeriesasMA(ds,ThisOne,si=0,ei=-1,mode="truncate"):
"""
Purpose:
Returns a data series and the QC flag series from the data structure.
Usage:
data,flag,attr = qcutils.GetSeriesasMA(ds,label,si=0,ei=-1)
where the arguments are;
ds - the data structure (dict)
label - label of the data series in ds (string)
si - start index (integer), default 0
ei - end index (integer), default -1
and the returned values are;
data - values for the requested series in ds
(numpy masked array, float64)
flag - QC flag for the requested series in ds
(numpy masked array, int32)
attr - attribute dictionary for series
Example:
The code snippet below will return the incoming shortwave data values
(Fsd) and the associated QC flag (f) as numpy masked arrays;
ds = qcio.nc_read_series("HowardSprings_2011_L3.nc")
Fsd,f,a = qcutils.GetSeriesasMA(ds,"Fsd")
Author: PRI
"""
Series,Flag,Attr = GetSeries(ds,ThisOne,si=si,ei=ei,mode=mode)
Series,WasND = SeriestoMA(Series)
return Series,Flag,Attr
def GetVariableAsDictionary(ds,label,si=0,ei=-1,mode="truncate"):
"""
Purpose:
Returns a data variable from the data structure as a dictionary.
Usage:
variable = qcutils.GetVariableAsDictionary(ds,label,si=0,ei=-1)
where the arguments are;
ds - the data structure (dict)
label - label of the data variable in ds (string)
si - start index (integer), default 0
ei - end index (integer), default -1
and the returned values are;
The data are returned as a dictionary;
variable["label"] - variable label in data structure
variable["data"] - numpy float64 masked array containing data
variable["flag"] - numpy int32 array containing QC flags
variable["attr"] - dictionary of variable attributes
Example:
The code snippet below will return the incoming shortwave data values
(Fsd), the associated QC flag and the variable attributes;
ds = qcio.nc_read_series("HowardSprings_2011_L3.nc")
Fsd = qcutils.GetVariableAsDictionary(ds,"Fsd")
Author: PRI
"""
ldt,flag,attr = GetSeries(ds,"DateTime",si=si,ei=ei,mode=mode)
data,flag,attr = GetSeries(ds,label,si=si,ei=ei,mode=mode)
data,WasND = SeriestoMA(data)
variable = {"Label":label,"Data":data,"Flag":flag,"Attr":attr,"DateTime":numpy.array(ldt)}
return variable
def GetUnitsFromds(ds, ThisOne):
units = ds.series[ThisOne]['Attr']['units']
return units
def get_cfsection(cf,series='',mode='quiet'):
'''
Find the section in the control file that contains an entry for the series "series".
USAGE: section = qcutils.get_cfsection(cf,series=<series_name>)
INPUT: cf - a control file object (from ConfigObj)
<series_name> - the name of the series (string)
RETURNS: section - the name of the section containing an entry for <series_name> (string)
Note that the returned section name is an empty string if there is no entry for <series_name> in
the control file.
'''
section = ''
sectionlist = ['Variables','Drivers','Fluxes','Respiration','Partition','ER','GPP','NEE']
if len(series)==0:
msgtxt = ' get_cfsection: no input series specified'
if mode!='quiet': log.info(msgtxt)
return section
for ThisSection in sectionlist:
if ThisSection in cf.keys():
if series in cf[ThisSection]: section = ThisSection
if len(section)==0:
msgtxt = ' get_cfsection: series '+str(series)+' not found in control file'
if mode!='quiet': log.info(msgtxt)
return section
def get_coverage_groups(ds,rad=None,met=None,flux=None,soil=None):
level = "L1"
if "nc_level" in ds.globalattributes:
level = str(ds.globalattributes["nc_level"])
# use the default group memberships unless they were passed in by the caller
if rad is None: rad = ['Fsd','Fsu','Fld','Flu','Fn']
if met is None: met = ['Ah','Cc','Precip','ps','Ta','Ws','Wd']
if flux is None: flux = ['Fm','ustar','Fh','Fe','Fc']
if soil is None: soil = ['Fg','Ts','Sws']
for ThisGroup, ThisLabel in zip([rad,met,flux,soil],['radiation','meteorology','flux','soil']):
sum_coverage = float(0); count = float(0)
for ThisOne in ThisGroup:
if ThisOne in ds.series.keys():
sum_coverage = sum_coverage + float(ds.series[ThisOne]['Attr']['coverage_'+level])
count = count + 1
if count!=0:
coverage_group = sum_coverage/count
else:
coverage_group = 0
ds.globalattributes['coverage_'+ThisLabel+'_'+level] = str('%d'%coverage_group)
def get_coverage_individual(ds):
level = "L1"
if "nc_level" in ds.globalattributes:
level = str(ds.globalattributes["nc_level"])
SeriesList = ds.series.keys()
for ThisOne in ["DateTime","DateTime_UTC"]:
if ThisOne in SeriesList: SeriesList.remove(ThisOne)
for ThisOne in SeriesList:
num_good = len(numpy.where(abs(ds.series[ThisOne]['Data']-float(c.missing_value))>c.eps)[0])
coverage = 100*float(num_good)/float(ds.globalattributes['nc_nrecs'])
ds.series[ThisOne]['Attr']['coverage_'+level] = str('%d'%coverage)
def get_datetimefromnctime(ds,time,time_units):
"""
Purpose:
Create a series of datetime objects from the time read from a netCDF file.
Usage:
qcutils.get_datetimefromnctime(ds,time,time_units)
Side effects:
Creates a Python datetime series in the data structure
Author: PRI
Date: September 2014
"""
ts = int(ds.globalattributes["time_step"])
nRecs = int(ds.globalattributes["nc_nrecs"])
dt = netCDF4.num2date(time,time_units)
ds.series[unicode("DateTime")] = {}
ds.series["DateTime"]["Data"] = list(dt)
ds.series["DateTime"]["Flag"] = numpy.zeros(nRecs)
ds.series["DateTime"]["Attr"] = {}
ds.series["DateTime"]["Attr"]["long_name"] = "Datetime in local timezone"
ds.series["DateTime"]["Attr"]["units"] = "None"
def get_datetimefromxldate(ds):
''' Creates a series of Python datetime objects from the Excel date read from the Excel file.
Thanks to John Machin for the quick and dirty code
see http://stackoverflow.com/questions/1108428/how-do-i-read-a-date-in-excel-format-in-python'''
log.info(' Getting the Python datetime series from the Excel datetime')
xldate = ds.series['xlDateTime']['Data']
nRecs = len(ds.series['xlDateTime']['Data'])
datemode = int(ds.globalattributes['xl_datemode'])
ds.series[unicode('DateTime')] = {}
ds.series['DateTime']['Data'] = [None]*nRecs
basedate = datetime.datetime(1899, 12, 30)
for i in range(nRecs):
ds.series['DateTime']['Data'][i] = basedate + datetime.timedelta(days=xldate[i] + 1462 * datemode)
ds.series['DateTime']['Flag'] = numpy.zeros(nRecs)
ds.series['DateTime']['Attr'] = {}
ds.series['DateTime']['Attr']['long_name'] = 'Datetime in local timezone'
ds.series['DateTime']['Attr']['units'] = 'None'
def get_datetimefromymdhms(ds):
''' Creates a series of Python datetime objects from the year, month,
day, hour, minute and second series stored in the netCDF file.'''
SeriesList = ds.series.keys()
if not all([item in SeriesList for item in ['Year','Month','Day','Hour','Minute','Second']]):
log.info(' get_datetimefromymdhms: unable to find all datetime fields required')
return
log.info(' Getting the date and time series')
nRecs = get_nrecs(ds)
ts = ds.globalattributes["time_step"]
ds.series[unicode('DateTime')] = {}
ds.series['DateTime']['Data'] = [None]*nRecs
if "Microseconds" in ds.series.keys():
microseconds = ds.series["Microseconds"]["Data"]
else:
microseconds = numpy.zeros(nRecs,dtype=numpy.float64)
for i in range(nRecs):
ds.series['DateTime']['Data'][i] = datetime.datetime(int(ds.series['Year']['Data'][i]),
int(ds.series['Month']['Data'][i]),
int(ds.series['Day']['Data'][i]),
int(ds.series['Hour']['Data'][i]),
int(ds.series['Minute']['Data'][i]),
int(ds.series['Second']['Data'][i]),
int(microseconds[i]))
ds.series['DateTime']['Flag'] = numpy.zeros(nRecs)
ds.series['DateTime']['Attr'] = {}
ds.series['DateTime']['Attr']['long_name'] = 'Date-time object'
ds.series['DateTime']['Attr']['units'] = 'None'
def get_diurnalstats(dt,data,info):
ts = info["time_step"]
nperday = info["nperday"]
si = 0
while abs(dt[si].hour+float(dt[si].minute)/60-float(ts)/60)>c.eps:
si = si + 1
ei = len(dt)-1
while abs(dt[ei].hour+float(dt[ei].minute)/60)>c.eps:
ei = ei - 1
data_wholedays = data[si:ei+1]
ndays = len(data_wholedays)/nperday
data_2d = numpy.ma.reshape(data_wholedays,[ndays,nperday])
diel_stats = {}
diel_stats["Hr"] = numpy.ma.array([i*ts/float(60) for i in range(0,nperday)])
diel_stats["Av"] = numpy.ma.average(data_2d,axis=0)
diel_stats["Sd"] = numpy.ma.std(data_2d,axis=0)
diel_stats["Mx"] = numpy.ma.max(data_2d,axis=0)
diel_stats["Mn"] = numpy.ma.min(data_2d,axis=0)
return diel_stats
def get_keyvaluefromcf(cf,sections,key,default=None,mode="quiet"):
"""
Purpose:
General purpose routine to return a keyword value from a control file.
Usage:
keyval = qcutils.get_keyvaluefromcf(cf,sections,key,default=default)
where
cf is a control file object from ConfigObj
sections is a list of sections and nested sub-sections to search
key is the keyword
default is a default value
Example:
ncOutFileName = qcutils.get_keyvaluefromcf(cf,["Files","Out"],"ncFileName",default="")
The example above will return the value for ncFileName from the ["Files"]["Out"] sub-section
in the control file.
Author: PRI
Date: February 2015
"""
if len(sections)<1:
msg = " get_keyvaluefromcf: no sections specified"
if mode.lower()!="quiet": log.info(msg)
return default
if sections[0] in cf:
section = cf[sections[0]]
if len(sections)>1:
for item in sections[1:]:
if item in section:
section = section[item]
else:
msg = " get_keyvaluefromcf: Sub section "+item+" not found in control file, used default ("+str(default)+")"
if mode.lower()!="quiet": log.info(msg)
return default
if key in section:
value = section[key]
else:
msg = " get_keyvaluefromcf: Key "+key+" not found in section, used default ("+str(default)+")"
if mode.lower()!="quiet": log.info(msg)
value = default
else:
msg = " get_keyvaluefromcf: Section "+sections[0]+" not found in control file, used default ("+str(default)+")"
if mode.lower()!="quiet": log.error(msg)
value = default
return value
def get_label_list_from_cf(cf):
"""
Purpose:
Returns a list of variable labels from a control file.
Usage:
label_list = qcutils.get_label_list_from_cf(cf)
where cf is a control file object
label_list is a list of variable labels referenced in the control file.
"""
if "Variables" in cf:
label_list = cf["Variables"].keys()
elif "Drivers" in cf:
label_list = cf["Drivers"].keys()
elif "Fluxes" in cf:
label_list = cf["Fluxes"].keys()
else:
label_list = []
msg = "No Variables, Drivers or Fluxes section found in control file"
log.error(msg)
return label_list
def get_missingingapfilledseries(ds):
"""
Purpose:
Check series in data structure and print a message to the screen if missing points are found.
Usage:
gfalternate_checkformissing(ds,series_list=series_list)
where ds is a data structure
series_list is a list of series to check
Author: PRI
Date: March 2015
"""
# get a local pointer to the datetime
ldt = ds.series["DateTime"]["Data"]
# create an empty list
alt_list = []
# check to see if there was any gap filling using data from alternate sources
if "alternate" in dir(ds):
# if so, get a list of the quantities gap filled from alternate sources
alt_list = list(set([ds.alternate[item]["label_tower"] for item in ds.alternate.keys()]))
# create an empty list
cli_list = []
# check to see if there was any gap filling from climatology
if "climatology" in dir(ds):
# if so, get a list of the quantities gap filled using climatology
cli_list = list(set([ds.climatology[item]["label_tower"] for item in ds.climatology.keys()]))
# one list to rule them, one list to bind them ...
gf_list = list(set(alt_list+cli_list))
# clear out if there was no gap filling
if len(gf_list)==0: return
# loop over the series to be checked
gap_found = False
for series in gf_list:
if series not in ds.series.keys(): continue
data,flag,attr = GetSeriesasMA(ds,series)
idx = numpy.ma.where(data.mask==True)[0]
if len(idx)!=0:
gap_found = True
msg = " Missing points ("+str(len(idx))+") found in "+series
log.error(msg)
#ldt_missing = [ldt[i] for i in idx]
#msg = " The first 10 missing data is at datetimes "+str(ldt_missing[0:9])
#log.error(msg)
if not gap_found:
msg = " No missing values found in gap filled series"
log.info(msg)
def get_number_from_heightstring(height):
z = str(height)
if "m" in z: z = z.replace("m","")
try:
z = float(z)
except ValueError:
z = 0.0
return z
def get_nrecs(ds):
if 'nc_nrecs' in ds.globalattributes.keys():
nRecs = int(ds.globalattributes['nc_nrecs'])
elif 'NumRecs' in ds.globalattributes.keys():
nRecs = int(ds.globalattributes['NumRecs'])
else:
series_list = ds.series.keys()
nRecs = len(ds.series[series_list[0]]['Data'])
return nRecs
def get_timestep(ds):
"""
Purpose:
Return an array of time steps in seconds between records
Usage:
dt = qcutils.get_timestep(ds)
Author: PRI
Date: February 2015
"""
# local pointer to the Python datetime series
ldt = ds.series["DateTime"]["Data"]
# time step between records in seconds
dt = numpy.array([(ldt[i]-ldt[i-1]).total_seconds() for i in range(1,len(ldt))])
return dt
def get_timezone(site_name,prompt="no"):
""" Return the time zone based on the site name."""
time_zone = ""
found = False
# strip out spaces and commas from the site name
site_name = site_name.replace(" ","").replace(",","")
for item in c.tz_dict.keys():
if item in site_name.lower():
time_zone = c.tz_dict[item]
found = True
break
if not found and prompt.lower()=="yes":
# can't find the site in the dictionary so ask the user
root = Tkinter.Tk(); root.withdraw()
time_zone = tkSimpleDialog.askstring("Time zone","Enter time zone eg Australia/Melbourne")
root.destroy()
found = True
return time_zone,found
def get_UTCfromlocaltime(ds):
'''
Purpose:
Creates a UTC datetime series in the data structure from the
local datetime series.
Usage:
ldt_UTC = qcutils.get_UTCfromlocaltime(ds)
Assumptions:
No daylight savings used in the local datetime
Author: PRI
'''
# check the time_zone global attribute is set, we can't continue without it
if "time_zone" not in ds.globalattributes.keys():
log.warning("get_UTCfromlocaltime: time_zone not in global attributes, checking elsewhere ...")
if "site_name" in ds.globalattributes.keys():
site_name = ds.globalattributes["site_name"]
else:
log.warning("get_UTCfromlocaltime: site_name not in global attributes, skipping UTC calculation ...")
return
time_zone,found = get_timezone(site_name,prompt="no")
if not found:
log.warning("get_UTCfromlocaltime: site_name not in time zone dictionary")
return
else:
log.info("get_UTCfromlocaltime: time_zone found in time zone dictionary")
ds.globalattributes["time_zone"] = time_zone
log.info(' Getting the UTC datetime from the local datetime')
# get the number of records
nRecs = len(ds.series['xlDateTime']['Data'])
# get the time zone
tz = ds.globalattributes["time_zone"]
# create a timezone object
loc_tz = pytz.timezone(tz)
# local pointer to the datetime series in ds
ldt = ds.series["DateTime"]["Data"]
# localise the datetime by assigning a time zone
ldt_loc = [loc_tz.localize(dt) for dt in ldt]
# remove any daylight saving time
ldt_loc_nodst = [dt+dt.dst() for dt in ldt_loc]
# convert to UTC
ldt_utc = [dt.astimezone(pytz.utc) for dt in ldt_loc_nodst]
return ldt_utc
def get_xldatefromdatetime(ds):
'''
Purpose:
Returns a list of xldatetime (floating point numbers representing decimal days
since 00:00 1/1/1900) from a list of Python datetimes
Usage:
qcutils.get_xldatefromdatetime(ds)
Assumptions:
The Python datetime series ("DateTime") exists in the data structure ds.
Author: PRI
'''
# get the datemode of the original Excel spreadsheet
if "xl_datemode" in ds.globalattributes.keys():
datemode = int(ds.globalattributes["xl_datemode"])
else:
datemode = int(0)
nRecs = int(ds.globalattributes["nc_nrecs"])
# get the Excel datetime series, flag and attributes
if "xlDateTime" in ds.series.keys():
xldt_org,xldt_flag,xldt_attr = GetSeriesasMA(ds,"xlDateTime")
else:
xldt_flag = numpy.zeros(nRecs,dtype=numpy.int32)
xldt_attr = MakeAttributeDictionary(long_name="Date/time in Excel format",units="days since 1899-12-31 00:00:00")
# get a local pointer to the Python DateTime series in ds
ldt = ds.series["DateTime"]["Data"]
# get a list of Excel datetimes from the Python datetime objects
xldate = [xlrd.xldate.xldate_from_datetime_tuple((ldt[i].year,
ldt[i].month,
ldt[i].day,
ldt[i].hour,
ldt[i].minute,
ldt[i].second),
datemode) for i in range(0,len(ldt))]
xldt_new = numpy.ma.array(xldate, dtype=numpy.float64)
# overwrite the existing Excel datetime series
CreateSeries(ds,"xlDateTime",xldt_new,Flag=xldt_flag,Attr=xldt_attr)
def get_ymdhmsfromdatetime(ds):
'''
Purpose:
Gets the year, month, day, hour, minute and second from a list of
Python datetimes. The Python datetime series is read from
the input data structure and the results are written back to the
data structure.
Usage:
qcutils.get_ymdhmsfromdatetime(ds)
Assumptions:
None
Author: PRI
'''
nRecs = int(ds.globalattributes["nc_nrecs"])
dt = ds.series["DateTime"]["Data"]
flag = numpy.zeros(nRecs,dtype=numpy.int32)
Year = numpy.array([dt[i].year for i in range(0,nRecs)]).astype(numpy.int32)
Month = numpy.array([dt[i].month for i in range(0,nRecs)]).astype(numpy.int32)
Day = numpy.array([dt[i].day for i in range(0,nRecs)]).astype(numpy.int32)
Hour = numpy.array([dt[i].hour for i in range(0,nRecs)]).astype(numpy.int32)
Minute = numpy.array([dt[i].minute for i in range(0,nRecs)]).astype(numpy.int32)
Second = numpy.array([dt[i].second for i in range(0,nRecs)]).astype(numpy.int32)
Hdh = numpy.array([float(Hour[i])+float(Minute[i])/60. for i in range(0,nRecs)]).astype(numpy.float64)
Ddd = numpy.array([(dt[i] - datetime.datetime(Year[i],1,1)).days+1+Hdh[i]/24. for i in range(0,nRecs)]).astype(numpy.float64)
CreateSeries(ds,'Year',Year,Flag=flag,Attr=MakeAttributeDictionary(long_name='Year',units='none'))
CreateSeries(ds,'Month',Month,Flag=flag,Attr=MakeAttributeDictionary(long_name='Month',units='none'))
CreateSeries(ds,'Day',Day,Flag=flag,Attr=MakeAttributeDictionary(long_name='Day',units='none'))
CreateSeries(ds,'Hour',Hour,Flag=flag,Attr=MakeAttributeDictionary(long_name='Hour',units='none'))
CreateSeries(ds,'Minute',Minute,Flag=flag,Attr=MakeAttributeDictionary(long_name='Minute',units='none'))
CreateSeries(ds,'Second',Second,Flag=flag,Attr=MakeAttributeDictionary(long_name='Second',units='none'))
CreateSeries(ds,'Hdh',Hdh,Flag=flag,Attr=MakeAttributeDictionary(long_name='Decimal hour of the day',units='none'))
CreateSeries(ds,'Ddd',Ddd,Flag=flag,Attr=MakeAttributeDictionary(long_name='Decimal day of the year',units='none'))
def get_ymdhmsfromxldate(ds):
"""
Gets year, month, day, hour, and if available seconds, from
excel-formatted Timestamp
Usage qcts.get_ymdhmsfromxldate(ds)
cf: control file
ds: data structure
"""
log.info(' Getting date and time variables')
# get the date mode of the original Excel datetime
datemode = int(ds.globalattributes['xl_datemode'])
nRecs = len(ds.series['xlDateTime']['Data'])
Year = numpy.array([c.missing_value]*nRecs,numpy.int32)
Month = numpy.array([c.missing_value]*nRecs,numpy.int32)
Day = numpy.array([c.missing_value]*nRecs,numpy.int32)
Hour = numpy.array([c.missing_value]*nRecs,numpy.int32)
Minute = numpy.array([c.missing_value]*nRecs,numpy.int32)
Second = numpy.array([c.missing_value]*nRecs,numpy.int32)
Hdh = numpy.array([c.missing_value]*nRecs,numpy.float64)
Ddd = numpy.array([c.missing_value]*nRecs,numpy.float64)
flag = numpy.zeros(nRecs)
for i in range(nRecs):
DateTuple = xlrd.xldate_as_tuple(ds.series['xlDateTime']['Data'][i],datemode)
Year[i] = int(DateTuple[0])
Month[i] = int(DateTuple[1])
Day[i] = int(DateTuple[2])
Hour[i] = int(DateTuple[3])
Minute[i] = int(DateTuple[4])
Second[i] = int(DateTuple[5])
Hdh[i] = float(DateTuple[3])+float(DateTuple[4])/60.
Ddd[i] = ds.series['xlDateTime']['Data'][i] - xlrd.xldate.xldate_from_date_tuple((Year[i],1,1),datemode) + 1
CreateSeries(ds,'Year',Year,Flag=flag,Attr=MakeAttributeDictionary(long_name='Year',units='none'))
CreateSeries(ds,'Month',Month,Flag=flag,Attr=MakeAttributeDictionary(long_name='Month',units='none'))
CreateSeries(ds,'Day',Day,Flag=flag,Attr=MakeAttributeDictionary(long_name='Day',units='none'))
CreateSeries(ds,'Hour',Hour,Flag=flag,Attr=MakeAttributeDictionary(long_name='Hour',units='none'))
CreateSeries(ds,'Minute',Minute,Flag=flag,Attr=MakeAttributeDictionary(long_name='Minute',units='none'))
CreateSeries(ds,'Second',Second,Flag=flag,Attr=MakeAttributeDictionary(long_name='Second',units='none'))
CreateSeries(ds,'Hdh',Hdh,Flag=flag,Attr=MakeAttributeDictionary(long_name='Decimal hour of the day',units='none'))
CreateSeries(ds,'Ddd',Ddd,Flag=flag,Attr=MakeAttributeDictionary(long_name='Decimal day of the year',units='none'))
def haskey(cf,ThisOne,key):
return key in cf['Variables'][ThisOne].keys()
def incf(cf,ThisOne):
return ThisOne in cf['Variables'].keys()
def linear_function(B,x):
"""
Purpose:
Linear function for use with orthogonal distance regression.
Usage:
linear = scipy.odr.Model(qcutils.linear_function)
where B is a list of slope and offset values
x is an array of x values
"""
return B[0]*x + B[1]
def MakeAttributeDictionary(**kwargs):
"""
Purpose:
Make an attribute dictionary.
Usage:
attr_new = qcutils.MakeAttributeDictionary(long_name="some string",attr=attr_exist)
where long_name is an attribute to be written to the new attribute dictionary
attr_exist is an existing attribute dictionary
Author: PRI
Date: Back in the day
"""
default_list = ["ancillary_variables","height","instrument","long_name","serial_number","standard_name",
"units","valid_range"]
attr = {}
for item in kwargs:
if isinstance(kwargs[item], dict):
# merge the contents of an existing attribute dictionary passed by keyword
for entry in kwargs[item]:
attr[entry] = kwargs[item][entry]
if entry in default_list: default_list.remove(entry)
else:
attr[item] = kwargs.get(item,"not defined")
if item in default_list: default_list.remove(item)
if len(default_list)!=0:
for item in default_list:
if item == "valid_range":
attr[item] = str(c.small_value)+","+str(c.large_value)
else:
attr[item] = "not defined"
attr["missing_value"] = c.missing_value
return copy.deepcopy(attr)
def MakeQCFlag(ds,SeriesList):
flag = []
if len(SeriesList)<=0:
#log.info(' MakeQCFlag: no series list specified')
pass
if len(SeriesList)==1:
if SeriesList[0] in ds.series.keys():
flag = ds.series[SeriesList[0]]['Flag'].copy()
else:
log.error(' MakeQCFlag: series '+str(SeriesList[0])+' not in ds.series')
if len(SeriesList)>1:
for ThisOne in SeriesList:
if ThisOne in ds.series.keys():
if len(flag)==0:
#flag = numpy.ones(numpy.size(ds.series[ThisOne]['Flag']))
flag = ds.series[ThisOne]['Flag'].copy()
else:
tmp_flag = ds.series[ThisOne]['Flag'].copy() # get a temporary copy of the flag
index = numpy.where(numpy.mod(tmp_flag,10)==0) # find the elements with flag = 0, 10, 20 etc
tmp_flag[index] = 0 # set them all to 0
flag = numpy.maximum(flag,tmp_flag) # now take the maximum
else:
log.error(' MakeQCFlag: series '+ThisOne+' not in ds.series')
return numpy.asarray(flag).astype(numpy.int32)
def MAtoSeries(Series):
"""
Convert a masked array to a numpy ndarray with masked elements set to c.missing_value.
Usage:
Series, WasMA = MAtoSeries(Series)
where:
Series (input) is the data series to be converted.
WasMA (returned) is a logical, True if the input series was a masked array.
Series (output) is the input series converted to an ndarray with c.missing_value values
for missing data.
"""
WasMA = False
if numpy.ma.isMA(Series):
WasMA = True
Series = numpy.ma.filled(Series,float(c.missing_value))
return Series, WasMA
def MergeQCFlag(QCFlag_list):
""" Merge a list of QC flags by taking the element-wise maximum."""
if len(QCFlag_list)==0: return None
if len(QCFlag_list)==1: return QCFlag_list[0]
flag = QCFlag_list[0].copy() # get a copy of the first flag
for item in QCFlag_list[1:]: # loop over the list of flags
tmp_flag = item.copy() # get a copy of the next flag
index = numpy.where(numpy.mod(tmp_flag,10)==0) # find the elements with flag = 0, 10, 20 etc
tmp_flag[index] = 0 # set them all to 0
flag = numpy.maximum(flag,tmp_flag) # now take the maximum
return flag
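# Worked example (as comments) of the merge rule above: flags that are
# multiples of 10 mark acceptable gap-filled data, so they are zeroed before
# the element-wise maximum and do not propagate;
#  MergeQCFlag([numpy.array([0,0]),numpy.array([10,5])])
#  returns numpy.array([0,5]); the 10 merges to 0, the reject flag 5 propagates.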
def nxMom_nxScalar_alpha(zoL):
nRecs = numpy.size(zoL)
nxMom = numpy.ma.ones(nRecs) * 0.079
nxScalar = numpy.ma.ones(nRecs) * 0.085
alpha = numpy.ma.ones(nRecs) * 0.925
# get the index of stable conditions
stable = numpy.ma.where(zoL>0)[0]
# now set the series to their stable values
nxMom[stable] = 0.079 * (1 + 7.9 * zoL[stable]) ** 0.75
nxScalar[stable] = 2.0 - 1.915 / (1 + 0.5 * zoL[stable])
alpha[stable] = 1
return nxMom, nxScalar, alpha
def path_exists(pathname,mode="verbose"):
if not os.path.isdir(pathname):
if mode=="verbose":
log.error(' Path '+pathname+' not found')
return False
else:
return True
def perdelta(start, end, delta):
"""
Generator yielding datetime objects from start to end (inclusive) in steps of delta.
"""
curr = start
while curr <= end:
yield curr
curr += delta
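# Example (illustrative): half-hourly datetimes from 00:00 to 01:00 inclusive;
#  list(perdelta(datetime.datetime(2015,1,1,0,0),datetime.datetime(2015,1,1,1,0),
#                datetime.timedelta(minutes=30)))
#  gives [00:00, 00:30, 01:00] on 1/1/2015.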
def polyval(p,x):
"""
Replacement for the polyval routine in numpy. This version doesn't check the
input variables to make sure they are array_like. This means that masked
arrays are treated correctly when they are passed to this routine.
Parameters
----------
p : a 1D array of coefficients, highest order first
x : a 1D array of points at which to evaluate the polynomial described by
the coefficients in p
Example
-------
>>> x = numpy.array([1,2,3])
>>> p = numpy.array([2,0])
>>> qcutils.polyval(p,x)
array([2,4,6])
>>> y = numpy.array([1,c.missing_value,3])
>>> y = numpy.ma.masked_where(y==c.missing_value,y)
>>> qcutils.polyval(p,y)
masked_array(data = [2 -- 6],
mask = [False True False],
fill_value = 999999)
"""
y = 0
for i in range(len(p)):
y = x*y + p[i]
return y
def rounddttots(dt,ts=30):
dt += datetime.timedelta(minutes=int(ts/2))
dt -= datetime.timedelta(minutes=dt.minute % int(ts),seconds=dt.second,microseconds=dt.microsecond)
return dt
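# Example (illustrative): rounding 10:13:45 to the nearest 30 minute time step;
#  rounddttots(datetime.datetime(2015,2,1,10,13,45),ts=30)
#  gives datetime.datetime(2015,2,1,10,0); 10:13:45 plus 15 minutes is 10:28:45,
#  which truncates back to the step boundary at 10:00.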
def rounddttoseconds(dt):
# round a datetime to the nearest whole second
dt += datetime.timedelta(seconds=0.5)
dt -= datetime.timedelta(microseconds=dt.microsecond)
return dt
def round_datetime(ds,mode="nearest_timestep"):
"""
Purpose:
Round the series of Python datetimes to the nearest time based on mode
Usage:
qcutils.round_datetime(ds,mode=mode)
where;
mode = "nearest_second" rounds to the nearesy second
mode = "nearest_timestep" rounds to the nearest time step
Author: PRI
Date: February 2015
"""
# local pointer to the datetime series
ldt = ds.series["DateTime"]["Data"]
# check which rounding option has been chosen
if mode.lower()=="nearest_timestep":
# get the time step
if "time_step" in ds.globalattributes:
ts = int(ds.globalattributes["time_step"])
else:
ts = numpy.mean(get_timestep(ds)/60)
ts = roundtobase(ts,base=30)
ds.globalattributes["time_step"] = ts
# round to the nearest time step
rldt = [rounddttots(dt,ts=ts) for dt in ldt]
elif mode.lower()=="nearest_second":
# round to the nearest second
rldt = [rounddttoseconds(dt) for dt in ldt]
else:
# unrecognised option for mode, return original datetime series
log.error(" round_datetime: unrecognised mode ("+str(mode)+")"+" ,returning original time series")
rldt = ds.series["DateTime"]["Data"]
# replace the original datetime series with the rounded one
ds.series["DateTime"]["Data"] = rldt
def roundtobase(x,base=5):
return int(base*round(float(x)/base))
def round2sig(x,sig=2):
'''
Round a float to a specified number of significant digits (default is 2).
'''
return round(x, sig-int(math.floor(math.log10(abs(x))))-1)
def r(b, p, alpha):
"""
Function to calculate the r coefficient of the Massman frequency correction.
"""
r = ((b ** alpha) / (b ** alpha + 1)) * \
((b ** alpha) / (b ** alpha + p ** alpha)) * \
(1 / (p ** alpha + 1))
return r
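# Worked example (illustrative): with b=2.0, p=1.0 and alpha=1.0 the three
# factors are (2/3), (2/3) and (1/2), so r(2.0,1.0,1.0) is approximately 0.222.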
def SeriestoMA(Series):
"""
Convert a numpy ndarray to a masked array.
Usage:
Series, WasND = SeriestoMA(Series)
where:
Series (input) is the data series to be converted.
WasND (returned) is a logical, True if the input series was an ndarray
Series (output) is the input series converted to a masked array.
"""
WasND = False
if not numpy.ma.isMA(Series):
WasND = True
Series = numpy.ma.masked_where(abs(Series-numpy.float64(c.missing_value))<c.eps,Series)
return Series, WasND
def SetUnitsInds(ds, ThisOne, units):
ds.series[ThisOne]['Attr']['units'] = units
def startlog(loggername,loggerfile):
logger = logging.getLogger(loggername)
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(loggerfile)
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', '%H:%M:%S')
#formatter = logging.Formatter('%(asctime)s %(name)-8s %(levelname)-6s %(message)s', '%d-%m-%y %H:%M')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
return logger
def UpdateGlobalAttributes(cf,ds,level):
ds.globalattributes["nc_level"] = str(level)
ds.globalattributes["EPDversion"] = sys.version
# put the control file name into the global attributes
ds.globalattributes["controlfile_name"] = cf["controlfile_name"]
if "Global" in cf:
for item in cf["Global"].keys():
if item not in ds.globalattributes.keys():
ds.globalattributes[item] = cf["Global"][item].replace("\n"," ").replace("\r","")
def update_progress(progress):
barLength = 50 # Modify this to change the length of the progress bar
status = ""
if isinstance(progress, int):
progress = float(progress)
if not isinstance(progress, float):
progress = 0
status = "error: progress var must be float\r\n"
if progress < 0:
progress = 0
status = "Halt...\r\n"
if progress >= 1:
progress = 1
status = "Done...\r\n"
block = int(round(barLength*progress))
progress = round(progress,2)
text = "\rPercent: [{0}] {1}% {2}".format( "#"*block + "-"*(barLength-block), progress*100, status)
sys.stdout.write(text)
sys.stdout.flush()
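# A minimal usage sketch for update_progress ("do_work" is a hypothetical
# stand-in for the caller's per-record processing, not a function in this module):
#  for i in range(nRecs):
#      do_work(i)
#      update_progress(float(i+1)/float(nRecs))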
|
OzFlux/OzFluxQC
|
scripts/qcutils.py
|
Python
|
gpl-3.0
| 82,118
|
"""
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from rest_framework.response import Response
from django.db import transaction
from storageadmin.util import handle_exception
from storageadmin.serializers import PoolScrubSerializer
from storageadmin.models import (Pool, PoolScrub)
import rest_framework_custom as rfc
from fs.btrfs import (scrub_start, scrub_status)
from datetime import timedelta
import logging
logger = logging.getLogger(__name__)
class PoolScrubView(rfc.GenericView):
serializer_class = PoolScrubSerializer
def _validate_pool(self, pname, request):
try:
return Pool.objects.get(name=pname)
except Pool.DoesNotExist:
e_msg = ('Pool: %s does not exist' % pname)
handle_exception(Exception(e_msg), request)
def get_queryset(self, *args, **kwargs):
with self._handle_exception(self.request):
pool = self._validate_pool(self.kwargs['pname'], self.request)
self._scrub_status(pool)
return PoolScrub.objects.filter(pool=pool).order_by('-id')
@transaction.atomic
def _scrub_status(self, pool):
try:
ps = PoolScrub.objects.filter(pool=pool).order_by('-id')[0]
except IndexError:
# no scrub has been run on this pool yet
return Response()
if (ps.status == 'started' or ps.status == 'running'):
cur_status = scrub_status(pool)
if (cur_status['status'] == 'finished'):
duration = int(cur_status['duration'])
cur_status['end_time'] = (ps.start_time +
timedelta(seconds=duration))
del cur_status['duration']
PoolScrub.objects.filter(id=ps.id).update(**cur_status)
return ps
@transaction.atomic
def post(self, request, pname, command=None):
pool = self._validate_pool(pname, request)
if (command is not None and command != 'status'):
e_msg = ('Unknown scrub command: %s' % command)
handle_exception(Exception(e_msg), request)
with self._handle_exception(request):
ps = self._scrub_status(pool)
if (command == 'status'):
return Response(PoolScrubSerializer(ps).data)
force = request.data.get('force', False)
if ((PoolScrub.objects.filter(pool=pool,
status__regex=r'(started|running)')
.exists())):
if (force):
p = PoolScrub.objects.filter(
pool=pool,
status__regex=r'(started|running)').order_by('-id')[0]
p.status = 'terminated'
p.save()
else:
e_msg = ('A scrub process is already running for '
'pool(%s). If you really want to kill it '
'and start a new scrub, use the force option' % pname)
handle_exception(Exception(e_msg), request)
scrub_pid = scrub_start(pool, force=force)
ps = PoolScrub(pool=pool, pid=scrub_pid)
ps.save()
return Response(PoolScrubSerializer(ps).data)
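# A hedged usage sketch, not taken from this file: the URL routing below is an
# assumption for illustration. Assuming PoolScrubView is wired at
# /api/pools/<pname>/scrub, typical requests would be;
#  POST /api/pools/pool0/scrub                  start a scrub on pool0
#  POST /api/pools/pool0/scrub {"force": true}  mark a stuck scrub terminated and restart
#  POST /api/pools/pool0/scrub/status           return the latest scrub record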
|
priyaganti/rockstor-core
|
src/rockstor/storageadmin/views/pool_scrub.py
|
Python
|
gpl-3.0
| 3,837
|
from django.contrib.syndication.views import Feed
from django.shortcuts import get_object_or_404
from orb.models import Resource, Tag
class LatestTagEntries(Feed):
description_template = 'feeds/resource.html'
link = "/"
def get_object(self, request, tag_slug):
return get_object_or_404(Tag, slug=tag_slug)
def title(self, obj):
return "'%s' ORB resources" % obj.name
def description(self, obj):
return "Resources recently tagged with %s" % obj.name
def items(self, obj):
return Resource.objects.filter(status=Resource.APPROVED, resourcetag__tag=obj).order_by('-create_date')[:20]
def item_pubdate(self, item):
return item.create_date
def item_updateddate(self, item):
return item.update_date
class LatestEntries(Feed):
description_template = 'feeds/resource.html'
title = "ORB latest resources"
link = "/"
description = "Latest resources added to ORB."
def items(self):
return Resource.objects.filter(status=Resource.APPROVED).order_by('-update_date')[:20]
def item_title(self, item):
return item.title
def item_description(self, item):
return item.description
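# A hedged wiring sketch (urls.py is not shown here; the patterns and the
# import of url() are assumptions for illustration only):
#  from django.conf.urls import url
#  from orb.feeds import LatestEntries, LatestTagEntries
#  urlpatterns += [
#      url(r'^feed/$', LatestEntries()),
#      url(r'^feed/tag/(?P<tag_slug>[\w-]+)/$', LatestTagEntries()),
#  ]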
|
mPowering/django-orb
|
orb/feeds.py
|
Python
|
gpl-3.0
| 1,207
|
#!/usr/bin/env python
# By Robin Lennox - twitter.com/robberbear
from lib.Layout import colour
from pexpect import pxssh
from scapy.all import IP, TCP, sr1
import subprocess
import time
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
# Import Colour Scheme
G, Y, B, R, W = colour()
def openPort(port, ip):
try:
response = sr1(IP(dst=ip)/TCP(dport=int(port),
flags="S"), verbose=False, timeout=2)
if response:
# a TCP flags value of 18 is SYN-ACK, i.e. the port accepted the probe
if response[TCP].flags == 18:
return True
else:
return False
return False
except Exception as e:
print("[!] openPort:", e)
return False
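# Example usage sketch (192.0.2.10 is a placeholder documentation address;
# scapy needs root privileges to send the raw SYN probe):
#  if openPort(22, '192.0.2.10'):
#      print(G+"[+] Port 22 is open"+W)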
def udp2rawTunnelAttempt(callbackIP, tunnelIP, tunnelType, tunnelPort, listenPort, localPort, tunnelPassword):
returnResult = False
try:
subprocess.check_output(
'pkill udp2raw && pkill kcptun_client', shell=True)
except Exception as e:
pass
try:
subprocess.check_output('udp2raw -c -r{0}:{1} -l0.0.0.0:{2} --raw-mode {3} -k"{4}" >/dev/null 2>&1 &'.format(
callbackIP, listenPort, tunnelPort, tunnelType, tunnelPassword), shell=True)
subprocess.check_output(
'kcptun_client -r "127.0.0.1:{0}" -l ":{1}" -mode fast2 -mtu 1300 >/dev/null 2>&1 &'.format(tunnelPort, localPort), shell=True)
time.sleep(5)
command = "timeout -t 2 nc 127.0.0.1 {0}".format(localPort)
output = subprocess.Popen(
command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if "SSH" in str(output.communicate()):
returnResult = True
return returnResult
except Exception as e:
print(e)
return returnResult
def udp2rawTunnel(callbackIP, tunnelIP, tunnelType, tunnelPort, localPort, listenPort, tunnelPassword, verbose):
count = 0
stopCount = 5
while (count < stopCount):
if verbose:
print(B+"[-] Attempting {0} Tunnel".format(tunnelType)+W)
time.sleep(5)
if udp2rawTunnelAttempt(callbackIP, tunnelIP, tunnelType, tunnelPort, listenPort, localPort, tunnelPassword):
return True
else:
# Restricts Attempts
count = count + 1
return False
def checkTunnel(ipAddr, portNumber):
failedMessage = R+"[x] Failed connect, trying again."+W
# Timeout 10 is used for RAW DNS Tunnel as this is slow to connect.
s = pxssh.pxssh(timeout=10)
try:
testConn = s.login(ipAddr, 'myusername', 'mypassword',
port=portNumber, auto_prompt_reset=False)
s.close()
if testConn:
return True
else:
print(failedMessage)
return False
except pxssh.ExceptionPxssh as e:
# DNS Tunnel setup but not routable.
if "could not set shell prompt" in str(e):
print(failedMessage)
# print str(e)
return False
else:
# print G+"[+] SSH Tunnel Created!"+W
# print str(e)
return True
except:
# Catch all
print(failedMessage)
return False
|
robinlennox/breakout
|
lib/SetupTunnel.py
|
Python
|
gpl-3.0
| 3,373
|
import Globals
class Food:
def __init__(self, game_globals: Globals.PYG_snake_Globals):
self.game_globals = game_globals
self.fruits = [self.game_globals.random_point() for _ in range(self.game_globals.fruit_number)]
def add_fruit(self, occupied):
# work on a copy so the caller's list (the snake body) is not mutated
occupied = list(occupied) + self.fruits
pos = False
while not pos:
p = self.game_globals.random_point()
if p not in occupied:
pos = p
self.fruits.append(pos)
def remove_fruit(self, pos):
self.fruits.remove(pos)
def get_fruits(self):
return self.fruits
def update_fruits(self, snake_body):
self.remove_fruit(snake_body[0])
self.add_fruit(snake_body)
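# A minimal usage sketch (assumes a Globals.PYG_snake_Globals instance "gg"
# and the snake body given as a list of points, head first):
#  food = Food(gg)
#  if snake_body[0] in food.get_fruits():
#      food.update_fruits(snake_body)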
|
wrec/CursedSnake
|
pygame/Food.py
|
Python
|
gpl-3.0
| 724
|
import textwrap
import re
# from textwrap
_whitespace_only_re = re.compile('^[ \t*#/]+$', re.MULTILINE)
_leading_whitespace_re = re.compile('(^[ \t*#/]*)(?:[^ \t\n])', re.MULTILINE)
def common_indent(text):
# This is mostly taken from textwrap.dedent
# Look for the longest leading string of spaces and tabs common to
# all lines.
margin = None
text = _whitespace_only_re.sub('', text)
indents = _leading_whitespace_re.findall(text)
for indent in indents:
if margin is None:
margin = indent
# Current line more deeply indented than previous winner:
# no change (previous winner is still on top).
elif indent.startswith(margin):
pass
# Current line consistent with and no deeper than previous winner:
# it's the new winner.
elif margin.startswith(indent):
margin = indent
# Current line and previous winner have no common whitespace:
# there is no margin.
else:
margin = ""
break
return margin or ''
def strip_indent(text, indent):
return re.sub(r'(?m)^' + indent, '', text)
def add_indent(text, indent):
return '\n'.join(indent + line for line in text.splitlines()) + '\n'
class ParagraphWrapper(textwrap.TextWrapper):
def wrap(self, text):
"""Override textwrap.TextWrapper to process 'text' properly when
multiple paragraphs are present"""
# This is shamelessly stolen from
# http://code.activestate.com/recipes/358228-extend-textwraptextwrapper-to-handle-multiple-para/
para_edge = re.compile(r"(\n\s*\n)", re.MULTILINE)
paragraphs = para_edge.split(text)
wrapped_lines = []
for para in paragraphs:
if para.isspace():
if not self.replace_whitespace:
# Do not take the leading and trailing newlines since
# joining the list with newlines (as self.fill will do)
# will put them back in.
if self.expand_tabs:
para = para.expandtabs()
wrapped_lines.append(para[1:-1])
else:
# self.fill will end up putting in the needed newline to
# space out the paragraphs
wrapped_lines.append('')
else:
wrapped_lines.extend(textwrap.TextWrapper.wrap(self, para))
return wrapped_lines
def paragraph_fill(text, **kw):
'''
Like textwrap.fill, but respect paragraph (blank line) boundaries and
preserve indentation.
'''
initial_indent = kw.get('initial_indent', '')
subsequent_indent = kw.get('subsequent_indent', '')
indent = common_indent(text)
subsequent_indent = indent + subsequent_indent
initial_indent = indent + initial_indent
kw.update(initial_indent=initial_indent,
subsequent_indent=subsequent_indent)
wrapper = ParagraphWrapper(**kw)
return wrapper.fill(strip_indent(text, indent))
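# A minimal usage sketch: re-wrap indented text to 40 columns while keeping
# the common four-space margin and the blank-line paragraph break:
#  text = ('    This first paragraph is long enough that it will be\n'
#          '    rewrapped onto several lines.\n'
#          '\n'
#          '    Second paragraph.\n')
#  print(paragraph_fill(text, width=40))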
|
sam-roth/Keypad
|
keypad/util/wrap.py
|
Python
|
gpl-3.0
| 3,066
|
##############################################################################
## ##
## This file is part of ModelBlocks. Copyright 2009, ModelBlocks developers. ##
## ##
## ModelBlocks is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## ModelBlocks is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with ModelBlocks. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###############################################################################
import sys
import os
import collections
import sets
import copy
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'resource-gcg', 'scripts'))
import discgraph
import induciblediscgraph
VERBOSE = False ## print debugging info.
ENDGRAPH = False ## print last graph for each discourse.
PRINT_NORMAL = False ## smuggle graph out with induced scopes & normalized inherits
for a in sys.argv:
if a=='-d':
VERBOSE = True
if a=='-g':
ENDGRAPH = True
if a=='-n':
PRINT_NORMAL = True
################################################################################
## Contains...
def contains( struct, x ):
if type(struct)==str: return True if struct==x else False
return any( [ contains(substruct,x) for substruct in struct ] )
## Variable replacement...
def replaceVarName( struct, xOld, xNew ):
if type(struct)==str: return xNew if struct==xOld else struct
return tuple( [ replaceVarName(x,xOld,xNew) for x in struct ] )
## Lambda expression format...
def lambdaFormat( expr, inAnd = False ):
if len( expr ) == 0: return 'T'
elif isinstance( expr, str ): return expr
elif expr[0] == 'lambda': return '(\\' + expr[1] + ' ' + ' '.join( [ lambdaFormat(subexpr,False) for subexpr in expr[2:] ] ) + ')'
elif expr[0] == 'and' and not inAnd: return '(^ ' + ' '.join( [ lambdaFormat(subexpr,True ) for subexpr in expr[1:] if len(subexpr)>0 ] ) + ')'
elif expr[0] == 'and' and inAnd: return ' '.join( [ lambdaFormat(subexpr,True ) for subexpr in expr[1:] if len(subexpr)>0 ] )
else: return '(' + expr[0] + ' ' + ' '.join( [ lambdaFormat(subexpr,False) for subexpr in expr[1:] ] ) + ')'
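## Example (illustrative): lambdaFormat( ('and',('dog','x'),('chase','e','x','y')) )
## yields '(^ (dog x) (chase e x y))'.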
## Find unbound vars...
def findUnboundVars( expr, Unbound, Bound = [] ):
if len( expr ) == 0: return
elif isinstance( expr, str ):
if expr not in Bound and expr != '_':
if expr not in Unbound: Unbound.append( expr )
elif expr[0] == 'lambda':
for subexpr in expr[2:]:
findUnboundVars( subexpr, Unbound, Bound + [ expr[1] ] )
else:
for subexpr in expr[1:]:
findUnboundVars( subexpr, Unbound, Bound )
## Convert expr to existentialized discourse anaphor antecedent...
def makeDiscAntec( expr, dst, OrigUnbound ): #### NOTE: we should really just existentialize anything above the destination var
# print( 'mDA ' + dst + ' ' + str(expr[3][1] if len(expr)>3 and len(expr[3])>2 else '') )
if len( expr ) > 3 and expr[0].endswith('Q') and len( expr[2] ) > 2 and expr[2][1] == dst: return expr[2][2]
if len( expr ) > 3 and expr[0].endswith('Q') and len( expr[3] ) > 2 and expr[3][1] == dst: return expr[3][2]
if len( expr ) > 3 and expr[0].endswith('Q') and len( expr[3] ) > 0 and expr[3][1] in OrigUnbound: return ('D:supersomeQ', '_', expr[2], makeDiscAntec( expr[3], dst, OrigUnbound ) )
if isinstance( expr, str ): return expr
return tuple([ makeDiscAntec( subexpr, dst, OrigUnbound ) for subexpr in expr ])
## Check off consts used in expr...
def checkConstsUsed( expr, OrigConsts ):
if len( expr ) == 0: return
if isinstance( expr, str ): return
if len(expr)>1 and (expr[0],expr[1]) in OrigConsts:
OrigConsts.remove( (expr[0],expr[1]) )
if (expr[0],'Q') in OrigConsts:
OrigConsts.remove( (expr[0],'Q') )
for subexpr in expr:
checkConstsUsed( subexpr, OrigConsts )
################################################################################
discctr = 0
## For each discourse graph...
for line in sys.stdin:
discctr += 1
DiscTitle = sorted([ asc for asc in line.split(' ') if ',0,' in asc and asc.startswith('000') ])
print( '#DISCOURSE ' + str(discctr) + '... (' + ' '.join(DiscTitle) + ')' )
sys.stderr.write( '#DISCOURSE ' + str(discctr) + '... (' + ' '.join(DiscTitle) + ')\n' )
#### I. READ IN AND PREPROCESS DISCOURSE GRAPH...
line = line.rstrip()
if VERBOSE: print( 'GRAPH: ' + line )
D = discgraph.DiscGraph( line )
if not D.check(): continue
D.checkMultipleOutscopers()
OrigScopes = D.Scopes
D = induciblediscgraph.InducibleDiscGraph( line )
#### II. ENFORCE NORMAL FORM (QUANTS AND SCOPE PARENTS AT MOST SPECIFIC INHERITANCES)...
# D.normForm()
#SMITING BREAKS CYCLES WHICH... SHOULD BE REPORTED?
if not D.check(): continue
## Copy quants down to final heirs -- NOTE: this is the same as using inheritance in Q rules...
for q,e,r,x,n in D.QuantTuples[:]:
for xFin in D.Heirs.get(x,[]):
if xFin not in D.Subs and xFin not in [s for _,_,_,s,_ in D.QuantTuples]:
D.QuantTuples.append( (q,e,r,xFin,n) )
## Copy scopes down to final heirs -- NOTE: this is the same as using inheritance in S rules...
for x in D.Scopes.keys():
for xFin in D.Heirs.get(x,[]):
if xFin not in D.Subs and xFin not in D.Scopes:
D.Scopes[ xFin ] = D.Scopes[ x ]
## Copy special scope markers (taint, upward) down to final heirs
for x in D.Taints.keys():
for xFin in D.Heirs.get(x,[]):
if xFin not in D.Subs and xFin not in D.Taints:
D.Taints[ xFin ] = D.Taints[ x ]
for x in D.Upward1.keys():
for xFin in D.Heirs.get(x,[]):
if xFin not in D.Subs and xFin not in D.Upward1:
D.Upward1[ xFin ] = D.Upward1[ x ]
for x in D.Upward2.keys():
for xFin in D.Heirs.get(x,[]):
if xFin not in D.Subs and xFin not in D.Upward2:
D.Upward2[ xFin ] = D.Upward2[ x ]
## Skip sentence if cycle...
if not D.check(): continue
#### III. INDUCE UNANNOTATED SCOPES AND EXISTENTIAL QUANTS...
## Add dummy args below eventualities...
for xt in D.PredTuples:
for x in xt[2:]:
if len( D.ConstrainingTuples.get(x,[]) )==1 and x.endswith('\''): #x.startswith(xt[1][0:4] + 's') and x.endswith('\''):
D.Scopes[x] = xt[1]
if VERBOSE: print( 'Scoping dummy argument ' + x + ' to predicate ' + xt[1] )
## Helper functions to explore inheritance chain...
def outscopingFromSup( xLo ):
return True if xLo in D.Scopes.values() else any( [ outscopingFromSup(xHi) for l,xHi in D.Inhs.get(xLo,{}).items() if l!='w' and l!='o' ] )
def outscopingFromSub( xHi ):
return True if xHi in D.Scopes.values() else any( [ outscopingFromSub(xLo) for xLo in D.Subs.get(xHi,[]) ] )
def outscopingInChain( x ):
return outscopingFromSup( x ) or outscopingFromSub( x )
ScopeLeaves = [ ]
for x in D.Referents:
if not outscopingInChain(x): ScopeLeaves.append( x )
L1 = [ x for x in sorted((sets.Set(D.Referents) | sets.Set(D.Subs)) - sets.Set(D.Inhs.keys())) if any([ y in D.Chains.get(x,[]) for y in OrigScopes.values() ]) and not any([ y in D.Chains.get(x,[]) for y in OrigScopes ]) ]
if len(L1) > 1:
print( '#WARNING: Discourse scope annotations do not converge to single top-level ancestor: ' + ' '.join(L1) + ' -- possibly due to missing anaphora between sentences' )
sys.stderr.write( 'WARNING: Discourse scope annotations do not converge to single top-level ancestor: ' + ' '.join(L1) + ' -- possibly due to missing anaphora between sentences\n' )
for xHi in L1:
print( '# ' + xHi + ' subsumes ' + ' '.join(sorted(sets.Set([ xLo for xLo in D.Referents if D.reaches(xLo,xHi) ]))) )
sys.stderr.write( ' ' + xHi + ' subsumes ' + ' '.join(sorted(sets.Set([ xLo for xLo in D.Referents if D.reaches(xLo,xHi) ]))) + '\n' )
elif L1 == []:
L2 = [ x for x in sorted((sets.Set(D.Referents) | sets.Set(D.Subs)) - sets.Set(D.Inhs.keys())) if any([ r in D.Chains.get(x,[]) for q,e,n,r,s in D.QuantTuples ]) and not any([ y in D.Chains.get(x,[]) for y in OrigScopes ]) ]
print( '#NOTE: Discourse contains no scope annotations -- defaulting to legators of explicit quantifiers: ' + ' '.join(L2) )
sys.stderr.write( 'NOTE: Discourse contains no scope annotations -- defaulting to legators of explicit quantifiers: ' + ' '.join(L2) + '\n' )
if L2 == []:
# L = [ x for x in sorted((sets.Set(D.Referents) | sets.Set(D.Subs)) - sets.Set(D.Inhs.keys())) if not any([ y in D.Chains.get(x,[]) for y in OrigScopes ]) ]
print( '#WARNING: No explicit quantifiers annotated -- instead iterating over all legator referents' )
sys.stderr.write( 'WARNING: No explicit quantifiers annotated -- instead iterating over all legator referents\n' )
if VERBOSE: print( 'GRAPH: ' + D.strGraph() )
## List of original (dominant) refts...
# RecencyConnected = sorted( [ ((0 if x not in D.Subs else -1) + (0 if x in ScopeLeaves else -2),x) for x in D.Referents if D.ceiling(x) in D.Chains.get(L[0],[]) ], reverse = True ) # | sets.Set([ ceiling(x) for x in Scopes.values() ])
RecencyConnected = [ y for x in L1 for y in D.Referents if any([ z in D.Chains.get(x,[]) for z in D.getCeils(y) ]) ] #D.ceiling(y) in D.Chains.get(x,[]) ]
if VERBOSE: print( 'RecencyConnected = ' + str(RecencyConnected) )
## D.Scopes = tryScope( D.Scopes, RecencyConnected )
# D.tryScope( RecencyConnected, False )
# if VERBOSE: print( 're-running tryScope...' )
# RecencyConnected = [ (0,x) for x in D.Referents if D.ceiling(x) in L ]
ok = True
Complete = []
while ok:
## Try using increasingly coarse sets of top-level scopes, starting with top of annotated scopes (preferred)...
L = [ x for x in sorted(sets.Set(D.Referents) - sets.Set(Complete) - sets.Set(D.Inhs.keys())) if any([ y in D.Chains.get(x,[]) for y in OrigScopes.values() ]) and not any([ y in D.Chains.get(x,[]) for y in OrigScopes ]) ]
if VERBOSE and L != []: print( 'Legators as roots of annotated scope: ' + str(L) )
## Back off to explicitly annotated quantifiers (preferred)...
if L == []:
L = [ x for x in sorted(sets.Set(D.Referents) - sets.Set(Complete) - sets.Set(D.Inhs.keys())) if any([ r in D.Chains.get(x,[]) for q,e,n,r,s in D.QuantTuples ]) and not any([ y in D.Chains.get(x,[]) for y in D.Scopes ]) ]
if VERBOSE and L != []: print( 'Legators as explicit quantifiers: ' + str(L) )
## Back off to any legator (dispreferred)...
if L == []:
L = [ x for x in sorted(sets.Set(D.Referents) - sets.Set(Complete) - sets.Set(D.Inhs.keys())) if any([ y in D.Chains.get(x,[]) for tup in D.PredTuples for y in tup[1:] ]) and not any([ y in D.Chains.get(x,[]) for y in D.Scopes ]) ]
if VERBOSE and L != []: print( 'Legators as any predicate argument: ' + str(L) )
if L != []:
print( '#WARNING: Insufficient explicitly annotated quantifiers, backing off to full set of legators: ' + str(L) )
sys.stderr.write( 'WARNING: Insufficient explicitly annotated quantifiers, backing off to full set of legators: ' + str(L) + '\n' )
## Exit if no uncompleted legators...
if L == []: break
if VERBOSE: print( 'Trying to induce scopes below ' + L[0] + '...' )
RecencyConnected += D.Chains.get(L[0],[]) ## Account target as connected (root).
ok = D.tryScope( L[0], RecencyConnected )
Complete.append( L[0] )
if ENDGRAPH: print( 'GRAPH: ' + D.strGraph() )
if not ok: continue
# out = D.tryScope( RecencyConnected, True )
# if out == False: continue
if VERBOSE: print( D.Scopes )
if VERBOSE: print( 'GRAPH: ' + D.strGraph() )
for xTarget in sets.Set( D.Scopes.values() ):
if not any([ x in D.Scopes for x in D.Chains.get(xTarget,[]) ]) and not any([ s in D.Chains.get(xTarget,[]) for q,e,r,s,n in D.QuantTuples ]):
print( '#WARNING: Top-scoping referent ' + xTarget + ' has no annotated quantifier, and will not be induced!' )
sys.stderr.write( 'WARNING: Top-scoping referent ' + xTarget + ' has no annotated quantifier, and will not be induced!\n' )
#### IV. ENFORCE NORMAL FORM (QUANTS AND SCOPE PARENTS AT MOST SPECIFIC INHERITANCES)...
# DisjointPreds = sets.Set([ ( D.ceiling(xt[1]), D.ceiling(yt[1]) ) for xt in D.PredTuples for yt in D.PredTuples if xt[1] < yt[1] and not D.reachesInChain( xt[1], D.ceiling(yt[1]) ) ])
# if len(DisjointPreds) > 0:
# print( '#WARNING: Scopal maxima not connected, possibly due to missing anaphora between sentences: ' + str(DisjointPreds) )
# sys.stderr.write( 'WARNING: Scopal maxima not connected, possibly due to missing anaphora between sentences: ' + str(DisjointPreds) + '\n' )
# DisjointRefts = sets.Set([ ( D.ceiling(x), D.ceiling(y) ) for xt in D.PredTuples for x in xt[1:] for yt in D.PredTuples for y in yt[1:] if x < y and not D.reachesInChain( x, D.ceiling(y) ) ])
# if len(DisjointRefts) > 0:
# print( '#WARNING: Scopal maxima not connected, possibly due to missing anaphora between sentences or unscoped argument of scoped predicate: ' + str(DisjointRefts) )
# sys.stderr.write( 'WARNING: Scopal maxima not connected, possibly due to missing anaphora between sentences or unscoped argument of scoped predicate: ' + str(DisjointRefts) + '\n' )
## Copy lowest (if multiple) scopes down chain to final heirs -- NOTE: this is the same as using inheritance in S rules...
for x in D.Scopes.keys():
if not any([ y in D.Scopes for y in D.Heirs.get(x,[]) if y != x ]):
for xFin in D.Heirs.get(x,[]):
if xFin not in D.Subs and xFin not in D.Scopes:
D.Scopes[ xFin ] = D.Scopes[ x ]
## Copy quants down to final heirs -- NOTE: this is the same as using inheritance in Q rules...
for q,e,r,x,n in D.QuantTuples[:]:
if not any([ y in D.Scopes for y in D.Heirs.get(x,[]) if y != x ]):
for xFin in D.Heirs.get(x,[]):
if xFin not in D.Subs and xFin not in [s for _,_,_,s,_ in D.QuantTuples]:
D.QuantTuples.append( (q,e,r,xFin,n) )
if VERBOSE: print( 'GRAPH: ' + D.strGraph() )
## Induce low existential quants when only scope annotated...
# for xCh in sorted([x if x in NuscoValues else Nuscos[x] for x in Scopes.keys()] + [x for x in Scopes.values() if x in NuscoValues]): #sorted([ s for s in NuscoValues if 'r' not in Inhs.get(Inhs.get(s,{}).get('r',''),{}) ]): #Scopes:
# ScopeyNuscos = [ x for x in NuscoValues if 'r' not in Inhs.get(Inhs.get(x,{}).get('r',''),{}) and (x in Scopes.keys()+Scopes.values() or Inhs.get(x,{}).get('r','') in Scopes.keys()+Scopes.values()) ]
# ScopeyNuscos = [ x for x in D.Referents | sets.Set(D.Inhs.keys()) if (x not in D.Nuscos or x in D.NuscoValues) and 'r' not in D.Inhs.get(D.Inhs.get(x,{}).get('r',''),{}) and (x in D.Scopes.keys()+D.Scopes.values() or D.Inhs.get(x,{}).get('r','') in D.Scopes.keys()+D.Scopes.values()) ]
ScopeyNuscos = D.Scopes.keys()
if VERBOSE: print( 'ScopeyNuscos = ' + str(ScopeyNuscos) )
if VERBOSE: print( 'Referents = ' + str(D.Referents) )
if VERBOSE: print( 'Nuscos = ' + str(D.Nuscos) )
for xCh in ScopeyNuscos:
if xCh not in [s for _,_,_,s,_ in D.QuantTuples]: # + [r for q,e,r,s,n in QuantTuples]:
if D.Inhs[xCh].get('r','') == '': D.Inhs[xCh]['r'] = xCh+'r'
if VERBOSE: print( 'Inducing existential quantifier: ' + str([ 'D:someQ', xCh+'P', D.Inhs[xCh]['r'], xCh, '_' ]) )
D.QuantTuples.append( ( 'D:someQ', xCh+'P', D.Inhs[xCh]['r'], xCh, '_' ) )
# D.normForm()
## Remove redundant non-terminal quants with no scope parent...
for q,e,r,s,n in D.QuantTuples[:]:
if s in D.Subs and s not in D.Scopes and any([ x in D.Heirs.get(s,[]) for _,_,_,x,_ in D.QuantTuples if x!=s ]):
D.QuantTuples.remove( (q,e,r,s,n) )
if VERBOSE: print( 'Removing non-terminal quantifier ' + q + ' ' + e + ' ' + r + ' ' + s + ' ' + n )
# output normalized discgraph
if PRINT_NORMAL: print( 'GRAPH: ' + D.strGraph() )
#### V. TRANSLATE TO LAMBDA CALCULUS...
Translations = [ ]
Abstractions = collections.defaultdict( list ) ## Key is lambda.
Expressions = collections.defaultdict( list ) ## Key is lambda.
## Iterations...
i = 0
active = True
while active: #PredTuples != [] or QuantTuples != []:
i += 1
active = False
if VERBOSE:
print( '---- ITERATION ' + str(i) + ' ----' )
print( 'P = ' + str(sorted(D.PredTuples)) )
print( 'Q = ' + str(sorted(D.QuantTuples)) )
print( 'S = ' + str(sorted(D.Scopes.items())) )
print( 't = ' + str(sorted(D.Traces.items())) )
print( 'I = ' + str(sorted(D.Inhs.items())) )
print( 'DI = ' + str(sorted(D.DiscInhs.items())) )
print( 'T = ' + str(sorted(Translations)) )
print( 'A = ' + str(sorted(Abstractions.items())) )
print( 'E = ' + str(sorted(Expressions.items())) )
'''
## P rule...
for ptup in list(D.PredTuples):
for x in ptup[1:]: ## extended here below
if x not in D.Scopes.values() and x not in D.Inhs: # and not any([ y in D.Scopes.values() for y in D.Chains.get(x,[]) ]):
if VERBOSE: print( 'applying P to make \\' + x + '. ' + lambdaFormat(ptup) )
Abstractions[ x ].append( ptup )
if ptup in D.PredTuples: D.PredTuples.remove( ptup )
active = True
'''
## P1 rule...
for ptup in list(D.PredTuples):
x = ptup[1]
if x not in D.Scopes.values() and x not in D.Inhs and x not in D.DiscInhs:
if VERBOSE: print( 'applying P to move from P to A: \\' + x + '. ' + lambdaFormat(ptup) )
Abstractions[ x ].append( ptup )
if ptup in D.PredTuples: D.PredTuples.remove( ptup )
active = True
## P2 rule...
for ptup in list(D.PredTuples):
for x in ptup[2:]:
if D.Scopes.get(x,'')==ptup[1] and x not in D.Scopes.values() and x not in D.Inhs and x not in D.DiscInhs:
if VERBOSE: print( 'applying P to move from P to A: \\' + x + '. ' + lambdaFormat(ptup) )
Abstractions[ x ].append( ptup )
if ptup in D.PredTuples: D.PredTuples.remove( ptup )
active = True
## C rule...
for var,Structs in Abstractions.items():
if len(Structs) > 1:
if VERBOSE: print( 'applying C to add from A to A: \\' + var + '. ' + lambdaFormat( tuple( ['and'] + Structs ) ) )
Abstractions[var] = [ tuple( ['and'] + Structs ) ]
active = True
## M rule...
for var,Structs in Abstractions.items():
if len(Structs) == 1 and var not in D.Scopes.values() and var not in D.Inhs and var not in D.DiscInhs:
if VERBOSE: print( 'applying M to move from A to E: \\' + var + '. ' + lambdaFormat(Structs[0]) )
Expressions[var] = Structs[0]
del Abstractions[var]
active = True
## Q rule...
for q,e,r,s,n in list(D.QuantTuples):
if r in Expressions and s in Expressions:
if VERBOSE: print( 'applying Q to move from Q to T: ' + lambdaFormat( ( q, n, ( 'lambda', r, Expressions[r] ), ( 'lambda', s, Expressions[s] ) ) ) ) ## (' + q + ' (\\' + r + '. ' + str(Expressions[r]) + ') (\\' + s + '. ' + str(Expressions[s]) + '))' )
Translations.append( ( q, n, ( 'lambda', r, Expressions[r] ), ( 'lambda', s, Expressions[s] ) ) )
D.QuantTuples.remove( (q, e, r, s, n) )
active = True
## D rule -- discourse anaphora...
for src,dst in D.DiscInhs.items():
if dst in Expressions:
# expr = replaceVarName( Expressions[dst], dst, src )
## Find expr subsuming antecedent (dst) containing no unbound vars...
expr = Expressions[dst]
DstUnbound = [ ]
findUnboundVars( expr, DstUnbound )
# print( 'yyyy ' + str(DstUnbound) )
def getOutscoper( Unbound, EverUnbound ):
if VERBOSE: print( 'DDDDDDDDDD trying to find outscoper for ' + str(Unbound) + ' ' + str(EverUnbound) )
## For each unbound variable...
for var in Unbound:
## Look up each expression...
supexpr = Expressions.get( var, None )
if supexpr != None:
SupUnbound = []
findUnboundVars( supexpr, SupUnbound, [var] )
## If all old unbounds are scoped, and not new unbounds, return new expression...
if SupUnbound == []: return var,supexpr,EverUnbound
## If all old unbounds are outscoped...
if set(SupUnbound).isdisjoint( EverUnbound ):
## Repeat for newly unbound variables...
return getOutscoper( SupUnbound, EverUnbound + SupUnbound )
if VERBOSE: print( 'failed' )
return None,None,[]
var,expr,EverUnbound = getOutscoper( DstUnbound, DstUnbound )
if expr == None: continue
'''
EverUnbound = sets.Set()
AlreadyTriedVars = []
while len(DstUnbound)>0 and expr!=None:
var = DstUnbound.pop()
if (var,len(DstUnbound),len(EverUnbound)) in AlreadyTriedVars:
sys.stderr.write('ERROR: unable to make discourse anaphor from ' + src + ' to ' + dst + ' without cycle in quantifying ' + ' '.join([v for v,n,m in AlreadyTriedVars]) + '\n' )
break #exit(0)
AlreadyTriedVars += [ (var,len(DstUnbound),len(EverUnbound)) ]
expr = Expressions.get(var,None)
if expr == None: break
findUnboundVars( expr, DstUnbound, [var] )
EverUnbound |= sets.Set(DstUnbound)
if expr == None: continue
'''
# for expr in Translations:
# if contains( expr, dst ): break
# else: continue
'''
for var in DstUnbound:
if var in Expressions:
# outscopingExpr = replaceVarName( Expressions[dst], dst, src )
outscopingExpr = Expressions[var]
OutscopingUnbound = [ ]
findUnboundVars( outscopingExpr, OutscopingUnbound, [var] )
if len( OutscopingUnbound ) == 0: break
else:
if VERBOSE: print( 'tried to attach discourse anaphor, but none of ' + ' '.join(DstUnbound) + ' had no unbound variables in Expression set' )
continue
'''
expr = replaceVarName( makeDiscAntec( ('D:prevosomeQ', '_', ('lambda', var+'x', ()), ('lambda', var, expr)), dst, EverUnbound ), dst, src )
# expr = replaceVarName( makeDiscAntec( expr, dst, EverUnbound ), dst, src )
Abstractions[ src ].append( expr )
if VERBOSE: print( 'applying D to add from A to A replacing: ' + dst + ' with ' + src + ' and existentializing to make \\' + src + ' ' + lambdaFormat(expr) )
del D.DiscInhs[ src ]
## I1 rule...
for src,lbldst in D.Inhs.items():
for lbl,dst in lbldst.items():
if dst not in D.Inhs and dst not in D.DiscInhs and dst not in Abstractions and dst not in Expressions and dst not in D.Scopes.values() and dst not in [ x for ptup in D.PredTuples for x in ptup ]:
if VERBOSE: print( 'applying I1 to add to A: \\' + dst + ' True' )
Abstractions[ dst ].append( () )
active = True
## I2,I3,I4 rule...
for src,lbldst in list(D.Inhs.items()): ## copy items: entries are deleted below during iteration
for lbl,dst in list(lbldst.items()):
if dst in Expressions:
if src in D.Scopes and dst in D.Traces and D.Scopes[src] in D.Scopes and D.Traces[dst] in D.Traces:
Abstractions[ src ].append( replaceVarName( replaceVarName( replaceVarName( Expressions[dst], dst, src ), D.Traces[dst], D.Scopes[src] ), D.Traces[D.Traces[dst]], D.Scopes[D.Scopes[src]] ) ) ## I4 rule.
if VERBOSE: print( 'applying I4 to add from A to A replacing ' + dst + ' with ' + src + ' and ' + D.Traces[dst] + ' with ' + D.Scopes[src] + ' and ' + D.Traces[D.Traces[dst]] + ' with ' + D.Scopes[D.Scopes[src]] + ' to make \\' + src + ' ' + lambdaFormat(Abstractions[src][-1]) )
# del Traces[dst]
elif src in D.Scopes and dst in D.Traces:
Abstractions[ src ].append( replaceVarName( replaceVarName( Expressions[dst], dst, src ), D.Traces[dst], D.Scopes[src] ) ) ## I4 rule.
if VERBOSE: print( 'applying I4 to add from A to A replacing ' + dst + ' with ' + src + ' and ' + D.Traces[dst] + ' with ' + D.Scopes[src] + ' to make \\' + src + ' ' + lambdaFormat(Abstractions[src][-1]) )
# del Traces[dst]
else:
if VERBOSE: print( 'applying I2/I3 to add from A to A replacing ' + dst + ' with ' + src + ' to make \\' + src + ' ' + lambdaFormat(replaceVarName( Expressions[dst], dst, src )) ) #' in ' + str(Expressions[dst]) )
Abstractions[ src ].append( replaceVarName( Expressions[dst], dst, src ) )
# if dst in D.Scopes and src not in D.Scopes and D.Nuscos.get(src,[''])[0] not in D.Scopes and src in [s for q,e,r,s,n in D.QuantTuples] + [r for q,e,r,s,n in D.QuantTuples]: D.Scopes[src if src in D.NuscoValues else D.Nuscos[src][0]] = D.Scopes[dst] ## I3 rule.
del D.Inhs[src][lbl]
if len(D.Inhs[src])==0: del D.Inhs[src]
active = True
## Rename all relevant abstractions with all inheritances, in case of multiple inheritance...
for dst in D.AllInherited[ src ]:
if dst in Expressions:
Abstractions[ src ] = [ replaceVarName( a, dst, src ) for a in Abstractions[src] ]
## S1 rule...
for q,n,R,S in list(Translations):
if S[1] in D.Scopes:
if VERBOSE: print( 'applying S1 to move from T to A: (\\' + D.Scopes[ S[1] ] + ' ' + q + ' ' + str(R) + ' ' + str(S) + ')' )
Expr = copy.deepcopy( (q,n,R,S) )
for x in D.Chains.get( D.Scopes[ S[1] ], [] ):
if x != D.Scopes[ S[1] ]:
Expr = replaceVarName( Expr, x, D.Scopes[ S[1] ] )
Abstractions[ D.Scopes[ S[1] ] ].append( Expr )
# del D.Scopes[ S[1] ]
# if R[1] in Scopes: del Scopes[ R[1] ] ## Should use 't' trace assoc.
Translations.remove( (q, n, R, S) )
if not [True for q1,n1,R1,S1 in Translations if S1[1]==S[1]]: del D.Scopes[ S[1] ] ## Remove scope only if no more quantifiers using it.
active = True
expr = tuple( ['and'] + Translations )
print( lambdaFormat(expr) )
# for expr in Translations:
Unbound = [ ]
findUnboundVars( expr, Unbound )
for v in Unbound:
print( '# DOWNSTREAM LAMBDA EXPRESSION ERROR: unbound var: ' + v )
sys.stderr.write( ' DOWNSTREAM LAMBDA EXPRESSION ERROR: unbound var: ' + v + '\n' )
if VERBOSE: print( 'D.OrigConsts = ' + str(D.OrigConsts) )
checkConstsUsed( expr, D.OrigConsts )
for k in D.OrigConsts:
print( '# DOWNSTREAM LAMBDA EXPRESSION WARNING: const does not appear in translations: ' + ','.join(k) )
sys.stderr.write( ' DOWNSTREAM LAMBDA EXPRESSION WARNING: const does not appear in translations: ' + ','.join(k) + '\n' )
|
modelblocks/modelblocks-release
|
resource-gcg/scripts/discgraphs2discexprs.py
|
Python
|
gpl-3.0
| 27,849
|
from interspike_interval import *
from numpy import *
from matplotlib.pyplot import *
from numpy.random import randint
from matplotlib import rcParams
from scipy.io import savemat, loadmat
from switch import *
import UnrecognizedFormatError
class ISIpy(object):
#Constructor begins --------------------------------------------------------------------
def __init__(self,
plot_settings={
'axes.labelsize':20,
'axes.titlesize':30,
'xtick.direction':'out',
'ytick.direction':'out',
'axes.linewidth':2.0
},
control_flow={
'should_plot':True,
'should_save':True,
'save_format':'png',
'graph_filename':'data',
'processed_data_filename':'data' #In version 1.0 only save data in MATLAB format
},
data_location = ''
):
#Control Flow= Whether to plot, save,...
self.control_flow = control_flow
self.should_plot = control_flow['should_plot']
self.should_save = control_flow['should_save']
self.save_format = control_flow['save_format']
self.graph_filename = control_flow['graph_filename']
self.processed_data_filename=control_flow['processed_data_filename']
#Customize Heatmap Appearance
self.plot_settings = plot_settings
rcParams.update(plot_settings)
self.accepted_formats = {
'MATLAB':'.m',
'CSV':'.csv',
'Brian':'.txt'
}
self.data_location = data_location
self.data = self.fetch_data(self.data_location)
#Assume that data is passed as a MATLAB matrix structured as below
# Time ---------------------->
# Neurons
# |
# |
# |
# |
# V
#
self.ISIs = self.pairwise_ISI(self.data)
#ISIs is organized as below. The ends are neglected to avoid confounding edge effects.
# ISI ---------------------->
# Neurons
# |
# |
# |
# |
# V
self.visualization()
self.save_everything()
#Constructor Ends ----------------------------------------------------------------------------------
def fetch_data(self,data_location):
if type(data_location)==list:
print 'Data spread over multiple files'
return [loadtxt(record,delimiter='\t') for record in data_location]
else: #assume a single file summarizing the entire recording is passed
self.suffix = self.data_location[self.data_location.rfind('.'):] #use the last dot so dots elsewhere in the path are ignored
if self.suffix =='.mat':
print 'Loading',data_location
return loadmat(self.data_location)['data']
elif self.suffix== '.csv':
print 'Loading',data_location
return loadtxt(self.data_location,delimiter=',')
elif self.suffix=='.txt':
print 'Loading',data_location
return loadtxt(self.data_location,delimiter='\t')
else:
print 'Unrecognized Format. Assuming a tab delimited text file'
return loadtxt(self.data_location,delimiter='\t')
def pairwise_ISI(self,spikeTimes):
self.duration = int(min(amax(spikeTimes,axis=1))) #cast to int: used below as an array length and a range bound
self.ISI_functions = array([self.process_spiketimes_ISIs(spikeTrain) for spikeTrain in spikeTimes])
self.ISI_functions = array([self.construct_piecewise_ISI(isi_object, self.duration) for isi_object in self.ISI_functions])
self.neuron_count = spikeTimes.shape[0]
self.answer = squeeze(array([[self.make_I(first,second) for first in self.ISI_functions] for second in self.ISI_functions]))
self.answer = reshape(self.answer, (self.neuron_count**2,-1))
return self.answer
def pairs(self,lst):
self.i = iter(lst)
self.first = self.prev = self.item = self.i.next()
for item in self.i:
yield self.prev, item
self.prev = item
yield item, self.first
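#pairs() yields consecutive (previous, current) spike-time pairs and finally
#wraps around to (last, first). Minimal sketch (illustrative, not from the
#original module): list(self.pairs([1, 4, 9])) -> [(1, 4), (4, 9), (9, 1)].
#The wrap-around pair is one of the edges dropped by [1:-1] below.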
def process_spiketimes_ISIs(self,spikeTimes):
return [interspike_interval(pair[0],pair[1],pair[1]-pair[0]) for pair in self.pairs(spikeTimes)][1:-1]
def construct_piecewise_ISI(self,isi_object, duration):
self.answer = zeros(duration,)
self.answer[0] = isi_object[1].interspike_interval
for timestep in range(1,duration-1):
self.isi= [interval.interspike_interval for interval in isi_object if interval.start<=timestep and interval.stop>timestep]
if self.isi:
self.answer[timestep] = self.isi[0]
return self.answer[2:-1]
def normalize(self,first,second):
if first and second:
return first/float(second)-1 if first<=second else -(second/float(first)-1)
else:
return 0 #Potential Bug
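#normalize() is a signed, bounded ISI comparison in (-1, 1); 0 means equal
#intervals. Worked example (illustrative): normalize(2, 4) = 2/4 - 1 = -0.5
#and normalize(4, 2) = -(2/4 - 1) = 0.5. A zero-length interval also maps
#to 0, which is the "Potential Bug" flagged above.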
def make_I(self,one,two):
return array([self.normalize(first,second) for first,second in zip(one,two)])
def make_heatmap(self,ISIs):
self.average_ISIs = average(ISIs,axis=1)
n = int(sqrt(len(self.average_ISIs))) #reshape requires integer dimensions
return reshape(self.average_ISIs,(n,n))
def visualization(self):
print 'Plotting ISI heatmap'
if self.should_plot:
figure()
imshow(self.make_heatmap(self.ISIs), interpolation='nearest',aspect='auto')
title('ISI Distance')
xlabel('Neuron')
ylabel('Neuron')
colorbar()
grid(which='major')
clim(-1,1)
if self.should_save:
print 'Saving heatmap and ISI distance timeseries'
savefig(self.graph_filename+'.'+self.save_format,transparent=True, format=self.save_format)
show()
def save_everything(self):
if self.should_save:
savemat(self.processed_data_filename, mdict={'ISI_intervals': self.ISIs})
#The MAT file will be saved to the same directory as this file
|
mac389/brainpy
|
lib/analysis/ISI/ISIpy.py
|
Python
|
gpl-3.0
| 5,276
|
from invoke import task, call
from .docker import setup, manage
@task(
pre=[setup],
post=[call(manage, command='create_example_data')]
)
def quickstart(ctx):
print('RadioCo should be running')
print('Generating some data...')
@task
def commit_changes(ctx):
# Add files
ctx.run('git add .')
# Add local settings excluded on gitignore
ctx.run('git add -f radioco/configs/base/local_settings.py')
ctx.run('git add -f radioco/configs/development/local_settings.py')
ctx.run('git add -f radioco/configs/heroku/local_settings.py')
# Commit all
ctx.run('git commit -am "autocommit: save changes"')
@task
def checkout_latest(ctx):
ctx.run('git pull')
|
txenoo/django-radio
|
tasks/radioco.py
|
Python
|
gpl-3.0
| 701
|
"""
Simple Tests for Support Generation
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "Han Altae-Tran and Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "GPL"
import numpy as np
import unittest
import tensorflow as tf
import deepchem as dc
class TestSupports(unittest.TestCase):
"""
Test that support generation happens properly.
"""
def test_remove_dead_examples(self):
"""Tests that examples with zero weight are removed."""
n_samples = 100
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
p = .05
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.binomial(1, p, size=(n_samples, n_tasks))
w = np.random.binomial(1, p, size=(n_samples, n_tasks))
num_nonzero = np.count_nonzero(np.sum(w, axis=1))
dataset = dc.data.NumpyDataset(X, y, w, ids)
cleared_dataset = dc.data.remove_dead_examples(dataset)
assert len(cleared_dataset) == num_nonzero
def test_get_task_support_simple(self):
"""Tests that get_task_support samples correctly."""
n_samples = 20
n_features = 3
n_tasks = 1
n_trials = 10
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
n_episodes = 20
n_pos = 1
n_neg = 5
supports = dc.data.get_task_support(dataset, n_episodes, n_pos, n_neg,
task=0, log_every_n=10)
assert len(supports) == n_episodes
for support in supports:
assert len(support) == n_pos + n_neg
assert np.count_nonzero(support.y) == n_pos
def test_get_task_support_missing(self):
"""Test that task support works in presence of missing data."""
n_samples = 20
n_features = 3
n_tasks = 1
n_trials = 10
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
# Set last n_samples//2 weights to 0 (integer division: slice indices must be ints)
w[n_samples//2:] = 0
dataset = dc.data.NumpyDataset(X, y, w, ids)
n_episodes = 20
n_pos = 1
n_neg = 2
supports = dc.data.get_task_support(dataset, n_episodes, n_pos, n_neg,
task=0, log_every_n=10)
assert len(supports) == n_episodes
for support in supports:
assert len(support) == n_pos + n_neg
assert np.count_nonzero(support.y) == n_pos
# Check that no support elements are sampled from zero-weight samples
for identifier in support.ids:
assert identifier < n_samples//2
def test_get_task_test(self):
"""Tests that get_task_testsamples correctly."""
n_samples = 20
n_features = 3
n_tasks = 1
n_trials = 10
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
n_episodes = 20
n_test = 10
tests = dc.data.get_task_test(dataset, n_episodes, n_test,
task=0, log_every_n=10)
assert len(tests) == n_episodes
for test in tests:
assert len(test) == n_test
def test_simple_support_generator(self):
"""Conducts simple test that support generator runs."""
n_samples = 20
n_features = 3
n_tasks = 1
n_pos = 1
n_neg = 5
n_trials = 10
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
# Create support generator
supp_gen = dc.data.SupportGenerator(dataset, n_pos, n_neg, n_trials)
def test_simple_episode_generator(self):
"""Conducts simple test that episode generator runs."""
n_samples = 20
n_features = 3
n_tasks = 1
n_pos = 1
n_neg = 5
n_test = 10
n_episodes = 10
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
# Create support generator
episode_gen = dc.data.EpisodeGenerator(
dataset, n_pos, n_neg, n_test, n_episodes)
n_episodes_found = 0
for (task, support, test) in episode_gen:
assert task >= 0
assert task < n_tasks
assert len(support) == n_pos + n_neg
assert np.count_nonzero(support.y) == n_pos
assert len(test) == n_test
n_episodes_found += 1
assert n_episodes_found == n_episodes
def test_get_task_minus_support_simple(self):
"""Test that fixed index support can be removed from dataset."""
n_samples = 20
n_support = 5
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
support_dataset = dc.data.NumpyDataset(X[:n_support], y[:n_support],
w[:n_support], ids[:n_support])
task_dataset = dc.data.get_task_dataset_minus_support(
dataset, support_dataset, task=0)
# Assert all support elements have been removed
assert len(task_dataset) == n_samples - n_support
np.testing.assert_array_equal(task_dataset.X, X[n_support:])
np.testing.assert_array_equal(task_dataset.y, y[n_support:])
np.testing.assert_array_equal(task_dataset.w, w[n_support:])
np.testing.assert_array_equal(task_dataset.ids, ids[n_support:])
def test_dataset_difference_simple(self):
"""Test that fixed index can be removed from dataset."""
n_samples = 20
n_remove = 5
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
remove_dataset = dc.data.NumpyDataset(X[:n_remove], y[:n_remove],
w[:n_remove], ids[:n_remove])
out_dataset = dc.data.dataset_difference(
dataset, remove_dataset)
# Assert all remove elements have been removed
assert len(out_dataset) == n_samples - n_remove
np.testing.assert_array_equal(out_dataset.X, X[n_remove:])
np.testing.assert_array_equal(out_dataset.y, y[n_remove:])
np.testing.assert_array_equal(out_dataset.w, w[n_remove:])
np.testing.assert_array_equal(out_dataset.ids, ids[n_remove:])
def test_get_task_minus_support(self):
"""Test that random index support can be removed from dataset."""
n_samples = 10
n_support = 4
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
support_inds = sorted(np.random.choice(
np.arange(n_samples), (n_support,), replace=False))
support_dataset = dc.data.NumpyDataset(X[support_inds], y[support_inds],
w[support_inds], ids[support_inds])
task_dataset = dc.data.get_task_dataset_minus_support(
dataset, support_dataset, task=0)
# Assert all support elements have been removed
data_inds = sorted(list(set(range(n_samples)) - set(support_inds)))
assert len(task_dataset) == n_samples - n_support
np.testing.assert_array_equal(task_dataset.X, X[data_inds])
np.testing.assert_array_equal(task_dataset.y, y[data_inds])
np.testing.assert_array_equal(task_dataset.w, w[data_inds])
np.testing.assert_array_equal(task_dataset.ids, ids[data_inds])
def test_dataset_difference(self):
"""Test that random index can be removed from dataset."""
n_samples = 10
n_remove = 4
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
remove_inds = sorted(np.random.choice(
np.arange(n_samples), (n_remove,), replace=False))
remove_dataset = dc.data.NumpyDataset(X[remove_inds], y[remove_inds],
w[remove_inds], ids[remove_inds])
out_dataset = dc.data.dataset_difference(
dataset, remove_dataset)
# Assert all remove elements have been removed
data_inds = sorted(list(set(range(n_samples)) - set(remove_inds)))
assert len(out_dataset) == n_samples - n_remove
np.testing.assert_array_equal(out_dataset.X, X[data_inds])
np.testing.assert_array_equal(out_dataset.y, y[data_inds])
np.testing.assert_array_equal(out_dataset.w, w[data_inds])
np.testing.assert_array_equal(out_dataset.ids, ids[data_inds])
def test_get_task_minus_support_missing(self):
"""Test that support can be removed from dataset with missing data"""
n_samples = 20
n_support = 4
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
# Set last n_samples//2 weights to 0 (integer division: slice indices must be ints)
w[n_samples//2:] = 0
dataset = dc.data.NumpyDataset(X, y, w, ids)
# Sample from first n_samples/2 elements for support
support_inds = sorted(np.random.choice(
np.arange(n_samples//2), (n_support,), replace=False))
support_dataset = dc.data.NumpyDataset(X[support_inds], y[support_inds],
w[support_inds], ids[support_inds])
task_dataset = dc.data.get_task_dataset_minus_support(
dataset, support_dataset, task=0)
# Should lie within first n_samples/2 samples only
assert len(task_dataset) == n_samples//2 - n_support
for identifier in task_dataset.ids:
assert identifier < n_samples//2
def test_support_generator_correct_samples(self):
"""Tests that samples from support generator have desired shape."""
n_samples = 20
n_features = 3
n_tasks = 1
n_pos = 1
n_neg = 5
n_trials = 10
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
# Create support generator
supp_gen = dc.data.SupportGenerator(dataset, n_pos, n_neg, n_trials)
num_supports = 0
for (task, support) in supp_gen:
assert support.X.shape == (n_pos + n_neg, n_features)
num_supports += 1
assert task == 0 # Only one task in this example
n_supp_pos = np.count_nonzero(support.y)
assert n_supp_pos == n_pos
assert num_supports == n_trials
def test_evaluation_strategy(self):
"""Tests that sampling supports for eval works properly."""
n_samples = 2000
n_features = 3
n_tasks = 5
n_pos = 1
n_neg = 5
n_trials = 10
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.random.randint(2, size=(n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
support_generator = dc.data.SupportGenerator(dataset,
n_pos, n_neg, n_trials)
for ind, (task, support) in enumerate(support_generator):
task_dataset = dc.data.get_task_dataset_minus_support(
dataset, support, task)
task_y = dataset.y[:, task]
task_w = dataset.w[:, task]
task_y = task_y[task_w != 0]
assert len(task_y) == len(support) + len(task_dataset)
print("Verifying that task_dataset doesn't overlap with support.")
for task_id in task_dataset.ids:
assert task_id not in set(support.ids)
|
bowenliu16/deepchem
|
deepchem/data/tests/test_support_generator.py
|
Python
|
gpl-3.0
| 12,947
|
from django import forms
from django.contrib.auth.forms import AuthenticationForm
class RegistrationForm(forms.Form):
username = forms.CharField()
password = forms.CharField(widget=forms.PasswordInput())
confirm_password = forms.CharField(widget=forms.PasswordInput())
class LoginForm(AuthenticationForm):
username = forms.CharField()
password = forms.CharField(widget=forms.PasswordInput())
|
imranariffin/taskmanager
|
accounts/forms.py
|
Python
|
gpl-3.0
| 398
|
'''
Created on 2015-5-1
@author: cheneave
'''
import json
from subprocess import PIPE, Popen
import sys
class ShellMiddleware(object):
'''
classdocs
'''
def __init__(self, app, password, url_path="webshell"):
'''
Constructor
app indicates the application that should be called if the request is for shell
password should be given, for safety reason, there are no default password
url_path indicates the url path of the webshell, "/" not included
'''
self.app = app
self.password = password # store the password; __call__ checks it below
self.url_path = '/' + url_path
@staticmethod
def run_cmd(cmd):
try:
pipe = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
out, err = pipe.communicate()
try:
out = out.decode(sys.stdout.encoding) if out else ""
except:
pass
try:
err = err.decode(sys.stderr.encoding) if err else ""
except:
pass
return "{}{}".format(out, err)
except Exception as e:
return str(e)
def __call__(self, environ, start_response):
if environ["PATH_INFO"] == self.url_path:
start_response(
"200 OK", [("Content-type", 'text/plain;charset=utf-8')])
if environ["REQUEST_METHOD"].upper() != "POST":
return ["only support post"]
try:
wsgi_input = environ["wsgi.input"]
except KeyError:
return ["no input data"]
try:
content_length = int(environ.get("CONTENT_LENGTH", 0))
except ValueError:
content_length = 0
dat = wsgi_input.read(content_length)
try:
req = json.loads(dat)
except ValueError:
return ["parsing request failed"]
if req.get("password", "") != self.password:
return ["wrong password"]
return [self.run_cmd(req.get("command", ""))] # WSGI bodies must be iterables of strings
else:
return self.app(environ, start_response)
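# Minimal usage sketch (illustrative; wsgiref and the demo app are
# assumptions, not part of this module):
#
# from wsgiref.simple_server import make_server
#
# def app(environ, start_response):
#     start_response('200 OK', [('Content-type', 'text/plain')])
#     return ['hello']
#
# make_server('', 8000, ShellMiddleware(app, password='secret')).serve_forever()
# # then POST {"password": "secret", "command": "ls"} to /webshell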
|
cheneave/wsgishell
|
wsgishell/shell_middleware.py
|
Python
|
gpl-3.0
| 2,112
|
from sqlalchemy import Column, Integer, String, DateTime, Sequence
from sqlalchemy.ext.declarative import declarative_base
from datetime import datetime
from locale import setlocale, LC_TIME
from sqlalchemy.sql import func
Base = declarative_base()
class LatestTopic(Base):
__tablename__ = 'scr_latesttopics'
id = Column(Integer, Sequence('latesttopics_id_seq'), primary_key=True)
numOfReplies = Column(Integer)
topicLinkInternal = Column(String(2000))
topicTitle = Column(String(255))
categoryLinkInternal = Column(String(2000))
categoryTitle = Column(String(255))
topicCreatedAt = Column(DateTime)
topicCreatedByProfileLinkExternal = Column(String(2000))
topicCreatedByName = Column(String(255))
numOfVisits = Column(String(10))
scraped_at = Column(DateTime(timezone=True), default=func.now())
batch_id = Column(Integer)
def __init__(self, latestRow, batch_id):
# set the local for date parsing
setlocale(LC_TIME, ['hu_HU','UTF-8'])
latestRowTableData = latestRow.findAll("td")
self.numOfReplies = latestRowTableData[0].find("strong").getText()
topic = latestRowTableData[2].findAll("div")[0].a
self.topicLinkInternal = topic.attrs['href']
self.topicTitle = topic.getText()
category = latestRowTableData[2].findAll("div")[1].span.a
self.categoryLinkInternal = category.attrs['href']
self.categoryTitle = category.getText()
topicCreated = latestRowTableData[2].findAll("div")[2].findAll("span")
self.topicCreatedAt = datetime.strptime(topicCreated[0].attrs["title"], '%Y %b. %d %H:%M')
topicCreatedBy = topicCreated[1].a
self.topicCreatedByProfileLinkExternal = topicCreatedBy.attrs['href']
self.topicCreatedByName = topicCreatedBy.getText()
self.numOfVisits = latestRowTableData[3].findAll("span")[0].getText()
self.batch_id = batch_id
def __repr__(self):
return "<LatestTopic(id='%s', numOfReplies='%s', topicLinkInternal='%s' topicTitle='%s', categoryLinkInternal='%s', categoryTitle='%s', topicCreatedAt='%s', topicCreatedByProfileLinkExternal='%s', topicCreatedByName='%s', numOfVisits='%s')>" % \
(self.id, self.numOfReplies, self.topicLinkInternal, self.topicTitle, self.categoryLinkInternal, self.categoryTitle, self.topicCreatedAt, self.topicCreatedByProfileLinkExternal, self.topicCreatedByName, self.numOfVisits)
|
stackingfunctions/scrapeforum
|
python/src/latesttopic.py
|
Python
|
gpl-3.0
| 2,452
|
import os
import subprocess
import threading
import timeit
from collections import deque
from logging import critical, info, debug, exception
from math import ceil
from queue import Queue
from time import time
from .service import Service, ServiceConfig
from .queuemanager import QueueManager, TimedQueueManager, BillingQueueManager, PingQueueManager, ServicesQueueManager, \
AlertQueueManager, PollerQueueManager, DiscoveryQueueManager
def normalize_wait(seconds):
return ceil(seconds - (time() % seconds))
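# normalize_wait() aligns the next wait to a multiple of `seconds` on the
# wall clock. Worked example (illustrative): if time() % 60 == 12.3, then
# normalize_wait(60) == ceil(60 - 12.3) == 48, i.e. wake at the minute mark.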
def call_script(script, args=()):
"""
Run a LibreNMS script. Captures all output and throws an exception if a non-zero
status is returned. Blocks parent signals (like SIGINT and SIGTERM).
:param script: the name of the executable relative to the base directory
:param args: a tuple of arguments to send to the command
:returns the output of the command
"""
if script.endswith('.php'):
# save calling the sh process
base = ('/usr/bin/env', 'php')
else:
base = ()
base_dir = os.path.realpath(os.path.dirname(__file__) + "/..")
cmd = base + ("{}/{}".format(base_dir, script),) + tuple(map(str, args))
debug("Running {}".format(cmd))
# preexec_fn=os.setsid here keeps process signals from propagating
return subprocess.check_output(cmd, stderr=subprocess.STDOUT, preexec_fn=os.setsid, close_fds=True).decode()
class DB:
def __init__(self, config, auto_connect=True):
"""
Simple DB wrapper
:param config: The poller config object
"""
self.config = config
self._db = {}
if auto_connect:
self.connect()
def connect(self):
try:
import pymysql
pymysql.install_as_MySQLdb()
info("Using pure python SQL client")
except ImportError:
info("Using other SQL client")
try:
import MySQLdb
except ImportError:
critical("ERROR: missing a mysql python module")
critical("Install either 'PyMySQL' or 'mysqlclient' from your OS software repository or from PyPI")
raise
try:
args = {
'host': self.config.db_host,
'port': self.config.db_port,
'user': self.config.db_user,
'passwd': self.config.db_pass,
'db': self.config.db_name
}
if self.config.db_socket:
args['unix_socket'] = self.config.db_socket
conn = MySQLdb.connect(**args)
conn.autocommit(True)
conn.ping(True)
self._db[threading.get_ident()] = conn
except Exception as e:
critical("ERROR: Could not connect to MySQL database! {}".format(e))
raise
def db_conn(self):
"""
Refers to a database connection via thread identifier
:return: database connection handle
"""
# Does a connection exist for this thread
if threading.get_ident() not in self._db.keys():
self.connect()
return self._db[threading.get_ident()]
def query(self, query, args=None):
"""
Open a cursor, fetch the query with args, close the cursor and return it.
:rtype: MySQLdb.Cursor
:param query:
:param args:
:return: the cursor with results
"""
try:
cursor = self.db_conn().cursor()
cursor.execute(query, args)
cursor.close()
return cursor
except Exception as e:
critical("DB Connection exception {}".format(e))
self.close()
raise
def close(self):
"""
Close the connection owned by this thread.
"""
conn = self._db.pop(threading.get_ident(), None)
if conn:
conn.close()
class RecurringTimer:
def __init__(self, duration, target, thread_name=None):
self.duration = duration
self.target = target
self._timer_thread = None
self._thread_name = thread_name
self._event = threading.Event()
def _loop(self):
while not self._event.is_set():
self._event.wait(normalize_wait(self.duration))
if not self._event.is_set():
self.target()
def start(self):
self._timer_thread = threading.Thread(target=self._loop)
if self._thread_name:
self._timer_thread.name = self._thread_name
self._event.clear()
self._timer_thread.start()
def stop(self):
self._event.set()
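# Minimal usage sketch (illustrative; the callback is an assumption):
#
# timer = RecurringTimer(60, lambda: debug('tick'), thread_name='heartbeat')
# timer.start()  # fires at each minute boundary via normalize_wait()
# ...
# timer.stop()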
class Lock:
""" Base lock class this is not thread safe"""
def __init__(self):
self._locks = {} # store a tuple (owner, expiration)
def lock(self, name, owner, expiration, allow_owner_relock=False):
"""
Obtain the named lock.
:param allow_owner_relock:
:param name: str the name of the lock
:param owner: str a unique name for the locking node
:param expiration: int in seconds
"""
if (
(name not in self._locks) or # lock doesn't exist
(allow_owner_relock and self._locks.get(name, [None])[0] == owner) or # owner has permission
time() > self._locks[name][1] # lock has expired
):
self._locks[name] = (owner, expiration + time())
return self._locks[name][0] == owner
return False
def unlock(self, name, owner):
"""
Release the named lock.
:param name: str the name of the lock
:param owner: str a unique name for the locking node
"""
if (name in self._locks) and self._locks[name][0] == owner:
self._locks.pop(name, None)
return True
return False
def check_lock(self, name):
lock = self._locks.get(name, None)
if lock:
return lock[1] > time()
return False
def print_locks(self):
debug(self._locks)
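# Minimal usage sketch (illustrative names; ThreadingLock, defined below,
# is the thread-safe variant):
#
# lock = ThreadingLock()
# if lock.lock('poller.device.42', 'node-1', expiration=300):
#     try:
#         ...  # exclusive work
#     finally:
#         lock.unlock('poller.device.42', 'node-1')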
class ThreadingLock(Lock):
"""A subclass of Lock that uses thread-safe locking"""
def __init__(self):
Lock.__init__(self)
self._lock = threading.Lock()
def lock(self, name, owner, expiration, allow_owner_relock=False):
"""
Obtain the named lock.
:param allow_owner_relock:
:param name: str the name of the lock
:param owner: str a unique name for the locking node
:param expiration: int in seconds
"""
with self._lock:
return Lock.lock(self, name, owner, expiration, allow_owner_relock)
def unlock(self, name, owner):
"""
Release the named lock.
:param name: str the name of the lock
:param owner: str a unique name for the locking node
"""
with self._lock:
return Lock.unlock(self, name, owner)
def check_lock(self, name):
return Lock.check_lock(self, name)
def print_locks(self):
Lock.print_locks(self)
class RedisLock(Lock):
def __init__(self, namespace='lock', **redis_kwargs):
import redis
redis_kwargs['decode_responses'] = True
self._redis = redis.Redis(**redis_kwargs)
self._redis.ping()
self._namespace = namespace
def __key(self, name):
return "{}:{}".format(self._namespace, name)
def lock(self, name, owner, expiration=1, allow_owner_relock=False):
"""
Obtain the named lock.
:param allow_owner_relock: bool
:param name: str the name of the lock
:param owner: str a unique name for the locking node
:param expiration: int in seconds, 0 expiration means forever
"""
import redis
try:
if int(expiration) < 1:
expiration = 1
key = self.__key(name)
non_existing = not (allow_owner_relock and self._redis.get(key) == owner)
return self._redis.set(key, owner, ex=int(expiration), nx=non_existing)
except redis.exceptions.ResponseError as e:
exception("Unable to obtain lock, local state: name: %s, owner: %s, expiration: %s, allow_owner_relock: %s",
name, owner, expiration, allow_owner_relock)
def unlock(self, name, owner):
"""
Release the named lock.
:param name: str the name of the lock
:param owner: str a unique name for the locking node
"""
key = self.__key(name)
if self._redis.get(key) == owner:
self._redis.delete(key)
return True
return False
def check_lock(self, name):
return self._redis.get(self.__key(name)) is not None
def print_locks(self):
keys = self._redis.keys(self.__key('*'))
for key in keys:
print("{} locked by {}, expires in {} seconds".format(key, self._redis.get(key), self._redis.ttl(key)))
class RedisUniqueQueue(object):
def __init__(self, name, namespace='queue', **redis_kwargs):
import redis
redis_kwargs['decode_responses'] = True
self._redis = redis.Redis(**redis_kwargs)
self._redis.ping()
self.key = "{}:{}".format(namespace, name)
# clean up from previous implementations
if self._redis.type(self.key) != 'zset':
self._redis.delete(self.key)
def qsize(self):
return self._redis.zcount(self.key, '-inf', '+inf')
def empty(self):
return self.qsize() == 0
def put(self, item):
self._redis.zadd(self.key, {item: time()}, nx=True)
def get(self, block=True, timeout=None):
if block:
# bzpopmin returns a (key, member, score) tuple, or None on timeout
item = self._redis.bzpopmin(self.key, timeout=timeout)
item = item[1] if item else None
else:
# zpopmin returns a list of (member, score) tuples
item = self._redis.zpopmin(self.key)
item = item[0][0] if item else None
return item
def get_nowait(self):
return self.get(False)
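# Usage sketch (illustrative; assumes a reachable Redis server):
#
# q = RedisUniqueQueue('poller', namespace='queue', host='localhost')
# q.put(42)
# q.put(42)         # duplicate: still a single sorted-set member
# q.get(timeout=1)  # -> '42' (decode_responses=True returns strings)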
class UniqueQueue(Queue):
def _init(self, maxsize):
self.queue = deque()
self.setqueue = set()
def _put(self, item):
if item not in self.setqueue:
self.setqueue.add(item)
self.queue.append(item)
def _get(self):
item = self.queue.popleft()
self.setqueue.remove(item)
return item
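# UniqueQueue behaves like Queue but drops items that are already pending.
# Sketch (illustrative):
#
# q = UniqueQueue()
# q.put('job-1'); q.put('job-1')
# q.qsize()  # -> 1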
class PerformanceCounter(object):
"""
This is a simple counter to record execution time and number of jobs. It's unique to each
poller instance, so does not need to be globally synchronised, just locally.
"""
def __init__(self):
self._count = 0
self._jobs = 0
self._lock = threading.Lock()
def add(self, n):
"""
Add n to the counter and increment the number of jobs by 1
:param n: Number to increment by
"""
with self._lock:
self._count += n
self._jobs += 1
def split(self, precise=False):
"""
Return the current counter value and keep going
:param precise: Whether floating point precision is desired
:return: ((INT or FLOAT), INT)
"""
return (self._count if precise else int(self._count)), self._jobs
def reset(self, precise=False):
"""
Return the current counter value and then zero it.
:param precise: Whether floating point precision is desired
:return: ((INT or FLOAT), INT)
"""
with self._lock:
c = self._count
j = self._jobs
self._count = 0
self._jobs = 0
return (c if precise else int(c)), j
class TimeitContext(object):
"""
Wrapper around timeit to allow the timing of larger blocks of code by wrapping them in "with"
"""
def __init__(self):
self._t = timeit.default_timer()
def __enter__(self):
return self
def __exit__(self, *args):
self._t = None  # clear rather than delete so delta() raises ArithmeticError, not AttributeError
def delta(self):
"""
Calculate the elapsed time since the context was initialised
:return: FLOAT
"""
if not self._t:
raise ArithmeticError("Timer has not been started, cannot return delta")
return timeit.default_timer() - self._t
@classmethod
def start(cls):
"""
Factory method for TimeitContext
:param cls:
:return: TimeitContext
"""
return cls()
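# Usage sketch (illustrative; do_work is a hypothetical workload):
#
# with TimeitContext.start() as timer:
#     do_work()
#     elapsed = timer.delta()  # seconds since the context was created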
|
Rosiak/librenms
|
LibreNMS/__init__.py
|
Python
|
gpl-3.0
| 12,436
|
from fGame import Game
from fV2 import V2
import pygame
class Game1(Game):
angle = 0
pos = V2.One() * 150
def load_content(self):
self.car = pygame.image.load('images/schnitzelimage.png').convert_alpha() # forward slash works on every platform
def update(self, events):
#self.pos+=4
self.angle+=1
rotatedCar=pygame.transform.rotate(self.car,self.angle)
self.screen.blit(rotatedCar,self.pos.getArray())
#//if (self.angle % 2 == 0):
# self.screen.blit(rotatedCar,self.pos.getArray())
#else:
# self.screen.blit(self.car,self.pos.getArray())
|
gitlitz/pygame-with-interpreter
|
3/fGame1.py
|
Python
|
gpl-3.0
| 610
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Miscellaneous utility functions."""
import os
import sys
import errno
import locale
import re
import tempfile
import shutil
import fnmatch
import functools
from collections import Counter, namedtuple
from multiprocessing.pool import ThreadPool
import traceback
import subprocess
import platform
import shlex
from beets.util import hidden
from unidecode import unidecode
from enum import Enum
MAX_FILENAME_LENGTH = 200
WINDOWS_MAGIC_PREFIX = '\\\\?\\'
class HumanReadableException(Exception):
"""An Exception that can include a human-readable error message to
be logged without a traceback. Can preserve a traceback for
debugging purposes as well.
Has at least two fields: `reason`, the underlying exception or a
string describing the problem; and `verb`, the action being
performed during the error.
If `tb` is provided, it is a string containing a traceback for the
associated exception. (Note that this is not necessary in Python 3.x
and should be removed when we make the transition.)
"""
error_kind = 'Error' # Human-readable description of error type.
def __init__(self, reason, verb, tb=None):
self.reason = reason
self.verb = verb
self.tb = tb
super().__init__(self.get_message())
def _gerund(self):
"""Generate a (likely) gerund form of the English verb.
"""
if ' ' in self.verb:
return self.verb
gerund = self.verb[:-1] if self.verb.endswith('e') else self.verb
gerund += 'ing'
return gerund
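# e.g. 'move' -> 'moving', 'copy' -> 'copying'; multi-word verbs are
# returned unchanged.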
def _reasonstr(self):
"""Get the reason as a string."""
if isinstance(self.reason, str):
return self.reason
elif isinstance(self.reason, bytes):
return self.reason.decode('utf-8', 'ignore')
elif hasattr(self.reason, 'strerror'): # i.e., EnvironmentError
return self.reason.strerror
else:
return '"{}"'.format(str(self.reason))
def get_message(self):
"""Create the human-readable description of the error, sans
introduction.
"""
raise NotImplementedError
def log(self, logger):
"""Log to the provided `logger` a human-readable message as an
error and a verbose traceback as a debug message.
"""
if self.tb:
logger.debug(self.tb)
logger.error('{0}: {1}', self.error_kind, self.args[0])
class FilesystemError(HumanReadableException):
"""An error that occurred while performing a filesystem manipulation
via a function in this module. The `paths` field is a sequence of
pathnames involved in the operation.
"""
def __init__(self, reason, verb, paths, tb=None):
self.paths = paths
super().__init__(reason, verb, tb)
def get_message(self):
# Use a nicer English phrasing for some specific verbs.
if self.verb in ('move', 'copy', 'rename'):
clause = 'while {} {} to {}'.format(
self._gerund(),
displayable_path(self.paths[0]),
displayable_path(self.paths[1])
)
elif self.verb in ('delete', 'write', 'create', 'read'):
clause = 'while {} {}'.format(
self._gerund(),
displayable_path(self.paths[0])
)
else:
clause = 'during {} of paths {}'.format(
self.verb, ', '.join(displayable_path(p) for p in self.paths)
)
return f'{self._reasonstr()} {clause}'
class MoveOperation(Enum):
"""The file operations that e.g. various move functions can carry out.
"""
MOVE = 0
COPY = 1
LINK = 2
HARDLINK = 3
REFLINK = 4
REFLINK_AUTO = 5
def normpath(path):
"""Provide the canonical form of the path suitable for storing in
the database.
"""
path = syspath(path, prefix=False)
path = os.path.normpath(os.path.abspath(os.path.expanduser(path)))
return bytestring_path(path)
def ancestry(path):
"""Return a list consisting of path's parent directory, its
grandparent, and so on. For instance:
>>> ancestry('/a/b/c')
['/', '/a', '/a/b']
The argument should *not* be the result of a call to `syspath`.
"""
out = []
last_path = None
while path:
path = os.path.dirname(path)
if path == last_path:
break
last_path = path
if path:
# don't yield ''
out.insert(0, path)
return out
def sorted_walk(path, ignore=(), ignore_hidden=False, logger=None):
"""Like `os.walk`, but yields things in case-insensitive sorted,
breadth-first order. Directory and file names matching any glob
pattern in `ignore` are skipped. If `logger` is provided, then
warning messages are logged there when a directory cannot be listed.
"""
# Make sure the paths aren't Unicode strings.
path = bytestring_path(path)
ignore = [bytestring_path(i) for i in ignore]
# Get all the directories and files at this level.
try:
contents = os.listdir(syspath(path))
except OSError as exc:
if logger:
logger.warning('could not list directory {}: {}'.format(
displayable_path(path), exc.strerror
))
return
dirs = []
files = []
for base in contents:
base = bytestring_path(base)
# Skip ignored filenames.
skip = False
for pat in ignore:
if fnmatch.fnmatch(base, pat):
if logger:
logger.debug('ignoring {} due to ignore rule {}'.format(
base, pat
))
skip = True
break
if skip:
continue
# Add to output as either a file or a directory.
cur = os.path.join(path, base)
if (ignore_hidden and not hidden.is_hidden(cur)) or not ignore_hidden:
if os.path.isdir(syspath(cur)):
dirs.append(base)
else:
files.append(base)
# Sort lists (case-insensitive) and yield the current level.
dirs.sort(key=bytes.lower)
files.sort(key=bytes.lower)
yield (path, dirs, files)
# Recurse into directories.
for base in dirs:
cur = os.path.join(path, base)
# yield from sorted_walk(...)
yield from sorted_walk(cur, ignore, ignore_hidden, logger)
def path_as_posix(path):
"""Return the string representation of the path with forward (/)
slashes.
"""
return path.replace(b'\\', b'/')
def mkdirall(path):
"""Make all the enclosing directories of path (like mkdir -p on the
parent).
"""
for ancestor in ancestry(path):
if not os.path.isdir(syspath(ancestor)):
try:
os.mkdir(syspath(ancestor))
except OSError as exc:
raise FilesystemError(exc, 'create', (ancestor,),
traceback.format_exc())
def fnmatch_all(names, patterns):
"""Determine whether all strings in `names` match at least one of
the `patterns`, which should be shell glob expressions.
"""
for name in names:
matches = False
for pattern in patterns:
matches = fnmatch.fnmatch(name, pattern)
if matches:
break
if not matches:
return False
return True
def prune_dirs(path, root=None, clutter=('.DS_Store', 'Thumbs.db')):
"""If path is an empty directory, then remove it. Recursively remove
path's ancestry up to root (which is never removed) where there are
empty directories. If path is not contained in root, then nothing is
removed. Glob patterns in clutter are ignored when determining
emptiness. If root is not provided, then only path may be removed
(i.e., no recursive removal).
"""
path = normpath(path)
if root is not None:
root = normpath(root)
ancestors = ancestry(path)
if root is None:
# Only remove the top directory.
ancestors = []
elif root in ancestors:
# Only remove directories below the root.
ancestors = ancestors[ancestors.index(root) + 1:]
else:
# Remove nothing.
return
# Traverse upward from path.
ancestors.append(path)
ancestors.reverse()
for directory in ancestors:
directory = syspath(directory)
if not os.path.exists(directory):
# Directory gone already.
continue
clutter = [bytestring_path(c) for c in clutter]
match_paths = [bytestring_path(d) for d in os.listdir(directory)]
try:
if fnmatch_all(match_paths, clutter):
# Directory contains only clutter (or nothing).
shutil.rmtree(directory)
else:
break
except OSError:
break
def components(path):
"""Return a list of the path components in path. For instance:
>>> components('/a/b/c')
['a', 'b', 'c']
The argument should *not* be the result of a call to `syspath`.
"""
comps = []
ances = ancestry(path)
for anc in ances:
comp = os.path.basename(anc)
if comp:
comps.append(comp)
else: # root
comps.append(anc)
last = os.path.basename(path)
if last:
comps.append(last)
return comps
def arg_encoding():
"""Get the encoding for command-line arguments (and other OS
locale-sensitive strings).
"""
try:
return locale.getdefaultlocale()[1] or 'utf-8'
except ValueError:
# Invalid locale environment variable setting. To avoid
# failing entirely for no good reason, assume UTF-8.
return 'utf-8'
def _fsencoding():
"""Get the system's filesystem encoding. On Windows, this is always
UTF-8 (not MBCS).
"""
encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
if encoding == 'mbcs':
# On Windows, a broken encoding known to Python as "MBCS" is
# used for the filesystem. However, we only use the Unicode API
# for Windows paths, so the encoding is actually immaterial so
# we can avoid dealing with this nastiness. We arbitrarily
# choose UTF-8.
encoding = 'utf-8'
return encoding
def bytestring_path(path):
"""Given a path, which is either a bytes or a unicode, returns a str
path (ensuring that we never deal with Unicode pathnames).
"""
# Pass through bytestrings.
if isinstance(path, bytes):
return path
# On Windows, remove the magic prefix added by `syspath`. This makes
# ``bytestring_path(syspath(X)) == X``, i.e., we can safely
# round-trip through `syspath`.
if os.path.__name__ == 'ntpath' and path.startswith(WINDOWS_MAGIC_PREFIX):
path = path[len(WINDOWS_MAGIC_PREFIX):]
# Try to encode with default encodings, but fall back to utf-8.
try:
return path.encode(_fsencoding())
except (UnicodeError, LookupError):
return path.encode('utf-8')
PATH_SEP = bytestring_path(os.sep)
def displayable_path(path, separator='; '):
"""Attempts to decode a bytestring path to a unicode object for the
purpose of displaying it to the user. If the `path` argument is a
list or a tuple, the elements are joined with `separator`.
"""
if isinstance(path, (list, tuple)):
return separator.join(displayable_path(p) for p in path)
elif isinstance(path, str):
return path
elif not isinstance(path, bytes):
# A non-string object: just get its unicode representation.
return str(path)
try:
return path.decode(_fsencoding(), 'ignore')
except (UnicodeError, LookupError):
return path.decode('utf-8', 'ignore')
def syspath(path, prefix=True):
"""Convert a path for use by the operating system. In particular,
paths on Windows must receive a magic prefix and must be converted
to Unicode before they are sent to the OS. To disable the magic
prefix on Windows, set `prefix` to False---but only do this if you
*really* know what you're doing.
"""
# Don't do anything if we're not on windows
if os.path.__name__ != 'ntpath':
return path
if not isinstance(path, str):
# Beets currently represents Windows paths internally with UTF-8
# arbitrarily. But earlier versions used MBCS because it is
# reported as the FS encoding by Windows. Try both.
try:
path = path.decode('utf-8')
except UnicodeError:
# The encoding should always be MBCS, Windows' broken
# Unicode representation.
encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
path = path.decode(encoding, 'replace')
# Add the magic prefix if it isn't already there.
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247.aspx
if prefix and not path.startswith(WINDOWS_MAGIC_PREFIX):
if path.startswith('\\\\'):
# UNC path. Final path should look like \\?\UNC\...
path = 'UNC' + path[1:]
path = WINDOWS_MAGIC_PREFIX + path
return path
def samefile(p1, p2):
"""Safer equality for paths."""
if p1 == p2:
return True
return shutil._samefile(syspath(p1), syspath(p2))
def remove(path, soft=True):
"""Remove the file. If `soft`, then no error will be raised if the
file does not exist.
"""
path = syspath(path)
if soft and not os.path.exists(path):
return
try:
os.remove(path)
except OSError as exc:
raise FilesystemError(exc, 'delete', (path,), traceback.format_exc())
def copy(path, dest, replace=False):
"""Copy a plain file. Permissions are not copied. If `dest` already
exists, raises a FilesystemError unless `replace` is True. Has no
effect if `path` is the same as `dest`. Paths are translated to
system paths before the syscall.
"""
if samefile(path, dest):
return
path = syspath(path)
dest = syspath(dest)
if not replace and os.path.exists(dest):
raise FilesystemError('file exists', 'copy', (path, dest))
try:
shutil.copyfile(path, dest)
except OSError as exc:
raise FilesystemError(exc, 'copy', (path, dest),
traceback.format_exc())
def move(path, dest, replace=False):
"""Rename a file. `dest` may not be a directory. If `dest` already
exists, raises an OSError unless `replace` is True. Has no effect if
`path` is the same as `dest`. If the paths are on different
filesystems (or the rename otherwise fails), a copy is attempted
instead, in which case metadata will *not* be preserved. Paths are
translated to system paths.
"""
if os.path.isdir(path):
raise FilesystemError(u'source is directory', 'move', (path, dest))
if os.path.isdir(dest):
raise FilesystemError(u'destination is directory', 'move',
(path, dest))
if samefile(path, dest):
return
path = syspath(path)
dest = syspath(dest)
if os.path.exists(dest) and not replace:
raise FilesystemError('file exists', 'rename', (path, dest))
# First, try renaming the file.
try:
os.replace(path, dest)
except OSError:
tmp = tempfile.mktemp(suffix='.beets',
prefix=py3_path(b'.' + os.path.basename(dest)),
dir=py3_path(os.path.dirname(dest)))
tmp = syspath(tmp)
try:
shutil.copyfile(path, tmp)
os.replace(tmp, dest)
tmp = None
os.remove(path)
except OSError as exc:
raise FilesystemError(exc, 'move', (path, dest),
traceback.format_exc())
finally:
if tmp is not None:
os.remove(tmp)
def link(path, dest, replace=False):
"""Create a symbolic link from path to `dest`. Raises an OSError if
`dest` already exists, unless `replace` is True. Does nothing if
`path` == `dest`.
"""
if samefile(path, dest):
return
if os.path.exists(syspath(dest)) and not replace:
raise FilesystemError('file exists', 'rename', (path, dest))
try:
os.symlink(syspath(path), syspath(dest))
except NotImplementedError:
# raised on python >= 3.2 and Windows versions before Vista
raise FilesystemError('OS does not support symbolic links.',
'link', (path, dest), traceback.format_exc())
except OSError as exc:
# TODO: Windows version checks can be removed for python 3
if hasattr(sys, 'getwindowsversion'):
if sys.getwindowsversion()[0] < 6: # is before Vista
exc = 'OS does not support symbolic links.'
raise FilesystemError(exc, 'link', (path, dest),
traceback.format_exc())
def hardlink(path, dest, replace=False):
"""Create a hard link from path to `dest`. Raises an OSError if
`dest` already exists, unless `replace` is True. Does nothing if
`path` == `dest`.
"""
if samefile(path, dest):
return
if os.path.exists(syspath(dest)) and not replace:
raise FilesystemError('file exists', 'rename', (path, dest))
try:
os.link(syspath(path), syspath(dest))
except NotImplementedError:
raise FilesystemError('OS does not support hard links.',
'link', (path, dest), traceback.format_exc())
except OSError as exc:
if exc.errno == errno.EXDEV:
raise FilesystemError('Cannot hard link across devices.',
'link', (path, dest), traceback.format_exc())
else:
raise FilesystemError(exc, 'link', (path, dest),
traceback.format_exc())
def reflink(path, dest, replace=False, fallback=False):
"""Create a reflink from `dest` to `path`.
Raise an `OSError` if `dest` already exists, unless `replace` is
True. If `path` == `dest`, then do nothing.
If reflinking fails and `fallback` is enabled, try copying the file
instead. Otherwise, raise an error without trying a plain copy.
May raise an `ImportError` if the `reflink` module is not available.
"""
import reflink as pyreflink
if samefile(path, dest):
return
if os.path.exists(syspath(dest)) and not replace:
raise FilesystemError('file exists', 'rename', (path, dest))
try:
pyreflink.reflink(path, dest)
except (NotImplementedError, pyreflink.ReflinkImpossibleError):
if fallback:
copy(path, dest, replace)
else:
raise FilesystemError('OS/filesystem does not support reflinks.',
'link', (path, dest), traceback.format_exc())
def unique_path(path):
"""Returns a version of ``path`` that does not exist on the
filesystem. Specifically, if ``path`` itself already exists, then
something unique is appended to the path.
"""
if not os.path.exists(syspath(path)):
return path
base, ext = os.path.splitext(path)
match = re.search(br'\.(\d)+$', base)
if match:
num = int(match.group(1))
base = base[:match.start()]
else:
num = 0
while True:
num += 1
suffix = f'.{num}'.encode() + ext
new_path = base + suffix
if not os.path.exists(new_path):
return new_path
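# Worked example (illustrative): if b'/music/track.mp3' exists, the
# candidates tried are b'/music/track.1.mp3', b'/music/track.2.mp3', ...
# until one is free; an existing numeric suffix is incremented instead.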
# Note: The Windows "reserved characters" are, of course, allowed on
# Unix. They are forbidden here because they cause problems on Samba
# shares, which are sufficiently common as to cause frequent problems.
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247.aspx
CHAR_REPLACE = [
(re.compile(r'[\\/]'), '_'), # / and \ -- forbidden everywhere.
(re.compile(r'^\.'), '_'), # Leading dot (hidden files on Unix).
(re.compile(r'[\x00-\x1f]'), ''), # Control characters.
(re.compile(r'[<>:"\?\*\|]'), '_'), # Windows "reserved characters".
(re.compile(r'\.$'), '_'), # Trailing dots.
(re.compile(r'\s+$'), ''), # Trailing whitespace.
]
def sanitize_path(path, replacements=None):
"""Takes a path (as a Unicode string) and makes sure that it is
legal. Returns a new path. Only works with fragments; won't work
reliably on Windows when a path begins with a drive letter. Path
separators (including altsep!) should already be cleaned from the
path components. If replacements is specified, it is used *instead*
of the default set of replacements; it must be a list of (compiled
regex, replacement string) pairs.
"""
replacements = replacements or CHAR_REPLACE
comps = components(path)
if not comps:
return ''
for i, comp in enumerate(comps):
for regex, repl in replacements:
comp = regex.sub(repl, comp)
comps[i] = comp
return os.path.join(*comps)
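# Worked example (illustrative), using the default CHAR_REPLACE rules:
# sanitize_path('.hidden: name?') == '_hidden_ name_'
# (the leading dot and the reserved ':' and '?' each become '_').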
def truncate_path(path, length=MAX_FILENAME_LENGTH):
"""Given a bytestring path or a Unicode path fragment, truncate the
components to a legal length. In the last component, the extension
is preserved.
"""
comps = components(path)
out = [c[:length] for c in comps]
base, ext = os.path.splitext(comps[-1])
if ext:
# Last component has an extension.
base = base[:length - len(ext)]
out[-1] = base + ext
return os.path.join(*out)
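# Worked example (illustrative): truncate_path('averylongtrackname.mp3', 10)
# keeps the extension and returns 'averyl.mp3' (6 + 4 characters).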
def _legalize_stage(path, replacements, length, extension, fragment):
"""Perform a single round of path legalization steps
(sanitation/replacement, encoding from Unicode to bytes,
extension-appending, and truncation). Return the path (Unicode if
`fragment` is set, `bytes` otherwise) and whether truncation was
required.
"""
# Perform an initial sanitization including user replacements.
path = sanitize_path(path, replacements)
# Encode for the filesystem.
if not fragment:
path = bytestring_path(path)
# Preserve extension.
path += extension.lower()
# Truncate too-long components.
pre_truncate_path = path
path = truncate_path(path, length)
return path, path != pre_truncate_path
def legalize_path(path, replacements, length, extension, fragment):
"""Given a path-like Unicode string, produce a legal path. Return
the path and a flag indicating whether some replacements had to be
ignored (see below).
The legalization process (see `_legalize_stage`) consists of
applying the sanitation rules in `replacements`, encoding the string
to bytes (unless `fragment` is set), truncating components to
`length`, appending the `extension`.
This function performs up to three calls to `_legalize_stage` in
case truncation conflicts with replacements (as can happen when
truncation creates whitespace at the end of the string, for
example). The limited number of iterations avoids the
possibility of an infinite loop of sanitation and truncation
operations, which could be caused by replacement rules that make the
string longer. The flag returned from this function indicates that
the path has to be truncated twice (indicating that replacements
made the string longer again after it was truncated); the
application should probably log some sort of warning.
"""
if fragment:
# Outputting Unicode.
extension = extension.decode('utf-8', 'ignore')
first_stage_path, _ = _legalize_stage(
path, replacements, length, extension, fragment
)
# Convert back to Unicode with extension removed.
first_stage_path, _ = os.path.splitext(displayable_path(first_stage_path))
# Re-sanitize following truncation (including user replacements).
second_stage_path, retruncated = _legalize_stage(
first_stage_path, replacements, length, extension, fragment
)
# If the path was once again truncated, discard user replacements
# and run through one last legalization stage.
if retruncated:
second_stage_path, _ = _legalize_stage(
first_stage_path, None, length, extension, fragment
)
return second_stage_path, retruncated
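# Editor's note: a hedged usage sketch, not part of beets. With fragment=True
# the result stays Unicode; the returned flag reports that a replacement rule
# re-lengthened the path after truncation, so it had to be truncated again.
def _example_legalize():
    import re
    rules = [(re.compile(r'\s+$'), u'')]  # strip trailing whitespace
    return legalize_path(u'Artist/Very Long Album Title   ', rules,
                         255, b'.mp3', True)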
def py3_path(path):
"""Convert a bytestring path to Unicode on Python 3 only. On Python
2, return the bytestring path unchanged.
This helps deal with APIs on Python 3 that *only* accept Unicode
(i.e., `str` objects). I philosophically disagree with this
decision, because paths are sadly bytes on Unix, but that's the way
it is. So this function helps us "smuggle" the true bytes data
through APIs that took Python 3's Unicode mandate too seriously.
"""
if isinstance(path, str):
return path
assert isinstance(path, bytes)
return os.fsdecode(path)
def str2bool(value):
"""Returns a boolean reflecting a human-entered string."""
return value.lower() in ('yes', '1', 'true', 't', 'y')
def as_string(value):
"""Convert a value to a Unicode object for matching with a query.
None becomes the empty string. Bytestrings are silently decoded.
"""
if value is None:
return ''
elif isinstance(value, memoryview):
return bytes(value).decode('utf-8', 'ignore')
elif isinstance(value, bytes):
return value.decode('utf-8', 'ignore')
else:
return str(value)
def text_string(value, encoding='utf-8'):
"""Convert a string, which can either be bytes or unicode, to
unicode.
Text (unicode) is left untouched; bytes are decoded. This is useful
to convert from a "native string" (bytes on Python 2, str on Python
3) to a consistently unicode value.
"""
if isinstance(value, bytes):
return value.decode(encoding)
return value
def plurality(objs):
"""Given a sequence of hashble objects, returns the object that
is most common in the set and the its number of appearance. The
sequence must contain at least one object.
"""
c = Counter(objs)
if not c:
raise ValueError('sequence must be non-empty')
return c.most_common(1)[0]
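# Editor's note: a hypothetical example, not part of beets.
def _example_plurality():
    # 'flac' appears twice, so it wins with a count of 2.
    return plurality(['flac', 'mp3', 'flac'])  # -> ('flac', 2)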
def cpu_count():
"""Return the number of hardware thread contexts (cores or SMT
threads) in the system.
"""
# Adapted from the soundconverter project:
# https://github.com/kassoulet/soundconverter
if sys.platform == 'win32':
try:
num = int(os.environ['NUMBER_OF_PROCESSORS'])
except (ValueError, KeyError):
num = 0
elif sys.platform == 'darwin':
try:
num = int(command_output([
'/usr/sbin/sysctl',
'-n',
'hw.ncpu',
]).stdout)
except (ValueError, OSError, subprocess.CalledProcessError):
num = 0
else:
try:
num = os.sysconf('SC_NPROCESSORS_ONLN')
except (ValueError, OSError, AttributeError):
num = 0
if num >= 1:
return num
else:
return 1
def convert_command_args(args):
"""Convert command arguments to bytestrings on Python 2 and
surrogate-escaped strings on Python 3."""
assert isinstance(args, list)
def convert(arg):
if isinstance(arg, bytes):
arg = arg.decode(arg_encoding(), 'surrogateescape')
return arg
return [convert(a) for a in args]
# stdout and stderr as bytes
CommandOutput = namedtuple("CommandOutput", ("stdout", "stderr"))
def command_output(cmd, shell=False):
"""Runs the command and returns its output after it has exited.
Returns a CommandOutput. The attributes ``stdout`` and ``stderr`` contain
byte strings of the respective output streams.
    ``cmd`` is a list of arguments starting with the command name. The
    arguments are bytes on Unix and strings on Windows.
If ``shell`` is true, ``cmd`` is assumed to be a string and passed to a
shell to execute.
If the process exits with a non-zero return code
``subprocess.CalledProcessError`` is raised. May also raise
``OSError``.
This replaces `subprocess.check_output` which can have problems if lots of
output is sent to stderr.
"""
cmd = convert_command_args(cmd)
try: # python >= 3.3
devnull = subprocess.DEVNULL
except AttributeError:
devnull = open(os.devnull, 'r+b')
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=devnull,
close_fds=platform.system() != 'Windows',
shell=shell
)
stdout, stderr = proc.communicate()
if proc.returncode:
raise subprocess.CalledProcessError(
returncode=proc.returncode,
cmd=' '.join(cmd),
output=stdout + stderr,
)
return CommandOutput(stdout, stderr)
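# Editor's note: a hedged usage sketch, not part of beets. On Unix the
# argument list should be bytes; failures surface as CalledProcessError.
def _example_command_output():
    try:
        out = command_output([b'echo', b'hello'])
    except (OSError, subprocess.CalledProcessError):
        return None
    return out.stdout  # -> b'hello\n'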
def max_filename_length(path, limit=MAX_FILENAME_LENGTH):
"""Attempt to determine the maximum filename length for the
filesystem containing `path`. If the value is greater than `limit`,
then `limit` is used instead (to prevent errors when a filesystem
misreports its capacity). If it cannot be determined (e.g., on
Windows), return `limit`.
"""
if hasattr(os, 'statvfs'):
try:
res = os.statvfs(path)
except OSError:
return limit
        return min(res[9], limit)  # index 9 is f_namemax
else:
return limit
def open_anything():
"""Return the system command that dispatches execution to the correct
program.
"""
sys_name = platform.system()
if sys_name == 'Darwin':
base_cmd = 'open'
elif sys_name == 'Windows':
base_cmd = 'start'
else: # Assume Unix
base_cmd = 'xdg-open'
return base_cmd
def editor_command():
"""Get a command for opening a text file.
Use the `EDITOR` environment variable by default. If it is not
present, fall back to `open_anything()`, the platform-specific tool
for opening files in general.
"""
editor = os.environ.get('EDITOR')
if editor:
return editor
return open_anything()
def interactive_open(targets, command):
"""Open the files in `targets` by `exec`ing a new `command`, given
as a Unicode string. (The new program takes over, and Python
execution ends: this does not fork a subprocess.)
Can raise `OSError`.
"""
assert command
# Split the command string into its arguments.
try:
args = shlex.split(command)
except ValueError: # Malformed shell tokens.
args = [command]
    args.insert(0, args[0])  # os.execlp takes the file plus full argv, so duplicate the command
args += targets
return os.execlp(*args)
def _windows_long_path_name(short_path):
"""Use Windows' `GetLongPathNameW` via ctypes to get the canonical,
long path given a short filename.
"""
if not isinstance(short_path, str):
short_path = short_path.decode(_fsencoding())
import ctypes
buf = ctypes.create_unicode_buffer(260)
get_long_path_name_w = ctypes.windll.kernel32.GetLongPathNameW
return_value = get_long_path_name_w(short_path, buf, 260)
if return_value == 0 or return_value > 260:
# An error occurred
return short_path
else:
long_path = buf.value
# GetLongPathNameW does not change the case of the drive
# letter.
if len(long_path) > 1 and long_path[1] == ':':
long_path = long_path[0].upper() + long_path[1:]
return long_path
def case_sensitive(path):
"""Check whether the filesystem at the given path is case sensitive.
To work best, the path should point to a file or a directory. If the path
does not exist, assume a case sensitive file system on every platform
except Windows.
"""
# A fallback in case the path does not exist.
if not os.path.exists(syspath(path)):
# By default, the case sensitivity depends on the platform.
return platform.system() != 'Windows'
# If an upper-case version of the path exists but a lower-case
# version does not, then the filesystem must be case-sensitive.
# (Otherwise, we have more work to do.)
if not (os.path.exists(syspath(path.lower())) and
os.path.exists(syspath(path.upper()))):
return True
# Both versions of the path exist on the file system. Check whether
# they refer to different files by their inodes. Alas,
# `os.path.samefile` is only available on Unix systems on Python 2.
if platform.system() != 'Windows':
return not os.path.samefile(syspath(path.lower()),
syspath(path.upper()))
# On Windows, we check whether the canonical, long filenames for the
# files are the same.
lower = _windows_long_path_name(path.lower())
upper = _windows_long_path_name(path.upper())
return lower != upper
def raw_seconds_short(string):
"""Formats a human-readable M:SS string as a float (number of seconds).
Raises ValueError if the conversion cannot take place due to `string` not
being in the right format.
"""
match = re.match(r'^(\d+):([0-5]\d)$', string)
if not match:
raise ValueError('String not in M:SS format')
minutes, seconds = map(int, match.groups())
return float(minutes * 60 + seconds)
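# Editor's note: hypothetical checks, not part of beets, exercising the
# M:SS parser above.
def _example_raw_seconds_short():
    assert raw_seconds_short(u'3:05') == 185.0
    try:
        raw_seconds_short(u'3:65')  # seconds must be in 00-59
    except ValueError:
        pass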
def asciify_path(path, sep_replace):
"""Decodes all unicode characters in a path into ASCII equivalents.
Substitutions are provided by the unidecode module. Path separators in the
input are preserved.
Keyword arguments:
path -- The path to be asciified.
sep_replace -- the string to be used to replace extraneous path separators.
"""
# if this platform has an os.altsep, change it to os.sep.
if os.altsep:
path = path.replace(os.altsep, os.sep)
path_components = path.split(os.sep)
    for index, item in enumerate(path_components):
        # Transliterate, then neutralize any separators the
        # transliteration may have introduced into the component.
        item = unidecode(item).replace(os.sep, sep_replace)
        if os.altsep:
            item = item.replace(os.altsep, sep_replace)
        path_components[index] = item
return os.sep.join(path_components)
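# Editor's note: a hypothetical example, not part of beets. unidecode maps
# each non-ASCII character to an ASCII approximation; any separator a
# transliteration introduces is neutralized with sep_replace.
def _example_asciify():
    return asciify_path(u'Bj\u00f6rk/Vol. 4', u'_')  # -> u'Bjork/Vol. 4'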
def par_map(transform, items):
"""Apply the function `transform` to all the elements in the
iterable `items`, like `map(transform, items)` but with no return
value. The map *might* happen in parallel: it's parallel on Python 3
and sequential on Python 2.
The parallelism uses threads (not processes), so this is only useful
for IO-bound `transform`s.
"""
pool = ThreadPool()
pool.map(transform, items)
pool.close()
pool.join()
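# Editor's note: a hedged sketch, not part of beets. Because the pool uses
# threads, par_map only pays off for IO-bound work such as stat calls.
def _example_par_map():
    sizes = {}
    def record_size(p):
        try:
            sizes[p] = os.path.getsize(p)
        except OSError:
            sizes[p] = None
    par_map(record_size, [b'/etc/hosts', b'/etc/passwd'])
    return sizes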
def lazy_property(func):
"""A decorator that creates a lazily evaluated property. On first access,
the property is assigned the return value of `func`. This first value is
stored, so that future accesses do not have to evaluate `func` again.
This behaviour is useful when `func` is expensive to evaluate, and it is
not certain that the result will be needed.
"""
field_name = '_' + func.__name__
@property
@functools.wraps(func)
def wrapper(self):
if hasattr(self, field_name):
return getattr(self, field_name)
value = func(self)
setattr(self, field_name, value)
return value
return wrapper
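# Editor's note: a hypothetical demonstration, not part of beets. The
# expensive body runs once; later accesses return the cached attribute.
class _ExampleLazy(object):
    @lazy_property
    def answer(self):
        print('computing...')  # printed only on first access
        return 42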
def decode_commandline_path(path):
"""Prepare a path for substitution into commandline template.
On Python 3, we need to construct the subprocess commands to invoke as a
Unicode string. On Unix, this is a little unfortunate---the OS is
expecting bytes---so we use surrogate escaping and decode with the
argument encoding, which is the same encoding that will then be
*reversed* to recover the same bytes before invoking the OS. On
Windows, we want to preserve the Unicode filename "as is."
"""
# On Python 3, the template is a Unicode string, which only supports
# substitution of Unicode variables.
if platform.system() == 'Windows':
return path.decode(_fsencoding())
else:
return path.decode(arg_encoding(), 'surrogateescape')
|
rembo10/headphones
|
lib/beets/util/__init__.py
|
Python
|
gpl-3.0
| 36,926
|
#
# viscosity.py
# This file is part of ISOFT.
#
# Copyright 2018 Chris MacMackin <cmacmackin@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
'''Contains classes for calculating the viscosity of the ice
using different parameterisations.
'''
import numpy as np
import calculus
class NewtonianViscosity(object):
'''A class representing Newtonian viscosity.
value
The viscosity value of the ice
'''
def __init__(this, value=1.0):
this.val = value
def __call__(this, uvec, temperature=-15.0, time=0):
return this.val*np.ones_like(uvec[0,...])
class GlensLaw(object):
    '''A class using Glen's Law to represent viscosity. It treats
    viscosity as a power law in the strain rate.
size
The number of Chebyshev modes in the field
lower
The lower boundary of the field domain
upper
The upper boundary of the field domain
coef
The coefficient by which Glen's Law is scaled
index
The index of the power law
'''
def __init__(this, size, lower=0.0, upper=1.0, coef=1.0, index=3):
this.diff = calculus.Differentiator(size, lower, upper)
this.coef = coef
this.index = float(index)
def __call__(this, uvec, temperature=-15.0, time=0):
if (uvec.ndim > 2):
raise NotImplementedError('GlensLaw only implemented for 1-D '
'velocity field.')
return 0.5*this.coef*abs(this.diff(uvec[:,0]))**(1./this.index - 1.)
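# Editor's note: a hypothetical usage sketch, not part of ISOFT. Array shapes
# follow the __call__ signatures above; whether calculus.Differentiator wants
# the field sampled at Chebyshev collocation points is an assumption left to
# that module.
if __name__ == '__main__':
    u = np.linspace(0.0, 1.0, 10)
    newt = NewtonianViscosity(2.0)
    print(newt(u[np.newaxis, :]))  # uniform viscosity of 2.0
    glen = GlensLaw(10, coef=2.0, index=3)
    print(glen(u[:, np.newaxis]))  # power-law viscosity profile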
|
cmacmackin/isoft
|
plotting/viscosity.py
|
Python
|
gpl-3.0
| 2,204
|
"""
GCode M907
Set stepper current in A
Author: quillford
License: CC BY-SA: http://creativecommons.org/licenses/by-sa/2.0/
"""
from __future__ import absolute_import
from .GCodeCommand import GCodeCommand
class M907(GCodeCommand):
def execute(self, g):
self.printer.path_planner.wait_until_done()
for i in range(g.num_tokens()):
axis = g.token_letter(i)
stepper = self.printer.steppers[axis]
# Cap at 2.5A and set the current
stepper.set_current_value(float(min(g.token_value(i), 2.5)))
def get_description(self):
return "Set stepper current in A"
|
intelligent-agent/redeem
|
redeem/gcodes/M907.py
|
Python
|
gpl-3.0
| 596
|
#
# Copyright 2015-2016 Free Software Foundation, Inc.
#
# This file is part of PyBOMBS
#
# PyBOMBS is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# PyBOMBS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyBOMBS; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
Packager: Base class for external packagers
"""
from pybombs.packagers.base import PackagerBase
from pybombs.utils.vcompare import vcompare
class ExternPackager(object):
"""
Base class for wrappers around external packagers.
"""
def __init__(self, logger):
self.log = logger
def get_available_version(self, pkgname):
"""
Return a version that we can install through this package manager.
"""
raise NotImplementedError
def get_installed_version(self, pkgname):
"""
Return the currently installed version. If pkgname is not installed,
return None.
"""
raise NotImplementedError
def install(self, pkgname):
"""
Install pkgname using this packager.
"""
raise NotImplementedError
def update(self, pkgname):
"""
Update pkgname using this packager.
Defaults to calling install().
"""
return self.install(pkgname)
class ExternReadOnlyPackager(ExternPackager):
"""
Wraps a read-only packager, i.e. one that can't itself install packages
but can find out what's installed.
"""
def __init__(self, logger):
ExternPackager.__init__(self, logger)
def get_available_version(self, pkgname):
"""
The only available version is the installed version.
"""
return self.get_installed_version(pkgname)
def install(self, pkgname):
"""
Can't install, by definition.
"""
return False
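# Editor's note: an illustrative sketch, not part of PyBOMBS. A concrete
# read-only wrapper only has to supply get_installed_version(); the
# in-memory "database" here is a placeholder for a real backend query.
class _ExampleReadOnlyPackager(ExternReadOnlyPackager):
    def __init__(self, logger, installed_db=None):
        ExternReadOnlyPackager.__init__(self, logger)
        self.installed_db = installed_db or {}
    def get_installed_version(self, pkgname):
        # A version string, or None when pkgname is not installed.
        return self.installed_db.get(pkgname)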
class ExternCmdPackagerBase(PackagerBase):
"""
Base class for packagers that use external commands (e.g. apt, yum).
Most packagers will work with a system packager in the background (e.g.
    apt-get, dnf, etc.). All of these pretty much have the same behaviour, and
    only need the backend communication implemented.
"""
def __init__(self):
PackagerBase.__init__(self)
self.packager = None
### API calls ###########################################################
def exists(self, recipe):
"""
Checks to see if a package is available in this packager
and returns the version as a string. If no version can be determined,
return True.
If not available, return None.
"""
self.log.trace("exists({0})".format(recipe.id))
return self._packager_run_tree(recipe, self._package_exists)
def installed(self, recipe):
"""
Returns the installed version of package (identified by recipe)
as a string, or False if the package is not installed.
May also return True if a version can't be determined, but the
recipe is installed.
"""
self.log.trace("Checking if recipe {0} is installed".format(recipe.id))
return self._packager_run_tree(recipe, self._package_installed)
def install(self, recipe, static=False):
"""
Run the installation process for a package given a recipe.
May raise an exception if things go terribly wrong.
Otherwise, return True on success and False if installing
failed in a controlled manner (e.g. the package wasn't available
by this package manager).
"""
self.log.trace("install({0}, static={1})".format(recipe.id, static))
return self._packager_run_tree(recipe, self._package_install)
    def update(self, recipe):
        """
        Runs the update process for a package (identified by recipe).
        Returns True (or a version string) on success, and False if the
        package is not installed or the update failed.
        """
        self.log.trace("Updating recipe {0}".format(recipe.id))
        return self._packager_run_tree(recipe, self._package_update)
def verify(self, recipe):
"""
We can't really verify, we just need to trust the packager.
"""
self.log.trace("Skipping verification of recipe {0}".format(recipe.id))
return True
    def uninstall(self, recipe):
        """
        Uninstalls the package (identified by recipe).
        Return True on success or False on failure.
        """
        self.log.info("No uninstall method specified for package {0}.".format(recipe.id))
        return False
### Packager access #####################################################
def _packager_run_tree(self, recipe, satisfy_evaluator):
"""
Recursively evaluate satisfy rules given in a recipe.
"""
try:
satisfy_rule = recipe.get_package_reqs(self.pkgtype)
except KeyError:
self.log.debug("No satisfy rule for package type {0}".format(self.pkgtype))
return False
if satisfy_rule is None:
return None
if satisfy_rule is True:
self.log.debug(
"Package {0} has an always-true satisfier for packager {1}".format(
recipe.id,
self.pkgtype,
)
)
return True
self.log.trace("Calling ev for recursive satisfier rule evaluation")
return satisfy_rule.ev(satisfy_evaluator)
def _package_exists(self, pkg_name, comparator=">=", required_version=None):
"""
Check if `pkg_name` is installable through this packager.
Return type same as 'exists()'.
"""
available_version = self.packager.get_available_version(pkg_name)
if available_version is True:
return True
if available_version is False \
or (required_version is not None and not vcompare(comparator, available_version, required_version)):
return False
return available_version
def _package_update(self, pkg_name, comparator=">=", required_version=None):
"""
Updates a specific package through the current package manager.
This is typically called by update() to do the actual package
update call.
Return type same as 'update()'.
"""
if not self._package_exists(pkg_name, comparator, required_version):
return False
if not self.packager.update(pkg_name):
return False
installed_version = self.packager.get_installed_version(pkg_name)
if installed_version is False \
or (required_version is not None and not vcompare(comparator, installed_version, required_version)):
return False
return True
def _package_install(self, pkg_name, comparator=">=", required_version=None):
"""
Installs a specific package through the current package manager.
This is typically called by install() to do the actual package
install call.
Returns False if the version comparison fails.
"""
if not self._package_exists(pkg_name, comparator, required_version):
return False
if not self.packager.install(pkg_name):
return False
installed_version = self.packager.get_installed_version(pkg_name)
if installed_version is False \
or installed_version is None \
or (required_version is not None and not vcompare(comparator, installed_version, required_version)):
return False
return True
def _package_installed(self, pkg_name, comparator=">=", required_version=None):
"""
Queries the current package manager to see if a package is installed.
Return type same as 'installed()'.
"""
installed_version = self.packager.get_installed_version(pkg_name)
if not installed_version:
return False
if required_version is None:
return True
try:
return vcompare(comparator, installed_version, required_version)
except TypeError:
return False
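# Editor's note: an illustrative sketch, not part of PyBOMBS. It mirrors the
# version gate applied by _package_exists()/_package_installed() above;
# 'fakepkg' and the version strings are placeholders.
def _example_version_gate(packager, pkg_name='fakepkg', required='2.0'):
    installed = packager.get_installed_version(pkg_name)
    if not installed:
        return False
    # e.g. vcompare('>=', '2.4.1', '2.0') -> True
    return vcompare('>=', installed, required)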
|
gnuradio/pybombs
|
pybombs/packagers/extern.py
|
Python
|
gpl-3.0
| 8,711
|
from __future__ import unicode_literals
from .exceptions import MultipleObjectsReturned, YouTubeError
from .models import Video
from .utils import safe_filename
from urllib import urlencode
from urllib2 import urlopen
from urlparse import urlparse, parse_qs, unquote
import re
YT_BASE_URL = 'http://www.youtube.com/get_video_info'
#YouTube quality and codecs id map.
#source: http://en.wikipedia.org/wiki/YouTube#Quality_and_codecs
YT_ENCODING = {
#Flash Video
5: ["flv", "240p", "Sorenson H.263", "N/A", "0.25", "MP3", "64"],
6: ["flv", "270p", "Sorenson H.263", "N/A", "0.8", "MP3", "64"],
34: ["flv", "360p", "H.264", "Main", "0.5", "AAC", "128"],
35: ["flv", "480p", "H.264", "Main", "0.8-1", "AAC", "128"],
#3GP
36: ["3gp", "240p", "MPEG-4 Visual", "Simple", "0.17", "AAC", "38"],
13: ["3gp", "N/A", "MPEG-4 Visual", "N/A", "0.5", "AAC", "N/A"],
17: ["3gp", "144p", "MPEG-4 Visual", "Simple", "0.05", "AAC", "24"],
#MPEG-4
18: ["mp4", "360p", "H.264", "Baseline", "0.5", "AAC", "96"],
22: ["mp4", "720p", "H.264", "High", "2-2.9", "AAC", "192"],
37: ["mp4", "1080p", "H.264", "High", "3-4.3", "AAC", "192"],
38: ["mp4", "3072p", "H.264", "High", "3.5-5", "AAC", "192"],
82: ["mp4", "360p", "H.264", "3D", "0.5", "AAC", "96"],
83: ["mp4", "240p", "H.264", "3D", "0.5", "AAC", "96"],
84: ["mp4", "720p", "H.264", "3D", "2-2.9", "AAC", "152"],
85: ["mp4", "520p", "H.264", "3D", "2-2.9", "AAC", "152"],
#WebM
43: ["webm", "360p", "VP8", "N/A", "0.5", "Vorbis", "128"],
44: ["webm", "480p", "VP8", "N/A", "1", "Vorbis", "128"],
45: ["webm", "720p", "VP8", "N/A", "2", "Vorbis", "192"],
46: ["webm", "1080p", "VP8", "N/A", "N/A", "Vorbis", "192"],
100: ["webm", "360p", "VP8", "3D", "N/A", "Vorbis", "128"],
101: ["webm", "360p", "VP8", "3D", "N/A", "Vorbis", "192"],
102: ["webm", "720p", "VP8", "3D", "N/A", "Vorbis", "192"]
}
# The keys corresponding to the quality/codec map above.
YT_ENCODING_KEYS = (
'extension', 'resolution', 'video_codec', 'profile', 'video_bitrate',
'audio_codec', 'audio_bitrate'
)
class YouTube(object):
_filename = None
_fmt_values = []
_video_url = None
title = None
videos = []
# fmt was an undocumented URL parameter that allowed selecting
# YouTube quality mode without using player user interface.
@property
def url(self):
"""Exposes the video url."""
return self._video_url
@url.setter
def url(self, url):
""" Defines the URL of the YouTube video."""
self._video_url = url
#Reset the filename.
self._filename = None
#Get the video details.
self._get_video_info()
@property
def filename(self):
"""
Exposes the title of the video. If this is not set, one is
generated based on the name of the video.
"""
if not self._filename:
self._filename = safe_filename(self.title)
return self._filename
@filename.setter
def filename(self, filename):
""" Defines the filename."""
self._filename = filename
if self.videos:
for video in self.videos:
video.filename = filename
@property
def video_id(self):
"""Gets the video ID extracted from the URL."""
parts = urlparse(self._video_url)
qs = getattr(parts, 'query', None)
if qs:
video_id = parse_qs(qs).get('v', None)
if video_id:
return video_id.pop()
def get(self, extension=None, resolution=None):
"""
        Return a single video given an extension and resolution.
        Keyword arguments:
        extension -- The desired file extension (e.g.: mp4).
        resolution -- The desired video broadcasting standard.
"""
result = []
for v in self.videos:
if extension and v.extension != extension:
continue
elif resolution and v.resolution != resolution:
continue
else:
result.append(v)
        if not result:
            return
        elif len(result) == 1:
            return result[0]
else:
d = len(result)
raise MultipleObjectsReturned("get() returned more than one "
"object -- it returned {}!".format(d))
def filter(self, extension=None, resolution=None):
"""
        Return a filtered list of videos given extension and
        resolution criteria.
        Keyword arguments:
        extension -- The desired file extension (e.g.: mp4).
resolution -- The desired video broadcasting standard.
"""
results = []
for v in self.videos:
if extension and v.extension != extension:
continue
elif resolution and v.resolution != resolution:
continue
else:
results.append(v)
return results
def _fetch(self, path, data):
"""
Given a path, traverse the response for the desired data. (A
modified ver. of my dictionary traverse method:
https://gist.github.com/2009119)
Keyword arguments:
path -- A tuple representing a path to a node within a tree.
data -- The data containing the tree.
"""
elem = path[0]
#Get first element in tuple, and check if it contains a list.
if type(data) is list:
# Pop it, and let's continue..
return self._fetch(path, data.pop())
#Parse the url encoded data
data = parse_qs(data)
#Get the element in our path
data = data.get(elem, None)
#Offset the tuple by 1.
path = path[1::1]
#Check if the path has reached the end OR the element return
#nothing.
        if len(path) == 0 or data is None:
            if type(data) is list and len(data) == 1:
                data = data.pop()
return data
else:
# Nope, let's keep diggin'
return self._fetch(path, data)
def _parse_stream_map(self, data):
"""
Python's `parse_qs` can't properly decode the stream map
containing video data so we use this instead.
Keyword arguments:
data -- The parsed response from YouTube.
"""
videoinfo = {
"itag": [],
"url": [],
"quality": [],
"fallback_host": [],
"sig": [],
"type": []
}
text = data["url_encoded_fmt_stream_map"][0]
# Split individual videos
videos = text.split(",")
# Unquote the characters and split to parameters
videos = [video.split("&") for video in videos]
for video in videos:
for kv in video:
key, value = kv.split("=")
videoinfo.get(key, []).append(unquote(value))
return videoinfo
def _get_video_info(self):
"""
        This is responsible for executing the request, extracting the
necessary details, and populating the different video
resolutions and formats into a list.
"""
querystring = urlencode({'asv': 3, 'el': 'detailpage', 'hl': 'en_US',
'video_id': self.video_id})
self.title = None
self.videos = []
response = urlopen(YT_BASE_URL + '?' + querystring)
if response:
content = response.read().decode()
data = parse_qs(content)
if 'errorcode' in data:
error = data.get('reason', 'An unknown error has occurred')
if isinstance(error, list):
error = error.pop()
raise YouTubeError(error)
stream_map = self._parse_stream_map(data)
video_urls = stream_map["url"]
#Apparently signatures are not needed as of 2014-02-28
self.title = self._fetch(('title',), content)
for idx in range(len(video_urls)):
url = video_urls[idx]
try:
fmt, data = self._extract_fmt(url)
except (TypeError, KeyError):
pass
else:
v = Video(url, self.filename, **data)
self.videos.append(v)
self._fmt_values.append(fmt)
self.videos.sort()
def _extract_fmt(self, text):
"""
        YouTube does not pass you a completely valid URL-encoded form;
        I suspect this is supposed to act as a deterrent. Nothing some
        regular expressions couldn't handle.
Keyword arguments:
text -- The malformed data contained within each url node.
"""
        itag = re.findall(r'itag=(\d+)', text)
        if itag and len(itag) == 1:
itag = int(itag[0])
attr = YT_ENCODING.get(itag, None)
if not attr:
return itag, None
        data = dict(zip(YT_ENCODING_KEYS, attr))
return itag, data
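# Editor's note: a hypothetical usage sketch, not part of pytube. The URL is a
# placeholder; get() returns None when nothing matches and raises
# MultipleObjectsReturned when the criteria are ambiguous.
def _example_usage():
    yt = YouTube()
    yt.url = 'http://www.youtube.com/watch?v=XXXXXXXXXXX'
    print([v.resolution for v in yt.filter(extension='mp4')])
    return yt.get(extension='mp4', resolution='720p')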
|
dandygithub/kodi
|
addons/plugin.video.online-life.cc/lib/pytube/api.py
|
Python
|
gpl-3.0
| 9,210
|