code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
from collective.grok import gs
from eappi.map import MessageFactory as _
from plone.registry.interfaces import IRegistry
from zope.component import getUtility
from eappi.map.interfaces import IEappiSettings
@gs.importstep(
    name=u'eappi.map',
    title=_('eappi.map import handler'),
    description=_(''))
def setupVarious(context):
    """GenericSetup import step for the eappi.map profile.

    Registers the IEappiSettings interface with plone.registry so its
    fields become configurable registry records.
    """
    # The marker file distinguishes this product's profile from any other
    # profile being imported; without it this step is a no-op.
    if context.readDataFile('eappi.map.marker.txt') is None:
        return
    portal = context.getSite()  # NOTE(review): unused here; kept for future handlers
    registry = getUtility(IRegistry)
    # Create registry records for every field declared on IEappiSettings.
    registry.registerInterface(IEappiSettings)
    # do anything here
| oikoumene/eappi.map | eappi/map/setuphandlers.py | Python | lgpl-3.0 | 554 |
# Copyright (c) 2018 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from PyQt5.QtCore import Qt, pyqtSignal, pyqtProperty, QTimer
from typing import Iterable, TYPE_CHECKING
from UM.i18n import i18nCatalog
from UM.Qt.ListModel import ListModel
from UM.Application import Application
import UM.FlameProfiler
if TYPE_CHECKING:
from cura.Settings.ExtruderStack import ExtruderStack # To listen to changes on the extruders.
catalog = i18nCatalog("cura")
## Model that holds extruders.
#
#   This model is designed for use by any list of extruders, but specifically
#   intended for drop-down lists of the current machine's extruders in place of
#   settings.
class ExtrudersModel(ListModel):
    # The ID of the container stack for the extruder.
    IdRole = Qt.UserRole + 1

    ## Human-readable name of the extruder.
    NameRole = Qt.UserRole + 2

    ## Colour of the material loaded in the extruder.
    ColorRole = Qt.UserRole + 3

    ## Index of the extruder, which is also the value of the setting itself.
    #
    #   An index of 0 indicates the first extruder, an index of 1 the second
    #   one, and so on. This is the value that will be saved in instance
    #   containers.
    IndexRole = Qt.UserRole + 4

    # The ID of the definition of the extruder.
    DefinitionRole = Qt.UserRole + 5

    # The material of the extruder.
    MaterialRole = Qt.UserRole + 6

    # The variant of the extruder.
    VariantRole = Qt.UserRole + 7
    StackRole = Qt.UserRole + 8

    MaterialBrandRole = Qt.UserRole + 9
    ColorNameRole = Qt.UserRole + 10

    ## Is the extruder enabled?
    EnabledRole = Qt.UserRole + 11

    ## List of colours to display if there is no material or the material has no known
    #   colour.
    defaultColors = ["#ffc924", "#86ec21", "#22eeee", "#245bff", "#9124ff", "#ff24c8"]

    ## Initialises the extruders model, defining the roles and listening for
    #   changes in the data.
    #
    #   \param parent Parent QtObject of this list.
    def __init__(self, parent = None):
        super().__init__(parent)

        self.addRoleName(self.IdRole, "id")
        self.addRoleName(self.NameRole, "name")
        self.addRoleName(self.EnabledRole, "enabled")
        self.addRoleName(self.ColorRole, "color")
        self.addRoleName(self.IndexRole, "index")
        self.addRoleName(self.DefinitionRole, "definition")
        self.addRoleName(self.MaterialRole, "material")
        self.addRoleName(self.VariantRole, "variant")
        self.addRoleName(self.StackRole, "stack")
        self.addRoleName(self.MaterialBrandRole, "material_brand")
        self.addRoleName(self.ColorNameRole, "color_name")

        # Debounce timer: many change signals may arrive in a burst, so the
        # actual model rebuild is deferred and coalesced into one update.
        self._update_extruder_timer = QTimer()
        self._update_extruder_timer.setInterval(100)
        self._update_extruder_timer.setSingleShot(True)
        self._update_extruder_timer.timeout.connect(self.__updateExtruders)

        self._active_machine_extruders = []  # type: Iterable[ExtruderStack]
        self._add_optional_extruder = False

        # Listen to changes
        Application.getInstance().globalContainerStackChanged.connect(self._extrudersChanged)  # When the machine is swapped we must update the active machine extruders
        Application.getInstance().getExtruderManager().extrudersChanged.connect(self._extrudersChanged)  # When the extruders change we must link to the stack-changed signal of the new extruder
        Application.getInstance().getContainerRegistry().containerMetaDataChanged.connect(self._onExtruderStackContainersChanged)  # When meta data from a material container changes we must update
        self._extrudersChanged()  # Also calls _updateExtruders

    addOptionalExtruderChanged = pyqtSignal()

    ## Setter for the "Not overridden" pseudo-extruder entry (see
    #   __updateExtruders); triggers a model rebuild when toggled.
    def setAddOptionalExtruder(self, add_optional_extruder):
        if add_optional_extruder != self._add_optional_extruder:
            self._add_optional_extruder = add_optional_extruder
            self.addOptionalExtruderChanged.emit()
            self._updateExtruders()

    @pyqtProperty(bool, fset = setAddOptionalExtruder, notify = addOptionalExtruderChanged)
    def addOptionalExtruder(self):
        return self._add_optional_extruder

    ## Links to the stack-changed signal of the new extruders when an extruder
    #   is swapped out or added in the current machine.
    #
    #   \param machine_id The machine for which the extruders changed. This is
    #   filled by the ExtruderManager.extrudersChanged signal when coming from
    #   that signal. Application.globalContainerStackChanged doesn't fill this
    #   signal; it's assumed to be the current printer in that case.
    def _extrudersChanged(self, machine_id = None):
        machine_manager = Application.getInstance().getMachineManager()
        if machine_id is not None:
            if machine_manager.activeMachine is None:
                # No machine, don't need to update the current machine's extruders
                return
            if machine_id != machine_manager.activeMachine.getId():
                # Not the current machine
                return

        # Unlink from old extruders
        for extruder in self._active_machine_extruders:
            extruder.containersChanged.disconnect(self._onExtruderStackContainersChanged)
            extruder.enabledChanged.disconnect(self._updateExtruders)

        # Link to new extruders
        self._active_machine_extruders = []
        extruder_manager = Application.getInstance().getExtruderManager()
        for extruder in extruder_manager.getActiveExtruderStacks():
            if extruder is None:  # This extruder wasn't loaded yet. This happens asynchronously while this model is constructed from QML.
                continue
            extruder.containersChanged.connect(self._onExtruderStackContainersChanged)
            extruder.enabledChanged.connect(self._updateExtruders)
            self._active_machine_extruders.append(extruder)

        self._updateExtruders()  # Since the new extruders may have different properties, update our own model.

    def _onExtruderStackContainersChanged(self, container):
        # Update when there is an empty container or material or variant change
        if container.getMetaDataEntry("type") in ["material", "variant", None]:
            # The ExtrudersModel needs to be updated when the material-name or -color changes, because the user identifies extruders by material-name
            self._updateExtruders()

    modelChanged = pyqtSignal()

    def _updateExtruders(self):
        # (Re)start the debounce timer; the real work happens in
        # __updateExtruders once the timer fires.
        self._update_extruder_timer.start()

    ## Update the list of extruders.
    #
    #   This should be called whenever the list of extruders changes.
    @UM.FlameProfiler.profile
    def __updateExtruders(self):
        extruders_changed = False

        if self.count != 0:
            extruders_changed = True

        items = []

        global_container_stack = Application.getInstance().getGlobalContainerStack()
        if global_container_stack:

            # get machine extruder count for verification
            machine_extruder_count = global_container_stack.getProperty("machine_extruder_count", "value")

            for extruder in Application.getInstance().getExtruderManager().getActiveExtruderStacks():
                position = extruder.getMetaDataEntry("position", default = "0")
                try:
                    position = int(position)
                except ValueError:
                    # Not a proper int.
                    position = -1
                if position >= machine_extruder_count:
                    continue

                default_color = self.defaultColors[position] if 0 <= position < len(self.defaultColors) else self.defaultColors[0]
                color = extruder.material.getMetaDataEntry("color_code", default = default_color) if extruder.material else default_color
                # FIX: extruder.material can still be unset while containers load
                # asynchronously (the color lookup above already guards for it),
                # so guard the brand and color-name lookups too instead of
                # crashing with an AttributeError on None.
                material_brand = extruder.material.getMetaDataEntry("brand", default = "generic") if extruder.material else ""
                color_name = extruder.material.getMetaDataEntry("color_name") if extruder.material else ""
                # construct an item with only the relevant information
                item = {
                    "id": extruder.getId(),
                    "name": extruder.getName(),
                    "enabled": extruder.isEnabled,
                    "color": color,
                    "index": position,
                    "definition": extruder.getBottom().getId(),
                    "material": extruder.material.getName() if extruder.material else "",
                    "variant": extruder.variant.getName() if extruder.variant else "",  # e.g. print core
                    "stack": extruder,
                    "material_brand": material_brand,
                    "color_name": color_name
                }

                items.append(item)
                extruders_changed = True

        if extruders_changed:
            # sort by extruder index
            items.sort(key = lambda i: i["index"])

            # We need optional extruder to be last, so add it after we do sorting.
            # This way we can simply interpret the -1 of the index as the last item (which it now always is)
            if self._add_optional_extruder:
                item = {
                    "id": "",
                    "name": catalog.i18nc("@menuitem", "Not overridden"),
                    "enabled": True,
                    "color": "#ffffff",
                    "index": -1,
                    "definition": "",
                    "material": "",
                    "variant": "",
                    "stack": None,
                    "material_brand": "",
                    "color_name": "",
                }
                items.append(item)
            if self._items != items:
                self.setItems(items)
                self.modelChanged.emit()
| Patola/Cura | cura/Machines/Models/ExtrudersModel.py | Python | lgpl-3.0 | 9,841 |
import pytest
def test_terms():
    from pycldf.terms import TERMS

    # Property lookup and CLDF URI detection.
    assert 'alignment' in TERMS.properties
    assert not TERMS.is_cldf_uri('http://example.org')
    assert TERMS.is_cldf_uri('http://cldf.clld.org/v1.0/terms.rdf#source')

    # Properties and classes partition the full term set; modules and
    # components partition the classes.
    assert len(TERMS) == len(TERMS.properties) + len(TERMS.classes)
    assert len(TERMS.classes) == len(TERMS.modules) + len(TERMS.components)
    assert 'LanguageTable' in TERMS.components
    assert 'LanguageTable' not in TERMS.modules
    assert 'Wordlist' in TERMS.modules

    # Individual term metadata.
    term = TERMS['id']
    assert term.version == 'v1.0'
    assert '<p>' in term.comment()
    assert '</a>' in TERMS['Wordlist'].comment(one_line=True)
    assert TERMS['languageReference'].cardinality is None
def test_invalid_uri():
    from pycldf.terms import TERMS

    # An unknown term in the CLDF namespace must both warn and raise.
    with pytest.warns(UserWarning), pytest.raises(ValueError):
        TERMS.is_cldf_uri('http://cldf.clld.org/unknown')
def test_cltsReference():
    from pycldf.terms import TERMS

    column = TERMS['cltsReference'].to_column()
    # Valid CLTS identifiers are accepted ...
    assert column.datatype.read('NA')
    assert column.datatype.read('rounded_open-mid_central_vowel')
    # ... while a miscased value is rejected.
    with pytest.raises(ValueError):
        column.datatype.read('Na')
| glottobank/pycldf | tests/test_terms.py | Python | apache-2.0 | 1,201 |
# -*-coding:Utf-8 -*
# Copyright (c) 2013 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant l'ordre Ramer."""
from secondaires.navigation.constantes import VIT_RAMES
from secondaires.navigation.equipage.signaux import *
from ..ordre import *
class Ramer(Ordre):

    """Order: row (ramer).

    This order asks the sailor holding the specified oars to row at a
    given speed.  It can also be used to ask them to stop rowing (but
    not to release the oars).

    """

    cle = "ramer"
    # The sailor must currently be in the "holding oars" state.
    etats_autorises = ("tenir_rames", )

    def __init__(self, matelot, navire, rames=None, vitesse=""):
        # rames: the oars object to operate.
        # vitesse: a key of VIT_RAMES, or one of the steering
        # pseudo-speeds "centre" / "gauche" / "droite".
        Ordre.__init__(self, matelot, navire, rames, vitesse)
        self.rames = rames
        self.vitesse = vitesse

    def executer(self):
        """Execute the order: row (generator yielding signals/delays)."""
        matelot = self.matelot
        personnage = matelot.personnage
        salle = personnage.salle
        rames = self.rames
        vitesse = self.vitesse
        # Accepted values: the rowing speeds plus the steering commands.
        vitesses = tuple(VIT_RAMES.keys()) + ("centre", "gauche", "droite")
        if salle is not rames.parent:
            yield SignalAbandonne("Je ne suis pas dans la salle des rames.")
        elif vitesse not in vitesses:
            yield SignalAbandonne("Je ne connais pas la vitesse {}.".format(
                    vitesse))

        if rames.tenu is not personnage:
            yield SignalAbandonne("Je ne tiens pas ces rames.")
        else:
            # Steering commands adjust the oars' orientation; anything
            # else is a plain rowing-speed change.
            if vitesse == "centre":
                rames.centrer()
            elif vitesse == "droite":
                rames.virer_tribord()
            elif vitesse == "gauche":
                rames.virer_babord()
            else:
                rames.changer_vitesse(vitesse)

            # Keep rowing until too tired; each yield pauses 3 seconds
            # so the scheduler can interleave other events.
            while personnage.stats.endurance > 20:
                yield 3

            rames.relacher()
            yield SignalRelais("Je suis trop fatigué.")
| stormi/tsunami | src/secondaires/navigation/equipage/ordres/ramer.py | Python | bsd-3-clause | 3,377 |
'''
datetime.tzinfo timezone definitions generated from the
Olson timezone database:
ftp://elsie.nci.nih.gov/pub/tz*.tar.gz
See the datetime section of the Python Library Reference for information
on how to use these modules.
'''

# The Olson database is updated several times a year.
OLSON_VERSION = '2015b'
VERSION = '2015.2'  # Switching to pip compatible version numbering.
__version__ = VERSION

OLSEN_VERSION = OLSON_VERSION  # Old releases had this misspelling

# Public API of the package.
__all__ = [
    'timezone', 'utc', 'country_timezones', 'country_names',
    'AmbiguousTimeError', 'InvalidTimeError',
    'NonExistentTimeError', 'UnknownTimeZoneError',
    'all_timezones', 'all_timezones_set',
    'common_timezones', 'common_timezones_set',
    ]
import sys, datetime, os.path, gettext
try:
from pkg_resources import resource_stream
except ImportError:
resource_stream = None
from pytz.exceptions import AmbiguousTimeError
from pytz.exceptions import InvalidTimeError
from pytz.exceptions import NonExistentTimeError
from pytz.exceptions import UnknownTimeZoneError
from pytz.lazy import LazyDict, LazyList, LazySet
from pytz.tzinfo import unpickler
from pytz.tzfile import build_tzinfo, _byte_string
# Feature-detect Python 2 vs 3: on Python 3 the builtin unicode() is gone,
# so alias it to str and define an ascii() helper accordingly.
try:
    unicode

except NameError:  # Python 3.x
    # Python 3.x doesn't have unicode(), making writing code
    # for Python 2.3 and Python 3.x a pain.
    unicode = str

    def ascii(s):
        r"""
        >>> ascii('Hello')
        'Hello'
        >>> ascii('\N{TRADE MARK SIGN}') #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        UnicodeEncodeError: ...
        """
        s.encode('US-ASCII')  # Raise an exception if not ASCII
        return s  # But return the original string - not a byte string.

else:  # Python 2.x

    def ascii(s):
        r"""
        >>> ascii('Hello')
        'Hello'
        >>> ascii(u'Hello')
        'Hello'
        >>> ascii(u'\N{TRADE MARK SIGN}') #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        UnicodeEncodeError: ...
        """
        return s.encode('US-ASCII')
def open_resource(name):
    """Open a resource from the zoneinfo subdir for reading.

    Uses the pkg_resources module if available and no standard file
    found at the calculated location.
    """
    segments = name.lstrip('/').split('/')
    # Reject path traversal and embedded separators in each component.
    for segment in segments:
        if segment == os.path.pardir or os.path.sep in segment:
            raise ValueError('Bad path segment: %r' % segment)
    filename = os.path.join(
        os.path.dirname(__file__), 'zoneinfo', *segments)
    if resource_stream is not None and not os.path.exists(filename):
        # http://bugs.launchpad.net/bugs/383171 - we avoid using this
        # unless absolutely necessary to help when a broken version of
        # pkg_resources is installed.
        return resource_stream(__name__, 'zoneinfo/' + name)
    return open(filename, 'rb')
def resource_exists(name):
    """Return true if the given resource exists"""
    try:
        # Opening (and immediately closing) is the authoritative check,
        # since the resource may live in a package rather than on disk.
        open_resource(name).close()
    except IOError:
        return False
    return True
# Enable this when we get some translations?
# We want an i18n API that is useful to programs using Python's gettext
# module, as well as the Zope3 i18n package. Perhaps we should just provide
# the POT file and translations, and leave it up to callers to make use
# of them.
#
# t = gettext.translation(
# 'pytz', os.path.join(os.path.dirname(__file__), 'locales'),
# fallback=True
# )
# def _(timezone_name):
# """Translate a timezone name using the current locale, returning Unicode"""
# return t.ugettext(timezone_name)
# Cache of already-built tzinfo instances, keyed by zone name.
_tzinfo_cache = {}

def timezone(zone):
    r''' Return a datetime.tzinfo implementation for the given timezone

    >>> from datetime import datetime, timedelta
    >>> utc = timezone('UTC')
    >>> eastern = timezone('US/Eastern')
    >>> eastern.zone
    'US/Eastern'
    >>> timezone(unicode('US/Eastern')) is eastern
    True
    >>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
    >>> loc_dt = utc_dt.astimezone(eastern)
    >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
    >>> loc_dt.strftime(fmt)
    '2002-10-27 01:00:00 EST (-0500)'
    >>> (loc_dt - timedelta(minutes=10)).strftime(fmt)
    '2002-10-27 00:50:00 EST (-0500)'
    >>> eastern.normalize(loc_dt - timedelta(minutes=10)).strftime(fmt)
    '2002-10-27 01:50:00 EDT (-0400)'
    >>> (loc_dt + timedelta(minutes=10)).strftime(fmt)
    '2002-10-27 01:10:00 EST (-0500)'

    Raises UnknownTimeZoneError if passed an unknown zone.

    >>> try:
    ...     timezone('Asia/Shangri-La')
    ... except UnknownTimeZoneError:
    ...     print('Unknown')
    Unknown

    >>> try:
    ...     timezone(unicode('\N{TRADE MARK SIGN}'))
    ... except UnknownTimeZoneError:
    ...     print('Unknown')
    Unknown

    '''
    # UTC (in any case) is special-cased to the module singleton.
    if zone.upper() == 'UTC':
        return utc

    try:
        zone = ascii(zone)
    except UnicodeEncodeError:
        # All valid timezones are ASCII
        raise UnknownTimeZoneError(zone)

    zone = _unmunge_zone(zone)
    # Build the tzinfo from the zoneinfo data file on first use, then cache.
    if zone not in _tzinfo_cache:
        if zone in all_timezones_set:
            fp = open_resource(zone)
            try:
                _tzinfo_cache[zone] = build_tzinfo(zone, fp)
            finally:
                fp.close()
        else:
            raise UnknownTimeZoneError(zone)

    return _tzinfo_cache[zone]
def _unmunge_zone(zone):
"""Undo the time zone name munging done by older versions of pytz."""
return zone.replace('_plus_', '+').replace('_minus_', '-')
# Commonly used timedelta constants.
ZERO = datetime.timedelta(0)
HOUR = datetime.timedelta(hours=1)
class UTC(datetime.tzinfo):
    """UTC

    Optimized UTC implementation. It unpickles using the single module global
    instance defined beneath this class declaration.
    """
    zone = "UTC"

    # Fixed values shared by all instances; UTC never observes DST.
    _utcoffset = ZERO
    _dst = ZERO
    _tzname = zone

    def fromutc(self, dt):
        # Accept naive datetimes by attaching this tzinfo first.
        if dt.tzinfo is None:
            return self.localize(dt)
        return super(utc.__class__, self).fromutc(dt)

    def utcoffset(self, dt):
        return ZERO

    def tzname(self, dt):
        return "UTC"

    def dst(self, dt):
        return ZERO

    def __reduce__(self):
        # Pickle to the module-level factory so unpickling yields the
        # single shared instance.
        return _UTC, ()

    def localize(self, dt, is_dst=False):
        '''Convert naive time to local time'''
        if dt.tzinfo is not None:
            raise ValueError('Not naive datetime (tzinfo is already set)')
        return dt.replace(tzinfo=self)

    def normalize(self, dt, is_dst=False):
        '''Correct the timezone information on the given datetime'''
        if dt.tzinfo is self:
            return dt
        if dt.tzinfo is None:
            raise ValueError('Naive time - no tzinfo set')
        return dt.astimezone(self)

    def __repr__(self):
        return "<UTC>"

    def __str__(self):
        return "UTC"

UTC = utc = UTC()  # UTC is a singleton
def _UTC():
    """Factory function for utc unpickling.

    Makes sure that unpickling a utc instance always returns the same
    module global.

    These examples belong in the UTC class above, but it is obscured; or in
    the README.txt, but we are not depending on Python 2.4 so integrating
    the README.txt examples with the unit tests is not trivial.

    >>> import datetime, pickle
    >>> dt = datetime.datetime(2005, 3, 1, 14, 13, 21, tzinfo=utc)
    >>> naive = dt.replace(tzinfo=None)
    >>> p = pickle.dumps(dt, 1)
    >>> naive_p = pickle.dumps(naive, 1)
    >>> len(p) - len(naive_p)
    17
    >>> new = pickle.loads(p)
    >>> new == dt
    True
    >>> new is dt
    False
    >>> new.tzinfo is dt.tzinfo
    True
    >>> utc is UTC is timezone('UTC')
    True
    >>> utc is timezone('GMT')
    False
    """
    return utc

# Required by older pickle protocols to allow this callable on unpickling.
_UTC.__safe_for_unpickling__ = True
def _p(*args):
    """Factory function for unpickling pytz tzinfo instances.

    Just a wrapper around tzinfo.unpickler to save a few bytes in each pickle
    by shortening the path.
    """
    return unpickler(*args)

# Required by older pickle protocols to allow this callable on unpickling.
_p.__safe_for_unpickling__ = True
class _CountryTimezoneDict(LazyDict):
    """Map ISO 3166 country code to a list of timezone names commonly used
    in that country.

    iso3166_code is the two letter code used to identify the country.

    >>> def print_list(list_of_strings):
    ...     'We use a helper so doctests work under Python 2.3 -> 3.x'
    ...     for s in list_of_strings:
    ...         print(s)

    >>> print_list(country_timezones['nz'])
    Pacific/Auckland
    Pacific/Chatham
    >>> print_list(country_timezones['ch'])
    Europe/Zurich
    >>> print_list(country_timezones['CH'])
    Europe/Zurich
    >>> print_list(country_timezones[unicode('ch')])
    Europe/Zurich
    >>> print_list(country_timezones['XXX'])
    Traceback (most recent call last):
        ...
    KeyError: 'XXX'

    Previously, this information was exposed as a function rather than a
    dictionary. This is still supported::

    >>> print_list(country_timezones('nz'))
    Pacific/Auckland
    Pacific/Chatham
    """
    def __call__(self, iso3166_code):
        """Backwards compatibility."""
        return self[iso3166_code]

    def _fill(self):
        # Parse zone.tab lazily, on first dictionary access.
        data = {}
        zone_tab = open_resource('zone.tab')
        try:
            for line in zone_tab:
                line = line.decode('US-ASCII')
                if line.startswith('#'):
                    continue
                # Fields: country code, coordinates, zone name[, comment].
                # The coordinates field is parsed but not used.
                code, coordinates, zone = line.split(None, 4)[:3]
                if zone not in all_timezones_set:
                    continue
                try:
                    data[code].append(zone)
                except KeyError:
                    data[code] = [zone]
            self.data = data
        finally:
            zone_tab.close()

country_timezones = _CountryTimezoneDict()
class _CountryNameDict(LazyDict):
    '''Dictionary providing ISO3166 code -> English name.

    >>> print(country_names['au'])
    Australia
    '''
    def _fill(self):
        # Parse iso3166.tab lazily, on first dictionary access.
        mapping = {}
        table = open_resource('iso3166.tab')
        try:
            for raw_line in table.readlines():
                raw_line = raw_line.decode('US-ASCII')
                if raw_line.startswith('#'):
                    # Skip comment lines.
                    continue
                code, name = raw_line.split(None, 1)
                mapping[code] = name.strip()
            self.data = mapping
        finally:
            table.close()

country_names = _CountryNameDict()
# Time-zone info based solely on fixed offsets

class _FixedOffset(datetime.tzinfo):

    zone = None  # to match the standard pytz API

    def __init__(self, minutes):
        # Offsets of a full day or more are invalid per datetime's contract.
        if abs(minutes) >= 1440:
            raise ValueError("absolute offset is too large", minutes)
        self._minutes = minutes
        self._offset = datetime.timedelta(minutes=minutes)

    def utcoffset(self, dt):
        return self._offset

    def __reduce__(self):
        # Pickle via the FixedOffset factory so unpickling reuses the
        # cached instance for this offset.
        return FixedOffset, (self._minutes, )

    def dst(self, dt):
        return ZERO

    def tzname(self, dt):
        return None

    def __repr__(self):
        return 'pytz.FixedOffset(%d)' % self._minutes

    def localize(self, dt, is_dst=False):
        '''Convert naive time to local time'''
        if dt.tzinfo is not None:
            raise ValueError('Not naive datetime (tzinfo is already set)')
        return dt.replace(tzinfo=self)

    def normalize(self, dt, is_dst=False):
        '''Correct the timezone information on the given datetime'''
        if dt.tzinfo is None:
            raise ValueError('Naive time - no tzinfo set')
        return dt.replace(tzinfo=self)
def FixedOffset(offset, _tzinfos = {}):
    """return a fixed-offset timezone based off a number of minutes.

        >>> one = FixedOffset(-330)
        >>> one
        pytz.FixedOffset(-330)
        >>> one.utcoffset(datetime.datetime.now())
        datetime.timedelta(-1, 66600)
        >>> one.dst(datetime.datetime.now())
        datetime.timedelta(0)

        >>> two = FixedOffset(1380)
        >>> two
        pytz.FixedOffset(1380)
        >>> two.utcoffset(datetime.datetime.now())
        datetime.timedelta(0, 82800)
        >>> two.dst(datetime.datetime.now())
        datetime.timedelta(0)

    The datetime.timedelta must be between the range of -1 and 1 day,
    non-inclusive.

        >>> FixedOffset(1440)
        Traceback (most recent call last):
            ...
        ValueError: ('absolute offset is too large', 1440)

        >>> FixedOffset(-1440)
        Traceback (most recent call last):
            ...
        ValueError: ('absolute offset is too large', -1440)

    An offset of 0 is special-cased to return UTC.

        >>> FixedOffset(0) is UTC
        True

    There should always be only one instance of a FixedOffset per timedelta.
    This should be true for multiple creation calls.

        >>> FixedOffset(-330) is one
        True
        >>> FixedOffset(1380) is two
        True

    It should also be true for pickling.

        >>> import pickle
        >>> pickle.loads(pickle.dumps(one)) is one
        True
        >>> pickle.loads(pickle.dumps(two)) is two
        True
    """
    if offset == 0:
        return UTC

    # NOTE: the mutable default argument is deliberate here - it acts as a
    # process-wide cache so each offset maps to exactly one instance.
    info = _tzinfos.get(offset)
    if info is None:
        # We haven't seen this one before. we need to save it.

        # Use setdefault to avoid a race condition and make sure we have
        # only one
        info = _tzinfos.setdefault(offset, _FixedOffset(offset))

    return info

# Required by older pickle protocols to allow this callable on unpickling.
FixedOffset.__safe_for_unpickling__ = True
def _test():
    """Run this module's doctests and return the doctest results."""
    import doctest, os, sys
    # Ensure the package is importable when run from inside the package dir.
    sys.path.insert(0, os.pardir)
    import pytz
    return doctest.testmod(pytz)

if __name__ == '__main__':
    _test()
all_timezones = \
['Africa/Abidjan',
'Africa/Accra',
'Africa/Addis_Ababa',
'Africa/Algiers',
'Africa/Asmara',
'Africa/Asmera',
'Africa/Bamako',
'Africa/Bangui',
'Africa/Banjul',
'Africa/Bissau',
'Africa/Blantyre',
'Africa/Brazzaville',
'Africa/Bujumbura',
'Africa/Cairo',
'Africa/Casablanca',
'Africa/Ceuta',
'Africa/Conakry',
'Africa/Dakar',
'Africa/Dar_es_Salaam',
'Africa/Djibouti',
'Africa/Douala',
'Africa/El_Aaiun',
'Africa/Freetown',
'Africa/Gaborone',
'Africa/Harare',
'Africa/Johannesburg',
'Africa/Juba',
'Africa/Kampala',
'Africa/Khartoum',
'Africa/Kigali',
'Africa/Kinshasa',
'Africa/Lagos',
'Africa/Libreville',
'Africa/Lome',
'Africa/Luanda',
'Africa/Lubumbashi',
'Africa/Lusaka',
'Africa/Malabo',
'Africa/Maputo',
'Africa/Maseru',
'Africa/Mbabane',
'Africa/Mogadishu',
'Africa/Monrovia',
'Africa/Nairobi',
'Africa/Ndjamena',
'Africa/Niamey',
'Africa/Nouakchott',
'Africa/Ouagadougou',
'Africa/Porto-Novo',
'Africa/Sao_Tome',
'Africa/Timbuktu',
'Africa/Tripoli',
'Africa/Tunis',
'Africa/Windhoek',
'America/Adak',
'America/Anchorage',
'America/Anguilla',
'America/Antigua',
'America/Araguaina',
'America/Argentina/Buenos_Aires',
'America/Argentina/Catamarca',
'America/Argentina/ComodRivadavia',
'America/Argentina/Cordoba',
'America/Argentina/Jujuy',
'America/Argentina/La_Rioja',
'America/Argentina/Mendoza',
'America/Argentina/Rio_Gallegos',
'America/Argentina/Salta',
'America/Argentina/San_Juan',
'America/Argentina/San_Luis',
'America/Argentina/Tucuman',
'America/Argentina/Ushuaia',
'America/Aruba',
'America/Asuncion',
'America/Atikokan',
'America/Atka',
'America/Bahia',
'America/Bahia_Banderas',
'America/Barbados',
'America/Belem',
'America/Belize',
'America/Blanc-Sablon',
'America/Boa_Vista',
'America/Bogota',
'America/Boise',
'America/Buenos_Aires',
'America/Cambridge_Bay',
'America/Campo_Grande',
'America/Cancun',
'America/Caracas',
'America/Catamarca',
'America/Cayenne',
'America/Cayman',
'America/Chicago',
'America/Chihuahua',
'America/Coral_Harbour',
'America/Cordoba',
'America/Costa_Rica',
'America/Creston',
'America/Cuiaba',
'America/Curacao',
'America/Danmarkshavn',
'America/Dawson',
'America/Dawson_Creek',
'America/Denver',
'America/Detroit',
'America/Dominica',
'America/Edmonton',
'America/Eirunepe',
'America/El_Salvador',
'America/Ensenada',
'America/Fort_Wayne',
'America/Fortaleza',
'America/Glace_Bay',
'America/Godthab',
'America/Goose_Bay',
'America/Grand_Turk',
'America/Grenada',
'America/Guadeloupe',
'America/Guatemala',
'America/Guayaquil',
'America/Guyana',
'America/Halifax',
'America/Havana',
'America/Hermosillo',
'America/Indiana/Indianapolis',
'America/Indiana/Knox',
'America/Indiana/Marengo',
'America/Indiana/Petersburg',
'America/Indiana/Tell_City',
'America/Indiana/Vevay',
'America/Indiana/Vincennes',
'America/Indiana/Winamac',
'America/Indianapolis',
'America/Inuvik',
'America/Iqaluit',
'America/Jamaica',
'America/Jujuy',
'America/Juneau',
'America/Kentucky/Louisville',
'America/Kentucky/Monticello',
'America/Knox_IN',
'America/Kralendijk',
'America/La_Paz',
'America/Lima',
'America/Los_Angeles',
'America/Louisville',
'America/Lower_Princes',
'America/Maceio',
'America/Managua',
'America/Manaus',
'America/Marigot',
'America/Martinique',
'America/Matamoros',
'America/Mazatlan',
'America/Mendoza',
'America/Menominee',
'America/Merida',
'America/Metlakatla',
'America/Mexico_City',
'America/Miquelon',
'America/Moncton',
'America/Monterrey',
'America/Montevideo',
'America/Montreal',
'America/Montserrat',
'America/Nassau',
'America/New_York',
'America/Nipigon',
'America/Nome',
'America/Noronha',
'America/North_Dakota/Beulah',
'America/North_Dakota/Center',
'America/North_Dakota/New_Salem',
'America/Ojinaga',
'America/Panama',
'America/Pangnirtung',
'America/Paramaribo',
'America/Phoenix',
'America/Port-au-Prince',
'America/Port_of_Spain',
'America/Porto_Acre',
'America/Porto_Velho',
'America/Puerto_Rico',
'America/Rainy_River',
'America/Rankin_Inlet',
'America/Recife',
'America/Regina',
'America/Resolute',
'America/Rio_Branco',
'America/Rosario',
'America/Santa_Isabel',
'America/Santarem',
'America/Santiago',
'America/Santo_Domingo',
'America/Sao_Paulo',
'America/Scoresbysund',
'America/Shiprock',
'America/Sitka',
'America/St_Barthelemy',
'America/St_Johns',
'America/St_Kitts',
'America/St_Lucia',
'America/St_Thomas',
'America/St_Vincent',
'America/Swift_Current',
'America/Tegucigalpa',
'America/Thule',
'America/Thunder_Bay',
'America/Tijuana',
'America/Toronto',
'America/Tortola',
'America/Vancouver',
'America/Virgin',
'America/Whitehorse',
'America/Winnipeg',
'America/Yakutat',
'America/Yellowknife',
'Antarctica/Casey',
'Antarctica/Davis',
'Antarctica/DumontDUrville',
'Antarctica/Macquarie',
'Antarctica/Mawson',
'Antarctica/McMurdo',
'Antarctica/Palmer',
'Antarctica/Rothera',
'Antarctica/South_Pole',
'Antarctica/Syowa',
'Antarctica/Troll',
'Antarctica/Vostok',
'Arctic/Longyearbyen',
'Asia/Aden',
'Asia/Almaty',
'Asia/Amman',
'Asia/Anadyr',
'Asia/Aqtau',
'Asia/Aqtobe',
'Asia/Ashgabat',
'Asia/Ashkhabad',
'Asia/Baghdad',
'Asia/Bahrain',
'Asia/Baku',
'Asia/Bangkok',
'Asia/Beirut',
'Asia/Bishkek',
'Asia/Brunei',
'Asia/Calcutta',
'Asia/Chita',
'Asia/Choibalsan',
'Asia/Chongqing',
'Asia/Chungking',
'Asia/Colombo',
'Asia/Dacca',
'Asia/Damascus',
'Asia/Dhaka',
'Asia/Dili',
'Asia/Dubai',
'Asia/Dushanbe',
'Asia/Gaza',
'Asia/Harbin',
'Asia/Hebron',
'Asia/Ho_Chi_Minh',
'Asia/Hong_Kong',
'Asia/Hovd',
'Asia/Irkutsk',
'Asia/Istanbul',
'Asia/Jakarta',
'Asia/Jayapura',
'Asia/Jerusalem',
'Asia/Kabul',
'Asia/Kamchatka',
'Asia/Karachi',
'Asia/Kashgar',
'Asia/Kathmandu',
'Asia/Katmandu',
'Asia/Khandyga',
'Asia/Kolkata',
'Asia/Krasnoyarsk',
'Asia/Kuala_Lumpur',
'Asia/Kuching',
'Asia/Kuwait',
'Asia/Macao',
'Asia/Macau',
'Asia/Magadan',
'Asia/Makassar',
'Asia/Manila',
'Asia/Muscat',
'Asia/Nicosia',
'Asia/Novokuznetsk',
'Asia/Novosibirsk',
'Asia/Omsk',
'Asia/Oral',
'Asia/Phnom_Penh',
'Asia/Pontianak',
'Asia/Pyongyang',
'Asia/Qatar',
'Asia/Qyzylorda',
'Asia/Rangoon',
'Asia/Riyadh',
'Asia/Saigon',
'Asia/Sakhalin',
'Asia/Samarkand',
'Asia/Seoul',
'Asia/Shanghai',
'Asia/Singapore',
'Asia/Srednekolymsk',
'Asia/Taipei',
'Asia/Tashkent',
'Asia/Tbilisi',
'Asia/Tehran',
'Asia/Tel_Aviv',
'Asia/Thimbu',
'Asia/Thimphu',
'Asia/Tokyo',
'Asia/Ujung_Pandang',
'Asia/Ulaanbaatar',
'Asia/Ulan_Bator',
'Asia/Urumqi',
'Asia/Ust-Nera',
'Asia/Vientiane',
'Asia/Vladivostok',
'Asia/Yakutsk',
'Asia/Yekaterinburg',
'Asia/Yerevan',
'Atlantic/Azores',
'Atlantic/Bermuda',
'Atlantic/Canary',
'Atlantic/Cape_Verde',
'Atlantic/Faeroe',
'Atlantic/Faroe',
'Atlantic/Jan_Mayen',
'Atlantic/Madeira',
'Atlantic/Reykjavik',
'Atlantic/South_Georgia',
'Atlantic/St_Helena',
'Atlantic/Stanley',
'Australia/ACT',
'Australia/Adelaide',
'Australia/Brisbane',
'Australia/Broken_Hill',
'Australia/Canberra',
'Australia/Currie',
'Australia/Darwin',
'Australia/Eucla',
'Australia/Hobart',
'Australia/LHI',
'Australia/Lindeman',
'Australia/Lord_Howe',
'Australia/Melbourne',
'Australia/NSW',
'Australia/North',
'Australia/Perth',
'Australia/Queensland',
'Australia/South',
'Australia/Sydney',
'Australia/Tasmania',
'Australia/Victoria',
'Australia/West',
'Australia/Yancowinna',
'Brazil/Acre',
'Brazil/DeNoronha',
'Brazil/East',
'Brazil/West',
'CET',
'CST6CDT',
'Canada/Atlantic',
'Canada/Central',
'Canada/East-Saskatchewan',
'Canada/Eastern',
'Canada/Mountain',
'Canada/Newfoundland',
'Canada/Pacific',
'Canada/Saskatchewan',
'Canada/Yukon',
'Chile/Continental',
'Chile/EasterIsland',
'Cuba',
'EET',
'EST',
'EST5EDT',
'Egypt',
'Eire',
'Etc/GMT',
'Etc/GMT+0',
'Etc/GMT+1',
'Etc/GMT+10',
'Etc/GMT+11',
'Etc/GMT+12',
'Etc/GMT+2',
'Etc/GMT+3',
'Etc/GMT+4',
'Etc/GMT+5',
'Etc/GMT+6',
'Etc/GMT+7',
'Etc/GMT+8',
'Etc/GMT+9',
'Etc/GMT-0',
'Etc/GMT-1',
'Etc/GMT-10',
'Etc/GMT-11',
'Etc/GMT-12',
'Etc/GMT-13',
'Etc/GMT-14',
'Etc/GMT-2',
'Etc/GMT-3',
'Etc/GMT-4',
'Etc/GMT-5',
'Etc/GMT-6',
'Etc/GMT-7',
'Etc/GMT-8',
'Etc/GMT-9',
'Etc/GMT0',
'Etc/Greenwich',
'Etc/UCT',
'Etc/UTC',
'Etc/Universal',
'Etc/Zulu',
'Europe/Amsterdam',
'Europe/Andorra',
'Europe/Athens',
'Europe/Belfast',
'Europe/Belgrade',
'Europe/Berlin',
'Europe/Bratislava',
'Europe/Brussels',
'Europe/Bucharest',
'Europe/Budapest',
'Europe/Busingen',
'Europe/Chisinau',
'Europe/Copenhagen',
'Europe/Dublin',
'Europe/Gibraltar',
'Europe/Guernsey',
'Europe/Helsinki',
'Europe/Isle_of_Man',
'Europe/Istanbul',
'Europe/Jersey',
'Europe/Kaliningrad',
'Europe/Kiev',
'Europe/Lisbon',
'Europe/Ljubljana',
'Europe/London',
'Europe/Luxembourg',
'Europe/Madrid',
'Europe/Malta',
'Europe/Mariehamn',
'Europe/Minsk',
'Europe/Monaco',
'Europe/Moscow',
'Europe/Nicosia',
'Europe/Oslo',
'Europe/Paris',
'Europe/Podgorica',
'Europe/Prague',
'Europe/Riga',
'Europe/Rome',
'Europe/Samara',
'Europe/San_Marino',
'Europe/Sarajevo',
'Europe/Simferopol',
'Europe/Skopje',
'Europe/Sofia',
'Europe/Stockholm',
'Europe/Tallinn',
'Europe/Tirane',
'Europe/Tiraspol',
'Europe/Uzhgorod',
'Europe/Vaduz',
'Europe/Vatican',
'Europe/Vienna',
'Europe/Vilnius',
'Europe/Volgograd',
'Europe/Warsaw',
'Europe/Zagreb',
'Europe/Zaporozhye',
'Europe/Zurich',
'GB',
'GB-Eire',
'GMT',
'GMT+0',
'GMT-0',
'GMT0',
'Greenwich',
'HST',
'Hongkong',
'Iceland',
'Indian/Antananarivo',
'Indian/Chagos',
'Indian/Christmas',
'Indian/Cocos',
'Indian/Comoro',
'Indian/Kerguelen',
'Indian/Mahe',
'Indian/Maldives',
'Indian/Mauritius',
'Indian/Mayotte',
'Indian/Reunion',
'Iran',
'Israel',
'Jamaica',
'Japan',
'Kwajalein',
'Libya',
'MET',
'MST',
'MST7MDT',
'Mexico/BajaNorte',
'Mexico/BajaSur',
'Mexico/General',
'NZ',
'NZ-CHAT',
'Navajo',
'PRC',
'PST8PDT',
'Pacific/Apia',
'Pacific/Auckland',
'Pacific/Bougainville',
'Pacific/Chatham',
'Pacific/Chuuk',
'Pacific/Easter',
'Pacific/Efate',
'Pacific/Enderbury',
'Pacific/Fakaofo',
'Pacific/Fiji',
'Pacific/Funafuti',
'Pacific/Galapagos',
'Pacific/Gambier',
'Pacific/Guadalcanal',
'Pacific/Guam',
'Pacific/Honolulu',
'Pacific/Johnston',
'Pacific/Kiritimati',
'Pacific/Kosrae',
'Pacific/Kwajalein',
'Pacific/Majuro',
'Pacific/Marquesas',
'Pacific/Midway',
'Pacific/Nauru',
'Pacific/Niue',
'Pacific/Norfolk',
'Pacific/Noumea',
'Pacific/Pago_Pago',
'Pacific/Palau',
'Pacific/Pitcairn',
'Pacific/Pohnpei',
'Pacific/Ponape',
'Pacific/Port_Moresby',
'Pacific/Rarotonga',
'Pacific/Saipan',
'Pacific/Samoa',
'Pacific/Tahiti',
'Pacific/Tarawa',
'Pacific/Tongatapu',
'Pacific/Truk',
'Pacific/Wake',
'Pacific/Wallis',
'Pacific/Yap',
'Poland',
'Portugal',
'ROC',
'ROK',
'Singapore',
'Turkey',
'UCT',
'US/Alaska',
'US/Aleutian',
'US/Arizona',
'US/Central',
'US/East-Indiana',
'US/Eastern',
'US/Hawaii',
'US/Indiana-Starke',
'US/Michigan',
'US/Mountain',
'US/Pacific',
'US/Pacific-New',
'US/Samoa',
'UTC',
'Universal',
'W-SU',
'WET',
'Zulu']
# Keep only the zones whose data files actually ship with this package;
# LazyList defers the resource_exists() checks until first access.
all_timezones = LazyList(
        tz for tz in all_timezones if resource_exists(tz))
# Set variant for O(1) membership tests (also built lazily).
all_timezones_set = LazySet(all_timezones)
common_timezones = \
['Africa/Abidjan',
'Africa/Accra',
'Africa/Addis_Ababa',
'Africa/Algiers',
'Africa/Asmara',
'Africa/Bamako',
'Africa/Bangui',
'Africa/Banjul',
'Africa/Bissau',
'Africa/Blantyre',
'Africa/Brazzaville',
'Africa/Bujumbura',
'Africa/Cairo',
'Africa/Casablanca',
'Africa/Ceuta',
'Africa/Conakry',
'Africa/Dakar',
'Africa/Dar_es_Salaam',
'Africa/Djibouti',
'Africa/Douala',
'Africa/El_Aaiun',
'Africa/Freetown',
'Africa/Gaborone',
'Africa/Harare',
'Africa/Johannesburg',
'Africa/Juba',
'Africa/Kampala',
'Africa/Khartoum',
'Africa/Kigali',
'Africa/Kinshasa',
'Africa/Lagos',
'Africa/Libreville',
'Africa/Lome',
'Africa/Luanda',
'Africa/Lubumbashi',
'Africa/Lusaka',
'Africa/Malabo',
'Africa/Maputo',
'Africa/Maseru',
'Africa/Mbabane',
'Africa/Mogadishu',
'Africa/Monrovia',
'Africa/Nairobi',
'Africa/Ndjamena',
'Africa/Niamey',
'Africa/Nouakchott',
'Africa/Ouagadougou',
'Africa/Porto-Novo',
'Africa/Sao_Tome',
'Africa/Tripoli',
'Africa/Tunis',
'Africa/Windhoek',
'America/Adak',
'America/Anchorage',
'America/Anguilla',
'America/Antigua',
'America/Araguaina',
'America/Argentina/Buenos_Aires',
'America/Argentina/Catamarca',
'America/Argentina/Cordoba',
'America/Argentina/Jujuy',
'America/Argentina/La_Rioja',
'America/Argentina/Mendoza',
'America/Argentina/Rio_Gallegos',
'America/Argentina/Salta',
'America/Argentina/San_Juan',
'America/Argentina/San_Luis',
'America/Argentina/Tucuman',
'America/Argentina/Ushuaia',
'America/Aruba',
'America/Asuncion',
'America/Atikokan',
'America/Bahia',
'America/Bahia_Banderas',
'America/Barbados',
'America/Belem',
'America/Belize',
'America/Blanc-Sablon',
'America/Boa_Vista',
'America/Bogota',
'America/Boise',
'America/Cambridge_Bay',
'America/Campo_Grande',
'America/Cancun',
'America/Caracas',
'America/Cayenne',
'America/Cayman',
'America/Chicago',
'America/Chihuahua',
'America/Costa_Rica',
'America/Creston',
'America/Cuiaba',
'America/Curacao',
'America/Danmarkshavn',
'America/Dawson',
'America/Dawson_Creek',
'America/Denver',
'America/Detroit',
'America/Dominica',
'America/Edmonton',
'America/Eirunepe',
'America/El_Salvador',
'America/Fortaleza',
'America/Glace_Bay',
'America/Godthab',
'America/Goose_Bay',
'America/Grand_Turk',
'America/Grenada',
'America/Guadeloupe',
'America/Guatemala',
'America/Guayaquil',
'America/Guyana',
'America/Halifax',
'America/Havana',
'America/Hermosillo',
'America/Indiana/Indianapolis',
'America/Indiana/Knox',
'America/Indiana/Marengo',
'America/Indiana/Petersburg',
'America/Indiana/Tell_City',
'America/Indiana/Vevay',
'America/Indiana/Vincennes',
'America/Indiana/Winamac',
'America/Inuvik',
'America/Iqaluit',
'America/Jamaica',
'America/Juneau',
'America/Kentucky/Louisville',
'America/Kentucky/Monticello',
'America/Kralendijk',
'America/La_Paz',
'America/Lima',
'America/Los_Angeles',
'America/Lower_Princes',
'America/Maceio',
'America/Managua',
'America/Manaus',
'America/Marigot',
'America/Martinique',
'America/Matamoros',
'America/Mazatlan',
'America/Menominee',
'America/Merida',
'America/Metlakatla',
'America/Mexico_City',
'America/Miquelon',
'America/Moncton',
'America/Monterrey',
'America/Montevideo',
'America/Montreal',
'America/Montserrat',
'America/Nassau',
'America/New_York',
'America/Nipigon',
'America/Nome',
'America/Noronha',
'America/North_Dakota/Beulah',
'America/North_Dakota/Center',
'America/North_Dakota/New_Salem',
'America/Ojinaga',
'America/Panama',
'America/Pangnirtung',
'America/Paramaribo',
'America/Phoenix',
'America/Port-au-Prince',
'America/Port_of_Spain',
'America/Porto_Velho',
'America/Puerto_Rico',
'America/Rainy_River',
'America/Rankin_Inlet',
'America/Recife',
'America/Regina',
'America/Resolute',
'America/Rio_Branco',
'America/Santa_Isabel',
'America/Santarem',
'America/Santiago',
'America/Santo_Domingo',
'America/Sao_Paulo',
'America/Scoresbysund',
'America/Sitka',
'America/St_Barthelemy',
'America/St_Johns',
'America/St_Kitts',
'America/St_Lucia',
'America/St_Thomas',
'America/St_Vincent',
'America/Swift_Current',
'America/Tegucigalpa',
'America/Thule',
'America/Thunder_Bay',
'America/Tijuana',
'America/Toronto',
'America/Tortola',
'America/Vancouver',
'America/Whitehorse',
'America/Winnipeg',
'America/Yakutat',
'America/Yellowknife',
'Antarctica/Casey',
'Antarctica/Davis',
'Antarctica/DumontDUrville',
'Antarctica/Macquarie',
'Antarctica/Mawson',
'Antarctica/McMurdo',
'Antarctica/Palmer',
'Antarctica/Rothera',
'Antarctica/Syowa',
'Antarctica/Troll',
'Antarctica/Vostok',
'Arctic/Longyearbyen',
'Asia/Aden',
'Asia/Almaty',
'Asia/Amman',
'Asia/Anadyr',
'Asia/Aqtau',
'Asia/Aqtobe',
'Asia/Ashgabat',
'Asia/Baghdad',
'Asia/Bahrain',
'Asia/Baku',
'Asia/Bangkok',
'Asia/Beirut',
'Asia/Bishkek',
'Asia/Brunei',
'Asia/Chita',
'Asia/Choibalsan',
'Asia/Colombo',
'Asia/Damascus',
'Asia/Dhaka',
'Asia/Dili',
'Asia/Dubai',
'Asia/Dushanbe',
'Asia/Gaza',
'Asia/Hebron',
'Asia/Ho_Chi_Minh',
'Asia/Hong_Kong',
'Asia/Hovd',
'Asia/Irkutsk',
'Asia/Jakarta',
'Asia/Jayapura',
'Asia/Jerusalem',
'Asia/Kabul',
'Asia/Kamchatka',
'Asia/Karachi',
'Asia/Kathmandu',
'Asia/Khandyga',
'Asia/Kolkata',
'Asia/Krasnoyarsk',
'Asia/Kuala_Lumpur',
'Asia/Kuching',
'Asia/Kuwait',
'Asia/Macau',
'Asia/Magadan',
'Asia/Makassar',
'Asia/Manila',
'Asia/Muscat',
'Asia/Nicosia',
'Asia/Novokuznetsk',
'Asia/Novosibirsk',
'Asia/Omsk',
'Asia/Oral',
'Asia/Phnom_Penh',
'Asia/Pontianak',
'Asia/Pyongyang',
'Asia/Qatar',
'Asia/Qyzylorda',
'Asia/Rangoon',
'Asia/Riyadh',
'Asia/Sakhalin',
'Asia/Samarkand',
'Asia/Seoul',
'Asia/Shanghai',
'Asia/Singapore',
'Asia/Srednekolymsk',
'Asia/Taipei',
'Asia/Tashkent',
'Asia/Tbilisi',
'Asia/Tehran',
'Asia/Thimphu',
'Asia/Tokyo',
'Asia/Ulaanbaatar',
'Asia/Urumqi',
'Asia/Ust-Nera',
'Asia/Vientiane',
'Asia/Vladivostok',
'Asia/Yakutsk',
'Asia/Yekaterinburg',
'Asia/Yerevan',
'Atlantic/Azores',
'Atlantic/Bermuda',
'Atlantic/Canary',
'Atlantic/Cape_Verde',
'Atlantic/Faroe',
'Atlantic/Madeira',
'Atlantic/Reykjavik',
'Atlantic/South_Georgia',
'Atlantic/St_Helena',
'Atlantic/Stanley',
'Australia/Adelaide',
'Australia/Brisbane',
'Australia/Broken_Hill',
'Australia/Currie',
'Australia/Darwin',
'Australia/Eucla',
'Australia/Hobart',
'Australia/Lindeman',
'Australia/Lord_Howe',
'Australia/Melbourne',
'Australia/Perth',
'Australia/Sydney',
'Canada/Atlantic',
'Canada/Central',
'Canada/Eastern',
'Canada/Mountain',
'Canada/Newfoundland',
'Canada/Pacific',
'Europe/Amsterdam',
'Europe/Andorra',
'Europe/Athens',
'Europe/Belgrade',
'Europe/Berlin',
'Europe/Bratislava',
'Europe/Brussels',
'Europe/Bucharest',
'Europe/Budapest',
'Europe/Busingen',
'Europe/Chisinau',
'Europe/Copenhagen',
'Europe/Dublin',
'Europe/Gibraltar',
'Europe/Guernsey',
'Europe/Helsinki',
'Europe/Isle_of_Man',
'Europe/Istanbul',
'Europe/Jersey',
'Europe/Kaliningrad',
'Europe/Kiev',
'Europe/Lisbon',
'Europe/Ljubljana',
'Europe/London',
'Europe/Luxembourg',
'Europe/Madrid',
'Europe/Malta',
'Europe/Mariehamn',
'Europe/Minsk',
'Europe/Monaco',
'Europe/Moscow',
'Europe/Oslo',
'Europe/Paris',
'Europe/Podgorica',
'Europe/Prague',
'Europe/Riga',
'Europe/Rome',
'Europe/Samara',
'Europe/San_Marino',
'Europe/Sarajevo',
'Europe/Simferopol',
'Europe/Skopje',
'Europe/Sofia',
'Europe/Stockholm',
'Europe/Tallinn',
'Europe/Tirane',
'Europe/Uzhgorod',
'Europe/Vaduz',
'Europe/Vatican',
'Europe/Vienna',
'Europe/Vilnius',
'Europe/Volgograd',
'Europe/Warsaw',
'Europe/Zagreb',
'Europe/Zaporozhye',
'Europe/Zurich',
'GMT',
'Indian/Antananarivo',
'Indian/Chagos',
'Indian/Christmas',
'Indian/Cocos',
'Indian/Comoro',
'Indian/Kerguelen',
'Indian/Mahe',
'Indian/Maldives',
'Indian/Mauritius',
'Indian/Mayotte',
'Indian/Reunion',
'Pacific/Apia',
'Pacific/Auckland',
'Pacific/Bougainville',
'Pacific/Chatham',
'Pacific/Chuuk',
'Pacific/Easter',
'Pacific/Efate',
'Pacific/Enderbury',
'Pacific/Fakaofo',
'Pacific/Fiji',
'Pacific/Funafuti',
'Pacific/Galapagos',
'Pacific/Gambier',
'Pacific/Guadalcanal',
'Pacific/Guam',
'Pacific/Honolulu',
'Pacific/Johnston',
'Pacific/Kiritimati',
'Pacific/Kosrae',
'Pacific/Kwajalein',
'Pacific/Majuro',
'Pacific/Marquesas',
'Pacific/Midway',
'Pacific/Nauru',
'Pacific/Niue',
'Pacific/Norfolk',
'Pacific/Noumea',
'Pacific/Pago_Pago',
'Pacific/Palau',
'Pacific/Pitcairn',
'Pacific/Pohnpei',
'Pacific/Port_Moresby',
'Pacific/Rarotonga',
'Pacific/Saipan',
'Pacific/Tahiti',
'Pacific/Tarawa',
'Pacific/Tongatapu',
'Pacific/Wake',
'Pacific/Wallis',
'US/Alaska',
'US/Arizona',
'US/Central',
'US/Eastern',
'US/Hawaii',
'US/Mountain',
'US/Pacific',
'UTC']
# Restrict the curated list to zones actually present in all_timezones,
# again evaluated lazily on first access.
common_timezones = LazyList(
        tz for tz in common_timezones if tz in all_timezones)
# Set variant for O(1) membership tests (also built lazily).
common_timezones_set = LazySet(common_timezones)
| GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/pytz/__init__.py | Python | agpl-3.0 | 34,010 |
# wsse/__init__.py
# coding=utf-8
# pywsse
# Authors: Rushy Panchal, Naphat Sanguansin, Adam Libresco, Jérémie Lumbroso
# Date: August 30th, 2016
import logging
from . import settings
# Attach a NullHandler to the package logger unless the host application
# already configured one, so importing this library never emits
# "no handlers could be found" warnings (standard practice for libraries).
logger = logging.getLogger(settings.LOGGER_NAME)
if not logger.handlers:
	logger.addHandler(logging.NullHandler())
| PrincetonUniversity/pywsse | wsse/__init__.py | Python | lgpl-3.0 | 391 |
# Copyright (C) 2014 Saggi Mizrahi, Red Hat Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from __future__ import absolute_import
import logging
from functools import partial
from Queue import Queue
from weakref import ref
from threading import Lock, Event
from vdsm.compat import json
from vdsm.password import protect_passwords, unprotect_passwords
from vdsm.utils import monotonic_time, traceback
# Sibling modules re-exported via "from yajsonrpc import *".
__all__ = ["betterAsyncore", "stompreactor", "stomp"]

# Default number of seconds a synchronous call() waits for its response.
CALL_TIMEOUT = 15

# Bitmask-style message/connection state flags.
# NOTE(review): not referenced elsewhere in this module chunk — confirm
# they are used by the sibling modules before removing.
_STATE_INCOMING = 1
_STATE_OUTGOING = 2
_STATE_ONESHOT = 4
class JsonRpcError(RuntimeError):
    """Base class for JSON-RPC 2.0 protocol errors.

    Carries the integer error ``code`` and human readable ``message``
    defined by the JSON-RPC 2.0 specification; str(err) renders as
    "[code] message".
    """
    def __init__(self, code, msg):
        self.code = code
        self.message = msg
        # Fix: the original called super(RuntimeError, self), which skips
        # RuntimeError itself in the MRO and initializes Exception directly.
        # Name the class being defined, as the super() protocol requires.
        super(JsonRpcError, self).__init__(
            "[%d] %s" % (self.code, self.message)
        )
class JsonRpcParseError(JsonRpcError):
    """Raised when the server receives text that is not valid JSON (-32700)."""
    def __init__(self):
        msg = ("Invalid JSON was received by the server. "
               "An error occurred on the server while parsing "
               "the JSON text.")
        JsonRpcError.__init__(self, -32700, msg)
class JsonRpcInvalidRequestError(JsonRpcError):
    """Raised when a message is not a valid JSON-RPC Request object (-32600)."""
    log = logging.getLogger("JsonRpcInvalidRequestError")

    def __init__(self, object_name, msg_content):
        # Log the offending payload before the error propagates up the stack.
        self.log.error("Invalid message found %s", msg_content)
        msg = ("The JSON sent is not a valid Request object "
               "with " + object_name)
        JsonRpcError.__init__(self, -32600, msg)
class JsonRpcMethodNotFoundError(JsonRpcError):
    """Raised when the requested method is unknown to the bridge (-32601)."""
    def __init__(self):
        JsonRpcError.__init__(
            self, -32601,
            "The method does not exist / is not available.")
class JsonRpcInvalidParamsError(JsonRpcError):
    """Raised when a request carries malformed method parameters (-32602)."""
    def __init__(self):
        JsonRpcError.__init__(
            self, -32602,
            "Invalid method parameter(s).")
class JsonRpcInternalError(JsonRpcError):
    """Raised for unexpected server-side failures (-32603)."""
    def __init__(self, msg=None):
        # Fall back to the generic spec wording when no detail is supplied.
        JsonRpcError.__init__(self, -32603,
                              msg or "Internal JSON-RPC error.")
class JsonRpcBindingsError(JsonRpcError):
    """Raised when the JSON-RPC bindings are missing (-32604)."""
    def __init__(self):
        JsonRpcError.__init__(
            self, -32604,
            "Missing bindings for JSON-RPC.")
class JsonRpcNoResponseError(JsonRpcError):
    """Raised when a request received no response within the timeout (-32605)."""
    def __init__(self, method=''):
        JsonRpcError.__init__(
            self, -32605,
            "No response for JSON-RPC "
            "%s request." % method)
class JsonRpcRequest(object):
    """A single JSON-RPC 2.0 request (or notification) message."""

    def __init__(self, method, params=(), reqId=None):
        self.method = method
        # Mask any password fields so they never show up in logs.
        self.params = protect_passwords(params)
        self.id = reqId

    @classmethod
    def decode(cls, msg):
        """Parse a JSON encoded string into a JsonRpcRequest.

        Raises JsonRpcParseError when the text is not valid JSON and
        JsonRpcInvalidRequestError when it does not match the request
        schema.
        """
        try:
            obj = json.loads(msg, 'utf-8')
        except Exception:
            # Fix: narrowed from a bare "except:" so that SystemExit and
            # KeyboardInterrupt are not swallowed and misreported as a
            # protocol-level parse error.
            raise JsonRpcParseError()

        return cls.fromRawObject(obj)

    @staticmethod
    def fromRawObject(obj):
        """Validate a decoded dict and build a JsonRpcRequest from it."""
        if obj.get("jsonrpc") != "2.0":
            raise JsonRpcInvalidRequestError("wrong protocol version", obj)

        method = obj.get("method")
        if method is None:
            raise JsonRpcInvalidRequestError("missing method header", obj)

        reqId = obj.get("id")
        # When sending notifications no id is provided.
        params = obj.get('params', [])
        if not isinstance(params, (list, dict)):
            raise JsonRpcInvalidRequestError("wrong params type", obj)

        return JsonRpcRequest(method, params, reqId)

    def toDict(self):
        """Return the request as a plain JSON-RPC 2.0 dict."""
        return {
            'jsonrpc': '2.0',
            'method': self.method,
            'params': self.params,
            'id': self.id
        }

    def encode(self):
        res = self.toDict()
        return json.dumps(res, 'utf-8')

    def isNotification(self):
        # Per JSON-RPC 2.0, a request without an id is a notification.
        return (self.id is None)
class JsonRpcResponse(object):
    """A single JSON-RPC 2.0 response message (result or error)."""

    def __init__(self, result=None, error=None, reqId=None):
        self.result = unprotect_passwords(result)
        self.error = error
        self.id = reqId

    def toDict(self):
        """Return the response as a plain JSON-RPC 2.0 dict."""
        res = {'jsonrpc': '2.0',
               'id': self.id}

        if self.error is None:
            res['result'] = self.result
        else:
            res['error'] = {'code': self.error.code,
                            'message': self.error.message}

        return res

    def encode(self):
        return json.dumps(self.toDict(), 'utf-8')

    @staticmethod
    def decode(msg):
        return JsonRpcResponse.fromRawObject(json.loads(msg, 'utf-8'))

    @staticmethod
    def fromRawObject(obj):
        """Validate a decoded dict and build a JsonRpcResponse from it."""
        if obj.get("jsonrpc") != "2.0":
            raise JsonRpcInvalidRequestError("wrong protocol version", obj)

        if "result" not in obj and "error" not in obj:
            raise JsonRpcInvalidRequestError("missing result or error info",
                                             obj)

        return JsonRpcResponse(obj.get("result"), obj.get("error"),
                               obj.get("id"))
class Notification(object):
    """
    Represents a jsonrpc notification message. It builds a proper jsonrpc
    notification payload and hands it to a callback which is responsible
    for actually sending it.
    """
    log = logging.getLogger("jsonrpc.Notification")

    def __init__(self, event_id, cb):
        self._event_id = event_id
        self._cb = cb

    def emit(self, **kwargs):
        """Serialize the event with the given params and send it."""
        self._add_notify_time(kwargs)
        payload = {'jsonrpc': '2.0',
                   'method': self._event_id,
                   'params': kwargs}
        notification = json.dumps(payload)
        self.log.debug("Sending event %s", notification)
        self._cb(notification)

    def _add_notify_time(self, body):
        # Millisecond monotonic timestamp lets receivers order events.
        body['notify_time'] = int(monotonic_time() * 1000)
class _JsonRpcClientRequestContext(object):
def __init__(self, requests, callback):
self.callback = callback
self._requests = requests
self._responses = {}
for req in requests:
if req.id is None:
continue # Notifications don't have responses
self._responses[req.id] = None
def addResponse(self, resp):
self._responses[resp.id] = resp
def isDone(self):
for v in self._responses.values():
if v is None:
return False
return True
def getResponses(self):
return self._responses.values()
def ids(self):
return self._responses.keys()
def encode(self):
return ("[" +
", ".join(r.encode() for r in self._requests) +
"]")
class _JsonRpcServeRequestContext(object):
def __init__(self, client, addr):
self._requests = []
self._client = client
self._addr = addr
self._counter = 0
self._requests = {}
self._responses = []
def setRequests(self, requests):
for request in requests:
if not request.isNotification():
self._counter += 1
self._requests[request.id] = request
self.sendReply()
@property
def counter(self):
return self._counter
@property
def address(self):
return self._addr
def sendReply(self):
if len(self._requests) > 0:
return
encodedObjects = []
for response in self._responses:
try:
encodedObjects.append(response.encode())
except: # Error encoding data
response = JsonRpcResponse(None, JsonRpcInternalError,
response.id)
encodedObjects.append(response.encode())
if len(encodedObjects) == 1:
data = encodedObjects[0]
else:
data = '[' + ','.join(encodedObjects) + ']'
self._client.send(data.encode('utf-8'))
def addResponse(self, response):
self._responses.append(response)
def requestDone(self, response):
del self._requests[response.id]
self.addResponse(response)
self.sendReply()
class JsonRpcCall(object):
    """Synchronization helper: blocks a caller until the responses arrive."""

    def __init__(self):
        self._ev = Event()
        self.responses = None

    def callback(self, c, resp):
        # Normalize a single response into a list for uniform handling,
        # then wake up any thread blocked in wait().
        self.responses = resp if isinstance(resp, list) else [resp]
        self._ev.set()

    def wait(self, timeout=None):
        """Block up to timeout seconds; return True if responses arrived."""
        self._ev.wait(timeout)
        return self.isSet()

    def isSet(self):
        return self._ev.is_set()
class JsonRpcClient(object):
    """Client side of a JSON-RPC connection: sends requests over a
    transport and dispatches incoming responses and server events."""

    def __init__(self, transport):
        self.log = logging.getLogger("jsonrpc.JsonRpcClient")
        transport.set_message_handler(self._handleMessage)
        self._transport = transport
        # Maps request id -> _JsonRpcClientRequestContext of in-flight calls.
        self._runningRequests = {}
        self._lock = Lock()
        # Weak references to event callbacks so we never keep them alive.
        self._eventcbs = []

    def callMethod(self, methodName, params=None, rid=None):
        """Synchronously invoke one method and return its result.

        Raises JsonRpcNoResponseError on timeout and JsonRpcError when
        the server reports an error.
        """
        # Fix: the previous signature used a mutable default (params=[]).
        if params is None:
            params = []
        responses = self.call(JsonRpcRequest(methodName, params, rid))
        if responses is None:
            raise JsonRpcNoResponseError(methodName)

        response = responses[0]
        if response.error:
            raise JsonRpcError(response.error['code'],
                               response.error['message'])
        else:
            return response.result

    def call(self, *reqs, **kwargs):
        """Send requests and block up to kwargs['timeout'] seconds."""
        call = self.call_async(*reqs)
        call.wait(kwargs.get('timeout', CALL_TIMEOUT))
        return call.responses

    def call_async(self, *reqs):
        call = JsonRpcCall()
        self.call_cb(call.callback, *reqs)
        return call

    def call_cb(self, cb, *reqs):
        """Send requests; cb is invoked once all responses have arrived."""
        ctx = _JsonRpcClientRequestContext(reqs, cb)
        with self._lock:
            for rid in ctx.ids():
                if rid in self._runningRequests:
                    # Fix: the message is now actually formatted; the old
                    # code passed (msg, rid) as two ValueError args.
                    raise ValueError("Request id already in use %s" % rid)

                self._runningRequests[rid] = ctx

        self._transport.send(ctx.encode())

        # A batch of notifications only - no responses will ever arrive.
        if ctx.isDone():
            self._finalizeCtx(ctx)

    def _finalizeCtx(self, ctx):
        if not ctx.isDone():
            return

        cb = ctx.callback
        if cb is not None:
            cb(self, ctx.getResponses())

    def _processIncomingResponse(self, resp):
        if isinstance(resp, list):
            # Explicit loop instead of map() so the recursion also runs
            # under Python 3, where map() is lazy.
            for r in resp:
                self._processIncomingResponse(r)
            return

        resp = JsonRpcResponse.fromRawObject(resp)
        with self._lock:
            if resp.id is None:
                self.log.warning(
                    "Got an error from server without an ID (%s)",
                    resp.error,
                )
                # Fix: previously fell through and popped a None id,
                # raising KeyError; there is no matching request to
                # finalize, so just drop the response.
                return
            ctx = self._runningRequests.pop(resp.id)

        ctx.addResponse(resp)

        self._finalizeCtx(ctx)

    def _isResponse(self, obj):
        """Return True for response objects; raise TypeError on mixed batch."""
        if isinstance(obj, list):
            v = None
            for res in map(self._isResponse, obj):
                if v is None:
                    v = res

                if v != res:
                    raise TypeError("batch is mixed")

            return v
        else:
            return ("result" in obj or "error" in obj)

    def _handleMessage(self, req):
        transport, message = req
        try:
            mobj = json.loads(message)
            isResponse = self._isResponse(mobj)
        except Exception:
            self.log.exception("Problem parsing message from client")
            # Fix: without this return, "isResponse" was referenced while
            # unbound below, raising NameError on malformed input.
            return

        if isResponse:
            self._processIncomingResponse(mobj)
        else:
            self._processEvent(mobj)

    def _processEvent(self, obj):
        if isinstance(obj, list):
            for o in obj:
                self._processEvent(o)
            return

        req = JsonRpcRequest.fromRawObject(obj)
        if not req.isNotification():
            self.log.warning("Recieved non notification, ignoring")

        self.emit(req.method, req.params)

    def close(self):
        self._transport.close()

    # Alias kept for callers that expect a stop() method.
    stop = close

    def registerEventCallback(self, eventcb):
        self._eventcbs.append(ref(eventcb))

    def unregisterEventCallback(self, eventcb):
        for r in self._eventcbs[:]:
            cb = r()
            if cb is None or cb == eventcb:
                try:
                    self._eventcbs.remove(r)
                except ValueError:
                    # Double unregister, ignore.
                    pass

    def emit(self, event, params):
        """Deliver an event to every still-alive registered callback."""
        for r in self._eventcbs[:]:
            cb = r()
            if cb is None:
                continue

            cb(self, event, params)
class JsonRpcServer(object):
    # Server side of the JSON-RPC connection: pulls queued raw messages,
    # decodes them and dispatches each request to a method on the bridge.
    log = logging.getLogger("jsonrpc.JsonRpcServer")

    """
    Creates new JsonrRpcServer by providing a bridge, timeout in seconds
    which defining how often we should log connections stats and thread
    factory.
    """
    def __init__(self, bridge, timeout, threadFactory=None):
        self._bridge = bridge
        # Queue of (client, addr, msg) tuples fed by queueRequest();
        # a None sentinel tells serve_requests() to exit.
        self._workQueue = Queue()
        self._threadFactory = threadFactory
        self._timeout = timeout
        self._next_report = monotonic_time() + self._timeout
        self._counter = 0

    def queueRequest(self, req):
        self._workQueue.put_nowait(req)

    """
    Aggregates number of requests received by vdsm. Each request from
    a batch is added separately. After time defined by timeout we log
    number of requests.
    """
    def _attempt_log_stats(self):
        self._counter += 1
        if monotonic_time() > self._next_report:
            self.log.info('%s requests processed during %s seconds',
                          self._counter, self._timeout)
            self._next_report += self._timeout
            self._counter = 0

    def _serveRequest(self, ctx, req):
        # Dispatch a single decoded request to the bridge and report the
        # outcome (result or error) back through the serve context.
        self._attempt_log_stats()
        # JSON-RPC method names use dots; bridge attributes use underscores.
        mangledMethod = req.method.replace(".", "_")
        logLevel = logging.DEBUG
        # High-frequency polling calls are demoted to a quieter level.
        # NOTE(review): logging.TRACE is not a stdlib level — presumably
        # registered elsewhere in the project; confirm before reuse.
        if mangledMethod in ('Host_getVMList', 'Host_getAllVmStats',
                             'Host_getStats', 'StorageDomain_getStats',
                             'VM_getStats', 'Host_fenceNode'):
            logLevel = logging.TRACE
        self.log.log(logLevel, "Calling '%s' in bridge with %s",
                     req.method, req.params)
        try:
            method = getattr(self._bridge, mangledMethod)
        except AttributeError:
            # Unknown method: notifications get no reply at all.
            if req.isNotification():
                return

            ctx.requestDone(JsonRpcResponse(None,
                                            JsonRpcMethodNotFoundError(),
                                            req.id))
            return

        try:
            params = req.params
            self._bridge.register_server_address(ctx.address)
            # Positional params arrive as a list, named params as a dict.
            if isinstance(req.params, list):
                res = method(*params)
            else:
                res = method(**params)
            self._bridge.unregister_server_address()
        except JsonRpcError as e:
            # Protocol-level errors are forwarded to the caller verbatim.
            ctx.requestDone(JsonRpcResponse(None, e, req.id))
        except Exception as e:
            # Any other failure becomes a generic internal error (-32603).
            self.log.exception("Internal server error")
            ctx.requestDone(JsonRpcResponse(None,
                                            JsonRpcInternalError(str(e)),
                                            req.id))
        else:
            # A bare None result is normalized to True for the wire format.
            res = True if res is None else res
            self.log.log(logLevel, "Return '%s' in bridge with %s",
                         req.method, res)
            ctx.requestDone(JsonRpcResponse(res, None, req.id))

    @traceback(on=log.name)
    def serve_requests(self):
        # Main loop: block on the work queue until the None sentinel
        # pushed by stop() arrives.
        while True:
            obj = self._workQueue.get()
            if obj is None:
                break

            client, addr, msg = obj
            self._parseMessage(client, addr, msg)

    def _parseMessage(self, client, addr, msg):
        # Decode one raw message (single request or batch), validate each
        # entry and hand the valid requests off for execution.
        ctx = _JsonRpcServeRequestContext(client, addr)

        try:
            rawRequests = json.loads(msg)
        except:
            ctx.addResponse(JsonRpcResponse(None, JsonRpcParseError(), None))
            ctx.sendReply()
            return

        if isinstance(rawRequests, list):
            # Empty batch request
            if len(rawRequests) == 0:
                ctx.addResponse(
                    JsonRpcResponse(None,
                                    JsonRpcInvalidRequestError(
                                        'request batch is empty',
                                        rawRequests),
                                    None))
                ctx.sendReply()
                return
        else:
            # From this point on we know it's always a list
            rawRequests = [rawRequests]

        # JSON Parsed handling each request
        requests = []
        for rawRequest in rawRequests:
            try:
                req = JsonRpcRequest.fromRawObject(rawRequest)
                requests.append(req)
            except JsonRpcError as err:
                ctx.addResponse(JsonRpcResponse(None, err, None))
            except:
                ctx.addResponse(JsonRpcResponse(None,
                                                JsonRpcInternalError(),
                                                None))

        ctx.setRequests(requests)
        # No request was built successfully or is only notifications
        if ctx.counter == 0:
            ctx.sendReply()

        for request in requests:
            self._runRequest(ctx, request)

    def _runRequest(self, ctx, request):
        # Serve inline when no thread factory was given; otherwise hand
        # the request to a worker thread.
        if self._threadFactory is None:
            self._serveRequest(ctx, request)
        else:
            try:
                self._threadFactory(partial(self._serveRequest, ctx, request))
            except Exception as e:
                self.log.exception("could not allocate request thread")
                ctx.requestDone(
                    JsonRpcResponse(
                        None,
                        JsonRpcInternalError(
                            str(e)
                        ),
                        request.id
                    )
                )

    def stop(self):
        # Unblock serve_requests() by pushing the None sentinel.
        self.log.info("Stopping JsonRPC Server")
        self._workQueue.put_nowait(None)
| germanovm/vdsm | lib/yajsonrpc/__init__.py | Python | gpl-2.0 | 18,676 |
"""
Settings for edX ACE on devstack.
"""
from __future__ import absolute_import
from openedx.core.djangoapps.ace_common.settings import common
def plugin_settings(settings):
    """
    Apply devstack overrides on top of the common ACE settings, routing
    every email channel to the `file_email` backend for easier local
    debugging.
    """
    common.plugin_settings(settings)
    file_email_channel = 'file_email'
    settings.ACE_ENABLED_CHANNELS = [file_email_channel]
    settings.ACE_CHANNEL_DEFAULT_EMAIL = file_email_channel
    settings.ACE_CHANNEL_TRANSACTIONAL_EMAIL = file_email_channel
| ESOedX/edx-platform | openedx/core/djangoapps/ace_common/settings/devstack.py | Python | agpl-3.0 | 485 |
#!/usr/bin/env python
import os
import sys
import routes
import logging
import coloredlogs
import numpy as np
import tornado.web
import tornado.ioloop
import psycopg2.extras
import db.dbconn as db
from tornado import gen
import tornado.platform.twisted
tornado.platform.twisted.install()
from toradbapi import ConnectionPool
from twisted.internet import reactor
# Database connection parameters.
# SECURITY NOTE(review): credentials are hard-coded in source and committed
# to version control; move them to environment variables or a config file.
USER = 'airinfo'
PASSWORD = 'password(8)'
DATABASE = 'flights'
HOST = 'margffoy-tuay.com'
# Column-aligned log line layout used by logging.basicConfig in main().
LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '
    '-35s %(lineno) -5d: %(message)s')
LOGGER = logging.getLogger(__name__)
coloredlogs.install(level='info')
# Terminal-clear command for the current platform ('cls' on Windows).
clr = 'clear'
if os.name == 'nt':
    clr = 'cls'
def main():
    """Configure logging, build the Tornado application, initialize the
    shared database pool and model weights, and run the server on port
    8000 until interrupted."""
    logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)
    settings = {}
    # settings = {"static_path": os.path.join(
    #     os.path.dirname(__file__), "static")}
    application = tornado.web.Application(routes.ROUTES,
        debug=True, serve_traceback=True, autoreload=True, **settings)
    print "Server is now at: 127.0.0.1:8000"
    ioloop = tornado.ioloop.IOLoop.instance()
    # Shared connection pool (psycopg2 behind twisted's adbapi); handlers
    # reach it through application.db.
    db.initialize_db('psycopg2', cp_noisy=True, user=USER, password=PASSWORD,
        database=DATABASE, host=HOST, cursor_factory=psycopg2.extras.DictCursor)
    # Pre-trained neural network weights loaded once and shared with the
    # request handlers via the application object.
    W = np.load('./nn/W_partial.npz')['arr_0']
    application.W = W
    application.db = db
    application.listen(8000)
    try:
        ioloop.start()
    except KeyboardInterrupt:
        pass
    finally:
        # Always release DB connections and stop the loop on shutdown.
        print "Closing server...\n"
        db.close()
        tornado.ioloop.IOLoop.instance().stop()
if __name__ == '__main__':
    # Clear the terminal before printing the startup banner, then run.
    os.system(clr)
    main()
| TiMed-dev/TIMed-backend | main.py | Python | mit | 1,687 |
# -*- coding: utf-8 -*-
### BEGIN LICENSE
# Copyright (C) 2010 Benjamin Elbers <elbersb@gmail.com>
#This program is free software: you can redistribute it and/or modify it
#under the terms of the GNU General Public License version 3, as published
#by the Free Software Foundation.
#
#This program is distributed in the hope that it will be useful, but
#WITHOUT ANY WARRANTY; without even the implied warranties of
#MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
#PURPOSE. See the GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License along
#with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
import subprocess
import re
import time
import os.path
import base64
import hashlib
import urllib
from otrverwaltung.GeneratorTask import GeneratorTask
from otrverwaltung.cutlists import Cutlist
from otrverwaltung.conclusions import FileConclusion
from otrverwaltung.constants import DownloadStatus, DownloadTypes, Action, Cut_action, Status
from otrverwaltung import fileoperations
class Download:
    """One OTR download job (torrent, plain HTTP download, or OTR decoder run).

    The actual work happens in the _download() generator, which is driven by a
    GeneratorTask (see start()); every string yielded by the generator is
    appended to self.log. Progress and status are published through the
    self.information dict, which the GUI reads.

    NOTE(review): update_view() is called throughout but not defined in this
    class; presumably provided by the GUI layer or a subclass - confirm.
    """

    def __init__(self, app, config, filename=None, link=None):
        """ Torrent: link=None """
        self._app = app
        self._config = config
        self.filename = filename
        self.link = link
        self.log = ""
        # State shared with the GUI. 'status' holds a DownloadStatus value
        # (-1 = not yet started); 'progress' is a percentage 0-100.
        self.information = {
            'output' : '',
            'status' : -1,
            'size' : None,
            'progress' : 0,
            'speed' : '',
            'est' : '',
            'message_short' : '',
            # Torrent
            'seeders': None,
            'upspeed': None,
            'uploaded': None,
            'ratio': None
        }
        self.__task = None      # GeneratorTask driving _download()
        self.__process = None   # currently running downloader/decoder subprocess

    #
    # Storage
    #

    def to_json(self):
        """Return a JSON-serializable snapshot of this download's state."""
        information = self.information.copy()
        if 'cutlist' in information.keys():
            # Cutlist objects are not JSON-serializable; they are re-created
            # on demand from 'cutlist_id'.
            information['cutlist'] = None
        return {
            'information' : information,
            'filename': self.filename,
            'link': self.link
        }

    def from_json(self, json):
        """Restore a snapshot previously produced by to_json()."""
        self.information = json['information']
        self.filename = json['filename']
        self.link = json['link']

    #
    # Init methods for action
    #

    def download_torrent(self):
        """Configure this job to download via torrent."""
        self.information['download_type'] = DownloadTypes.TORRENT

    def download_basic(self, preferred_downloader):
        """Configure this job as a plain download ('wget' or aria2c)."""
        self.information['download_type'] = DownloadTypes.BASIC
        self.information['preferred_downloader'] = preferred_downloader

    def download_decode(self, cutlist_id=None):
        """Configure this job as an OTR decoder run; also cut if cutlist_id is given."""
        if cutlist_id:
            self.information['download_type'] = DownloadTypes.OTR_CUT
            self.information['cutlist_id'] = cutlist_id
            self.information['cutlist'] = None
        else:
            self.information['download_type'] = DownloadTypes.OTR_DECODE

    #
    # Convenience methods used only by this class
    #

    def _finished(self):
        """Flag the download as successfully finished."""
        self.information['status'] = DownloadStatus.FINISHED
        self.information['progress'] = 100
        self.information['est'] = ""

    # unused by now
    # FIX: was declared without 'self', so an instance call would have passed
    # the instance as 'time'; make the intent explicit with @staticmethod.
    @staticmethod
    def _parse_time(time):
        """ Takes a string '5m' or '6h2m59s' and calculates seconds. """
        m = re.match('((?P<h>[0-9]*)h)?((?P<m>[0-9]{1,2})m)?((?P<s>[0-9]{1,2})s)?', time)
        if m:
            time = 60 * 60 * int(m.group('h')) if m.group('h') else 0
            time = (time + 60 * int(m.group('m'))) if m.group('m') else time
            time = (time + int(m.group('s'))) if m.group('s') else time
            return time
        else:
            return 0

    #
    # Download
    #

    def _download(self):
        """Generator doing the actual download; yields log lines.

        Dispatches on information['download_type'] (torrent / basic / decode)
        and parses the progress output of the respective subprocess.
        """
        self.log = ''
        self.information['message_short'] = ''
        self.information['status'] = DownloadStatus.RUNNING

        if self.information['download_type'] == DownloadTypes.TORRENT:
            # download torrent if necessary
            torrent_filename = os.path.join(self._config.get('general', 'folder_new_otrkeys'), self.filename + '.torrent')
            if not os.path.exists(torrent_filename):
                password = base64.b64decode(self._config.get('general', 'password'))
                password_hash = hashlib.md5(password).hexdigest()
                email = self._config.get('general', 'email')
                url = 'http://81.95.11.2/xbt/xbt_torrent_create.php?filename=%s&email=%s&mode=free&hash=%s' % (self.filename, email, password_hash)
                try:
                    urllib.urlretrieve(url, torrent_filename)
                    # read first line to detect an error response from the server
                    # (with-statement also fixes the leaked file handle)
                    with open(torrent_filename, 'r') as f:
                        line = f.readlines()[0]
                except IOError as error:
                    self.information['status'] = DownloadStatus.ERROR
                    self.information['message_short'] = 'Torrentdatei konnte nicht geladen werden.'
                    yield "Torrentdatei konnte nicht heruntergeladen werden (%s)!" % error
                    return

                if "Hash wrong" in line:
                    os.remove(torrent_filename)
                    self.information['status'] = DownloadStatus.ERROR
                    self.information['message_short'] = 'OTR-Daten nicht korrekt!'
                    yield 'OTR-Daten nicht korrekt!'
                    return

            self.information['output'] = self._config.get('general', 'folder_new_otrkeys')
            command = self._config.get('downloader', 'aria2c_torrent') + ["-d", self.information['output'], "-T", torrent_filename]
            yield "Ausgeführt wird:\n%s\n" % " ".join(command)

            try:
                self.__process = subprocess.Popen(command, stdout=subprocess.PIPE)
            except OSError as error:
                self.information['status'] = DownloadStatus.ERROR
                self.information['message_short'] = 'Aria2c ist nicht installiert.'
                yield "Ist aria2c installiert? Der Befehl konnte nicht ausgeführt werden:\nFehlermeldung: %s" % error
                return

            while self.__process.poll() is None:
                line = self.__process.stdout.readline().strip()
                if "Checksum" in line:
                    # integrity check of an already (partially) downloaded file
                    result = re.findall('Checksum:.*\((.*%)\)', line)
                    if result:
                        self.information['message_short'] = 'Überprüfen...%s' % result[0]
                elif "SEEDING" in line:
                    self.information['message_short'] = 'Seeden...'
                    self.information['status'] = DownloadStatus.SEEDING # _NOT_ DownloadStatus.FINISHED
                    self.information['progress'] = 100
                    self.information['est'] = ''
                    self.information['speed'] = ''
                    self.information['seeders'] = None
                    result = re.findall('ratio:(.*)\) ', line)
                    if result:
                        self.information['ratio'] = result[0]
                    result = re.findall('UP:(.*)\((.*)\)', line)
                    if result:
                        self.information['upspeed'] = result[0][0]
                        self.information['uploaded'] = result[0][1]
                elif "%" in line:
                    self.information['message_short'] = ''
                    # get size
                    if not self.information['size']:
                        try:
                            # aria2c gives size always in MiB (hopefully)
                            size = re.findall('SIZE:.*/(.*)MiB\(', line)[0]
                            size = size.replace(',', '')
                            size = int(round(float(size))) * 1024 * 1024
                            self.information['size'] = size
                            yield line
                        except:
                            pass
                    # get progress
                    result = re.findall('([0-9]{1,3})%', line)
                    if result:
                        self.information['progress'] = int(result[0])
                    # get speed, est
                    if "UP" in line:
                        result = re.findall('SPD:(.*) UP:(.*)\((.*)\) ETA:(.*)]', line)
                        if result:
                            self.information['speed'] = result[0][0]
                            self.information['upspeed'] = result[0][1]
                            self.information['uploaded'] = result[0][2]
                            self.information['est'] = result[0][3]
                    else:
                        result = re.findall('SPD:(.*) .*ETA:(.*)]', line)
                        if result:
                            self.information['speed'] = result[0][0]
                            self.information['est'] = result[0][1]
                    # get seeder info
                    result = re.findall('SEED:([0-9]*) ', line)
                    if result:
                        self.information['seeders'] = result[0]
                else:
                    yield line

                self.update_view()

            ### Process is terminated
            stdout = self.__process.stdout.read().strip()
            yield stdout

            # A torrent download only stops:
            #   a) when the user clicks 'stop'
            #   b) when an error occured
            if self.information['status'] != DownloadStatus.STOPPED:
                self.information['status'] = DownloadStatus.ERROR

        elif self.information['download_type'] == DownloadTypes.BASIC:
            self.information['output'] = self._config.get('general', 'folder_new_otrkeys')

            if self.information['preferred_downloader'] == 'wget':
                command = self._config.get('downloader', 'wget') + ["-c", "-P", self.information['output'], self.link]
                yield "Ausgeführt wird:\n%s\n" % " ".join(command)

                try:
                    self.__process = subprocess.Popen(command, stderr=subprocess.PIPE)
                except OSError as error:
                    self.information['status'] = DownloadStatus.ERROR
                    self.information['message_short'] = 'Wget ist nicht installiert.'
                    yield "Ist Wget installiert? Der Befehl konnte nicht ausgeführt werden:\n%s" % error
                    return

                while True:
                    exit_code = self.__process.poll()
                    if exit_code is not None:
                        if self.information['status'] != DownloadStatus.STOPPED:
                            if exit_code == 0:
                                self._finished()
                            else:
                                self.information['status'] = DownloadStatus.ERROR
                        break

                    line = self.__process.stderr.readline().strip()
                    if line:
                        if not self.information['size']:
                            # wget prints the total size in bytes once
                            result = re.findall(': ([0-9]*) \(', line)
                            if result:
                                self.information['size'] = int(result[0])
                        if "%" in line:
                            result = re.findall('([0-9]{1,3})% (.*)[ =](.*)', line)
                            if result:
                                progress = int(result[0][0])
                                if self.information['progress'] == progress:
                                    # avoid needless view updates for unchanged progress
                                    continue
                                else:
                                    self.information['progress'] = progress
                                    self.information['speed'] = result[0][1]
                                    if progress == 100:
                                        self._finished()
                                    else:
                                        self.information['est'] = result[0][2]
                        else:
                            yield line
                        self.update_view()

                ### Process is terminated
                yield self.__process.stderr.read().strip()
            else:
                command = self._config.get('downloader', 'aria2c') + ["-d", self.information['output'], self.link]
                yield "Ausgeführt wird:\n%s\n" % " ".join(command)

                try:
                    self.__process = subprocess.Popen(command, stdout=subprocess.PIPE)
                except OSError as error:
                    self.information['status'] = DownloadStatus.ERROR
                    self.information['message_short'] = 'Aria2c ist nicht installiert.'
                    yield "Ist aria2c installiert? Der Befehl konnte nicht ausgeführt werden:\n%s" % error
                    return

                while self.__process.poll() is None:
                    line = self.__process.stdout.readline().strip()
                    if "%" in line:
                        if "FileAlloc" in line:
                            result = re.findall('FileAlloc:.*\(([0-9]{1,3}%)', line)
                            self.information['message_short'] = 'Datei wird angelegt...%s' % result[0]
                        else:
                            self.information['message_short'] = ''
                        if not self.information['size']:
                            try:
                                # aria2c gives size always in MiB (hopefully)
                                size = re.findall('.*FileAlloc.*/(.*)\(', line)[0]
                                size = size.strip('MiB')
                                size = size.replace(',', '')
                                size = int(round(float(size))) * 1024 * 1024
                                self.information['size'] = size
                                self.update_view()
                                yield line
                            except:
                                pass
                        result = re.findall('\(([0-9]{1,3})%\).*SPD:(.*) ETA:(.*)]', line)
                        if result:
                            self.information['progress'] = int(result[0][0])
                            self.information['speed'] = result[0][1]
                            self.information['est'] = result[0][2]
                        self.update_view()
                    else:
                        yield line

                ### Process is terminated
                stdout = self.__process.stdout.read().strip()
                yield stdout

                if not self.information['status'] in [DownloadStatus.STOPPED, DownloadStatus.ERROR]:
                    # aria2c's exit status is not reliable here; check the log.
                    time.sleep(1) # wait for log being updated - very ugly
                    if 'download completed' in self.log:
                        self._finished()
                    else:
                        self.information['status'] = DownloadStatus.ERROR

        elif self.information['download_type'] in [DownloadTypes.OTR_DECODE, DownloadTypes.OTR_CUT]:
            decoder = self._config.get('general', 'decoder')
            email = self._config.get('general', 'email')
            password = base64.b64decode(self._config.get('general', 'password'))
            cache_dir = self._config.get('general', 'folder_trash_otrkeys')

            command = [decoder, "-b", "0", "-n", "-i", self.link, "-e", email, "-p", password, "-c", cache_dir]

            if self.information['download_type'] == DownloadTypes.OTR_CUT:
                self.information['output'] = self._config.get('general', 'folder_cut_avis')
                if not self.information['cutlist']:
                    cutlist = Cutlist()
                    cutlist.id = self.information['cutlist_id']
                    error = cutlist.download(self._config.get('general', 'server'), os.path.join(self.information['output'], self.filename))
                    if error:
                        self.information['status'] = DownloadStatus.ERROR
                        self.information['message_short'] = 'Cutlist konnte nicht geladen werden.'
                        yield error
                        return
                    self.information['cutlist'] = cutlist
                command += ["-o", self.information['output'], "-C", self.information['cutlist'].local_filename]
            else:
                self.information['output'] = self._config.get('general', 'folder_uncut_avis')
                command += ["-o", self.information['output']]

            # write command to log, but strip out email and password
            log = list(command)
            log[log.index('-p') + 1] = '*******'
            log[log.index('-e') + 1] = '*******'
            yield "Ausgeführt wird:\n%s\n" % " ".join(log)

            try:
                self.__process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            except OSError as error:
                self.information['status'] = DownloadStatus.ERROR
                self.information['message_short'] = 'Dekoder nicht gefunden.'
                yield "Der Pfad zum Dekoder scheint nicht korrekt zu sein. Der folgende Befehl konnte nicht ausgeführt werden\nFehlermeldung: %s" % error
                return

            # The decoder prints progress with '\r'; read char-wise and
            # assemble lines ourselves.
            line = ''
            while self.__process.poll() is None:
                char = self.__process.stdout.read(1)
                if char == '\r' or char == '\n':
                    line = line.strip()
                    if not line:
                        continue
                    if not "%" in line:
                        yield line
                    result = re.findall("([0-9]{1,3})%", line)
                    if result:
                        self.information['progress'] = int(result[0])
                    result = re.findall("[0-9]{1,3}%.*: (.*)", line)
                    if result:
                        self.information['speed'] = result[0]
                    self.update_view()
                    line = ''
                else:
                    line += char

            ### Process is terminated
            stderr = self.__process.stderr.read().strip()
            if stderr:
                self.information['status'] = DownloadStatus.ERROR
                if "invalid option" in stderr:
                    self.information['message_short'] = 'Der Dekoder ist veraltet.'
                    yield "Es ist ein veralteter Dekoder angegeben!\n"
                # FIX: was 'elif "maximale Anzahl":' - a bare non-empty string
                # literal is always truthy, so this branch swallowed every
                # other decoder error. The substring test was clearly intended.
                elif "maximale Anzahl" in stderr:
                    self.information['message_short'] = 'Maximale Anzahl der Dekodierungen erreicht.'
                    yield unicode(stderr, 'iso-8859-1')
                else:
                    self.information['message_short'] = stderr
                    yield stderr

            if not self.information['status'] in [DownloadStatus.ERROR, DownloadStatus.STOPPED]:
                self._finished()

                # remove otrkey and .segments file
                otrkey = os.path.join(cache_dir, self.filename)
                fileoperations.remove_file(otrkey, None)
                fileoperations.remove_file(os.path.join(cache_dir, self.filename + '.segments'), None)

                if self.information['download_type'] == DownloadTypes.OTR_CUT:
                    # rename file to "cut" filename
                    filename = os.path.join(self.information['output'], self.filename.rstrip(".otrkey"))
                    new_filename, extension = os.path.splitext(filename)
                    new_filename += ".cut" + extension
                    fileoperations.rename_file(filename, new_filename, None)

                    conclusion = FileConclusion(Action.DECODEANDCUT, otrkey=otrkey, uncut_video=filename)
                    conclusion.decode.status = Status.OK
                    conclusion.cut_video = new_filename
                    conclusion.cut.cutlist = self.information['cutlist']
                    conclusion.cut.cutlist.read_from_file()
                    conclusion.cut.status = Status.OK
                    conclusion.cut.cut_action = Cut_action.CHOOSE_CUTLIST
                    if self._config.get('general', 'rename_cut'):
                        conclusion.cut.rename = self._app.rename_by_schema(self.filename.rstrip(".otrkey"))
                    else:
                        conclusion.cut.rename = os.path.basename(new_filename)

                    self._app.conclusions_manager.add_conclusions(conclusion)

        self.update_view()

    def start(self, force=False):
        """Start (or restart with force=True) the download in a GeneratorTask."""
        def loop(*args):
            # called for every value yielded by _download()
            self.log += "%s\n" % args[0]

        if force or not self.information['status'] in [DownloadStatus.RUNNING, DownloadStatus.SEEDING]:
            self.__task = GeneratorTask(self._download, loop)
            self.__task.start()

    def stop(self):
        """Stop a running/seeding download and kill its subprocess."""
        if self.information['status'] in [DownloadStatus.RUNNING, DownloadStatus.SEEDING]:
            self.information['status'] = DownloadStatus.STOPPED
            self.information['message_short'] = ""
            self.information['est'] = ""
            self.information['speed'] = ""
            self.update_view()
            if self.__process:
                try:
                    self.__process.kill()
                except OSError:
                    # process already exited; nothing to do
                    pass
| xsteadfastx/otr-verwaltung | otrverwaltung/downloader.py | Python | gpl-3.0 | 22,876 |
#!/usr/bin/env python3
# SPDX-License-Identifier: LGPL-2.1+
import sys
import collections
import re
from xml_helper import xml_parse, xml_print, tree
from copy import deepcopy
TEMPLATE = '''\
<refentry id="systemd.directives" conditional="HAVE_PYTHON">
<refentryinfo>
<title>systemd.directives</title>
<productname>systemd</productname>
</refentryinfo>
<refmeta>
<refentrytitle>systemd.directives</refentrytitle>
<manvolnum>7</manvolnum>
</refmeta>
<refnamediv>
<refname>systemd.directives</refname>
<refpurpose>Index of configuration directives</refpurpose>
</refnamediv>
<refsect1>
<title>Unit directives</title>
<para>Directives for configuring units, used in unit
files.</para>
<variablelist id='unit-directives' />
</refsect1>
<refsect1>
<title>Options on the kernel command line</title>
<para>Kernel boot options for configuring the behaviour of the
systemd process.</para>
<variablelist id='kernel-commandline-options' />
</refsect1>
<refsect1>
<title>Environment variables</title>
<para>Environment variables understood by the systemd manager
and other programs and environment variable-compatible settings.</para>
<variablelist id='environment-variables' />
</refsect1>
<refsect1>
<title>EFI variables</title>
<para>EFI variables understood by
<citerefentry><refentrytitle>systemd-boot</refentrytitle><manvolnum>7</manvolnum></citerefentry>
and other programs.</para>
<variablelist id='efi-variables' />
</refsect1>
<refsect1>
<title>UDEV directives</title>
<para>Directives for configuring systemd units through the
udev database.</para>
<variablelist id='udev-directives' />
</refsect1>
<refsect1>
<title>Network directives</title>
<para>Directives for configuring network links through the
net-setup-link udev builtin and networks through
systemd-networkd.</para>
<variablelist id='network-directives' />
</refsect1>
<refsect1>
<title>Journal fields</title>
<para>Fields in the journal events with a well known meaning.</para>
<variablelist id='journal-directives' />
</refsect1>
<refsect1>
<title>PAM configuration directives</title>
<para>Directives for configuring PAM behaviour.</para>
<variablelist id='pam-directives' />
</refsect1>
<refsect1>
<title><filename>/etc/crypttab</filename> and
<filename>/etc/fstab</filename> options</title>
<para>Options which influence mounted filesystems and
encrypted volumes.</para>
<variablelist id='fstab-options' />
</refsect1>
<refsect1>
<title><citerefentry><refentrytitle>systemd.nspawn</refentrytitle><manvolnum>5</manvolnum></citerefentry>
directives</title>
<para>Directives for configuring systemd-nspawn containers.</para>
<variablelist id='nspawn-directives' />
</refsect1>
<refsect1>
<title>Program configuration options</title>
<para>Directives for configuring the behaviour of the
systemd process and other tools through configuration files.</para>
<variablelist id='config-directives' />
</refsect1>
<refsect1>
<title>Command line options</title>
<para>Command-line options accepted by programs in the
systemd suite.</para>
<variablelist id='options' />
</refsect1>
<refsect1>
<title>Constants</title>
<para>Various constant used and/or defined by systemd.</para>
<variablelist id='constants' />
</refsect1>
<refsect1>
<title>Miscellaneous options and directives</title>
<para>Other configuration elements which don't fit in
any of the above groups.</para>
<variablelist id='miscellaneous' />
</refsect1>
<refsect1>
<title>Files and directories</title>
<para>Paths and file names referred to in the
documentation.</para>
<variablelist id='filenames' />
</refsect1>
<refsect1>
<title>Colophon</title>
<para id='colophon' />
</refsect1>
</refentry>
'''
COLOPHON = '''\
This index contains {count} entries in {sections} sections,
referring to {pages} individual manual pages.
'''
def _extract_directives(directive_groups, formatting, page):
    """Collect directive/file/constant names from one man-page XML file.

    Appends a (pagename, manvolnum) pair to the matching list in
    directive_groups for every name found, and records a display element
    (an ElementTree node, possibly mutated in place) in formatting for
    each name seen for the first time.
    """
    t = xml_parse(page)
    section = t.find('./refmeta/manvolnum').text
    pagename = t.find('./refmeta/refentrytitle').text

    storopt = directive_groups['options']
    for variablelist in t.iterfind('.//variablelist'):
        klass = variablelist.attrib.get('class')
        storvar = directive_groups[klass or 'miscellaneous']
        # <option>s go in OPTIONS, unless class is specified
        for xpath, stor in (('./varlistentry/term/varname', storvar),
                            ('./varlistentry/term/option',
                             storvar if klass else storopt)):
            for name in variablelist.iterfind(xpath):
                # drop everything after the first '=' or space (the argument part)
                text = re.sub(r'([= ]).*', r'\1', name.text).rstrip()
                if text.startswith('-'):
                    # for options, merge options with and without mandatory arg
                    text = text.partition('=')[0]
                stor[text].append((pagename, section))
                if text not in formatting:
                    # use element as formatted display
                    if name.text[-1] in "= '":
                        name.clear()
                    else:
                        name.tail = ''
                    name.text = text
                    formatting[text] = name

    storfile = directive_groups['filenames']
    for xpath, absolute_only in (('.//refsynopsisdiv//filename', False),
                                 ('.//refsynopsisdiv//command', False),
                                 ('.//filename', True)):
        for name in t.iterfind(xpath):
            if absolute_only and not (name.text and name.text.startswith('/')):
                continue
            if name.attrib.get('index') == 'false':
                continue
            name.tail = ''
            if name.text:
                if name.text.endswith('*'):
                    # strip glob marker
                    name.text = name.text[:-1]
                if not name.text.startswith('.'):
                    # keep only the first whitespace-separated token
                    text = name.text.partition(' ')[0]
                    if text != name.text:
                        name.clear()
                        name.text = text
                    if text.endswith('/'):
                        text = text[:-1]
                    storfile[text].append((pagename, section))
                    if text not in formatting:
                        # use element as formatted display
                        formatting[text] = name
            else:
                # element has markup children but no direct text
                text = ' '.join(name.itertext())
                storfile[text].append((pagename, section))
                formatting[text] = name

    storfile = directive_groups['constants']
    for name in t.iterfind('.//constant'):
        if name.attrib.get('index') == 'false':
            continue
        name.tail = ''
        if name.text.startswith('('): # a cast, strip it
            name.text = name.text.partition(' ')[2]
        storfile[name.text].append((pagename, section))
        formatting[name.text] = name
def _make_section(template, name, directives, formatting):
    """Fill the <variablelist id=name> in template with sorted entries.

    Each directive becomes a <varlistentry> whose term is the recorded
    display element and whose listitem links every referencing man page
    as a comma-separated run of <citerefentry> elements.
    """
    varlist = template.find(".//*[@id='{}']".format(name))
    for varname, manpages in sorted(directives.items()):
        entry = tree.SubElement(varlist, 'varlistentry')
        term = tree.SubElement(entry, 'term')
        term.append(deepcopy(formatting[varname]))

        para = tree.SubElement(tree.SubElement(entry, 'listitem'), 'para')

        previous_ref = None
        for manpage, manvolume in sorted(set(manpages)):
            if previous_ref is not None:
                # separate consecutive references with ", "
                previous_ref.tail = ', '
            previous_ref = tree.SubElement(para, 'citerefentry')
            title = tree.SubElement(previous_ref, 'refentrytitle')
            title.text = manpage
            title.attrib['target'] = varname
            volume = tree.SubElement(previous_ref, 'manvolnum')
            volume.text = manvolume
        entry.tail = '\n\n'
def _make_colophon(template, groups):
    """Write entry/section/page statistics into the colophon paragraph."""
    total_entries = 0
    referenced_pages = set()
    for group in groups:
        total_entries += len(group)
        for pagelist in group.values():
            referenced_pages.update(pagelist)

    para = template.find(".//para[@id='colophon']")
    para.text = COLOPHON.format(count=total_entries,
                                sections=len(groups),
                                pages=len(referenced_pages))
def _make_page(template, directive_groups, formatting):
    """Create an XML tree from directive_groups.

    directive_groups = {
       'class': {'variable': [('manpage', 'manvolume'), ...],
                 'variable2': ...},
       ...
    }

    Fills every section's variablelist, then the colophon, and returns
    the (mutated) template tree.
    """
    for name, directives in directive_groups.items():
        _make_section(template, name, directives, formatting)

    _make_colophon(template, directive_groups.values())

    return template
def make_page(*xml_files):
    """Extract directives from xml_files and return XML index tree."""
    template = tree.fromstring(TEMPLATE)
    names = [vl.get('id') for vl in template.iterfind('.//variablelist')]
    directive_groups = {name: collections.defaultdict(list)
                        for name in names}
    formatting = {}
    for page in xml_files:
        try:
            _extract_directives(directive_groups, formatting, page)
        except Exception as e:
            # FIX: chain the original exception so the real parse error is
            # not lost behind the generic "failed to process" message.
            raise ValueError("failed to process " + page) from e

    return _make_page(template, directive_groups, formatting)
if __name__ == '__main__':
    # Usage: make-directive-index.py OUTPUT INPUT.xml [INPUT.xml ...]
    # Writes the generated index page (serialized bytes) to OUTPUT.
    with open(sys.argv[1], 'wb') as f:
        f.write(xml_print(make_page(*sys.argv[2:])))
| splantefeve/systemd | tools/make-directive-index.py | Python | gpl-2.0 | 10,612 |
# -*- coding: utf-8 -*-
#
# Advanced Emulator Launcher miscellaneous set of objects
#
# 1. Always use new style classes in Python 2 to ease the transition to Python 3.
# All classes in Python 2 must have object as base class.
# See https://stackoverflow.com/questions/4015417/python-class-inherits-object
# Copyright (c) 2016-2018 Wintermute0110 <wintermute0110@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# --- Python standard library ---
from __future__ import unicode_literals
from __future__ import division
import abc
import collections
import shlex
import subprocess
import webbrowser
import re
from os.path import expanduser
import uuid
import random
import binascii
# --- AEL packages ---
from resources.net_IO import *
from resources.disk_IO import *
from resources.platforms import *
from resources.report import *
from resources.utils import FileName
from resources.constants import *
# #################################################################################################
# #################################################################################################
# Assets/Artwork
# #################################################################################################
# #################################################################################################
# Maps an asset ID to the addon-settings key that enables/selects its scraper.
# An empty string means there is no scraper setting for that asset type.
ASSET_SETTING_KEYS = {
    ASSET_ICON_ID       : '',
    ASSET_FANART_ID     : 'scraper_fanart',
    ASSET_BANNER_ID     : 'scraper_banner',
    ASSET_POSTER_ID     : '',
    ASSET_CLEARLOGO_ID  : 'scraper_clearlogo',
    ASSET_CONTROLLER_ID : '',
    ASSET_TRAILER_ID    : '',
    ASSET_TITLE_ID      : 'scraper_title',
    ASSET_SNAP_ID       : 'scraper_snap',
    ASSET_BOXFRONT_ID   : 'scraper_boxfront',
    ASSET_BOXBACK_ID    : 'scraper_boxback',
    ASSET_CARTRIDGE_ID  : 'scraper_cart',
    ASSET_FLYER_ID      : '',
    ASSET_MAP_ID        : '',
    ASSET_MANUAL_ID     : ''
}

# Same mapping for MAME launchers, which use MAME-specific scraper settings
# (cabinet/cpanel/pcb/marquee instead of boxfront/boxback/cartridge/banner).
MAME_ASSET_SETTING_KEYS = {
    ASSET_ICON_ID       : '',
    ASSET_FANART_ID     : 'scraper_fanart_MAME',
    ASSET_BANNER_ID     : 'scraper_marquee_MAME',
    ASSET_POSTER_ID     : '',
    ASSET_CLEARLOGO_ID  : 'scraper_clearlogo_MAME',
    ASSET_CONTROLLER_ID : '',
    ASSET_TRAILER_ID    : '',
    ASSET_TITLE_ID      : 'scraper_title_MAME',
    ASSET_SNAP_ID       : 'scraper_snap_MAME',
    ASSET_BOXFRONT_ID   : 'scraper_cabinet_MAME',
    ASSET_BOXBACK_ID    : 'scraper_cpanel_MAME',
    ASSET_CARTRIDGE_ID  : 'scraper_pcb_MAME',
    ASSET_FLYER_ID      : 'scraper_flyer_MAME',
    ASSET_MAP_ID        : '',
    ASSET_MANUAL_ID     : ''
}
# -------------------------------------------------------------------------------------------------
# Gets all required information about an asset: path, name, etc.
# Returns an object with all the information
# -------------------------------------------------------------------------------------------------
class AssetInfo(object):
    """Value object describing one asset (artwork/media) type.

    Instances are created and populated by AssetInfoFactory; the class-level
    values below are only defaults. Equality and hashing are based solely on
    the numeric asset id.

    FIX: derive from object - the file header mandates new-style classes in
    Python 2. Also add __ne__, because Python 2 does not derive it from
    __eq__ (without it, 'a != b' falls back to identity comparison).
    """
    id = 0
    key = ''
    default_key = ''
    rom_default_key = ''
    name = ''
    # The original 'description = name' just bound '' (class-body evaluation);
    # make that explicit. get_description() falls back to name when empty.
    description = ''
    plural = ''
    fname_infix = '' # Used only when searching assets when importing XML
    kind_str = ''
    # NOTE(review): class-level mutable lists are shared between instances;
    # the factory always assigns fresh lists, but do not mutate these in place.
    exts = []
    exts_dialog = []
    path_key = ''

    def get_description(self):
        """Return description, falling back to name when unset."""
        if self.description == '': return self.name
        return self.description

    def __eq__(self, other):
        return isinstance(other, AssetInfo) and self.id == other.id

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return self.id.__hash__()

    def __str__(self):
        return self.name
#
# Class to interact with the asset engine.
# This class uses the asset_infos, dictionary of AssetInfo indexed by asset_ID
#
class AssetInfoFactory(object):
    def __init__(self):
        """Build and cache every AssetInfo object (see _load_asset_data)."""
        # default collections
        self.ASSET_INFO_ID_DICT = {}   # ID -> object
        self.ASSET_INFO_KEY_DICT = {}  # Key (e.g. 's_icon') -> object
        self._load_asset_data()
# -------------------------------------------------------------------------------------------------
# Asset functions
# -------------------------------------------------------------------------------------------------
def get_all(self):
return list(self.ASSET_INFO_ID_DICT.values())
def get_asset_info(self, asset_ID):
asset_info = self.ASSET_INFO_ID_DICT.get(asset_ID, None)
if asset_info is None:
log_error('get_asset_info() Wrong asset_ID = {0}'.format(asset_ID))
return AssetInfo()
return asset_info
# Returns the corresponding assetinfo object for the
# given key (eg: 's_icon')
def get_asset_info_by_key(self, asset_key):
asset_info = self.ASSET_INFO_KEY_DICT.get(asset_key, None)
if asset_info is None:
log_error('get_asset_info_by_key() Wrong asset_key = {0}'.format(asset_key))
return AssetInfo()
return asset_info
def get_asset_kinds_for_roms(self):
rom_asset_kinds = []
for rom_asset_id in ROM_ASSET_ID_LIST:
rom_asset_kinds.append(self.ASSET_INFO_ID_DICT[rom_asset_id])
return rom_asset_kinds
# IDs is a list (or an iterable that returns an asset ID
# Returns a list of AssetInfo objects.
# If the asset kind is given, it will filter out assets not corresponding to that kind.
def get_asset_list_by_IDs(self, IDs, kind = None):
asset_info_list = []
for asset_ID in IDs:
asset_info = self.ASSET_INFO_ID_DICT.get(asset_ID, None)
if asset_info is None:
log_error('get_asset_list_by_IDs() Wrong asset_ID = {0}'.format(asset_ID))
continue
if kind is None or asset_info.kind_str == kind: asset_info_list.append(asset_info)
return asset_info_list
# todo: use 1 type of identifier not number constants and name strings ('s_icon')
def get_asset_info_by_namekey(self, name_key):
if name_key == '': return None
kind = ASSET_KEYS_TO_CONSTANTS[name_key]
return self.get_asset_info(kind)
#
# Get extensions to search for files
# Input : ['png', 'jpg']
# Output: ['png', 'jpg', 'PNG', 'JPG']
#
def asset_get_filesearch_extension_list(self, exts):
ext_list = list(exts)
for ext in exts:
ext_list.append(ext.upper())
return ext_list
#
# Gets extensions to be used in Kodi file dialog.
# Input : ['png', 'jpg']
# Output: '.png|.jpg'
#
def asset_get_dialog_extension_list(self, exts):
ext_string = ''
for ext in exts:
ext_string += '.' + ext + '|'
# >> Remove trailing '|' character
ext_string = ext_string[:-1]
return ext_string
#
# Scheme SUFIX uses suffixes for artwork. All artwork assets are stored in the same directory.
# Name example: "Sonic The Hedgehog (Europe)_a3e_title"
# First 3 characters of the objectID are added to avoid overwriting of images. For example, in the
# Favourites special category there could be ROMs with the same name for different systems.
#
# asset_ID -> Assets ID defined in constants.py
# AssetPath -> FileName object
# asset_base_noext -> Unicode string
# objectID -> Object MD5 ID fingerprint (Unicode string)
#
# Returns a FileName object
#
def assets_get_path_noext_SUFIX(self, asset_ID, AssetPath, asset_base_noext, objectID = '000'):
objectID_str = '_' + objectID[0:3]
if asset_ID == ASSET_ICON_ID: asset_path_noext_FN = AssetPath.pjoin(asset_base_noext + objectID_str + '_icon')
elif asset_ID == ASSET_FANART_ID: asset_path_noext_FN = AssetPath.pjoin(asset_base_noext + objectID_str + '_fanart')
elif asset_ID == ASSET_BANNER_ID: asset_path_noext_FN = AssetPath.pjoin(asset_base_noext + objectID_str + '_banner')
elif asset_ID == ASSET_POSTER_ID: asset_path_noext_FN = AssetPath.pjoin(asset_base_noext + objectID_str + '_poster')
elif asset_ID == ASSET_CLEARLOGO_ID: asset_path_noext_FN = AssetPath.pjoin(asset_base_noext + objectID_str + '_clearlogo')
elif asset_ID == ASSET_CONTROLLER_ID: asset_path_noext_FN = AssetPath.pjoin(asset_base_noext + objectID_str + '_controller')
elif asset_ID == ASSET_TRAILER_ID: asset_path_noext_FN = AssetPath.pjoin(asset_base_noext + objectID_str + '_trailer')
elif asset_ID == ASSET_TITLE_ID: asset_path_noext_FN = AssetPath.pjoin(asset_base_noext + objectID_str + '_title')
elif asset_ID == ASSET_SNAP_ID: asset_path_noext_FN = AssetPath.pjoin(asset_base_noext + objectID_str + '_snap')
elif asset_ID == ASSET_BOXFRONT_ID: asset_path_noext_FN = AssetPath.pjoin(asset_base_noext + objectID_str + '_boxfront')
elif asset_ID == ASSET_BOXBACK_ID: asset_path_noext_FN = AssetPath.pjoin(asset_base_noext + objectID_str + '_boxback')
elif asset_ID == ASSET_CARTRIDGE_ID: asset_path_noext_FN = AssetPath.pjoin(asset_base_noext + objectID_str + '_cartridge')
elif asset_ID == ASSET_FLYER_ID: asset_path_noext_FN = AssetPath.pjoin(asset_base_noext + objectID_str + '_flyer')
elif asset_ID == ASSET_MAP_ID: asset_path_noext_FN = AssetPath.pjoin(asset_base_noext + objectID_str + '_map')
elif asset_ID == ASSET_MANUAL_ID: asset_path_noext_FN = AssetPath.pjoin(asset_base_noext + objectID_str + '_manual')
else:
asset_path_noext_FN = FileName('')
log_error('assets_get_path_noext_SUFIX() Wrong asset_ID = {0}'.format(asset_ID))
return asset_path_noext_FN
    #
    # Search for local assets and put found files into a list.
    # This function is used in _roms_add_new_rom() where there is no need for a file cache.
    #
    # launcher               -> launcher data dict (read via get_asset_path)
    # ROMFile                -> FileName object of the ROM being added
    # enabled_ROM_asset_list -> list of booleans, parallel to ROM_ASSET_ID_LIST
    #
    # Returns a list of path strings parallel to ROM_ASSET_ID_LIST ('' when
    # the asset is disabled or not found on disk).
    #
    def assets_search_local_assets(self, launcher, ROMFile, enabled_ROM_asset_list):
        log_verb('assets_search_local_assets() Searching for ROM local assets...')
        local_asset_list = [''] * len(ROM_ASSET_ID_LIST)
        for i, asset_kind in enumerate(ROM_ASSET_ID_LIST):
            AInfo = g_assetFactory.get_asset_info(asset_kind)
            if not enabled_ROM_asset_list[i]:
                log_verb('assets_search_local_assets() Disabled {0:<9}'.format(AInfo.name))
                continue
            asset_path = launcher.get_asset_path(AInfo)
            # look for a file with the ROM's base name and any of the asset's extensions
            local_asset = misc_look_for_file(asset_path, ROMFile.getBaseNoExt(), AInfo.exts)

            if local_asset:
                local_asset_list[i] = local_asset.getPath()
                log_verb('assets_search_local_assets() Found   {0:<9} "{1}"'.format(AInfo.name, local_asset_list[i]))
            else:
                local_asset_list[i] = ''
                log_verb('assets_search_local_assets() Missing {0:<9}'.format(AInfo.name))

        return local_asset_list
    #
    # A) This function checks if all path_* share a common root directory. If so
    #    this function returns that common directory as an Unicode string.
    # B) If path_* do not share a common root directory this function returns ''.
    #
    def assets_get_ROM_asset_path(self, launcher):
        ROM_asset_path = ''
        # one flag per ROM asset; True when its dir equals the first asset's dir
        duplicated_bool_list = [False] * len(ROM_ASSET_ID_LIST)

        # compare every asset path's directory against the first asset's directory
        # (the first entry trivially compares equal to itself)
        AInfo_first = g_assetFactory.get_asset_info(ROM_ASSET_ID_LIST[0])
        path_first_asset_FN = FileName(launcher[AInfo_first.path_key])
        log_debug('assets_get_ROM_asset_path() path_first_asset "{0}"'.format(path_first_asset_FN.getPath()))
        for i, asset_kind in enumerate(ROM_ASSET_ID_LIST):
            AInfo = g_assetFactory.get_asset_info(asset_kind)
            current_path_FN = FileName(launcher[AInfo.path_key])
            if current_path_FN.getDir() == path_first_asset_FN.getDir():
                duplicated_bool_list[i] = True

        return path_first_asset_FN.getDir() if all(duplicated_bool_list) else ''
#
# Gets extensions to be used in regular expressions.
# Input : ['png', 'jpg']
# Output: '(png|jpg)'
#
@staticmethod
def asset_get_regexp_extension_list(exts):
ext_string = ''
for ext in exts:
ext_string += ext + '|'
# >> Remove trailing '|' character
ext_string = ext_string[:-1]
return '(' + ext_string + ')'
#
# This must match the order of the list Category_asset_ListItem_list in _command_edit_category()
# TODO: deprecated?
@staticmethod
def assets_choose_Category_mapped_artwork(dict_object, key, index):
if index == 0: dict_object[key] = 's_icon'
elif index == 1: dict_object[key] = 's_fanart'
elif index == 2: dict_object[key] = 's_banner'
elif index == 3: dict_object[key] = 's_poster'
elif index == 4: dict_object[key] = 's_clearlogo'
#
# This must match the order of the list Category_asset_ListItem_list in _command_edit_category()
# TODO: deprecated?
@staticmethod
def assets_get_Category_mapped_asset_idx(dict_object, key):
if dict_object[key] == 's_icon': index = 0
elif dict_object[key] == 's_fanart': index = 1
elif dict_object[key] == 's_banner': index = 2
elif dict_object[key] == 's_poster': index = 3
elif dict_object[key] == 's_clearlogo': index = 4
else: index = 0
return index
#
# This must match the order of the list Launcher_asset_ListItem_list in _command_edit_launcher()
# TODO: deprecated?
@staticmethod
def assets_choose_Launcher_mapped_artwork(dict_object, key, index):
if index == 0: dict_object[key] = 's_icon'
elif index == 1: dict_object[key] = 's_fanart'
elif index == 2: dict_object[key] = 's_banner'
elif index == 3: dict_object[key] = 's_poster'
elif index == 4: dict_object[key] = 's_clearlogo'
elif index == 5: dict_object[key] = 's_controller'
#
# This must match the order of the list Launcher_asset_ListItem_list in _command_edit_launcher()
# TODO: deprecated?
@staticmethod
def assets_get_Launcher_mapped_asset_idx(dict_object, key):
if dict_object[key] == 's_icon': index = 0
elif dict_object[key] == 's_fanart': index = 1
elif dict_object[key] == 's_banner': index = 2
elif dict_object[key] == 's_poster': index = 3
elif dict_object[key] == 's_clearlogo': index = 4
elif dict_object[key] == 's_controller': index = 5
else: index = 0
return index
# since we are using a single instance for the assetinfo factory we can automatically load
# all the asset objects into the memory
def _load_asset_data(self):
    """Build and cache one AssetInfo object per supported asset type.

    Since a single AssetInfoFactory instance is used, all AssetInfo objects are
    loaded into memory once and registered in both self.ASSET_INFO_ID_DICT
    (keyed by asset id) and self.ASSET_INFO_KEY_DICT (keyed by database key,
    e.g. 's_icon'). These lookups are used very frequently, hence the cache.

    BUG FIX: the Icon entry used to assign 'Icons' to a 'name_plural' attribute
    while every other asset uses 'plural' -- the Icon plural was never set.
    BUG FIX: the Flyer entry assigned fname_infix = 'flyer' and then immediately
    overwrote it with a stray copy-paste 'poster' assignment; 'flyer' is kept.
    """
    # Asset descriptor table:
    #   (asset id, db key, name, filename infix, kind, extension list, path key, optional attrs)
    # Optional attributes (plural / description / default_key / rom_default_key) are
    # only assigned when present so AssetInfo class defaults are preserved otherwise
    # (e.g. the 3D Box asset never sets a plural).
    asset_specs = [
        (ASSET_ICON_ID, 's_icon', 'Icon', 'icon', 'image', IMAGE_EXTENSION_LIST, 'path_icon',
         {'plural': 'Icons', 'default_key': 'default_icon', 'rom_default_key': 'roms_default_icon'}),
        (ASSET_FANART_ID, 's_fanart', 'Fanart', 'fanart', 'image', IMAGE_EXTENSION_LIST, 'path_fanart',
         {'plural': 'Fanarts', 'default_key': 'default_fanart', 'rom_default_key': 'roms_default_fanart'}),
        (ASSET_BANNER_ID, 's_banner', 'Banner', 'banner', 'image', IMAGE_EXTENSION_LIST, 'path_banner',
         {'plural': 'Banners', 'description': 'Banner / Marquee',
          'default_key': 'default_banner', 'rom_default_key': 'roms_default_banner'}),
        (ASSET_POSTER_ID, 's_poster', 'Poster', 'poster', 'image', IMAGE_EXTENSION_LIST, 'path_poster',
         {'plural': 'Posters', 'default_key': 'default_poster', 'rom_default_key': 'roms_default_poster'}),
        (ASSET_CLEARLOGO_ID, 's_clearlogo', 'Clearlogo', 'clearlogo', 'image', IMAGE_EXTENSION_LIST, 'path_clearlogo',
         {'plural': 'Clearlogos', 'default_key': 'default_clearlogo', 'rom_default_key': 'roms_default_clearlogo'}),
        (ASSET_CONTROLLER_ID, 's_controller', 'Controller', 'controller', 'image', IMAGE_EXTENSION_LIST, 'path_controller',
         {'plural': 'Controllers', 'default_key': 'default_controller'}),
        (ASSET_TRAILER_ID, 's_trailer', 'Trailer', 'trailer', 'video', TRAILER_EXTENSION_LIST, 'path_trailer',
         {'plural': 'Trailers'}),
        (ASSET_TITLE_ID, 's_title', 'Title', 'title', 'image', IMAGE_EXTENSION_LIST, 'path_title',
         {'plural': 'Titles', 'default_key': 'default_title', 'rom_default_key': 'roms_default_title'}),
        (ASSET_SNAP_ID, 's_snap', 'Snap', 'snap', 'image', IMAGE_EXTENSION_LIST, 'path_snap',
         {'plural': 'Snaps'}),
        (ASSET_BOXFRONT_ID, 's_boxfront', 'Boxfront', 'boxfront', 'image', IMAGE_EXTENSION_LIST, 'path_boxfront',
         {'plural': 'Boxfronts', 'description': 'Boxfront / Cabinet'}),
        (ASSET_BOXBACK_ID, 's_boxback', 'Boxback', 'boxback', 'image', IMAGE_EXTENSION_LIST, 'path_boxback',
         {'plural': 'Boxbacks', 'description': 'Boxback / CPanel'}),
        (ASSET_CARTRIDGE_ID, 's_cartridge', 'Cartridge', 'cartridge', 'image', IMAGE_EXTENSION_LIST, 'path_cartridge',
         {'plural': 'Cartridges', 'description': 'Cartridge / PCB'}),
        (ASSET_FLYER_ID, 's_flyer', 'Flyer', 'flyer', 'image', IMAGE_EXTENSION_LIST, 'path_flyer',
         {'plural': 'Flyers'}),
        (ASSET_MAP_ID, 's_map', 'Map', 'map', 'image', IMAGE_EXTENSION_LIST, 'path_map',
         {'plural': 'Maps'}),
        (ASSET_MANUAL_ID, 's_manual', 'Manual', 'manual', 'manual', MANUAL_EXTENSION_LIST, 'path_manual',
         {'plural': 'Manuals'}),
        (ASSET_3DBOX_ID, 's_3dbox', '3D Box', '3dbox', 'image', IMAGE_EXTENSION_LIST, 'path_3dbox',
         {}),
    ]
    for asset_id, key, name, infix, kind_str, ext_list, path_key, optional_attrs in asset_specs:
        a = AssetInfo()
        a.id = asset_id
        a.key = key
        a.name = name
        a.fname_infix = infix
        a.kind_str = kind_str
        a.exts = self.asset_get_filesearch_extension_list(ext_list)
        a.exts_dialog = self.asset_get_dialog_extension_list(ext_list)
        a.path_key = path_key
        for attr_name, attr_value in optional_attrs.items():
            setattr(a, attr_name, attr_value)
        self.ASSET_INFO_ID_DICT[a.id] = a
        self.ASSET_INFO_KEY_DICT[a.key] = a
# --- Global object to get asset info ---
# Module-level singleton: all code in this file shares this AssetInfoFactory instance.
g_assetFactory = AssetInfoFactory()
# #################################################################################################
# #################################################################################################
# Data storage objects.
# #################################################################################################
# #################################################################################################
#
# * Repository class for creating and retrieving Categories/Launchers/ROM Collection objects.
#
# * This object only retrieves database dictionaries. Actual objects are created by the
# class AELObjectFactory().
#
# * ROM objects can be created exclusively by Launcher objects using the ROMSetRepository class.
#
class ObjectRepository(object):
    """Repository for creating and retrieving Category/Launcher database dictionaries.

    This object only retrieves/stores database dictionaries; actual objects are
    created by AELObjectFactory(). ROM objects are handled by ROMSetRepository.
    """
    def __init__(self, g_PATHS, g_settings):
        self.PATHS = g_PATHS
        self.settings = g_settings
        # Categories/Launchers are needed for virtually every AEL operation, so they
        # are loaded immediately. On a first run categories.xml does not exist yet
        # and empty structures are used. The ROM Collection index is loaded lazily.
        self.header_dic = {}
        self.categories = {}
        self.launchers = {}
        self.collections = None
        if self.PATHS.CATEGORIES_FILE_PATH.exists():
            fs_load_catfile(self.PATHS.CATEGORIES_FILE_PATH,
                            self.header_dic, self.categories, self.launchers)
        else:
            log_debug('ObjectRepository::init() categories.xml does not exist. Creating empty data.')

    def num_categories(self):
        return len(self.categories)

    def num_launchers(self):
        return len(self.launchers)

    def num_launchers_in_cat(self, category_id):
        # Linear scan over all launchers. Slow; could be optimised with an index.
        return sum(1 for launcher_id in self.launchers
                   if self.launchers[launcher_id]['categoryID'] == category_id)

    #
    # Finds a Category by ID in the database.
    # Returns a Category database dictionary or None if not found.
    #
    def find_category(self, category_id):
        return self.categories.get(category_id)

    # Returns all Category dictionaries sorted alphabetically by m_name.
    def find_category_all(self):
        sorted_ids = sorted(self.categories, key = lambda c : self.categories[c]['m_name'])
        return [self.categories[category_id] for category_id in sorted_ids]

    # Returns an OrderedDict, key is category_id and value is the Category name.
    # Categories are ordered alphabetically by m_name.
    # This function is useful for select dialogs.
    def get_categories_odict(self):
        sorted_ids = sorted(self.categories, key = lambda c : self.categories[c]['m_name'])
        return collections.OrderedDict(
            (category_id, self.categories[category_id]['m_name']) for category_id in sorted_ids)

    #
    # Finds a Launcher by ID in the database.
    # Returns a Launcher database dictionary or None if not found.
    #
    def find_launcher(self, launcher_id):
        return self.launchers.get(launcher_id)

    #
    # Returns a list of launchers belonging to category_id,
    # sorted alphabetically by m_name.
    #
    def find_launchers_by_category_id(self, category_id):
        in_category = [launcher_dic for launcher_dic in self.launchers.values()
                       if launcher_dic['categoryID'] == category_id]
        return sorted(in_category, key = lambda launcher_dic : launcher_dic['m_name'])

    # Removes a Category from the database and persists the change.
    # Launchers belonging to this Category must be deleted first, otherwise they
    # become orphaned.
    def delete_category(self, category):
        del self.categories[category.get_id()]
        self.save_main_database()

    #
    # Removes a Launcher from the database and persists the change.
    # If the Launcher supports ROMs all its ROM database files are removed as well.
    #
    def delete_launcher(self, launcher):
        if launcher.supports_launching_roms(): launcher.delete_ROM_databases()
        del self.launchers[launcher.get_id()]
        self.save_main_database()

    def save_category(self, category_dic):
        self.categories[category_dic['id']] = category_dic
        self.save_main_database()

    #
    # Use this function instead of save_object() when the launcher timestamp must be controlled.
    #
    def save_launcher(self, launcher_dic, update_launcher_timestamp = True):
        if update_launcher_timestamp:
            launcher_dic['timestamp_launcher'] = time.time()
        self.launchers[launcher_dic['id']] = launcher_dic
        self.save_main_database()

    # Saves the Categories and Launchers in the categories.xml database and
    # refreshes the database timestamp.
    def save_main_database(self):
        # time.time() returns a float. Usually precision is much better than a
        # second, but not always. See https://docs.python.org/2/library/time.html#time.time
        # NOTE When updating reports timestamp of categories/launchers this must not be modified.
        self.header_dic['database_version'] = '0.10.0'
        self.header_dic['update_timestamp'] = time.time()
        fs_write_catfile(self.PATHS.CATEGORIES_FILE_PATH,
                         self.header_dic, self.categories, self.launchers)
# -------------------------------------------------------------------------------------------------
# Repository class for Collection objects.
# Arranges retrieving and storing of the Collection launchers from and into the xml data file.
# -------------------------------------------------------------------------------------------------
class CollectionRepository(object):
    """Repository for ROM Collection launchers stored in the collections XML database.

    Arranges retrieving and storing of the Collection launchers from and into the
    XML data file.

    NOTE(review): find()/find_all()/save()/delete() use self.data_context, which is
    never assigned in __init__() -- it must be injected externally before those
    methods are called. TODO confirm the intended wiring.
    """
    def __init__(self, PATHS, settings, obj_factory):
        # log_debug('CollectionRepository::__init__()')
        self.obj_factory = obj_factory

    def _parse_xml_to_dictionary(self, collection_element):
        """Converts a <Collection> XML element into a plain data dictionary."""
        __debug_xml_parser = False
        collection = { 'type': OBJ_LAUNCHER_COLLECTION }
        # Parse child tags of category
        for collection_child in collection_element:
            # By default read strings
            xml_text = collection_child.text if collection_child.text is not None else ''
            xml_text = text_unescape_XML(xml_text)
            xml_tag = collection_child.tag
            if __debug_xml_parser: log_debug('{0} --> {1}'.format(xml_tag, xml_text.encode('utf-8')))
            # Internal data is always stored as Unicode. ElementTree already outputs Unicode.
            collection[xml_tag] = xml_text
        return collection

    def find(self, collection_id):
        """Returns the Collection object with collection_id, or None if not found."""
        collection_element = self.data_context.get_node('Collection', collection_id)
        if collection_element is None:
            log_debug('Cannot find collection with id {}'.format(collection_id))
            return None
        collection_dic = self._parse_xml_to_dictionary(collection_element)
        # BUG FIX: was self.launcher_factory, an attribute that is never assigned
        # anywhere; __init__() stores the factory as self.obj_factory.
        collection = self.obj_factory.create(collection_dic)
        return collection

    def find_all(self):
        """Returns a list with all Collection objects in the database."""
        collections = []
        collection_elements = self.data_context.get_nodes('Collection')
        # BUG FIX: was len(collection_element) (singular), a NameError because the
        # loop variable does not exist yet at this point.
        log_debug('Found {0} collections'.format(len(collection_elements)))
        for collection_element in collection_elements:
            collection_dic = self._parse_xml_to_dictionary(collection_element)
            # BUG FIX: was self.launcher_factory (never assigned), see find().
            collection = self.obj_factory.create(collection_dic)
            collections.append(collection)
        return collections

    def save(self, collection, update_launcher_timestamp = True):
        """Persists one Collection; optionally refreshes its launcher timestamp."""
        if update_launcher_timestamp:
            collection.update_timestamp()
        collection_id = collection.get_id()
        collection_dic = collection.get_data_dic()
        self.data_context.save_node('Collection', collection_id, collection_dic)
        self.data_context.commit()

    def save_multiple(self, collections, update_launcher_timestamp = True):
        """Persists several Collections with a single commit at the end."""
        for collection in collections:
            if update_launcher_timestamp:
                collection.update_timestamp()
            collection_id = collection.get_id()
            collection_dic = collection.get_data_dic()
            self.data_context.save_node('Collection', collection_id, collection_dic)
        self.data_context.commit()

    def delete(self, collection):
        """Removes a Collection node from the database and commits the change."""
        collection_id = collection.get_id()
        self.data_context.remove_node('Collection', collection_id)
        self.data_context.commit()
# -------------------------------------------------------------------------------------------------
# Rom sets constants
# -------------------------------------------------------------------------------------------------
# Filename suffixes of the auxiliary JSON databases of a ROM Launcher:
# <roms_base_noext><suffix>.json (see the Standard ROM databases comment below).
ROMSET_CPARENT = '_index_CParent'
ROMSET_PCLONE = '_index_PClone'
ROMSET_PARENTS = '_parents'
ROMSET_DAT = '_DAT'
# -------------------------------------------------------------------------------------------------
# --- Repository class for ROM set objects of Standard ROM Launchers ---
# Arranges retrieving and storing of roms belonging to a particular standard ROM launcher.
#
# NOTE ROMs in a collection are stored as a list and ROMs in Favourites are stored as
#      a dictionary. Convert the Collection list into an ordered dictionary and then
#      convert the ordered dictionary back into a list before saving the collection.
# -------------------------------------------------------------------------------------------------
class ROMSetRepository(object):
    """Repository for ROM set objects of Standard ROM Launchers.

    Arranges retrieving and storing of the ROMs belonging to a particular launcher.

    NOTE ROMs in a Collection are stored as a list and ROMs in Favourites are stored
    as a dictionary; store_as_dictionary selects the on-disk layout.
    """
    def __init__(self, PATHS, settings, store_as_dictionary = True):
        self.PATHS = PATHS
        self.settings = settings
        # True  -> serialise ROMs as a dictionary {rom_id: rom_data}.
        # False -> serialise ROMs as a list (ROM Collections).
        self.store_as_dictionary = store_as_dictionary
        # Directory that contains all per-launcher ROM JSON databases.
        self.ROMs_dir = self.PATHS.ROMS_DIR

    #
    # Loads ROM databases from disk.
    # Returns a dictionary {rom_id: ROM}. Returns {} when the database file does not
    # exist and None when the JSON file is corrupt (note the inconsistency).
    #
    def load_ROMs(self, launcher, view_mode = None):
        log_debug('ROMSetRepository::load_ROMs() Starting ...')
        roms_base_noext = launcher.get_roms_base()
        if view_mode is None: view_mode = launcher.get_display_mode()
        # Flat view reads <base>.json, parent/clone views read <base>_parents.json.
        # NOTE(review): when roms_base_noext is None the ROMs directory itself is used
        # as repository_file -- presumably so the exists() check below fails; confirm.
        if roms_base_noext is None:
            repository_file = self.ROMs_dir
        elif view_mode == LAUNCHER_DMODE_FLAT:
            repository_file = self.ROMs_dir.pjoin('{}.json'.format(roms_base_noext))
        else:
            repository_file = self.ROMs_dir.pjoin('{}_parents.json'.format(roms_base_noext))
        if not repository_file.exists():
            log_warning('Launcher JSON not found "{0}"'.format(repository_file.getPath()))
            return {}
        log_info('Loading ROMs in Launcher ({0}:{1}) ...'.format(
            launcher.get_launcher_type(), launcher.get_name()))
        log_info('View mode {0}...'.format(view_mode))
        roms_data = {}
        # --- Parse using json module ---
        # >> On Github issue #8 a user had an empty JSON file for ROMs. This raises
        #    exception exceptions.ValueError and launcher cannot be deleted. Deal
        #    with this exception so at least launcher can be rescanned.
        log_verb('RomSetRepository.find_by_launcher(): Loading roms from file {0}'.format(repository_file.getPath()))
        try:
            roms_data = repository_file.readJson()
        except ValueError:
            statinfo = repository_file.stat()
            log_error('RomSetRepository.find_by_launcher(): ValueError exception in json.load() function')
            log_error('RomSetRepository.find_by_launcher(): Dir {0}'.format(repository_file.getPath()))
            log_error('RomSetRepository.find_by_launcher(): Size {0}'.format(statinfo.st_size))
            return None
        # --- Extract roms from JSON data structure and ensure version is correct ---
        # New-style databases are [control_dic, rom_data]; old-style files are the
        # ROM data directly.
        if roms_data and isinstance(roms_data, list) and 'control' in roms_data[0]:
            control_str = roms_data[0]['control']  # NOTE currently unused
            version_int = roms_data[0]['version']  # NOTE currently unused
            roms_data = roms_data[1]
        # Always return a dictionary {rom_id: ROM}, regardless of the on-disk layout.
        roms = {}
        if isinstance(roms_data, list):
            for rom_data in roms_data:
                r = ROM(rom_data, launcher)
                key = r.get_id()
                roms[key] = r
        else:
            for key in roms_data:
                r = ROM(roms_data[key], launcher)
                roms[key] = r
        return roms

    # Loads one of the auxiliary index JSON databases of a launcher.
    # type is one of the ROMSET_* filename suffixes (shadows the builtin; kept for
    # interface compatibility). Returns the parsed JSON data, or None when the file
    # is missing or corrupt.
    def find_index_file_by_launcher(self, launcher, type):
        roms_base_noext = launcher.get_roms_base()
        repository_file = self.ROMs_dir.pjoin('{0}{1}.json'.format(roms_base_noext, type))
        if not repository_file.exists():
            log_warning('RomSetRepository.find_index_file_by_launcher(): File not found {0}'.format(repository_file.getPath()))
            return None
        log_verb('RomSetRepository.find_index_file_by_launcher(): Loading rom index from file {0}'.format(repository_file.getPath()))
        try:
            index_data = repository_file.readJson()
        except ValueError:
            statinfo = repository_file.stat()
            log_error('RomSetRepository.find_index_file_by_launcher(): ValueError exception in json.load() function')
            log_error('RomSetRepository.find_index_file_by_launcher(): Dir {0}'.format(repository_file.getPath()))
            log_error('RomSetRepository.find_index_file_by_launcher(): Size {0}'.format(statinfo.st_size))
            return None
        return index_data

    # Serialises the ROM set of a launcher to its JSON database.
    # roms is a dictionary {rom_id: ROM}.
    def save_rom_set(self, launcher, roms, view_mode = None):
        romdata = None
        # Serialise either as {rom_id: rom_data} or as a plain list of rom_data.
        if self.store_as_dictionary:
            romdata = {key: roms[key].get_data_dic() for (key) in roms}
        else:
            romdata = [roms[key].get_data_dic() for (key) in roms]
        # --- Create JSON data structure, including version number ---
        control_dic = {
            'control' : 'Advanced Emulator {} ROMs'.format(launcher.get_launcher_type()),
            'version' : AEL_STORAGE_FORMAT
        }
        raw_data = []
        raw_data.append(control_dic)
        raw_data.append(romdata)
        # >> Get file names (same selection logic as load_ROMs()).
        roms_base_noext = launcher.get_roms_base()
        if view_mode is None:
            view_mode = launcher.get_display_mode()
        if roms_base_noext is None:
            repository_file = self.ROMs_dir
        elif view_mode == LAUNCHER_DMODE_FLAT:
            repository_file = self.ROMs_dir.pjoin('{}.json'.format(roms_base_noext))
        else:
            repository_file = self.ROMs_dir.pjoin('{}_parents.json'.format(roms_base_noext))
        log_verb('RomSetRepository.save_rom_set() Dir {0}'.format(self.ROMs_dir.getPath()))
        log_verb('RomSetRepository.save_rom_set() JSON {0}'.format(repository_file.getPath()))
        # >> Write ROMs JSON dictionary.
        # >> Do note that there is a bug in the json module where the ensure_ascii=False flag can produce
        # >> a mix of unicode and str objects.
        # >> See http://stackoverflow.com/questions/18337407/saving-utf-8-texts-in-json-dumps-as-utf8-not-as-u-escape-sequence
        # Write failures are reported to the user via a Kodi notification, not raised.
        try:
            repository_file.writeJson(raw_data)
        except OSError:
            kodi_notify_warn('(OSError) Cannot write {0} file'.format(repository_file.getPath()))
            log_error('RomSetRepository.save_rom_set() (OSError) Cannot write {0} file'.format(repository_file.getPath()))
        except IOError:
            kodi_notify_warn('(IOError) Cannot write {0} file'.format(repository_file.getPath()))
            log_error('RomSetRepository.save_rom_set() (IOError) Cannot write {0} file'.format(repository_file.getPath()))

    # -------------------------------------------------------------------------------------------------
    # Standard ROM databases
    # -------------------------------------------------------------------------------------------------
    #
    # <roms_base_noext>.json
    # <roms_base_noext>.xml
    # <roms_base_noext>_index_CParent.json
    # <roms_base_noext>_index_PClone.json
    # <roms_base_noext>_parents.json
    # <roms_base_noext>_DAT.json
    #
    # Deletes every database file of the launcher listed above (missing files are
    # silently skipped).
    def delete_all_by_launcher(self, launcher):
        roms_base_noext = launcher.get_roms_base()
        # >> Delete ROMs JSON file
        roms_json_FN = self.ROMs_dir.pjoin(roms_base_noext + '.json')
        if roms_json_FN.exists():
            log_info('Deleting ROMs JSON "{0}"'.format(roms_json_FN.getPath()))
            roms_json_FN.unlink()
        # >> Delete ROMs info XML file
        roms_xml_FN = self.ROMs_dir.pjoin(roms_base_noext + '.xml')
        if roms_xml_FN.exists():
            log_info('Deleting ROMs XML "{0}"'.format(roms_xml_FN.getPath()))
            roms_xml_FN.unlink()
        # >> Delete No-Intro/Redump stuff if exist
        roms_index_CParent_FN = self.ROMs_dir.pjoin(roms_base_noext + '_index_CParent.json')
        if roms_index_CParent_FN.exists():
            log_info('Deleting CParent JSON "{0}"'.format(roms_index_CParent_FN.getPath()))
            roms_index_CParent_FN.unlink()
        roms_index_PClone_FN = self.ROMs_dir.pjoin(roms_base_noext + '_index_PClone.json')
        if roms_index_PClone_FN.exists():
            log_info('Deleting PClone JSON "{0}"'.format(roms_index_PClone_FN.getPath()))
            roms_index_PClone_FN.unlink()
        roms_parents_FN = self.ROMs_dir.pjoin(roms_base_noext + '_parents.json')
        if roms_parents_FN.exists():
            log_info('Deleting parents JSON "{0}"'.format(roms_parents_FN.getPath()))
            roms_parents_FN.unlink()
        roms_DAT_FN = self.ROMs_dir.pjoin(roms_base_noext + '_DAT.json')
        if roms_DAT_FN.exists():
            log_info('Deleting DAT JSON "{0}"'.format(roms_DAT_FN.getPath()))
            roms_DAT_FN.unlink()
        return

    # Deletes a single database file of the launcher. kind is one of the ROMSET_*
    # filename suffixes (or '' for the main database).
    def delete_by_launcher(self, launcher, kind):
        roms_base_noext = launcher.get_roms_base()
        rom_set_file_name = roms_base_noext + kind
        rom_set_path = self.ROMs_dir.pjoin(rom_set_file_name + '.json')
        if rom_set_path.exists():
            log_info('delete_by_launcher() Deleting {0}'.format(rom_set_path.getPath()))
            rom_set_path.unlink()
        return
# -------------------------------------------------------------------------------------------------
# Strategy class for updating the ROM play statistics.
# Updates the amount of times a ROM is played and which rom recently has been played.
# Uses functions in disk_IO.py for maximum speed.
# ROMStatisticsStrategyABC() is exclusively called at ROM execution time.
# -------------------------------------------------------------------------------------------------
class ROMStatisticsStrategyABC(object):
    """Abstract strategy for updating ROM play statistics.

    Called exclusively at ROM execution time.
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self, PATHS, settings):
        self.PATHS = PATHS
        self.settings = settings
        # Maximum number of ROMs kept in the "Recently played" list.
        self.MAX_RECENT_PLAYED_ROMS = 100

    # Updates the statistics after rom_to_update has been launched.
    @abc.abstractmethod
    def update_launched_rom_stats(self, rom_to_update): pass
class VirtualROMStatisticsStrategy(ROMStatisticsStrategyABC):
    """No-op strategy: virtual launchers do not record play statistics."""
    def update_launched_rom_stats(self, rom_to_update):
        pass
class ROMStatisticsStrategy(ROMStatisticsStrategyABC):
    """Updates launch count plus the 'Recently played' and 'Most played' virtual launchers."""
    def __init__(self, PATHS, settings, recent_played_launcher, most_played_launcher):
        self.recent_played_launcher = recent_played_launcher
        self.most_played_launcher = most_played_launcher
        super(ROMStatisticsStrategy, self).__init__(PATHS, settings)

    def update_launched_rom_stats(self, rom_to_update):
        rom_to_update.increase_launch_count()
        virtual_rom = rom_to_update.copy_as_virtual_ROM()
        launched_id = rom_to_update.get_id()

        # --- Compute ROM recently played list ---
        # Drop any previous entry of this ROM, then put it at the front.
        recent_roms = self.recent_played_launcher.get_roms()
        recent_roms = [r for r in recent_roms if r.get_id() != launched_id] if recent_roms else []
        recent_roms.insert(0, virtual_rom)
        # Keep the list bounded to MAX_RECENT_PLAYED_ROMS entries.
        if len(recent_roms) > self.MAX_RECENT_PLAYED_ROMS:
            log_debug('RomStatisticsStrategy() len(recently_played_roms) = {0}'.format(len(recent_roms)))
            log_debug('RomStatisticsStrategy() Trimming list to {0} ROMs'.format(self.MAX_RECENT_PLAYED_ROMS))
            recent_roms = recent_roms[:self.MAX_RECENT_PLAYED_ROMS]
        self.recent_played_launcher.update_ROM_set(recent_roms)

        # --- Compute most played ROM statistics ---
        # Drop any previous entry of this ROM, then append the updated copy.
        most_played = self.most_played_launcher.get_roms()
        most_played = [] if most_played is None else [r for r in most_played if r.get_id() != launched_id]
        most_played.append(virtual_rom)
        self.most_played_launcher.update_ROM_set(most_played)
# -------------------------------------------------------------------------------------------------
# Abstract base class for business objects which support the generic
# metadata fields and assets.
#
# --- Class hierarchy ---
#
# MetaDataItemABC(object) (abstract class)
# |
# |----- Category
# | |
# | |----- VirtualCategory
# |
# |----- ROM
# |
# |----- LauncherABC (abstract class)
# |
# |----- StandaloneLauncher (Standalone launcher)
# |
# |----- ROMLauncherABC (abstract class)
# |
# |----- CollectionLauncher (ROM Collection launcher)
# |
# |----- VirtualLauncher (Browse by ... launcher)
# |
# |----- StandardRomLauncher (Standard launcher)
# |
# |----- LnkLauncher
# |
# |----- RetroplayerLauncher
# |
# |----- RetroarchLauncher
# |
# |----- SteamLauncher
# |
# |----- NvidiaGameStreamLauncher
#
# -------------------------------------------------------------------------------------------------
class MetaDataItemABC(object):
__metaclass__ = abc.ABCMeta
#
# Addon PATHS is required to store/retrieve assets.
# Addon settings is required because the way the metadata is displayed may depend on
# some addon settings.
#
def __init__(self, PATHS, addon_settings, entity_data, objectRepository):
self.PATHS = PATHS
self.settings = addon_settings
self.entity_data = entity_data
self.objectRepository = objectRepository
# --------------------------------------------------------------------------------------------
# Core functions
# --------------------------------------------------------------------------------------------
@abc.abstractmethod
def get_object_name(self): pass
@abc.abstractmethod
def get_assets_kind(self): pass
@abc.abstractmethod
def save_to_disk(self): pass
@abc.abstractmethod
def delete_from_disk(self): pass
# --- Database ID and utilities ---------------------------------------------------------------
def set_id(self, id):
self.entity_data['id'] = id
def get_id(self):
return self.entity_data['id']
def get_data_dic(self):
return self.entity_data
def copy_of_data_dic(self):
return self.entity_data.copy()
def set_custom_attribute(self, key, value):
self.entity_data[key] = value
def get_custom_attribute(self, key, default_value = None):
return self.entity_data[key] if key in self.entity_data else default_value
def import_data_dic(self, data):
for key in data:
self.entity_data[key] = data[key]
def dump_data_dic_to_log(self):
log_debug('Dumping object {0}'.format(self.__class__))
for key in self.entity_data:
log_debug('[{0}] = {1}'.format(key, unicode(self.entity_data[key])))
# NOTE Rename to get_filename_from_field()
def _get_value_as_filename(self, field):
if not field in self.entity_data: return None
path = self.entity_data[field]
if path == '': return None
return FileName(path)
def _get_directory_filename_from_field(self, field):
if not field in self.entity_data: return None
path = self.entity_data[field]
if path == '': return None
return FileName(path, isdir=True)
# --- Metadata --------------------------------------------------------------------------------
def get_name(self):
return self.entity_data['m_name'] if 'm_name' in self.entity_data else 'Unknown'
def set_name(self, name):
self.entity_data['m_name'] = name
def get_releaseyear(self):
return self.entity_data['m_year'] if 'm_year' in self.entity_data else ''
def set_releaseyear(self, releaseyear):
self.entity_data['m_year'] = releaseyear
def get_genre(self):
return self.entity_data['m_genre'] if 'm_genre' in self.entity_data else ''
def set_genre(self, genre):
self.entity_data['m_genre'] = genre
def get_developer(self):
return self.entity_data['m_developer'] if 'm_developer' in self.entity_data else ''
def set_developer(self, developer):
self.entity_data['m_developer'] = developer
# In AEL 0.9.7 m_rating is stored as a string.
def get_rating(self):
return int(self.entity_data['m_rating']) if self.entity_data['m_rating'] else ''
def set_rating(self, rating):
try:
self.entity_data['m_rating'] = int(rating)
except:
self.entity_data['m_rating'] = ''
def get_plot(self):
return self.entity_data['m_plot'] if 'm_plot' in self.entity_data else ''
def set_plot(self, plot):
self.entity_data['m_plot'] = plot
#
# Used when rendering Categories/Launchers/ROMs
#
def get_trailer(self):
return self.entity_data['s_trailer'] if 's_trailer' in self.entity_data else ''
def set_trailer(self, trailer_str):
if 'http' in trailer_str:
matches = re.search(r'^.*((youtu.(be|com)\/)|(v\/)|(\/u\/\w\/)|(embed\/)|(watch\?))\??v?=?([^#\&\?]*).*', trailer_str, re.I)
if matches is not None:
video_id = matches.groups()[-1]
trailer_str = 'plugin://plugin.video.youtube/play/?video_id={}'.format(video_id)
self.entity_data['s_trailer'] = trailer_str
# --- Finished status stuff -------------------------------------------------------------------
def is_finished(self):
return 'finished' in self.entity_data and self.entity_data['finished']
def get_finished_str(self):
finished = self.entity_data['finished']
finished_display = 'Finished' if finished == True else 'Unfinished'
return finished_display
def change_finished_status(self):
finished = self.entity_data['finished']
finished = False if finished else True
self.entity_data['finished'] = finished
# --- Assets/artwork --------------------------------------------------------------------------
def has_asset(self, asset_info):
if not asset_info.key in self.entity_data: return False
return self.entity_data[asset_info.key] != None and self.entity_data[asset_info.key] != ''
#
# Gets the asset path (str) of the given assetinfo type.
#
def get_asset_str(self, asset_info=None, asset_id=None, fallback = ''):
if asset_info is None and asset_id is None: return None
if asset_id is not None: asset_info = g_assetFactory.get_asset_info(asset_id)
if asset_info.key in self.entity_data and self.entity_data[asset_info.key]:
return self.entity_data[asset_info.key]
return fallback
#
# Gets the asset path (str) of the mapped asset type following
# the given input of either an assetinfo object or asset id.
#
def get_mapped_asset_str(self, asset_info=None, asset_id=None, fallback = ''):
asset_info = self.get_mapped_asset_info(asset_info, asset_id)
if asset_info.key in self.entity_data and self.entity_data[asset_info.key]:
return self.entity_data[asset_info.key]
return fallback
def get_asset_FN(self, asset_info):
if not asset_info or not asset_info.key in self.entity_data :
return None
return self._get_value_as_filename(asset_info.key)
def set_asset(self, asset_info, path_FN):
path = path_FN.getPath() if path_FN else ''
self.entity_data[asset_info.key] = path
    def clear_asset(self, asset_info):
        # Reset the asset field to the empty string ('' means asset not set).
        self.entity_data[asset_info.key] = ''
    def get_assets_path_FN(self):
        # FileName object for this item's asset directory, read from the
        # 'assets_path' field of entity_data.
        return self._get_directory_filename_from_field('assets_path')
#
# Get a list of the assets that can be mapped to a defaultable asset.
# They must be images, no videos, no documents.
#
@abc.abstractmethod
def get_mappable_asset_list(self): pass
#
# Gets the actual assetinfo object that is mapped for
# the given assetinfo for this particular MetaDataItem.
#
def get_mapped_asset_info(self, asset_info=None, asset_id=None):
if asset_info is None and asset_id is None: return None
if asset_id is not None: asset_info = g_assetFactory.get_asset_info(asset_id)
mapped_key = self.get_mapped_asset_key(asset_info)
mapped_asset_info = g_assetFactory.get_asset_info_by_key(mapped_key)
return mapped_asset_info
#
# Gets the database filename mapped for asset_info.
# Note that the mapped asset uses diferent fields wheter it is a Category/Launcher/ROM
#
def get_mapped_asset_key(self, asset_info):
if asset_info.default_key is '':
log_error('Requested mapping for AssetInfo without default key. Type {}'.format(asset_info.id))
raise AddonError('Not supported asset type used. This might be a bug!')
return self.entity_data[asset_info.default_key]
def set_mapped_asset_key(self, asset_info, mapped_to_info):
self.entity_data[asset_info.default_key] = mapped_to_info.key
def __str__(self):
return '{}}#{}: {}'.format(self.get_object_name(), self.get_id(), self.get_name())
# -------------------------------------------------------------------------------------------------
# Class representing an AEL Category.
# Contains code to generate the context menus passed to Dialog.select()
# -------------------------------------------------------------------------------------------------
class Category(MetaDataItemABC):
    """An AEL Category: a folder-like container that groups Launchers.

    Persistence is delegated to the objectRepository injected at construction.
    """
    def __init__(self, PATHS, settings, category_dic, objectRepository):
        # Concrete classes are responsible of creating a default entity_data dictionary
        # with sensible defaults.
        if category_dic is None:
            # No data given: build a fresh category with a random id.
            category_dic = fs_new_category()
            category_dic['id'] = misc_generate_random_SID()
        super(Category, self).__init__(PATHS, settings, category_dic, objectRepository)
    def get_object_name(self): return 'Category'
    def get_assets_kind(self): return KIND_ASSET_CATEGORY
    def is_virtual(self): return False
    def save_to_disk(self): self.objectRepository.save_category(self.entity_data)
    def delete_from_disk(self):
        # Object becomes invalid after deletion.
        self.objectRepository.delete_category(self.entity_data)
        self.entity_data = None
        self.objectRepository = None
    def num_launchers(self):
        # Number of Launchers currently contained in this Category.
        return self.objectRepository.num_launchers_in_cat(self.entity_data['id'])
    def get_main_edit_options(self):
        # Entries for the top-level 'Edit Category' context menu (Dialog.select()).
        options = collections.OrderedDict()
        options['EDIT_METADATA']       = 'Edit Metadata ...'
        options['EDIT_ASSETS']         = 'Edit Assets/Artwork ...'
        options['EDIT_DEFAULT_ASSETS'] = 'Choose default Assets/Artwork ...'
        options['CATEGORY_STATUS']     = 'Category status: {0}'.format(self.get_finished_str())
        options['EXPORT_CATEGORY_XML'] = 'Export Category XML configuration ...'
        options['DELETE_CATEGORY']     = 'Delete Category'
        return options
    def get_metadata_edit_options(self):
        # NOTE The Category NFO file logic must be moved to this class. Settings not need to
        # be used as a parameter here.
        NFO_FileName = fs_get_category_NFO_name(self.settings, self.entity_data)
        NFO_found_str = 'NFO found' if NFO_FileName.exists() else 'NFO not found'
        plot_str = text_limit_string(self.get_plot(), PLOT_STR_MAXSIZE)
        options = collections.OrderedDict()
        options['EDIT_METADATA_TITLE']       = "Edit Title: '{0}'".format(self.get_name())
        options['EDIT_METADATA_RELEASEYEAR'] = "Edit Release Year: '{0}'".format(self.get_releaseyear())
        options['EDIT_METADATA_GENRE']       = "Edit Genre: '{0}'".format(self.get_genre())
        options['EDIT_METADATA_DEVELOPER']   = "Edit Developer: '{0}'".format(self.get_developer())
        options['EDIT_METADATA_RATING']      = "Edit Rating: '{0}'".format(self.get_rating())
        options['EDIT_METADATA_PLOT']        = "Edit Plot: '{0}'".format(plot_str)
        options['IMPORT_NFO_FILE']           = 'Import NFO file (default, {0})'.format(NFO_found_str)
        options['IMPORT_NFO_FILE_BROWSE']    = 'Import NFO file (browse NFO file) ...'
        options['SAVE_NFO_FILE']             = 'Save NFO file (default location)'
        return options
    #
    # Returns an ordered dictionary with all the object assets, ready to be edited.
    # Keys are AssetInfo objects.
    # Values are the current file for the asset as Unicode string or '' if the asset is not set.
    #
    def get_assets_odict(self):
        asset_info_list = g_assetFactory.get_asset_list_by_IDs(CATEGORY_ASSET_ID_LIST)
        asset_odict = collections.OrderedDict()
        for asset_info in asset_info_list:
            asset_fname_str = self.entity_data[asset_info.key] if self.entity_data[asset_info.key] else ''
            asset_odict[asset_info] = asset_fname_str
        return asset_odict
    #
    # Get a list of the assets that can be mapped to a defaultable asset.
    # They must be images, no videos, no documents.
    #
    def get_mappable_asset_list(self):
        # NOTE(review): this uses COLLECTION_ASSET_ID_LIST while get_assets_odict()
        # above uses CATEGORY_ASSET_ID_LIST -- confirm whether this is intentional.
        return g_assetFactory.get_asset_list_by_IDs(COLLECTION_ASSET_ID_LIST, 'image')
    def __str__(self):
        # Delegates to MetaDataItemABC.__str__ ('<ObjectName>#<id>: <name>').
        return super().__str__()
# -------------------------------------------------------------------------------------------------
# Class representing the virtual categories in AEL.
# All ROM Collections is a Virtual Category.
# ...
# -------------------------------------------------------------------------------------------------
class VirtualCategory(MetaDataItemABC):
    """A non-persistent, generated Category (e.g. 'All ROM Collections').

    obj_dic is mandatory and must contain the fields 'id', 'type' and 'm_name';
    they are copied on top of a default category data dictionary.
    """
    def __init__(self, PATHS, settings, obj_dic, objectRepository):
        # Start from a default category dict, then copy the mandatory fields.
        entity_data = fs_new_category()
        for field in ('id', 'type', 'm_name'):
            entity_data[field] = obj_dic[field]
        super(VirtualCategory, self).__init__(PATHS, settings, entity_data, objectRepository)

    def get_object_name(self): return 'Virtual Category'

    def get_assets_kind(self): return KIND_ASSET_CATEGORY

    def is_virtual(self): return True

    # Virtual items are generated on the fly: nothing to persist or delete.
    def save_to_disk(self): pass

    def delete_from_disk(self): pass
# -------------------------------------------------------------------------------------------------
# Class representing a ROM file you can play through AEL.
# -------------------------------------------------------------------------------------------------
class ROM(MetaDataItemABC):
    """A ROM file that AEL can launch through its parent Launcher.

    rom_data is a plain dict (see fs_new_rom()). launcher is the owning Launcher
    object, or None for virtual ROMs (Favourites/Collections), which carry
    'launcherID' and 'platform' inline instead.
    """
    def __init__(self, rom_data = None, launcher = None):
        if rom_data is None:
            rom_data = fs_new_rom()
            rom_data['id'] = misc_generate_random_SID()
            rom_data['type'] = OBJ_ROM
        # back/parent reference
        self.launcher = launcher
        super(ROM, self).__init__(None, None, rom_data, None)
    def get_launcher(self):
        return self.launcher
    # is this virtual only? Should we make a VirtualRom(Rom)?
    def get_launcher_id(self):
        return self.entity_data['launcherID']
    def is_virtual_rom(self):
        # Virtual ROMs (Favourites/Collections) carry their launcher id inline.
        return 'launcherID' in self.entity_data
    def get_platform(self):
        # Virtual ROMs store the platform inline; normal ROMs ask their Launcher.
        if self.is_virtual_rom():
            return self.entity_data['platform']
        return self.launcher.get_platform()
    def get_nointro_status(self):
        return self.entity_data['nointro_status']
    def get_pclone_status(self):
        return self.entity_data['pclone_status'] if 'pclone_status' in self.entity_data else ''
    def get_clone(self):
        return self.entity_data['cloneof']
    def has_alternative_application(self):
        return 'altapp' in self.entity_data and self.entity_data['altapp']
    def get_alternative_application(self):
        return self.entity_data['altapp']
    def has_alternative_arguments(self):
        return 'altarg' in self.entity_data and self.entity_data['altarg']
    def get_alternative_arguments(self):
        return self.entity_data['altarg']
    def get_filename(self):
        # ROM file path as a plain string.
        return self.entity_data['filename']
    def get_file(self):
        # ROM file path as a FileName object.
        return self._get_value_as_filename('filename')
    def has_multiple_disks(self):
        return 'disks' in self.entity_data and self.entity_data['disks']
    def get_disks(self):
        if not self.has_multiple_disks():
            return []
        return self.entity_data['disks']
    def get_extra_ROM(self):
        return self.entity_data['i_extra_ROM']
    def set_as_extra_ROM(self):
        self.entity_data['i_extra_ROM'] = True
    def get_nfo_file(self):
        # NFO file lives next to the ROM file, same basename, '.nfo' extension.
        ROMFileName = self.get_file()
        nfo_file_path = ROMFileName.changeExtension('.nfo')
        return nfo_file_path
    def get_number_of_players(self):
        return self.entity_data['m_nplayers']
    def get_esrb_rating(self):
        return self.entity_data['m_esrb']
    def get_favourite_status(self):
        return self.entity_data['fav_status'] if 'fav_status' in self.entity_data else None
    def get_launch_count(self):
        return self.entity_data['launch_count']
    def set_file(self, file):
        self.entity_data['filename'] = file.getPath()
    def add_disk(self, disk):
        self.entity_data['disks'].append(disk)
    def set_number_of_players(self, amount):
        self.entity_data['m_nplayers'] = amount
    def set_esrb_rating(self, esrb):
        self.entity_data['m_esrb'] = esrb
    def set_nointro_status(self, status):
        self.entity_data['nointro_status'] = status
    def set_pclone_status(self, status):
        self.entity_data['pclone_status'] = status
    def set_clone(self, clone):
        self.entity_data['cloneof'] = clone
    # todo: definitely something for an inherited FavouriteRom class
    # >> Favourite ROM unique fields
    # >> Favourite ROMs in "Most played ROMs" DB also have 'launch_count' field.
    def set_favourite_status(self, state):
        self.entity_data['fav_status'] = state
    def increase_launch_count(self):
        # Field may be missing on ROMs that were never launched before.
        self.entity_data['launch_count'] = self.entity_data.get('launch_count', 0) + 1
    def set_alternative_application(self, application):
        self.entity_data['altapp'] = application
    def set_alternative_arguments(self, arg):
        self.entity_data['altarg'] = arg
    def get_box_sizing(self):
        if 'box_size' in self.entity_data: return self.entity_data['box_size']
        # fallback to launcher size
        if self.launcher: return self.launcher.get_box_sizing()
        return BOX_SIZE_POSTER
    def set_box_sizing(self, box_size):
        self.entity_data['box_size'] = box_size
    def copy(self):
        """Return a deep copy of this ROM, sharing the same parent launcher."""
        data = self.copy_of_data_dic()
        return ROM(data, self.launcher)
    def copy_as_virtual_ROM(self):
        """Return a launcher-less copy carrying 'launcherID' and 'platform' inline."""
        data = self.copy_of_data_dic()
        data['launcherID'] = self.launcher.get_id()
        data['platform'] = self.get_platform()
        return ROM(data, None)
    # -------------------------------------------------------------------------------------------------
    # Favourite ROM creation/management
    # -------------------------------------------------------------------------------------------------
    #
    # Creates a new Favourite ROM dictionary from parent ROM and Launcher.
    #
    # No-Intro Missing ROMs are not allowed in Favourites or Virtual Launchers.
    # fav_status = ['OK', 'Unlinked ROM', 'Unlinked Launcher', 'Broken'] default 'OK'
    #  'OK'                ROM filename exists and launcher exists and ROM id exists
    #  'Unlinked ROM'      ROM filename exists but ROM ID in launcher does not
    #  'Unlinked Launcher' ROM filename exists but Launcher ID not found
    #                      Note that if the launcher does not exists implies ROM ID does not exist.
    #                      If launcher doesn't exist ROM JSON cannot be loaded.
    #  'Broken'            ROM filename does not exist. ROM is unplayable
    #
    def copy_as_favourite_ROM(self):
        """Return a launcher-less copy of this ROM marked as a Favourite ('OK')."""
        # >> Copy original rom
        # todo: Should we make a FavouriteRom class inheriting Rom?
        favourite_data = self.copy_of_data_dic()
        favourite_data['launcherID'] = self.launcher.get_id()
        favourite_data['platform']   = self.get_platform()
        favourite = ROM(favourite_data, None)
        # Delete nointro_status field from ROM. Make sure this is done in the copy to be
        # returned to avoid chaning the function parameters (dictionaries are mutable!)
        # See http://stackoverflow.com/questions/5844672/delete-an-element-from-a-dictionary
        # NOTE keep it!
        # del favourite_data['nointro_status']
        # >> Favourite ROM unique fields
        # >> Favourite ROMs in "Most played ROMs" DB also have 'launch_count' field.
        favourite.set_favourite_status('OK')
        # >> Copy parent launcher fields into Favourite ROM
        #favourite.set_custom_attribute('launcherID',            self.get_id())
        #favourite.set_custom_attribute('platform',              self.get_platform())
        #favourite.set_custom_attribute('application',           self.get_custom_attribute('application'))
        #favourite.set_custom_attribute('args',                  self.get_custom_attribute('args'))
        #favourite.set_custom_attribute('args_extra',            self.get_custom_attribute('args_extra'))
        #favourite.set_custom_attribute('rompath',               self.get_rom_path().getPath())
        #favourite.set_custom_attribute('romext',                self.get_custom_attribute('romext'))
        #favourite.set_custom_attribute('toggle_window',         self.is_in_windowed_mode())
        #favourite.set_custom_attribute('non_blocking',          self.is_non_blocking())
        #favourite.set_custom_attribute('roms_default_icon',     self.get_custom_attribute('roms_default_icon'))
        #favourite.set_custom_attribute('roms_default_fanart',   self.get_custom_attribute('roms_default_fanart'))
        #favourite.set_custom_attribute('roms_default_banner',   self.get_custom_attribute('roms_default_banner'))
        #favourite.set_custom_attribute('roms_default_poster',   self.get_custom_attribute('roms_default_poster'))
        #favourite.set_custom_attribute('roms_default_clearlogo',self.get_custom_attribute('roms_default_clearlogo'))
        return favourite
    def delete_from_disk(self):
        # Persistence is delegated to the parent Launcher.
        if self.launcher is None:
            raise AddonError('Launcher not set for ROM')
        self.launcher.delete_ROM(self)
    def get_assets_kind(self): return KIND_ASSET_ROM
    def get_object_name(self):
        return "ROM"
    def save_to_disk(self):
        # Persistence is delegated to the parent Launcher.
        if self.launcher is None:
            raise AddonError('Launcher not set for ROM')
        self.launcher.save_ROM(self)
    # ---------------------------------------------------------------------------------------------
    # ROM asset methods
    # ---------------------------------------------------------------------------------------------
    #
    # Returns an ordered dictionary with all the object assets, ready to be edited.
    # Keys are AssetInfo objects.
    # Values are the current file for the asset as Unicode string or '' if the asset is not set.
    #
    def get_assets_odict(self):
        asset_info_list = g_assetFactory.get_asset_list_by_IDs(ROM_ASSET_ID_LIST)
        asset_odict = collections.OrderedDict()
        for asset_info in asset_info_list:
            asset_odict[asset_info] = self.get_asset_str(asset_info)
        return asset_odict
    def get_assets_path_FN(self):
        # Assets live under the parent Launcher's asset path; virtual ROMs have none.
        if not self.launcher:
            return None
        return self.launcher.get_assets_path_FN()
    #
    # Get a list of the assets that can be mapped to a defaultable asset.
    # They must be images, no videos, no documents.
    #
    def get_mappable_asset_list(self):
        return g_assetFactory.get_asset_list_by_IDs(ROM_ASSET_ID_LIST, 'image')
    def get_edit_options(self, category_id):
        """Context menu options for editing this ROM; wording depends on the
        virtual category (Favourites/Collections) the ROM is shown under."""
        delete_rom_txt = 'Delete ROM'
        if category_id == VCATEGORY_FAVOURITES_ID:
            delete_rom_txt = 'Delete Favourite ROM'
        if category_id == VCATEGORY_COLLECTIONS_ID:
            delete_rom_txt = 'Delete Collection ROM'
        options = collections.OrderedDict()
        options['EDIT_METADATA']    = 'Edit Metadata ...'
        options['EDIT_ASSETS']      = 'Edit Assets/Artwork ...'
        # NOTE(review): .encode('utf-8') yields bytes on Python 3 while the other
        # values are str; looks like a Python 2 leftover -- confirm before changing.
        options['ROM_STATUS']       = 'Status: {0}'.format(self.get_finished_str()).encode('utf-8')
        options['ADVANCED_MODS']    = 'Advanced Modifications ...'
        options['DELETE_ROM']       = delete_rom_txt
        if category_id == VCATEGORY_FAVOURITES_ID:
            options['MANAGE_FAV_ROM'] = 'Manage Favourite ROM object ...'
        elif category_id == VCATEGORY_COLLECTIONS_ID:
            options['MANAGE_COL_ROM'] = 'Manage Collection ROM object ...'
            options['MANAGE_COL_ROM_POS'] = 'Manage Collection ROM position ...'
        return options
    # >> Metadata edit dialog
    def get_metadata_edit_options(self):
        NFO_FileName = fs_get_ROM_NFO_name(self.get_data_dic())
        NFO_found_str = 'NFO found' if NFO_FileName.exists() else 'NFO not found'
        plot_str = text_limit_string(self.entity_data['m_plot'], PLOT_STR_MAXSIZE)
        rating = self.get_rating()
        if rating == -1:
            rating = 'not rated'
        options = collections.OrderedDict()
        options['EDIT_METADATA_TITLE']       = u"Edit Title: '{0}'".format(self.get_name()).encode('utf-8')
        options['EDIT_METADATA_RELEASEYEAR'] = u"Edit Release Year: '{0}'".format(self.get_releaseyear()).encode('utf-8')
        options['EDIT_METADATA_GENRE']       = u"Edit Genre: '{0}'".format(self.get_genre()).encode('utf-8')
        options['EDIT_METADATA_DEVELOPER']   = u"Edit Developer: '{0}'".format(self.get_developer()).encode('utf-8')
        options['EDIT_METADATA_NPLAYERS']    = u"Edit NPlayers: '{0}'".format(self.get_number_of_players()).encode('utf-8')
        options['EDIT_METADATA_ESRB']        = u"Edit ESRB rating: '{0}'".format(self.get_esrb_rating()).encode('utf-8')
        options['EDIT_METADATA_RATING']      = u"Edit Rating: '{}'".format(rating).encode('utf-8')
        options['EDIT_METADATA_PLOT']        = u"Edit Plot: '{}'".format(plot_str).encode('utf-8')
        options['EDIT_METADATA_BOXSIZE']     = u"Edit Box Size: '{}'".format(self.get_box_sizing())
        options['LOAD_PLOT']                 = "Load Plot from TXT file ..."
        options['IMPORT_NFO_FILE']           = u"Import NFO file (default, {})".format(NFO_found_str).encode('utf-8')
        options['SAVE_NFO_FILE']             = "Save NFO file (default location)"
        options['SCRAPE_ROM_METADATA']       = "Scrape Metadata"
        return options
    #
    # Returns a dictionary of options to choose from
    # with which you can do advanced modifications on this specific rom.
    #
    def get_advanced_modification_options(self):
        # BUG FIX: the log_debug() call was duplicated here.
        log_debug('ROM::get_advanced_modification_options() Returning edit options')
        options = collections.OrderedDict()
        options['CHANGE_ROM_FILE']          = "Change ROM file: '{0}'".format(self.get_filename())
        options['CHANGE_ALT_APPLICATION']   = "Alternative application: '{0}'".format(self.get_alternative_application())
        options['CHANGE_ALT_ARGUMENTS']     = "Alternative arguments: '{0}'".format(self.get_alternative_arguments())
        return options
    #
    # Reads an NFO file with ROM information.
    # See comments in fs_export_ROM_NFO() about verbosity.
    # About reading files in Unicode http://stackoverflow.com/questions/147741/character-reading-from-file-in-python
    #
    # todo: Replace with nfo_file_path.readXml() and just use XPath
    def update_with_nfo_file(self, nfo_file_path, verbose = True):
        """Merge metadata from an NFO file into this ROM. Returns True on success."""
        log_debug('Rom.update_with_nfo_file() Loading "{0}"'.format(nfo_file_path.getPath()))
        if not nfo_file_path.exists():
            if verbose:
                kodi_notify_warn('NFO file not found {0}'.format(nfo_file_path.getPath()))
            log_debug("Rom.update_with_nfo_file() NFO file not found '{0}'".format(nfo_file_path.getPath()))
            return False
        # todo: Replace with nfo_file_path.readXml() and just use XPath
        # --- Import data ---
        # >> Read file, put in a string and remove line endings.
        # >> We assume NFO files are UTF-8. Decode data to Unicode.
        # file = open(nfo_file_path, 'rt')
        nfo_str = nfo_file_path.loadFileToStr()
        nfo_str = nfo_str.replace('\r', '').replace('\n', '')
        # Search for metadata tags. Regular expression is non-greedy.
        # See https://docs.python.org/2/library/re.html#re.findall
        # If RE has no groups it returns a list of strings with the matches.
        # If RE has groups then it returns a list of groups.
        item_title     = re.findall('<title>(.*?)</title>', nfo_str)
        item_year      = re.findall('<year>(.*?)</year>', nfo_str)
        item_genre     = re.findall('<genre>(.*?)</genre>', nfo_str)
        item_developer = re.findall('<developer>(.*?)</developer>', nfo_str)
        item_nplayers  = re.findall('<nplayers>(.*?)</nplayers>', nfo_str)
        item_esrb      = re.findall('<esrb>(.*?)</esrb>', nfo_str)
        item_rating    = re.findall('<rating>(.*?)</rating>', nfo_str)
        item_plot      = re.findall('<plot>(.*?)</plot>', nfo_str)
        item_trailer   = re.findall('<trailer>(.*?)</trailer>', nfo_str)
        # >> Future work: ESRB and maybe nplayer fields must be sanitized.
        if len(item_title) > 0:     self.set_name(text_unescape_XML(item_title[0]))
        if len(item_year) > 0:      self.set_releaseyear(text_unescape_XML(item_year[0]))
        if len(item_genre) > 0:     self.set_genre(text_unescape_XML(item_genre[0]))
        if len(item_developer) > 0: self.set_developer(text_unescape_XML(item_developer[0]))
        if len(item_rating) > 0:    self.set_rating(text_unescape_XML(item_rating[0]))
        if len(item_plot) > 0:      self.set_plot(text_unescape_XML(item_plot[0]))
        if len(item_nplayers) > 0:  self.set_number_of_players(text_unescape_XML(item_nplayers[0]))
        if len(item_esrb) > 0:      self.set_esrb_rating(text_unescape_XML(item_esrb[0]))
        if len(item_trailer) > 0:   self.set_trailer(text_unescape_XML(item_trailer[0]))
        if verbose:
            kodi_notify('Imported {0}'.format(nfo_file_path.getPath()))
        return True
    def __str__(self):
        """Overrides the default implementation"""
        return json.dumps(self.entity_data)
# -------------------------------------------------------------------------------------------------
# Abstract base class for launching anything that is supported.
# Implement classes that inherit this base class to support new ways of launching.
# -------------------------------------------------------------------------------------------------
class LauncherABC(MetaDataItemABC):
__metaclass__ = abc.ABCMeta
#
# In an abstract class launcher_data is mandatory.
#
def __init__(self, PATHS, settings, launcher_data, objectRepository, executorFactory):
self.executorFactory = executorFactory
self.application = None
self.arguments = None
self.title = None
super(LauncherABC, self).__init__(PATHS, settings, launcher_data, objectRepository)
# --------------------------------------------------------------------------------------------
# Core methods
# --------------------------------------------------------------------------------------------
@abc.abstractmethod
def get_launcher_type(self): pass
# By default Launchers do not support ROMs. Redefine in child class if Launcher has ROMs.
def supports_launching_roms(self): return False
# By default Launchers do not PClone ROMs. Redefine in child class if necessary.
def supports_parent_clone_roms(self): return False
# --------------------------------------------------------------------------------------------
# Launcher build wizard methods
# --------------------------------------------------------------------------------------------
#
# Builds a new Launcher.
# Leave category_id empty to add launcher to root folder.
# Returns True if Launcher was sucesfully built.
# Returns False if Launcher was not built (user canceled the dialogs or some other
# error happened).
#
    def build(self, category):
        """Run the interactive launcher-creation wizard.

        Returns True when the Launcher was built, False when the user cancelled
        or a hook aborted the process. entity_data is replaced by the wizard's
        result dictionary.
        """
        log_debug('LauncherABC::build() Starting ...')
        # --- Call hook before wizard ---
        if not self._build_pre_wizard_hook(): return False
        # --- Launcher build code (ask user about launcher stuff) ---
        # Seed the wizard chain with the fixed fields, then let the concrete
        # class append its own dialog pages.
        wizard = WizardDialog_Dummy(None, 'categoryID', category.get_id())
        wizard = WizardDialog_Dummy(wizard, 'type', self.get_launcher_type())
        # >> Call Child class wizard builder method
        wizard = self._builder_get_wizard(wizard)
        # >> Run wizard
        # runWizard() returns a falsy value when the user cancels a dialog.
        self.entity_data = wizard.runWizard(self.entity_data)
        if not self.entity_data: return False
        self.entity_data['timestamp_launcher'] = time.time()
        # --- Call hook after wizard ---
        if not self._build_post_wizard_hook(): return False
        return True
#
# Creates a new launcher using a wizard of dialogs.
# Child concrete classes must implement this method.
#
@abc.abstractmethod
def _builder_get_wizard(self, wizard): pass
@abc.abstractmethod
def _build_pre_wizard_hook(self): pass
@abc.abstractmethod
def _build_post_wizard_hook(self): pass
def _builder_get_title_from_app_path(self, input, item_key, launcher):
if input: return input
appPath = FileName(launcher['application'])
title = appPath.getBaseNoExt()
title_formatted = title.replace('.' + title.split('.')[-1], '').replace('.', ' ')
return title_formatted
def _builder_get_appbrowser_filter(self, item_key, launcher):
if item_key in launcher:
application = launcher[item_key]
if application == 'JAVA':
return '.jar'
return '.bat|.exe|.cmd|.lnk' if is_windows() else ''
#
# Wizard helper, when a user wants to set a custom value instead of the predefined list items.
#
def _builder_user_selected_custom_browsing(self, item_key, launcher):
return launcher[item_key] == 'BROWSE'
# --------------------------------------------------------------------------------------------
# Launcher edit methods
# --------------------------------------------------------------------------------------------
#
# Returns a dictionary of options to choose from with which you can edit or manage this
# specific launcher in the "Edit Launcher" context menu.
# Different launchers have a different may menu, hence this method is abstract.
#
    # Returns the 'Edit Launcher' context-menu options dict; each concrete
    # launcher class builds its own menu.
    @abc.abstractmethod
    def get_main_edit_options(self):
        pass
#
# Returns a dictionary of options to choose from with which you can edit the metadata
# of a launcher.
# All launchers have the same metadata so method is defined here.
#
def get_metadata_edit_options(self):
log_debug('LauncherABC::get_metadata_edit_options() Starting ...')
plot_str = text_limit_string(self.entity_data['m_plot'], PLOT_STR_MAXSIZE)
rating = self.get_rating() if self.get_rating() != -1 else 'not rated'
NFO_FileName = fs_get_launcher_NFO_name(self.settings, self.entity_data)
NFO_found_str = 'NFO found' if NFO_FileName.exists() else 'NFO not found'
options = collections.OrderedDict()
options['EDIT_METADATA_TITLE'] = "Edit Title: '{}'".format(self.get_name())
options['EDIT_METADATA_PLATFORM'] = "Edit Platform: {}".format(self.entity_data['platform'])
options['EDIT_METADATA_RELEASEYEAR'] = "Edit Release Year: '{}'".format(self.entity_data['m_year'])
options['EDIT_METADATA_GENRE'] = "Edit Genre: '{}'".format(self.entity_data['m_genre'])
options['EDIT_METADATA_DEVELOPER'] = "Edit Developer: '{}'".format(self.entity_data['m_developer'])
options['EDIT_METADATA_RATING'] = "Edit Rating: '{}'".format(rating)
options['EDIT_METADATA_PLOT'] = "Edit Plot: '{}'".format(plot_str)
options['EDIT_METADATA_BOXSIZE'] = "Edit Box Size: '{}'".format(self.get_box_sizing())
options['IMPORT_NFO_FILE'] = 'Import NFO file (default {})'.format(NFO_found_str)
options['IMPORT_NFO_FILE_BROWSE'] = 'Import NFO file (browse NFO file) ...'
options['SAVE_NFO_FILE'] = 'Save NFO file (default location)'
return options
#
# get_advanced_modification_options() is custom for every concrete launcher class.
#
@abc.abstractmethod
def get_advanced_modification_options(self): pass
# ---------------------------------------------------------------------------------------------
# Execution methods
# ---------------------------------------------------------------------------------------------
#
# Launchs a ROM launcher or standalone launcher
# For standalone launchers romext is the extension of the application (only used in Windoze)
#
@abc.abstractmethod
def launch(self):
log_debug('LauncherABC::launch() Starting ...')
# --- Create executor object ---
if self.executorFactory is None:
log_error('LauncherABC::launch() self.executorFactory is None')
log_error('Cannot create an executor for {}'.format(self.application.getPath()))
kodi_notify_error('LauncherABC::launch() self.executorFactory is None'
'This is a bug, please report it.')
return
executor = self.executorFactory.create(self.application)
if executor is None:
log_error('Cannot create an executor for {}'.format(self.application.getPath()))
kodi_notify_error('Cannot execute application')
return
log_debug('Name = "{}"'.format(self.title))
log_debug('Application = "{}"'.format(self.application.getPath()))
log_debug('Arguments = "{}"'.format(self.arguments))
log_debug('Executor = "{}"'.format(executor.__class__.__name__))
# --- Execute app ---
self._launch_pre_exec(self.title, self.is_in_windowed_mode())
executor.execute(self.application, self.arguments, self.is_non_blocking())
self._launch_post_exec(self.is_in_windowed_mode())
#
# These two functions do things like stopping music before launch, toggling full screen, etc.
# Variables set in this function:
# self.kodi_was_playing True if Kodi player was ON, False otherwise
# self.kodi_audio_suspended True if Kodi audio suspended before launching
#
    def _launch_pre_exec(self, title, toggle_screen_flag):
        """Prepare Kodi before handing control to the external application.

        Depending on the add-on settings this stops/pauses the player, suspends
        the audio engine, toggles fullscreen, disables the screensaver and
        finally sleeps 'delay_tempo' milliseconds. _launch_post_exec() undoes
        these changes; the flags set here tell it what to restore.
        """
        log_debug('LauncherABC::_launch_pre_exec() Starting ...')
        # --- User notification ---
        if self.settings['display_launcher_notify']:
            kodi_notify('Launching {}'.format(title))
        # --- Stop/Pause Kodi mediaplayer if requested in settings ---
        self.kodi_was_playing = False
        # id="media_state_action" default="0" values="Stop|Pause|Let Play"
        media_state_action = self.settings['media_state_action']
        media_state_str = ['Stop', 'Pause', 'Let Play'][media_state_action]
        log_verb('_launch_pre_exec() media_state_action is "{}" ({})'.format(media_state_str, media_state_action))
        if media_state_action == 0 and xbmc.Player().isPlaying():
            log_verb('_launch_pre_exec() Calling xbmc.Player().stop()')
            xbmc.Player().stop()
            xbmc.sleep(100)
            self.kodi_was_playing = True
        elif media_state_action == 1 and xbmc.Player().isPlaying():
            log_verb('_launch_pre_exec() Calling xbmc.Player().pause()')
            xbmc.Player().pause()
            xbmc.sleep(100)
            self.kodi_was_playing = True
        # --- Force audio suspend if requested in "Settings" --> "Advanced"
        # >> See http://forum.kodi.tv/showthread.php?tid=164522
        self.kodi_audio_suspended = False
        if self.settings['suspend_audio_engine']:
            log_verb('_launch_pre_exec() Suspending Kodi audio engine')
            xbmc.audioSuspend()
            xbmc.enableNavSounds(False)
            xbmc.sleep(100)
            self.kodi_audio_suspended = True
        else:
            log_verb('_launch_pre_exec() DO NOT suspend Kodi audio engine')
        # --- Force joystick suspend if requested in "Settings" --> "Advanced"
        # >> See https://forum.kodi.tv/showthread.php?tid=287826&pid=2627128#pid2627128
        # >> See https://forum.kodi.tv/showthread.php?tid=157499&pid=1722549&highlight=input.enablejoystick#pid1722549
        # >> See https://forum.kodi.tv/showthread.php?tid=313615
        self.kodi_joystick_suspended = False
        # if self.settings['suspend_joystick_engine']:
            # log_verb('_launch_pre_exec() Suspending Kodi joystick engine')
            # >> Research. Get the value of the setting first
            # >> Apparently input.enablejoystick is not supported on Kodi Krypton anymore.
            # c_str = ('{"id" : 1, "jsonrpc" : "2.0",'
            #          ' "method" : "Settings.GetSettingValue",'
            #          ' "params" : {"setting":"input.enablejoystick"}}')
            # response = xbmc.executeJSONRPC(c_str)
            # log_debug('JSON      ''{0}'''.format(c_str))
            # log_debug('Response  ''{0}'''.format(response))
            # c_str = ('{"id" : 1, "jsonrpc" : "2.0",'
            #          ' "method" : "Settings.SetSettingValue",'
            #          ' "params" : {"setting" : "input.enablejoystick", "value" : false} }')
            # response = xbmc.executeJSONRPC(c_str)
            # log_debug('JSON      ''{0}'''.format(c_str))
            # log_debug('Response  ''{0}'''.format(response))
            # self.kodi_joystick_suspended = True
            # log_error('_launch_pre_exec() Suspending Kodi joystick engine not supported on Kodi Krypton!')
        # else:
            # log_verb('_launch_pre_exec() DO NOT suspend Kodi joystick engine')
        # --- Toggle Kodi windowed/fullscreen if requested ---
        if toggle_screen_flag:
            log_verb('_launch_pre_exec() Toggling Kodi fullscreen')
            kodi_toggle_fullscreen()
        else:
            log_verb('_launch_pre_exec() Toggling Kodi fullscreen DEACTIVATED in Launcher')
        # Disable screensaver
        if self.settings['suspend_screensaver']:
            kodi_disable_screensaver()
        else:
            screensaver_mode = kodi_get_screensaver_mode()
            log_debug('_run_before_execution() Screensaver status "{}"'.format(screensaver_mode))
        # --- Pause Kodi execution some time ---
        delay_tempo_ms = self.settings['delay_tempo']
        log_verb('_launch_pre_exec() Pausing {} ms'.format(delay_tempo_ms))
        xbmc.sleep(delay_tempo_ms)
        log_debug('LauncherABC::_launch_pre_exec() function ENDS')
    def _launch_post_exec(self, toggle_screen_flag):
        """Restore the Kodi environment after the launched application has finished.

        Mirrors _launch_pre_exec(): waits the configured tempo, optionally toggles
        fullscreen back, resumes the audio/joystick engines if they were suspended,
        restores the screensaver and resumes media playback if it was paused.

        toggle_screen_flag -- True if fullscreen was toggled before launching and
                              must be toggled back now.
        """
        log_debug('LauncherABC::_launch_post_exec() Starting ...')
        # --- Stop Kodi some time ---
        # Give the launched application time to release the screen/audio before
        # Kodi takes over again.
        delay_tempo_ms = self.settings['delay_tempo']
        log_verb('_launch_post_exec() Pausing {} ms'.format(delay_tempo_ms))
        xbmc.sleep(delay_tempo_ms)
        # --- Toggle Kodi windowed/fullscreen if requested ---
        if toggle_screen_flag:
            log_verb('_launch_post_exec() Toggling Kodi fullscreen')
            kodi_toggle_fullscreen()
        else:
            log_verb('_launch_post_exec() Toggling Kodi fullscreen DEACTIVATED in Launcher')
        # --- Resume audio engine if it was suspended ---
        # Calling xmbc.audioResume() takes a loong time (2/4 secs) if audio was not properly suspended!
        # Also produces this in Kodi's log:
        # WARNING: CActiveAE::StateMachine - signal: 0 from port: OutputControlPort not handled for state: 7
        # ERROR: ActiveAE::Resume - failed to init
        if self.kodi_audio_suspended:
            log_verb('_launch_post_exec() Kodi audio engine was suspended before launching')
            log_verb('_launch_post_exec() Resuming Kodi audio engine')
            xbmc.audioResume()
            xbmc.enableNavSounds(True)
            xbmc.sleep(100)
        else:
            log_verb('_launch_post_exec() DO NOT resume Kodi audio engine')
        # --- Resume joystick engine if it was suspended ---
        # Joystick suspend/resume over JSON-RPC is not available on Kodi Krypton,
        # so this branch only logs; the commented code is the old JSON-RPC call.
        if self.kodi_joystick_suspended:
            log_verb('_launch_post_exec() Kodi joystick engine was suspended before launching')
            log_verb('_launch_post_exec() Resuming Kodi joystick engine')
            # response = xbmc.executeJSONRPC(c_str)
            # log_debug('JSON ''{0}'''.format(c_str))
            # log_debug('Response ''{0}'''.format(response))
            log_verb('_launch_post_exec() Not supported on Kodi Krypton!')
        else:
            log_verb('_launch_post_exec() DO NOT resume Kodi joystick engine')
        # Restore screensaver status.
        # NOTE(review): the log tag below says '_run_after_execution' but this
        # method is _launch_post_exec -- looks like a stale copy/paste tag.
        if self.settings['suspend_screensaver']:
            kodi_restore_screensaver()
        else:
            screensaver_mode = kodi_get_screensaver_mode()
            log_debug('_run_after_execution() Screensaver status "{}"'.format(screensaver_mode))
        # --- Resume Kodi playing if it was paused. If it was stopped, keep it stopped. ---
        # media_state_action: 0 = Stop, 1 = Pause, 2 = Let Play. Only the Pause
        # case needs an explicit resume here.
        media_state_action = self.settings['media_state_action']
        media_state_str = ['Stop', 'Pause', 'Let Play'][media_state_action]
        log_verb('_launch_post_exec() media_state_action is "{}" ({})'.format(media_state_str, media_state_action))
        log_verb('_launch_post_exec() self.kodi_was_playing is {}'.format(self.kodi_was_playing))
        if self.kodi_was_playing and media_state_action == 1:
            log_verb('_launch_post_exec() Calling xbmc.Player().play()')
            xbmc.Player().play()
        log_debug('LauncherABC::_launch_post_exec() function ENDS')
# ---------------------------------------------------------------------------------------------
# Launcher metadata and flags related methods
# ---------------------------------------------------------------------------------------------
def get_platform(self): return self.entity_data['platform']
def set_platform(self, platform): self.entity_data['platform'] = platform
def get_category_id(self): return self.entity_data['categoryID'] if 'categoryID' in self.entity_data else None
def update_category(self, category_id): self.entity_data['categoryID'] = category_id
def is_in_windowed_mode(self): return self.entity_data['toggle_window']
def set_windowed_mode(self, windowed_mode):
self.entity_data['toggle_window'] = windowed_mode
return self.is_in_windowed_mode()
def is_non_blocking(self):
return 'non_blocking' in self.entity_data and self.entity_data['non_blocking']
def set_non_blocking(self, is_non_blocking):
self.entity_data['non_blocking'] = is_non_blocking
return self.is_non_blocking()
# Change the application this launcher uses. Override if application is changeable.
def change_application(self): return False
def get_timestamp(self):
timestamp = self.entity_data['timestamp_launcher']
if timestamp is None or timestamp == '':
return float(0)
return float(timestamp)
def update_timestamp(self): self.entity_data['timestamp_launcher'] = time.time()
def get_report_timestamp(self):
timestamp = self.entity_data['timestamp_report']
if timestamp is None or timestamp == '':
return float(0)
return float(timestamp)
def update_report_timestamp(self): self.entity_data['timestamp_report'] = time.time()
def get_box_sizing(self):
return self.entity_data['box_size'] if 'box_size' in self.entity_data else BOX_SIZE_POSTER
def set_box_sizing(self, box_size):
self.entity_data['box_size'] = box_size
# ---------------------------------------------------------------------------------------------
# Launcher asset methods
# ---------------------------------------------------------------------------------------------
#
# Returns an ordered dictionary with all the object assets, ready to be edited.
# Keys are AssetInfo objects.
# Values are the current file for the asset as Unicode string or '' if the asset is not set.
#
def get_assets_odict(self):
asset_info_list = g_assetFactory.get_asset_list_by_IDs(LAUNCHER_ASSET_ID_LIST)
asset_odict = collections.OrderedDict()
for asset_info in asset_info_list:
asset_fname_str = self.entity_data[asset_info.key] if self.entity_data[asset_info.key] else ''
asset_odict[asset_info] = asset_fname_str
return asset_odict
#
# Get a dictionary of ROM assets with enabled status as a boolean.
#
# Returns dict:
# asset_status_dict Dict of AssetInfo object as key and enabled boolean as value
#
def get_ROM_assets_enabled_statusses(self, asset_ids_to_check = ROM_ASSET_ID_LIST):
asset_status_dict = collections.OrderedDict()
asset_info_list = g_assetFactory.get_asset_list_by_IDs(asset_ids_to_check)
# >> Check if asset paths are configured or not
for asset in asset_info_list:
enabled = True if asset.path_key in self.entity_data and self.entity_data[asset.path_key] else False
asset_status_dict[asset] = enabled
if not enabled:
log_verb('get_ROM_assets_enabled_statusses() {0:<9} path unconfigured'.format(asset.name))
else:
log_debug('get_ROM_assets_enabled_statusses() {0:<9} path configured'.format(asset.name))
return asset_status_dict
#
# Get a list of the assets that can be mapped to a defaultable asset.
# They must be images, no videos, no documents.
# The defaultable assets are always the same: icon, fanart, banner, poster, clearlogo.
#
def get_mappable_asset_list(self):
return g_assetFactory.get_asset_list_by_IDs(LAUNCHER_ASSET_ID_LIST, 'image')
# ---------------------------------------------------------------------------------------------
# NFO files for metadata
# ---------------------------------------------------------------------------------------------
#
    # Python data model: lists and dictionaries are mutable. It means they can be changed if
    # passed as parameters to functions. However, items cannot be replaced by new objects!
    # Notably, numbers, strings and tuples are immutable, while dictionaries and lists are mutable.
#
# See http://stackoverflow.com/questions/986006/how-do-i-pass-a-variable-by-reference
# See https://docs.python.org/2/reference/datamodel.html
#
    # Function assumes that the NFO file already exists.
#
def import_nfo_file(self, nfo_file_path):
# --- Get NFO file name ---
log_debug('launcher.import_nfo_file() Importing launcher NFO "{0}"'.format(nfo_file_path.getPath()))
# --- Import data ---
if nfo_file_path.exists():
# >> Read NFO file data
try:
item_nfo = nfo_file_path.loadFileToStr()
except AddonException as e:
kodi_notify_warn('Exception reading NFO file {0}'.format(nfo_file_path.getPath()))
log_error("launcher.import_nfo_file() Exception reading NFO file '{0}': {1}".format(nfo_file_path.getPath(), str(e)))
return False
except:
kodi_notify_warn('Exception reading NFO file {0}'.format(nfo_file_path.getPath()))
log_error("launcher.import_nfo_file() Exception reading NFO file '{0}'".format(nfo_file_path.getPath()))
return False
item_nfo = item_nfo.replace('\r', '').replace('\n', '')
else:
kodi_notify_warn('NFO file not found {0}'.format(nfo_file_path.getBase()))
log_info("launcher.import_nfo_file() NFO file not found '{0}'".format(nfo_file_path.getPath()))
return False
# Find data
item_year = re.findall('<year>(.*?)</year>', item_nfo)
item_genre = re.findall('<genre>(.*?)</genre>', item_nfo)
item_developer = re.findall('<developer>(.*?)</developer>', item_nfo)
item_rating = re.findall('<rating>(.*?)</rating>', item_nfo)
item_plot = re.findall('<plot>(.*?)</plot>', item_nfo)
# >> Careful about object mutability! This should modify the dictionary
# >> passed as argument outside this function.
if len(item_year) > 0: self.set_releaseyear(text_unescape_XML(item_year[0]))
if len(item_genre) > 0: self.set_genre(text_unescape_XML(item_genre[0]))
if len(item_developer) > 0: self.set_developer(text_unescape_XML(item_developer[0]))
if len(item_rating) > 0: self.set_rating(text_unescape_XML(item_rating[0]))
if len(item_plot) > 0: self.set_plot(text_unescape_XML(item_plot[0]))
log_verb("import_nfo_file() Imported '{0}'".format(nfo_file_path.getPath()))
return True
#
# Standalone launchers:
# NFO files are stored in self.settings["launchers_nfo_dir"] if not empty.
# If empty, it defaults to DEFAULT_LAUN_NFO_DIR.
#
# ROM launchers:
# Same as standalone launchers.
#
def export_nfo_file(self, nfo_FileName):
# --- Get NFO file name ---
log_debug('export_nfo_file() Exporting launcher NFO "{0}"'.format(nfo_FileName.getPath()))
# If NFO file does not exist then create them. If it exists, overwrite.
nfo_content = []
nfo_content.append('<?xml version="1.0" encoding="utf-8" standalone="yes"?>\n')
nfo_content.append('<!-- Exported by AEL on {0} -->\n'.format(time.strftime("%Y-%m-%d %H:%M:%S")))
nfo_content.append('<launcher>\n')
nfo_content.append(XML_text('year', self.get_releaseyear()))
nfo_content.append(XML_text('genre', self.get_genre()))
nfo_content.append(XML_text('developer', self.get_developer()))
nfo_content.append(XML_text('rating', self.get_rating()))
nfo_content.append(XML_text('plot', self.get_plot()))
nfo_content.append('</launcher>\n')
full_string = ''.join(nfo_content).encode('utf-8')
try:
nfo_FileName.writeAll(full_string)
except:
kodi_notify_warn('Exception writing NFO file {0}'.format(nfo_FileName.getPath()))
log_error("export_nfo_file() Exception writing'{0}'".format(nfo_FileName.getPath()))
return False
log_debug("export_nfo_file() Created '{0}'".format(nfo_FileName.getPath()))
return True
def export_configuration(self, path_to_export, category):
launcher_fn_str = 'Launcher_' + text_title_to_filename_str(self.get_name()) + '.xml'
log_debug('launcher.export_configuration() Exporting Launcher configuration')
log_debug('launcher.export_configuration() Name "{0}"'.format(self.get_name()))
log_debug('launcher.export_configuration() ID {0}'.format(self.get_id()))
log_debug('launcher.export_configuration() l_fn_str "{0}"'.format(launcher_fn_str))
if not path_to_export: return
export_FN = FileName(path_to_export, isdir = True).pjoin(launcher_fn_str)
if export_FN.exists():
confirm = kodi_dialog_yesno('Overwrite file {0}?'.format(export_FN.getPath()))
if not confirm:
kodi_notify_warn('Export of Launcher XML cancelled')
category_data = category.get_data() if category is not None else {}
# --- Print error message is something goes wrong writing file ---
try:
autoconfig_export_launcher(self.entity_data, export_FN, category_data)
except AEL_Error as E:
kodi_notify_warn('{0}'.format(E))
else:
kodi_notify('Exported Launcher "{0}" XML config'.format(self.get_name()))
# >> No need to update categories.xml and timestamps so return now.
# ---------------------------------------------------------------------------------------------
# Misc functions
# ---------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------------
# Standalone application launcher
# -------------------------------------------------------------------------------------------------
class StandaloneLauncher(LauncherABC):
    #
    # Launcher that runs a single standalone application. No ROM database involved.
    #
    def __init__(self, PATHS, settings, launcher_dic, objectRepository, executorFactory):
        # --- Create default Standalone Launcher if empty launcher_dic ---
        # Concrete classes are responsible of creating a default entity_data dictionary
        # with sensible defaults.
        if launcher_dic is None:
            launcher_dic = fs_new_launcher()
            launcher_dic['id'] = misc_generate_random_SID()
            launcher_dic['type'] = OBJ_LAUNCHER_STANDALONE
        super(StandaloneLauncher, self).__init__(PATHS, settings, launcher_dic, objectRepository, executorFactory)

    # --------------------------------------------------------------------------------------------
    # Core methods
    # --------------------------------------------------------------------------------------------
    def get_object_name(self): return 'Standalone launcher'

    def get_assets_kind(self): return KIND_ASSET_LAUNCHER

    def get_launcher_type(self): return OBJ_LAUNCHER_STANDALONE

    def save_to_disk(self): self.objectRepository.save_launcher(self.entity_data)

    # Object becomes invalid after deletion.
    def delete_from_disk(self):
        self.objectRepository.delete_launcher(self.entity_data)
        self.entity_data = None
        self.objectRepository = None

    def supports_launching_roms(self): return False

    def supports_parent_clone_roms(self): return False

    # --------------------------------------------------------------------------------------------
    # Launcher build wizard methods
    # --------------------------------------------------------------------------------------------
    #
    # Returns True if the Launcher was successfully built.
    # Returns False if the Launcher was not built (user canceled the dialogs or an error happened).
    #
    def build(self, launcher): return super(StandaloneLauncher, self).build(launcher)

    #
    # Creates a new launcher using a wizard of dialogs.
    # _builder_get_wizard() is always defined in Launcher concrete classes and it's called by
    # the parent build() method.
    #
    def _builder_get_wizard(self, wizard):
        wizard = WizardDialog_FileBrowse(wizard, 'application', 'Select the launcher application',
            1, self._builder_get_appbrowser_filter)
        wizard = WizardDialog_Dummy(wizard, 'args', '')
        wizard = WizardDialog_Keyboard(wizard, 'args', 'Application arguments')
        wizard = WizardDialog_Dummy(wizard, 'm_name', '',
            self._builder_get_title_from_app_path)
        wizard = WizardDialog_Keyboard(wizard, 'm_name', 'Set the title of the launcher',
            self._builder_get_title_from_app_path)
        wizard = WizardDialog_Selection(wizard, 'platform', 'Select the platform',
            AEL_platform_list)
        return wizard

    def _build_pre_wizard_hook(self): return True

    def _build_post_wizard_hook(self): return True

    # --------------------------------------------------------------------------------------------
    # Launcher edit methods
    # --------------------------------------------------------------------------------------------
    def get_main_edit_options(self, category):
        """Return the options of the main Edit Launcher dialog."""
        log_debug('StandaloneLauncher::get_main_edit_options() Starting ...')
        options = collections.OrderedDict()
        options['EDIT_METADATA']          = 'Edit Metadata ...'
        options['EDIT_ASSETS']            = 'Edit Assets/Artwork ...'
        options['EDIT_DEFAULT_ASSETS']    = 'Choose default Assets/Artwork ...'
        options['EDIT_LAUNCHER_CATEGORY'] = "Change Category: '{0}'".format(category.get_name())
        options['EDIT_LAUNCHER_STATUS']   = 'Launcher status: {0}'.format(self.get_finished_str())
        options['LAUNCHER_ADVANCED_MODS'] = 'Advanced Modifications ...'
        options['EXPORT_LAUNCHER']        = 'Export Launcher XML configuration ...'
        options['DELETE_LAUNCHER']        = 'Delete Launcher'
        return options

    def get_advanced_modification_options(self):
        """Return the options of the Advanced Modifications dialog."""
        log_debug('StandaloneLauncher::get_advanced_modification_options() Starting ...')
        toggle_window_str = 'ON' if self.entity_data['toggle_window'] else 'OFF'
        # Use is_non_blocking() so a launcher without the 'non_blocking' key
        # shows OFF instead of raising KeyError.
        non_blocking_str = 'ON' if self.is_non_blocking() else 'OFF'
        options = collections.OrderedDict()
        options['EDIT_APPLICATION']   = "Change Application: '{0}'".format(self.entity_data['application'])
        options['MODIFY_ARGS']        = "Modify Arguments: '{0}'".format(self.entity_data['args'])
        options['ADDITIONAL_ARGS']    = "Modify aditional arguments ..."
        options['TOGGLE_WINDOWED']    = "Toggle Kodi into windowed mode (now {0})".format(toggle_window_str)
        options['TOGGLE_NONBLOCKING'] = "Non-blocking launcher (now {0})".format(non_blocking_str)
        return options

    # ---------------------------------------------------------------------------------------------
    # Execution methods
    # ---------------------------------------------------------------------------------------------
    def launch(self):
        """Resolve the application/arguments, substitute tokens and run the app.

        Aborts with a notification when the configured application does not exist.
        The actual Executor is created and invoked by LauncherABC.launch().
        """
        log_debug('StandaloneLauncher::launch() Starting ...')
        self.title       = self.entity_data['m_name']
        self.application = FileName(self.entity_data['application'])
        self.arguments   = self.entity_data['args']
        # --- Check for errors and abort if errors found ---
        if not self.application.exists():
            log_error('Launching app not found "{0}"'.format(self.application.getPath()))
            kodi_notify_warn('App {0} not found.'.format(self.application.getPath()))
            return
        # --- Argument substitution ---
        log_info('Raw arguments "{0}"'.format(self.arguments))
        self.arguments = self.arguments.replace('$apppath$' , self.application.getDir())
        log_info('Final arguments "{0}"'.format(self.arguments))
        # --- Call LauncherABC.launch(). Executor object is created there and invoked ---
        super(StandaloneLauncher, self).launch()
        log_debug('StandaloneLauncher::launch() END ...')

    # ---------------------------------------------------------------------------------------------
    # Launcher metadata and flags related methods
    # ---------------------------------------------------------------------------------------------
    def change_application(self):
        """Ask the user to pick a new application with a file browser dialog.

        Returns True when the application was changed, False when the user
        canceled the dialog or selected the same application again.
        """
        current_application = self.entity_data['application']
        selected_application = xbmcgui.Dialog().browse(1, 'Select the launcher application', 'files',
            self._builder_get_appbrowser_filter('application', self.entity_data),
            False, False, current_application).decode('utf-8')
        if selected_application is None or selected_application == current_application:
            return False
        self.entity_data['application'] = selected_application
        # BUG FIX: previously this method fell off the end and returned None
        # (falsy) on success, so callers could not tell success from cancel.
        return True

    def set_args(self, args): self.entity_data['args'] = args

    def get_args(self): return self.entity_data['args']

    def get_additional_argument(self, index):
        args = self.get_all_additional_arguments()
        return args[index]

    def get_all_additional_arguments(self):
        return self.entity_data['args_extra']

    def add_additional_argument(self, arg):
        # Use get() so launchers created before 'args_extra' existed do not raise KeyError.
        if not self.entity_data.get('args_extra'):
            self.entity_data['args_extra'] = []
        self.entity_data['args_extra'].append(arg)
        log_debug('launcher.add_additional_argument() Appending extra_args to launcher {0}'.format(self.get_id()))

    def set_additional_argument(self, index, arg):
        if not self.entity_data.get('args_extra'):
            self.entity_data['args_extra'] = []
        self.entity_data['args_extra'][index] = arg
        log_debug('launcher.set_additional_argument() Edited args_extra[{0}] to "{1}"'.format(index, self.entity_data['args_extra'][index]))

    def remove_additional_argument(self, index):
        del self.entity_data['args_extra'][index]
        log_debug("launcher.remove_additional_argument() Deleted launcher['args_extra'][{0}]".format(index))
# -------------------------------------------------------------------------------------------------
# Abstract base class for launching anything ROMs or item based.
# This class supports Parent/Clone generation, multidisc ROMs, and the No-Intro/REDUMP ROM audit.
# Inherit from this base class to implement your own specific ROM launcher.
# -------------------------------------------------------------------------------------------------
class ROMLauncherABC(LauncherABC):
__metaclass__ = abc.ABCMeta
# launcher_data is always valid, concrete classes fill it with defaults.
def __init__(self, PATHS, settings, launcher_data, objectRepository,
executorFactory, romsetRepository, statsStrategy):
self.roms = {}
self.romsetRepository = romsetRepository
self.statsStrategy = statsStrategy
super(ROMLauncherABC, self).__init__(PATHS, settings, launcher_data, objectRepository, executorFactory)
# --------------------------------------------------------------------------------------------
# Core functions
# --------------------------------------------------------------------------------------------
# By default ROM Launchers supports Launching ROMs (of course), PClone ROMs and ROM Audit.
# Override this methods if necessary in child classes.
def supports_launching_roms(self): return True
def supports_parent_clone_roms(self): return True
def supports_ROM_audit(self): return True
# --------------------------------------------------------------------------------------------
# Launcher build wizard methods
# --------------------------------------------------------------------------------------------
#
# In ROM launchers create the ROM asset paths. Child classes must call this method or
# problems will happen.
#
@abc.abstractmethod
def _build_post_wizard_hook(self):
log_debug('ROMLauncherABC::_build_pre_wizard_hook() Starting ...')
# Choose launcher ROM XML filename. There may be launchers with same name in different
# categories, or even launcher with the same name in the same category.
roms_base_noext = fs_get_ROMs_basename(self.get_name(), self.entity_data['m_name'], self.get_id())
self.entity_data['roms_base_noext'] = roms_base_noext
# --- Selected asset path ---
# A) User chooses one and only one assets path
# B) If this path is different from the ROM path then asset naming scheme 1 is used.
# B) If this path is the same as the ROM path then asset naming scheme 2 is used.
# >> Create asset directories. Function detects if we are using naming scheme 1 or 2.
# >> launcher is edited using Python passing by assignment.
self.rom_assets_init_dirs()
# --- Determine box size based on platform --
platform = get_AEL_platform(self.entity_data['platform'])
self.set_box_sizing(platform.default_box_size)
return True
def _builder_get_extensions_from_app_path(self, input, item_key ,launcher):
if input: return input
app = launcher['application']
appPath = FileName(app)
extensions = emudata_get_program_extensions(appPath.getBase())
return extensions
def _builder_get_arguments_from_application_path(self, input, item_key, launcher):
if input: return input
app = launcher['application']
appPath = FileName(app)
default_arguments = emudata_get_program_arguments(appPath.getBase())
return default_arguments
def _builder_get_value_from_rompath(self, input, item_key, launcher):
if input: return input
romPath = launcher['rompath']
return romPath
def _builder_get_value_from_assetpath(self, input, item_key, launcher):
if input: return input
romPath = FileName(launcher['assets_path'])
romPath = romPath.pjoin('games')
return romPath.getPath()
# --------------------------------------------------------------------------------------------
# Launcher edit methods
# --------------------------------------------------------------------------------------------
@abc.abstractmethod
def get_main_edit_options(self): pass
# get_metadata_edit_options() has a general implementation in LauncherABC class for
# all launchers.
#
# get_advanced_modification_options() is custom for every concrete launcher class.
#
@abc.abstractmethod
def get_advanced_modification_options(self): pass
# Returns the dialog options to choose from when managing the roms.
def get_manage_roms_options(self):
log_debug('ROMLauncherABC::get_manage_roms_options() Returning options')
options = collections.OrderedDict()
options['SET_ROMS_DEFAULT_ARTWORK'] = 'Choose ROMs default artwork ...'
options['SET_ROMS_ASSET_DIRS'] = 'Manage ROMs asset directories ...'
options['SCRAPE_ROMS'] = 'Scrape ROMs'
options['REMOVE_DEAD_ROMS'] = 'Remove dead/missing ROMs'
options['IMPORT_ROMS'] = 'Import ROMs metadata from NFO files'
options['EXPORT_ROMS'] = 'Export ROMs metadata to NFO files'
options['DELETE_ROMS_NFO'] = 'Delete ROMs NFO files'
options['CLEAR_ROMS'] = 'Clear ROMs from launcher'
return options
def get_audit_roms_options(self):
log_debug('ROMLauncherABC::get_audit_roms_options() Returning edit options')
display_mode_str = self.entity_data['launcher_display_mode']
no_intro_display_mode = self.entity_data['nointro_display_mode']
nointro_xml_file_FName = self.get_nointro_xml_filepath()
if not nointro_xml_file_FName or not nointro_xml_file_FName.exists():
no_intro_xml_file = 'NONE'
else:
no_intro_xml_file = nointro_xml_file_FileName.getBase()
options = collections.OrderedDict()
options['CHANGE_DISPLAY_MODE'] = 'Change launcher display mode (now {0}) ...'.format(display_mode_str)
options['CREATE_PARENTCLONE_DAT'] = 'Create Parent/Clone DAT based on ROM filenames'
options['CHANGE_DISPLAY_ROMS'] = 'Display ROMs (now {0}) ...'.format(no_intro_display_mode)
options['ADD_NO_INTRO'] = "Add No-Intro/Redump DAT: '{0}'".format(no_intro_xml_file)
options['DELETE_NO_INTRO'] = 'Delete No-Intro/Redump DAT'
options['UPDATE_ROM_AUDIT'] = 'Update ROM audit'
return options
# ---------------------------------------------------------------------------------------------
# Execution methods
# ---------------------------------------------------------------------------------------------
    def launch(self):
        """Launch the currently selected ROM (self.rom).

        Resolves application, arguments and ROM file via the abstract
        _launch_select*() hooks, substitutes argument tokens, updates the ROM
        launch statistics and finally delegates to LauncherABC.launch().
        Aborts silently if any of the three selection steps fails.
        """
        self.title = self.rom.get_name()
        self.selected_rom_file = None
        # All three hooks must succeed before anything is executed.
        applicationIsSet = self._launch_selectApplicationToUse()
        argumentsAreSet = self._launch_selectArgumentsToUse()
        romIsSelected = self._launch_selectRomFileToUse()
        if not applicationIsSet or not argumentsAreSet or not romIsSelected:
            return
        self._launch_parseArguments()
        # Update launch count/timestamps and persist the ROM before executing.
        if self.statsStrategy is not None:
            self.statsStrategy.update_launched_rom_stats(self.rom)
            self.save_ROM(self.rom)
        super(ROMLauncherABC, self).launch()
@abc.abstractmethod
def _launch_selectApplicationToUse(self): return True
@abc.abstractmethod
def _launch_selectArgumentsToUse(self): return True
@abc.abstractmethod
def _launch_selectRomFileToUse(self): return True
# --- Argument substitution ---
    def _launch_parseArguments(self):
        """Substitute $token$ placeholders in self.arguments before launching.

        Replaces, in this order: application-based tokens ($apppath$, $appbase$),
        ROM-file-based tokens ($rom$, $romfile$, $rompath$, $rombase$,
        $rombasenoext$, legacy %rom%/%ROM%), fixed ID/title tokens, and finally
        any $key$ matching a string field of the ROM or launcher dictionaries.
        Order matters: explicit tokens must be replaced before the automatic
        dictionary-driven pass.
        """
        log_info('RomLauncher() raw arguments "{0}"'.format(self.arguments))
        # --- Application based arguments replacements ---
        # NOTE(review): this checks for NewFileName while other methods build
        # FileName objects -- confirm both resolve to the same class.
        if self.application and isinstance(self.application, NewFileName):
            apppath = self.application.getDir()
            log_info('RomLauncher() application "{0}"'.format(self.application.getPath()))
            log_info('RomLauncher() appbase "{0}"'.format(self.application.getBase()))
            log_info('RomLauncher() apppath "{0}"'.format(apppath))
            self.arguments = self.arguments.replace('$apppath$', apppath)
            self.arguments = self.arguments.replace('$appbase$', self.application.getBase())
        # --- ROM based arguments replacements ---
        if self.selected_rom_file:
            # --- Escape quotes and double quotes in ROMFileName ---
            # >> This maybe useful to Android users with complex command line arguments
            if self.settings['escape_romfile']:
                log_info("RomLauncher() Escaping ROMFileName ' and \"")
                self.selected_rom_file.escapeQuotes()
            rompath = self.selected_rom_file.getDir()
            rombase = self.selected_rom_file.getBase()
            rombase_noext = self.selected_rom_file.getBaseNoExt()
            log_info('RomLauncher() romfile "{0}"'.format(self.selected_rom_file.getPath()))
            log_info('RomLauncher() rompath "{0}"'.format(rompath))
            log_info('RomLauncher() rombase "{0}"'.format(rombase))
            log_info('RomLauncher() rombasenoext "{0}"'.format(rombase_noext))
            self.arguments = self.arguments.replace('$rom$', self.selected_rom_file.getPath())
            self.arguments = self.arguments.replace('$romfile$', self.selected_rom_file.getPath())
            self.arguments = self.arguments.replace('$rompath$', rompath)
            self.arguments = self.arguments.replace('$rombase$', rombase)
            self.arguments = self.arguments.replace('$rombasenoext$', rombase_noext)
            # >> Legacy names for argument substitution
            self.arguments = self.arguments.replace('%rom%', self.selected_rom_file.getPath())
            self.arguments = self.arguments.replace('%ROM%', self.selected_rom_file.getPath())
        # Launchers without a category substitute an empty string.
        category_id = self.get_category_id()
        if category_id is None:
            category_id = ''
        # Default arguments replacements
        self.arguments = self.arguments.replace('$categoryID$', category_id)
        self.arguments = self.arguments.replace('$launcherID$', self.entity_data['id'])
        self.arguments = self.arguments.replace('$romID$', self.rom.get_id())
        self.arguments = self.arguments.replace('$romtitle$', self.title)
        # Automatic substitution of ROM values (string fields only).
        for rom_key, rom_value in self.rom.get_data_dic().iteritems():
            if isinstance(rom_value, basestring):
                self.arguments = self.arguments.replace('${}$'.format(rom_key), rom_value)
        # Automatic substitution of launcher values (string fields only).
        for launcher_key, launcher_value in self.entity_data.iteritems():
            if isinstance(launcher_value, basestring):
                self.arguments = self.arguments.replace('${}$'.format(launcher_key), launcher_value)
        log_info('RomLauncher() final arguments "{0}"'.format(self.arguments))
# ---------------------------------------------------------------------------------------------
# ROM asset methods
# ---------------------------------------------------------------------------------------------
#
# Creates path for assets (artwork) and automatically fills in the path_ fields in the
# launcher dictionary.
#
def rom_assets_init_dirs(self):
assets_dir_FN = FileName(self.entity_data['assets_path'], isdir = True)
log_verb('ROMLauncherABC::rom_assets_init_dirs() assets_dir_FN "{0}"'.format(assets_dir_FN.getPath()))
# --- Fill in launcher fields and create asset directories ---
if self.entity_data['platform'] == 'MAME':
log_verb('ROMLauncherABC::rom_assets_init_dirs() Creating MAME asset paths')
self.rom_assets_create_dir(assets_dir_FN, 'path_title', 'titles')
self.rom_assets_create_dir(assets_dir_FN, 'path_snap', 'snaps')
self.rom_assets_create_dir(assets_dir_FN, 'path_boxfront', 'cabinets')
self.rom_assets_create_dir(assets_dir_FN, 'path_boxback', 'cpanels')
self.rom_assets_create_dir(assets_dir_FN, 'path_cartridge', 'PCBs')
self.rom_assets_create_dir(assets_dir_FN, 'path_fanart', 'fanarts')
self.rom_assets_create_dir(assets_dir_FN, 'path_banner', 'marquees')
self.rom_assets_create_dir(assets_dir_FN, 'path_clearlogo', 'clearlogos')
self.rom_assets_create_dir(assets_dir_FN, 'path_flyer', 'flyers')
self.rom_assets_create_dir(assets_dir_FN, 'path_map', 'maps')
self.rom_assets_create_dir(assets_dir_FN, 'path_manual', 'manuals')
self.rom_assets_create_dir(assets_dir_FN, 'path_trailer', 'trailers')
else:
log_verb('ROMLauncherABC::rom_assets_init_dirs() Creating Standard asset paths')
self.rom_assets_create_dir(assets_dir_FN, 'path_title', 'titles')
self.rom_assets_create_dir(assets_dir_FN, 'path_snap', 'snaps')
self.rom_assets_create_dir(assets_dir_FN, 'path_boxfront', 'boxfronts')
self.rom_assets_create_dir(assets_dir_FN, 'path_boxback', 'boxbacks')
self.rom_assets_create_dir(assets_dir_FN, 'path_cartridge', 'cartridges')
self.rom_assets_create_dir(assets_dir_FN, 'path_fanart', 'fanarts')
self.rom_assets_create_dir(assets_dir_FN, 'path_banner', 'banners')
self.rom_assets_create_dir(assets_dir_FN, 'path_clearlogo', 'clearlogos')
self.rom_assets_create_dir(assets_dir_FN, 'path_flyer', 'flyers')
self.rom_assets_create_dir(assets_dir_FN, 'path_map', 'maps')
self.rom_assets_create_dir(assets_dir_FN, 'path_manual', 'manuals')
self.rom_assets_create_dir(assets_dir_FN, 'path_trailer', 'trailers')
#
# Create asset path and assign it to Launcher dictionary.
#
def rom_assets_create_dir(self, assets_dir_FN, key, path_name):
asset_dir_FN = assets_dir_FN.pjoin(path_name, isdir = True)
self.entity_data[key] = asset_dir_FN.getPath()
log_debug('ROMLauncherABC::rom_assets_create_dir() Creating "{0}"'.format(asset_dir_FN.getPath()))
asset_dir_FN.makedirs()
def get_ROM_mappable_asset_list(self):
MAPPABLE_ASSETS = [ASSET_ICON_ID, ASSET_FANART_ID, ASSET_BANNER_ID, ASSET_CLEARLOGO_ID, ASSET_POSTER_ID]
return g_assetFactory.get_asset_list_by_IDs(MAPPABLE_ASSETS)
#
# Gets the actual assetinfo object that is mapped for
# the given (ROM) assetinfo for this particular MetaDataItem.
#
def get_mapped_ROM_asset_info(self, asset_info=None, asset_id=None):
if asset_info is None and asset_id is None: return None
if asset_id is not None: asset_info = g_assetFactory.get_asset_info(asset_id)
mapped_key = self.get_mapped_ROM_asset_key(asset_info)
mapped_asset_info = g_assetFactory.get_asset_info_by_key(mapped_key)
return mapped_asset_info
#
# Gets the database filename mapped for asset_info.
    # Note that the mapped asset uses different fields depending on whether it is a Category/Launcher/ROM.
#
def get_mapped_ROM_asset_key(self, asset_info):
if asset_info.rom_default_key is '':
log_error('Requested mapping for AssetInfo without default key. Type {}'.format(asset_info.id))
raise AddonError('Not supported asset type used. This might be a bug!')
return self.entity_data[asset_info.rom_default_key]
def set_mapped_ROM_asset_key(self, asset_info, mapped_to_info):
self.entity_data[asset_info.rom_default_key] = mapped_to_info.key
#
# Search for local assets and place found files into a list.
# Returned list all has assets as defined in ROM_ASSET_LIST.
# This function is used in the Scraper.
#
# ROM -> Rom object
# asset_infos -> list of assets to request
#
    def get_local_assets(self, ROM, asset_infos):
        """Search the configured asset directories for local artwork files of ROM.

        ROM         -- Rom object whose basename (without extension) is the search key.
        asset_infos -- list of AssetInfo objects to look for.
        Returns a dict keyed by asset id; value is the found file or None when missing.
        Relies on the file cache built by misc_add_file_cache() (see cache_assets()).
        """
        log_verb('get_local_assets() Searching for ROM local assets...')
        ROMFile = ROM.get_file()
        rom_basename_noext = ROMFile.getBaseNoExt()
        local_assets = {}
        for asset_info in asset_infos:
            # Search the cached directory listing for a file with any of the allowed extensions.
            local_asset = misc_search_file_cache(self.get_asset_path(asset_info), rom_basename_noext, asset_info.exts)
            if local_asset:
                local_assets[asset_info.id] = local_asset
                log_verb('get_local_assets() Found {0:<9} "{1}"'.format(asset_info.name, local_asset))
            else:
                local_assets[asset_info.id] = None
                log_verb('get_local_assets() Missing {0:<9}'.format(asset_info.name))
        return local_assets
# --- Create a cache of assets ---
# misc_add_file_cache() creates a set with all files in a given directory.
# That set is stored in a function internal cache associated with the path.
# Files in the cache can be searched with misc_search_file_cache()
    def cache_assets(self, asset_id):
        """Pre-populate the module-level file cache for the asset directory of asset_id."""
        AInfo = g_assetFactory.get_asset_info(asset_id)
        misc_add_file_cache(self.get_asset_path(AInfo))
# ---------------------------------------------------------------------------------------------
# ROM methods
# ---------------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------
# Utility functions of ROM Launchers
# Use the same function names as in ObjectRepository class.
# ---------------------------------------------------------------------------------------------
def load_ROMs(self): self.roms = self.romsetRepository.load_ROMs(self)
    def save_current_ROMs(self):
        # Persist the in-memory ROM set (self.roms) of this launcher.
        self.romsetRepository.save_rom_set(self, self.roms)
def save_ROM(self, rom):
if not self.has_ROMs(): self.load_ROMs()
self.roms[rom.get_id()] = rom
self.romsetRepository.save_rom_set(self, self.roms)
def update_ROM_set(self, roms):
if not isinstance(roms, dict):
roms = dict((rom.get_id(), rom) for rom in roms)
self.romsetRepository.save_rom_set(self, roms)
self.roms = roms
    def delete_ROM_databases(self):
        # Remove every persisted ROM database (all view modes) belonging to this launcher.
        self.romsetRepository.delete_all_by_launcher(self)
def delete_ROM(self, rom_id):
if not self.has_ROMs(): self.load_ROMs()
self.roms.pop(rom_id)
self.romsetRepository.save_rom_set(self, self.roms)
def select_ROM(self, rom_id):
if not self.has_ROMs(): self.load_ROMs()
if self.roms is None:
log_error('Unable to load romset')
return None
if not rom_id in self.roms:
log_error('RomID {0} not found in romset'.format(rom_id))
return None
self.rom = self.roms[rom_id]
return self.rom
def has_ROMs(self):
return self.roms is not None and len(self.roms) > 0
def has_ROM(self, rom_id):
if not self.has_ROMs(): self.load_ROMs()
return rom_id in self.roms
    def get_number_of_ROMs(self):
        # Cached ROM count stored in the launcher data; may lag behind the actual DB
        # contents (see actual_amount_of_ROMs() for the real count).
        return self.entity_data['num_roms']
def actual_amount_of_ROMs(self):
if not self.has_ROMs(): self.load_ROMs()
return len(self.roms)
def get_roms(self):
if not self.has_ROMs(): self.load_ROMs()
return self.roms.values() if self.roms else None
def get_ROM_IDs(self):
if not self.has_ROMs(): self.load_ROMs()
return self.roms.keys() if self.roms else None
    def reset_PClone_ROMs(self):
        """Delete all persisted Parent/Clone audit databases of this launcher."""
        self.romsetRepository.delete_by_launcher(self, ROMSET_CPARENT)
        self.romsetRepository.delete_by_launcher(self, ROMSET_PCLONE)
        self.romsetRepository.delete_by_launcher(self, ROMSET_PARENTS)
#
# Get a list of assets with duplicated paths. Refuse to do anything if duplicated paths found.
#
    def get_duplicated_asset_dirs(self):
        """Return a list of human-readable 'A and B' strings for asset types that share a directory.

        Compares every pair of configured ROM asset paths; unconfigured (empty) paths
        are skipped. An empty return list means no duplicates were found.
        """
        duplicated_bool_list   = [False] * len(ROM_ASSET_ID_LIST)
        duplicated_name_list   = []
        # >> Check for duplicated asset paths (every unordered pair i < j).
        # NOTE(review): duplicated_bool_list is filled for index i only and never
        # returned or read — looks vestigial; confirm before removing.
        for i, asset_i in enumerate(ROM_ASSET_ID_LIST[:-1]):
            A_i = g_assetFactory.get_asset_info(asset_i)
            for j, asset_j in enumerate(ROM_ASSET_ID_LIST[i+1:]):
                A_j = g_assetFactory.get_asset_info(asset_j)
                # >> Exclude unconfigured assets (empty strings).
                if A_i.path_key not in self.entity_data or A_j.path_key not in self.entity_data \
                    or not self.entity_data[A_i.path_key] or not self.entity_data[A_j.path_key]: continue
                # log_debug('asset_get_duplicated_asset_list() Checking {0:<9} vs {1:<9}'.format(A_i.name, A_j.name))
                if self.entity_data[A_i.path_key] == self.entity_data[A_j.path_key]:
                    duplicated_bool_list[i] = True
                    duplicated_name_list.append('{0} and {1}'.format(A_i.name, A_j.name))
                    log_info('asset_get_duplicated_asset_list() DUPLICATED {0} and {1}'.format(A_i.name, A_j.name))
        return duplicated_name_list
def set_default_rom_asset(self, asset_kind, mapped_to_kind):
self.entity_data[asset_kind.rom_default_key] = mapped_to_kind.key
def get_asset_path(self, asset_info):
if not asset_info:
return None
return self._get_value_as_filename(asset_info.path_key)
    def set_asset_path(self, asset_info, path):
        # Store the directory for asset_info in the launcher data (plain string path).
        log_debug('Setting "{}" to {}'.format(asset_info.path_key, path))
        self.entity_data[asset_info.path_key] = path
    def get_rom_path(self):
        # ROM scan directory as a FileName object.
        return self._get_value_as_filename('rompath')
def has_extra_rompath(self):
return 'romextrapath' in self.entity_data and \
self.entity_data['romextrapath'] is not None and \
self.entity_data['romextrapath'] is not ''
    def get_extra_rompath(self):
        # Optional secondary ROM directory as a FileName object.
        return self._get_value_as_filename('romextrapath')
    def change_rom_path(self, path):
        # Replace the ROM scan directory (plain string path).
        self.entity_data['rompath'] = path
    def get_rom_asset_path(self):
        # Root directory holding all ROM artwork subdirectories, as a FileName object.
        return self._get_value_as_filename('ROM_asset_path')
def get_roms_base(self):
return self.entity_data['roms_base_noext'] if 'roms_base_noext' in self.entity_data else None
    def update_roms_base(self, roms_base_noext):
        # Set the base name (no extension) used for this launcher's ROM database files.
        self.entity_data['roms_base_noext'] = roms_base_noext
    def get_roms_xml_file(self):
        # Path of the ROMs XML database file (plain string).
        return self.entity_data['roms_xml_file']
    def set_roms_xml_file(self, xml_file):
        # Store the path of the ROMs XML database file (plain string).
        self.entity_data['roms_xml_file'] = xml_file
    def clear_roms(self):
        """Delete all ROMs of this launcher, reset the cached count and switch the audit off."""
        # Set ROM Audit to OFF.
        if self.entity_data['audit_state'] == AUDIT_STATE_ON:
            log_info('Setting audit_state = AUDIT_STATE_OFF')
            self.entity_data['audit_state'] = AUDIT_STATE_OFF
        self.entity_data['num_roms'] = 0
        self.roms = {}
        # Remove every persisted ROM database belonging to this launcher.
        self.romsetRepository.delete_all_by_launcher(self)
def get_display_mode(self):
return self.entity_data['launcher_display_mode'] if 'launcher_display_mode' in self.entity_data else LAUNCHER_DMODE_FLAT
    def change_display_mode(self, mode):
        """Set the launcher view mode and return the mode actually applied.

        PClone/1G1R modes require a configured and existing No-Intro DAT file;
        otherwise the request is downgraded to flat view mode.
        """
        if mode == LAUNCHER_DMODE_PCLONE or mode == LAUNCHER_DMODE_1G1R:
            # >> Check if user configured a No-Intro DAT. If not configured or file does
            # >> not exists refuse to switch to PClone view and force normal mode.
            if not self.has_nointro_xml():
                log_info('RomsLauncher.change_display_mode() No-Intro DAT not configured.')
                log_info('RomsLauncher.change_display_mode() Forcing Flat view mode.')
                mode = LAUNCHER_DMODE_FLAT
            else:
                nointro_xml_file_FName = self.get_nointro_xml_filepath()
                if not nointro_xml_file_FName.exists():
                    log_info('RomsLauncher.change_display_mode() No-Intro DAT not found.')
                    log_info('RomsLauncher.change_display_mode() Forcing Flat view mode.')
                    kodi_dialog_OK('No-Intro DAT cannot be found. PClone or 1G1R view mode cannot be set.')
                    mode = LAUNCHER_DMODE_FLAT
        self.entity_data['launcher_display_mode'] = mode
        log_debug('launcher_display_mode = {0}'.format(mode))
        return mode
def get_nointro_display_mode(self):
return self.entity_data['nointro_display_mode'] if 'nointro_display_mode' in self.entity_data else LAUNCHER_DMODE_FLAT
    def change_nointro_display_mode(self, mode):
        # Set the No-Intro audit display mode (Have/Miss/Unknown filtering) and return it.
        self.entity_data['nointro_display_mode'] = mode
        log_info('Launcher nointro display mode changed to "{0}"'.format(self.entity_data['nointro_display_mode']))
        return mode
def has_nointro_xml(self):
return self.entity_data['nointro_xml_file'] if 'nointro_xml_file' in self.entity_data else None
    def get_nointro_xml_filepath(self):
        # No-Intro DAT file as a FileName object.
        return self._get_value_as_filename('nointro_xml_file')
    def set_nointro_xml_file(self, path):
        # Store the No-Intro DAT file path (plain string).
        self.entity_data['nointro_xml_file'] = path
    def reset_nointro_xmldata(self):
        # Forget the configured No-Intro DAT (forces the launcher back to Normal view mode).
        if self.entity_data['nointro_xml_file']:
            log_info('Deleting XML DAT file and forcing launcher to Normal view mode.')
            self.entity_data['nointro_xml_file'] = ''
def set_audit_stats(self, num_of_roms, num_audit_parents, num_audit_clones, num_audit_have, num_audit_miss, num_audit_unknown):
self.set_number_of_roms(get_number_of_roms)
self.entity_data['num_parents'] = num_audit_parents
self.entity_data['num_clones'] = num_audit_clones
self.entity_data['num_have'] = num_audit_have
self.entity_data['num_miss'] = num_audit_miss
self.entity_data['num_unknown'] = num_audit_unknown
def set_number_of_roms(self, num_of_roms = -1):
if num_of_roms == -1:
num_of_roms = self.actual_amount_of_ROMs()
self.entity_data['num_roms'] = num_of_roms
    def supports_multidisc(self):
        # True when the scanner should group multidisc ROM sets for this launcher.
        return self.entity_data['multidisc']
    def set_multidisc_support(self, supports_multidisc):
        # Enable/disable multidisc ROM support and return the newly stored state.
        self.entity_data['multidisc'] = supports_multidisc
        return self.supports_multidisc()
# -------------------------------------------------------------------------------------------------
# Collection Launcher
# Class hierarchy: CollectionLauncher --> ROMLauncherABC --> LauncherABC --> MetaDataItemABC --> object
# -------------------------------------------------------------------------------------------------
class CollectionLauncher(ROMLauncherABC):
    """ROM Collection launcher: a user-curated set of Favourite ROMs.

    Collections cannot be launched themselves; their ROMs carry their own
    launching information.
    """
    def __init__(self, PATHS, settings, collection_dic,
                 executorFactory, romsetRepository, statsStrategy):
        # Concrete classes are responsible of creating a default entity_data dictionary
        # with sensible defaults.
        if collection_dic is None:
            collection_dic = fs_new_collection()
            collection_dic['id'] = misc_generate_random_SID()
        # NOTE(review): sibling classes pass (PATHS, settings, data, objectRepository,
        # executorFactory, romsetRepository, statsStrategy) to the base constructor.
        # Here romsetRepository sits in the executorFactory slot and the executorFactory
        # parameter is unused — confirm against ROMLauncherABC.__init__ before touching.
        super(CollectionLauncher, self).__init__(
            PATHS, settings, collection_dic, None, romsetRepository, None, False
        )
    def get_object_name(self): return 'ROM Collection'
    def get_assets_kind(self): return KIND_ASSET_CATEGORY
    def get_launcher_type(self): return OBJ_LAUNCHER_COLLECTION
    def save_to_disk(self): self.objectRepository.save_collection(self.entity_data)
    def delete_from_disk(self):
        # Object becomes invalid after deletion
        self.objectRepository.delete_collection(self.entity_data)
        self.entity_data = None
        self.objectRepository = None
    def supports_launching_roms(self): return True
    def supports_parent_clone_roms(self): return False
    # Collections need no build wizard: the hooks are no-ops.
    def _builder_get_wizard(self, wizard): return wizard
    def _build_pre_wizard_hook(self):
        log_debug('CollectionLauncher::_build_pre_wizard_hook() Starting ...')
        return True
    def _build_post_wizard_hook(self):
        log_debug('CollectionLauncher::_build_post_wizard_hook() Starting ...')
        return True
    # get_edit_options() is implemented in RomLauncher but Categories editing options
    # are different.
    def get_edit_options(self):
        options = collections.OrderedDict()
        options['EDIT_METADATA']       = 'Edit Metadata ...'
        options['EDIT_ASSETS']         = 'Edit Assets/Artwork ...'
        options['EDIT_DEFAULT_ASSETS'] = 'Choose default Assets/Artwork ...'
        options['EXPORT_COLLECTION']   = 'Export Collection XML'
        options['DELETE_COLLECTION']   = 'Delete Collection'
        return options
    # get_metadata_edit_options() has a general implementation in Launcher class for
    # Standard ROM Launchers. ROM Collections metadata is different from a Standard ROM Launcher
    # so reimplement the method here.
    def get_metadata_edit_options(self):
        plot_str = text_limit_string(self.entity_data['m_plot'], PLOT_STR_MAXSIZE)
        rating = self.get_rating() if self.get_rating() != -1 else 'not rated'
        NFO_FileName = fs_get_launcher_NFO_name(self.settings, self.entity_data)
        NFO_found_str = 'NFO found' if NFO_FileName.exists() else 'NFO not found'
        options = collections.OrderedDict()
        options['EDIT_METADATA_TITLE']     = "Edit Title: '{0}'".format(self.get_name())
        options['EDIT_METADATA_GENRE']     = "Edit Genre: '{0}'".format(self.entity_data['m_genre'])
        options['EDIT_METADATA_RATING']    = "Edit Rating: '{0}'".format(rating)
        options['EDIT_METADATA_PLOT']      = "Edit Plot: '{0}'".format(plot_str)
        options['IMPORT_NFO_FILE_DEFAULT'] = 'Import NFO file (default {0})'.format(NFO_found_str)
        options['IMPORT_NFO_FILE_BROWSE']  = 'Import NFO file (browse NFO file) ...'
        options['SAVE_NFO_FILE_DEFAULT']   = 'Save NFO file (default location)'
        return options
    # Collections are not launchable; selection helpers always refuse.
    def launch(self): pass
    def _selectApplicationToUse(self): return False
    def _selectArgumentsToUse(self): return False
    def _selectRomFileToUse(self): return False
# -------------------------------------------------------------------------------------------------
# --- Virtual Launcher ---
# Virtual Launchers are ROM launchers which contain other ROMs from real launchers.
# Virtual Launchers cannot be edited.
# Virtual Launcher ROMs are Favourite ROMs that can be executed.
# -------------------------------------------------------------------------------------------------
class VirtualLauncher(ROMLauncherABC):
    """Virtual launcher: aggregates ROMs that belong to other, real launchers.

    Virtual launchers cannot be edited or launched directly; most hooks are
    therefore no-ops.
    """
    def __init__(self, PATHS, settings, collection_dic,
                 executorFactory, romsetRepository, statsStrategy):
        # See the VirtualCategory construction to complete this.
        super(VirtualLauncher, self).__init__(
            PATHS, settings, collection_dic, None, executorFactory, romsetRepository, statsStrategy
        )

    # --------------------------------------------------------------------------------------------
    # Core functions
    # --------------------------------------------------------------------------------------------
    def get_object_name(self):
        return 'Virtual launcher'

    def get_assets_kind(self):
        return KIND_ASSET_LAUNCHER

    def get_launcher_type(self):
        return OBJ_LAUNCHER_VIRTUAL

    def save_to_disk(self):
        # Virtual launchers are never persisted.
        pass

    def delete_from_disk(self):
        # Nothing persisted, nothing to delete.
        pass

    def supports_launching_roms(self):
        return True

    def supports_parent_clone_roms(self):
        return False

    def supports_ROM_audit(self):
        return True

    # Build wizard and edit hooks are no-ops for virtual launchers.
    def _builder_get_wizard(self, wizard):
        return wizard

    def _build_pre_wizard_hook(self):
        return True

    def _build_post_wizard_hook(self):
        return True

    def get_main_edit_options(self):
        pass

    def get_advanced_modification_options(self):
        pass

    def launch(self):
        pass

    # ---------------------------------------------------------------------------------------------
    # Execution methods
    # ---------------------------------------------------------------------------------------------
    def _launch_selectApplicationToUse(self):
        return False

    def _launch_selectArgumentsToUse(self):
        return False

    def _launch_selectRomFileToUse(self):
        return False

    def has_nointro_xml(self):
        return False
# -------------------------------------------------------------------------------------------------
# Standard ROM launcher where user can fully customize all settings.
#
# The standard ROM launcher also supports Parent/Clone view modes and No-Intro/REDUMP DAT audit.
# -------------------------------------------------------------------------------------------------
class StandardRomLauncher(ROMLauncherABC):
    """Standard ROM launcher: fully user-configurable application + ROM path launcher.

    Supports Parent/Clone view modes and No-Intro/REDUMP DAT audit.
    """
    #
    # Handle in this constructor the creation of a new empty ROM Launcher.
    # Concrete classes are responsible of creating a default entity_data dictionary
    # with sensible defaults.
    #
    def __init__(self, PATHS, settings, launcher_dic, objectRepository,
                 executorFactory, romsetRepository, statsStrategy):
        if launcher_dic is None:
            launcher_dic = fs_new_launcher()
            launcher_dic['id'] = misc_generate_random_SID()
            launcher_dic['type'] = OBJ_LAUNCHER_ROM
        super(StandardRomLauncher, self).__init__(
            PATHS, settings, launcher_dic, objectRepository, executorFactory, romsetRepository, statsStrategy
        )

    # --------------------------------------------------------------------------------------------
    # Core functions
    # --------------------------------------------------------------------------------------------
    def get_object_name(self): return 'ROM Launcher'

    def get_assets_kind(self): return KIND_ASSET_LAUNCHER

    def get_launcher_type(self): return OBJ_LAUNCHER_ROM

    def save_to_disk(self): self.objectRepository.save_launcher(self.entity_data)

    # Object becomes invalid after deletion
    def delete_from_disk(self):
        self.objectRepository.delete_launcher(self.entity_data)
        self.entity_data = None
        self.objectRepository = None

    # --------------------------------------------------------------------------------------------
    # Launcher build wizard methods
    # --------------------------------------------------------------------------------------------
    #
    # Creates a new launcher using a wizard of dialogs. Called by parent build() method.
    #
    def _builder_get_wizard(self, wizard):
        wizard = WizardDialog_FileBrowse(wizard, 'application', 'Select the launcher application',
                                         1, self._builder_get_appbrowser_filter)
        wizard = WizardDialog_FileBrowse(wizard, 'rompath', 'Select the ROMs path',
                                         0, '')
        wizard = WizardDialog_Dummy(wizard, 'romext', '',
                                    self._builder_get_extensions_from_app_path)
        wizard = WizardDialog_Keyboard(wizard, 'romext','Set files extensions, use "|" as separator. (e.g lnk|cbr)')
        wizard = WizardDialog_Dummy(wizard, 'args', '',
                                    self._builder_get_arguments_from_application_path)
        wizard = WizardDialog_Keyboard(wizard, 'args', 'Application arguments')
        wizard = WizardDialog_Dummy(wizard, 'm_name', '',
                                    self._builder_get_title_from_app_path)
        wizard = WizardDialog_Keyboard(wizard, 'm_name','Set the title of the launcher',
                                       self._builder_get_title_from_app_path)
        wizard = WizardDialog_Selection(wizard, 'platform', 'Select the platform',
                                        AEL_platform_list)
        wizard = WizardDialog_Dummy(wizard, 'assets_path', '',
                                    self._builder_get_value_from_rompath)
        wizard = WizardDialog_FileBrowse(wizard, 'assets_path', 'Select asset/artwork directory',
                                         0, '')
        return wizard

    def _build_pre_wizard_hook(self):
        log_debug('StandardRomLauncher::_build_pre_wizard_hook() Starting ...')
        return True

    def _build_post_wizard_hook(self):
        log_debug('StandardRomLauncher::_build_post_wizard_hook() Starting ...')
        return super(StandardRomLauncher, self)._build_post_wizard_hook()

    # --------------------------------------------------------------------------------------------
    # Launcher edit methods
    # --------------------------------------------------------------------------------------------
    def get_main_edit_options(self, category):
        """Return the context-menu options shown when editing this launcher."""
        log_debug('StandardRomLauncher::get_main_edit_options() Returning edit options')
        options = collections.OrderedDict()
        options['EDIT_METADATA']          = 'Edit Metadata ...'
        options['EDIT_ASSETS']            = 'Edit Assets/Artwork ...'
        options['EDIT_DEFAULT_ASSETS']    = 'Choose default Assets/Artwork ...'
        options['EDIT_LAUNCHER_CATEGORY'] = "Change Category: '{0}'".format(category.get_name())
        options['EDIT_LAUNCHER_STATUS']   = 'Launcher status: {0}'.format(self.get_finished_str())
        options['LAUNCHER_ADVANCED_MODS'] = 'Advanced Modifications ...'
        options['LAUNCHER_MANAGE_ROMS']   = 'Manage ROMs ...'
        options['LAUNCHER_AUDIT_ROMS']    = 'Audit ROMs / Launcher view mode ...'
        options['EXPORT_LAUNCHER_XML']    = 'Export Launcher XML configuration ...'
        options['DELETE_LAUNCHER']        = 'Delete Launcher'
        return options

    #
    # get_advanced_modification_options() is custom for every concrete launcher class.
    #
    def get_advanced_modification_options(self):
        log_debug('StandardRomLauncher::get_advanced_modification_options() Returning edit options')
        toggle_window_str = 'ON' if self.entity_data['toggle_window'] else 'OFF'
        non_blocking_str = 'ON' if self.entity_data['non_blocking'] else 'OFF'
        multidisc_str = 'ON' if self.entity_data['multidisc'] else 'OFF'
        options = collections.OrderedDict()
        options['EDIT_APPLICATION']        = "Edit Application: '{0}'".format(self.entity_data['application'])
        options['EDIT_ARGS']               = "Edit Arguments: '{0}'".format(self.entity_data['args'])
        options['EDIT_ADDITIONAL_ARGS']    = "Edit Aditional Arguments ..."
        options['EDIT_ROMPATH']            = "Edit ROM path: '{0}'".format(self.entity_data['rompath'])
        options['EDIT_ROMEXT']             = "Edit ROM extensions: '{0}'".format(self.entity_data['romext'])
        options['EDIT_TOGGLE_WINDOWED']    = "Toggle Kodi into windowed mode (now {0})".format(toggle_window_str)
        options['EDIT_TOGGLE_NONBLOCKING'] = "Non-blocking launcher (now {0})".format(non_blocking_str)
        options['EDIT_TOGGLE_MULTIDISC']   = "Multidisc ROM support (now {0})".format(multidisc_str)
        return options

    # ---------------------------------------------------------------------------------------------
    # Execution methods
    # ---------------------------------------------------------------------------------------------
    def _launch_selectApplicationToUse(self):
        """Resolve self.application, honouring a per-ROM alternative app. Returns False on error."""
        if self.rom.has_alternative_application():
            log_info('StandardRomLauncher() Using ROM altapp')
            self.application = FileName(self.rom.get_alternative_application())
        else:
            self.application = FileName(self.entity_data['application'])
        # --- Check for errors and abort if found --- todo: CHECK
        if not self.application.exists():
            log_error('StandardRomLauncher::_selectApplicationToUse(): Launching app not found "{0}"'.format(self.application.getPath()))
            kodi_notify_warn('Launching app not found {0}'.format(self.application.getPath()))
            return False
        return True

    def _launch_selectArgumentsToUse(self):
        """Resolve self.arguments from ROM altarg, extra args (user choice) or launcher args."""
        if self.rom.has_alternative_arguments():
            log_info('StandardRomLauncher() Using ROM altarg')
            self.arguments = self.rom.get_alternative_arguments()
        elif self.entity_data['args_extra']:
            # >> Ask user what arguments to launch application
            log_info('StandardRomLauncher() Using Launcher args_extra')
            launcher_args = self.entity_data['args']
            # BUG FIX: original read self.entity_data_args (nonexistent attribute,
            # AttributeError). The intended value is the launcher_args local above.
            arg_list = [launcher_args] + self.entity_data['args_extra']
            dialog = xbmcgui.Dialog()
            dselect_ret = dialog.select('Select launcher arguments', arg_list)
            if dselect_ret < 0:
                return False
            log_info('StandardRomLauncher() User chose args index {0} ({1})'.format(dselect_ret, arg_list[dselect_ret]))
            self.arguments = arg_list[dselect_ret]
        else:
            self.arguments = self.entity_data['args']
        return True

    def _launch_selectRomFileToUse(self):
        """Resolve self.selected_rom_file; asks the user to pick a disc for multidisc sets."""
        if not self.rom.has_multiple_disks():
            self.selected_rom_file = self.rom.get_file()
            return True
        disks = self.rom.get_disks()
        log_info('StandardRomLauncher._selectRomFileToUse() Multidisc ROM set detected')
        dialog = xbmcgui.Dialog()
        dselect_ret = dialog.select('Select ROM to launch in multidisc set', disks)
        if dselect_ret < 0:
            return False
        selected_rom_base = disks[dselect_ret]
        log_info('StandardRomLauncher._selectRomFileToUse() Selected ROM "{0}"'.format(selected_rom_base))
        ROM_temp = self.rom.get_file()
        ROM_dir = FileName(ROM_temp.getDir())
        ROMFileName = ROM_dir.pjoin(selected_rom_base)
        log_info('StandardRomLauncher._selectRomFileToUse() ROMFileName OP "{0}"'.format(ROMFileName.getPath()))
        log_info('StandardRomLauncher._selectRomFileToUse() ROMFileName P "{0}"'.format(ROMFileName.getPath()))
        if not ROMFileName.exists():
            log_error('ROM not found "{0}"'.format(ROMFileName.getPath()))
            kodi_notify_warn('ROM not found "{0}"'.format(ROMFileName.getPath()))
            return False
        self.selected_rom_file = ROMFileName
        return True

    # ---------------------------------------------------------------------------------------------
    # Launcher metadata and flags related methods
    # ---------------------------------------------------------------------------------------------
    # All of these are in the parent class LauncherABC.
    def change_application(self):
        """Let the user browse for a new launcher application; True when it changed."""
        current_application = self.entity_data['application']
        # NOTE(review): Dialog().browse() returns a str; the decode() call is a
        # Python 2 idiom and the None check below can only trigger if browse ever
        # returns None — confirm on the targeted Kodi/Python version.
        selected_application = xbmcgui.Dialog().browse(1, 'Select the launcher application', 'files',
                                                       self._get_appbrowser_filter('application', self.entity_data),
                                                       False, False, current_application).decode('utf-8')
        if selected_application is None or selected_application == current_application:
            return False
        self.entity_data['application'] = selected_application
        return True

    def change_arguments(self, args):
        self.entity_data['args'] = args

    def get_args(self):
        return self.entity_data['args']

    def get_additional_argument(self, index):
        args = self.get_all_additional_arguments()
        return args[index]

    def get_all_additional_arguments(self):
        return self.entity_data['args_extra']

    def add_additional_argument(self, arg):
        # Lazily create the extra arguments list on first use.
        if not self.entity_data['args_extra']:
            self.entity_data['args_extra'] = []
        self.entity_data['args_extra'].append(arg)
        log_debug('launcher.add_additional_argument() Appending extra_args to launcher {0}'.format(self.get_id()))

    def set_additional_argument(self, index, arg):
        if not self.entity_data['args_extra']:
            self.entity_data['args_extra'] = []
        self.entity_data['args_extra'][index] = arg
        log_debug('launcher.set_additional_argument() Edited args_extra[{0}] to "{1}"'.format(index, self.entity_data['args_extra'][index]))

    def remove_additional_argument(self, index):
        del self.entity_data['args_extra'][index]
        log_debug("launcher.remove_additional_argument() Deleted launcher['args_extra'][{0}]".format(index))

    # ---------------------------------------------------------------------------------------------
    # Launcher asset methods
    # ---------------------------------------------------------------------------------------------
    # All Launcher asset functions must be in parent class LauncherABC.
    # ---------------------------------------------------------------------------------------------
    # NFO files for metadata
    # ---------------------------------------------------------------------------------------------
    # ---------------------------------------------------------------------------------------------
    # Misc functions
    # ---------------------------------------------------------------------------------------------
    # ---------------------------------------------------------------------------------------------
    # ROM methods
    # ---------------------------------------------------------------------------------------------
    # Move ROMs method to parent class ROMLauncherABC.
    def get_roms_filtered(self):
        """Return the ROM list filtered by the current view mode and audit display mode."""
        if not self.has_ROMs():
            self.load_ROMs()
        filtered_roms = []
        view_mode     = self.get_display_mode()
        dp_mode       = self.get_nointro_display_mode()
        pclone_index  = self.get_pclone_indices()
        dp_modes_for_have    = [NOINTRO_DMODE_HAVE, NOINTRO_DMODE_HAVE_UNK, NOINTRO_DMODE_HAVE_MISS]
        dp_modes_for_miss    = [NOINTRO_DMODE_HAVE_MISS, NOINTRO_DMODE_MISS, NOINTRO_DMODE_MISS_UNK]
        dp_modes_for_unknown = [NOINTRO_DMODE_HAVE_UNK, NOINTRO_DMODE_MISS_UNK, NOINTRO_DMODE_UNK]
        for rom_id in self.roms:
            rom = self.roms[rom_id]
            nointro_status = rom.get_nointro_status()
            # >> Filter ROM
            # >> Always include a parent ROM regardless of filters in 'Parent/Clone mode'
            # >> and '1G1R mode' launcher_display_mode if it has 1 or more clones.
            if not view_mode == LAUNCHER_DMODE_FLAT and len(pclone_index[rom_id]):
                filtered_roms.append(rom)
            elif nointro_status == NOINTRO_STATUS_HAVE and dp_mode in dp_modes_for_have:
                filtered_roms.append(rom)
            elif nointro_status == NOINTRO_STATUS_MISS and dp_mode in dp_modes_for_miss:
                filtered_roms.append(rom)
            elif nointro_status == NOINTRO_STATUS_UNKNOWN and dp_mode in dp_modes_for_unknown:
                filtered_roms.append(rom)
            # >> Always copy roms with unknown status (AUDIT_STATUS_NONE)
            else:
                filtered_roms.append(rom)
        return filtered_roms

    def get_rom_extensions_combined(self):
        # Raw '|'-separated extension string, e.g. 'zip|7z'.
        return self.entity_data['romext']

    def get_rom_extensions(self):
        # Extension string split into a list; empty list when never configured.
        if not 'romext' in self.entity_data:
            return []
        return self.entity_data['romext'].split("|")

    def change_rom_extensions(self, ext):
        self.entity_data['romext'] = ext

    def get_parent_roms(self):
        return self.romsetRepository.find_by_launcher(self, LAUNCHER_DMODE_PCLONE)

    def get_pclone_indices(self):
        return self.romsetRepository.find_index_file_by_launcher(self, ROMSET_PCLONE)

    def get_parent_indices(self):
        return self.romsetRepository.find_index_file_by_launcher(self, ROMSET_CPARENT)

    def update_parent_rom_set(self, roms):
        """Persist *roms* (dict or iterable of ROMs) as the Parent/Clone ROM set."""
        if not isinstance(roms, dict):
            roms = dict((rom.get_id(), rom) for rom in roms)
        self.romsetRepository.save_rom_set(self, roms, LAUNCHER_DMODE_PCLONE)
# --- Retroplayer launcher ---
# See https://github.com/Wintermute0110/plugin.program.advanced.emulator.launcher/issues/33
# See https://forum.kodi.tv/showthread.php?tid=295463&pid=2620489#pid2620489
# -------------------------------------------------------------------------------------------------
class RetroplayerLauncher(ROMLauncherABC):
#
# Handle in this constructor the creation of a new empty ROM Launcher.
# Concrete classes are responsible of creating a default entity_data dictionary
# with sensible defaults.
#
    def __init__(self, PATHS, settings, launcher_dic, objectRepository,
                 executorFactory, romsetRepository, statsStrategy):
        # When launcher_dic is None a brand-new launcher is being created: build a
        # default data dictionary with a fresh id and the Retroplayer type.
        if launcher_dic is None:
            launcher_dic = fs_new_launcher()
            launcher_dic['id'] = misc_generate_random_SID()
            launcher_dic['type'] = OBJ_LAUNCHER_RETROPLAYER
        super(RetroplayerLauncher, self).__init__(
            PATHS, settings, launcher_dic, objectRepository, executorFactory, romsetRepository, statsStrategy
        )
# --------------------------------------------------------------------------------------------
# Core functions
# --------------------------------------------------------------------------------------------
def get_object_name(self): return 'Retroplayer launcher'
def get_assets_kind(self): return KIND_ASSET_LAUNCHER
def get_launcher_type(self): return OBJ_LAUNCHER_RETROPLAYER
def save_to_disk(self): self.objectRepository.save_launcher(self.entity_data)
    def delete_from_disk(self):
        # Object becomes invalid after deletion.
        self.objectRepository.delete_launcher(self.entity_data)
        self.entity_data = None
        self.objectRepository = None
# --------------------------------------------------------------------------------------------
# Launcher build wizard methods
# --------------------------------------------------------------------------------------------
#
# Creates a new launcher using a wizard of dialogs. Called by build()
#
    def _builder_get_wizard(self, wizard):
        """Chain the dialog steps that build a new Retroplayer launcher."""
        log_debug('RetroplayerLauncher::_builder_get_wizard() Starting ...')
        # Retroplayer launcher must not ask for a launcher app name.
        wizard = WizardDialog_Dummy(wizard, 'application', RETROPLAYER_LAUNCHER_APP_NAME)
        wizard = WizardDialog_FileBrowse(wizard, 'rompath', 'Select the ROMs path',
                                        0, '')
        wizard = WizardDialog_Dummy(wizard, 'romext', '',
                                    self._builder_get_extensions_from_app_path)
        wizard = WizardDialog_Keyboard(wizard, 'romext','Set ROM extensions, use "|" as separator. (e.g lnk|cbr)')
        wizard = WizardDialog_Dummy(wizard, 'args', '$rom$')
        # NOTE(review): m_name appears twice (Dummy then Keyboard) — the Dummy step
        # seems to pre-fill the default title before the keyboard prompt; confirm
        # against the WizardDialog implementations.
        wizard = WizardDialog_Dummy(wizard, 'm_name', '',
                                    self._builder_get_title_from_app_path)
        wizard = WizardDialog_Keyboard(wizard, 'm_name','Set the title of the launcher',
                                       self._builder_get_title_from_app_path)
        wizard = WizardDialog_Selection(wizard, 'platform', 'Select the platform',
                                        AEL_platform_list)
        wizard = WizardDialog_Dummy(wizard, 'assets_path', '',
                                    self._builder_get_value_from_rompath)
        wizard = WizardDialog_FileBrowse(wizard, 'assets_path', 'Select asset/artwork directory',
                                         0, '')
        return wizard
    def _build_pre_wizard_hook(self):
        # No pre-wizard work needed for Retroplayer launchers.
        log_debug('RetroplayerLauncher::_build_pre_wizard_hook() Starting ...')
        return True
    def _build_post_wizard_hook(self):
        # Delegate post-wizard processing to the base ROM launcher implementation.
        log_debug('RetroplayerLauncher::_build_post_wizard_hook() Starting ...')
        return super(RetroplayerLauncher, self)._build_post_wizard_hook()
# --------------------------------------------------------------------------------------------
# Launcher edit methods
# --------------------------------------------------------------------------------------------
    def get_main_edit_options(self, category):
        """Return the context-menu options shown when editing this launcher."""
        log_debug('RetroplayerLauncher::get_main_edit_options() Returning edit options')
        options = collections.OrderedDict()
        options['EDIT_METADATA']          = 'Edit Metadata ...'
        options['EDIT_ASSETS']            = 'Edit Assets/Artwork ...'
        options['EDIT_DEFAULT_ASSETS']    = 'Choose default Assets/Artwork ...'
        options['EDIT_LAUNCHER_CATEGORY'] = "Change Category: '{0}'".format(category.get_name())
        options['EDIT_LAUNCHER_STATUS']   = 'Launcher status: {0}'.format(self.get_finished_str())
        options['LAUNCHER_ADVANCED_MODS'] = 'Advanced Modifications ...'
        options['LAUNCHER_MANAGE_ROMS']   = 'Manage ROMs ...'
        options['LAUNCHER_AUDIT_ROMS']    = 'Audit ROMs / Launcher view mode ...'
        options['EXPORT_LAUNCHER_XML']    = 'Export Launcher XML configuration ...'
        options['DELETE_LAUNCHER']        = 'Delete Launcher'
        return options
def get_advanced_modification_options(self):
log_debug('RetroplayerLauncher::get_advanced_modification_options() Returning edit options')
toggle_window_str = 'ON' if self.entity_data['toggle_window'] else 'OFF'
non_blocking_str = 'ON' if self.entity_data['non_blocking'] else 'OFF'
multidisc_str = 'ON' if self.entity_data['multidisc'] else 'OFF'
options = collections.OrderedDict()
options['EDIT_ROMPATH'] = "Change ROM path: '{0}'".format(self.entity_data['rompath'])
options['EDIT_ROMEXT'] = "Modify ROM extensions: '{0}'".format(self.entity_data['romext'])
options['EDIT_TOGGLE_WINDOWED'] = "Toggle Kodi into windowed mode (now {0})".format(toggle_window_str)
options['EDIT_TOGGLE_NONBLOCKING'] = "Non-blocking launcher (now {0})".format(non_blocking_str)
options['EDIT_TOGGLE_MULTIDISC'] = "Multidisc ROM support (now {0})".format(multidisc_str)
return options
# ---------------------------------------------------------------------------------------------
# Execution methods
# ---------------------------------------------------------------------------------------------
def launch(self):
log_info('RetroplayerLauncher::launch() Executing ROM with Kodi Retroplayer ...')
self.title = self.rom.get_name()
self._selectApplicationToUse()
ROMFileName = self._selectRomFileToUse()
# >> Create listitem object
label_str = ROMFileName.getBase()
listitem = xbmcgui.ListItem(label = label_str, label2 = label_str)
# >> Listitem metadata
# >> How to fill gameclient = string (game.libretro.fceumm) ???
genre_list = list(rom['m_genre'])
listitem.setInfo('game', {'title' : label_str, 'platform' : 'Test platform',
'genres' : genre_list, 'developer' : rom['m_developer'],
'overview' : rom['m_plot'], 'year' : rom['m_year'] })
log_info('RetroplayerLauncher() application.getPath() "{0}"'.format(application.getPath()))
log_info('RetroplayerLauncher() ROMFileName.getPath() "{0}"'.format(ROMFileName.getPath()))
log_info('RetroplayerLauncher() label_str "{0}"'.format(label_str))
# --- User notification ---
if self.settings['display_launcher_notify']:
kodi_notify('Launching "{0}" with Retroplayer'.format(self.title))
log_verb('RetroplayerLauncher() Calling xbmc.Player().play() ...')
xbmc.Player().play(ROMFileName.getPath(), listitem)
log_verb('RetroplayerLauncher() Calling xbmc.Player().play() returned. Leaving function.')
def _launch_selectApplicationToUse(self): raise AddonError('Implement me!')
def _launch_selectArgumentsToUse(self): raise AddonError('Implement me!')
def _launch_selectRomFileToUse(self): raise AddonError('Implement me!')
# ---------------------------------------------------------------------------------------------
# Misc methods
# ---------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------------
# Read RetroarchLauncher.md
# -------------------------------------------------------------------------------------------------
class RetroarchLauncher(StandardRomLauncher):
    """ROM launcher that runs games through the Retroarch frontend.

    Supports Windows, Linux and Android. On desktop platforms the retroarch
    executable is invoked directly with -L/-c core/config arguments; on
    Android, Retroarch is started through an 'am start' intent.
    """
    #
    # Handle in this constructor the creation of a new empty ROM Launcher.
    # Concrete classes are responsible of creating a default entity_data dictionary
    # with sensible defaults.
    #
    def __init__(self, PATHS, settings, launcher_dic, objectRepository,
                 executorFactory, romsetRepository, statsStrategy):
        if launcher_dic is None:
            launcher_dic = fs_new_launcher()
            launcher_dic['id'] = misc_generate_random_SID()
            launcher_dic['type'] = OBJ_LAUNCHER_RETROARCH
        super(RetroarchLauncher, self).__init__(
            PATHS, settings, launcher_dic, objectRepository, executorFactory, romsetRepository, statsStrategy
        )

    # --------------------------------------------------------------------------------------------
    # Core functions
    # --------------------------------------------------------------------------------------------
    def get_object_name(self): return 'Retroarch launcher'

    def get_assets_kind(self): return KIND_ASSET_LAUNCHER

    def get_launcher_type(self): return OBJ_LAUNCHER_RETROARCH

    # Persist this launcher's data through the object repository.
    def save_to_disk(self): self.objectRepository.save_launcher(self.entity_data)

    def delete_from_disk(self):
        # Object becomes invalid after deletion.
        self.objectRepository.delete_launcher(self.entity_data)
        self.entity_data = None
        self.objectRepository = None

    # --------------------------------------------------------------------------------------------
    # Launcher build wizard methods
    # --------------------------------------------------------------------------------------------
    #
    # Creates a new launcher using a wizard of dialogs.
    #
    def _builder_get_wizard(self, wizard):
        log_debug('RetroarchLauncher::_builder_get_wizard() Starting ...')
        # Guess a sensible Retroarch folder first, then let the user browse for it.
        wizard = WizardDialog_Dummy(wizard, 'application',
            self._builder_get_retroarch_app_folder(self.settings))
        wizard = WizardDialog_FileBrowse(wizard, 'application', 'Select the Retroarch path',
            0, '')
        # Pick a retroarch.cfg from the scanned candidates, or browse manually.
        wizard = WizardDialog_DictionarySelection(wizard, 'retro_config', 'Select the configuration',
            self._builder_get_available_retroarch_configurations)
        wizard = WizardDialog_FileBrowse(wizard, 'retro_config', 'Select the configuration',
            0, '', None, self._builder_user_selected_custom_browsing)
        # Pick a libretro core from the scanned .info files, or type a core path.
        wizard = WizardDialog_DictionarySelection(wizard, 'retro_core_info', 'Select the core',
            self._builder_get_available_retroarch_cores, self._builder_load_selected_core_info)
        wizard = WizardDialog_Keyboard(wizard, 'retro_core_info', 'Enter path to core file',
            self._builder_load_selected_core_info, self._builder_user_selected_custom_browsing)
        wizard = WizardDialog_FileBrowse(wizard, 'rompath', 'Select the ROMs path',
            0, '')
        wizard = WizardDialog_Keyboard(wizard, 'romext','Set files extensions, use "|" as separator. (e.g nes|zip)')
        wizard = WizardDialog_Dummy(wizard, 'args',
            self._builder_get_default_retroarch_arguments())
        wizard = WizardDialog_Keyboard(wizard, 'args', 'Extra application arguments')
        wizard = WizardDialog_Keyboard(wizard, 'm_name','Set the title of the launcher',
            self._builder_get_title_from_app_path)
        wizard = WizardDialog_Selection(wizard, 'platform', 'Select the platform',
            AEL_platform_list)
        wizard = WizardDialog_Dummy(wizard, 'assets_path', '',
            self._builder_get_value_from_rompath)
        wizard = WizardDialog_FileBrowse(wizard, 'assets_path', 'Select asset/artwork directory',
            0, '')
        return wizard

    #
    # In all platforms except Android:
    # 1) Check if user has configured the Retroarch executable, cores and system dir.
    # 2) Check if user has configured the Retroarch cores dir.
    # 3) Check if user has configured the Retroarch system dir.
    #
    # In Android:
    # 1) ...
    #
    # If any condition fails abort Retroarch launcher creation.
    #
    def _build_pre_wizard_hook(self):
        log_debug('RetroarchLauncher::_build_pre_wizard_hook() Starting ...')
        return True

    def _build_post_wizard_hook(self):
        log_debug('RetroarchLauncher::_build_post_wizard_hook() Starting ...')
        return super(RetroarchLauncher, self)._build_post_wizard_hook()

    # Best-effort guess of the Retroarch installation folder, used as the
    # wizard's default for the 'application' setting. Returns '/' when no
    # known location exists.
    def _builder_get_retroarch_app_folder(self, settings):
        if not is_android():
            # --- All platforms except Android ---
            retroarch_folder = FileName(settings['retroarch_system_dir'], isdir = True)
            if retroarch_folder.exists():
                return retroarch_folder.getPath()
        else:
            # --- Android ---
            # Well-known Retroarch data locations on Android devices.
            android_retroarch_folders = [
                '/storage/emulated/0/Android/data/com.retroarch/',
                '/data/data/com.retroarch/',
                '/storage/sdcard0/Android/data/com.retroarch/',
                '/data/user/0/com.retroarch'
            ]
            for retroach_folder_path in android_retroarch_folders:
                retroarch_folder = FileName(retroach_folder_path)
                if retroarch_folder.exists():
                    return retroarch_folder.getPath()
        # Fallback when nothing was found.
        return '/'

    # Scan known folders for retroarch *.cfg configuration files.
    # Returns an OrderedDict {path: display name} always containing a
    # 'BROWSE' entry so the user can pick a file manually.
    def _builder_get_available_retroarch_configurations(self, item_key, launcher):
        configs = collections.OrderedDict()
        configs['BROWSE'] = 'Browse for configuration'

        retroarch_folders = []
        retroarch_folders.append(FileName(launcher['application']))
        if is_android():
            retroarch_folders.append(FileName('/storage/emulated/0/Android/data/com.retroarch/'))
            retroarch_folders.append(FileName('/data/data/com.retroarch/'))
            retroarch_folders.append(FileName('/storage/sdcard0/Android/data/com.retroarch/'))
            retroarch_folders.append(FileName('/data/user/0/com.retroarch/'))

        for retroarch_folder in retroarch_folders:
            log_debug("get_available_retroarch_configurations() scanning path '{0}'".format(retroarch_folder.getPath()))
            files = retroarch_folder.recursiveScanFilesInPath('*.cfg')
            if len(files) < 1: continue
            for file in files:
                log_debug("get_available_retroarch_configurations() adding config file '{0}'".format(file.getPath()))
                configs[file.getPath()] = file.getBaseNoExt()
            # Return as soon as one folder yielded configuration files;
            # remaining candidate folders are not scanned.
            return configs

        return configs

    # Build an OrderedDict {info file path: core display name} of the
    # available libretro cores, based on the *.info files referenced by the
    # selected Retroarch configuration.
    def _builder_get_available_retroarch_cores(self, item_key, launcher):
        cores_sorted = collections.OrderedDict()
        cores_ext = ''

        if is_windows():
            cores_ext = 'dll'
        else:
            cores_ext = 'so'

        config_file = FileName(launcher['retro_config'])
        if not config_file.exists():
            log_warning('Retroarch config file not found: {}'.format(config_file.getPath()))
            kodi_notify_error('Retroarch config file not found {}. Change path first.'.format(config_file.getPath()))
            return cores_sorted

        parent_dir = FileName(config_file.getDir())
        configuration = config_file.readPropertyFile()
        info_folder = self._create_path_from_retroarch_setting(configuration['libretro_info_path'], parent_dir)
        cores_folder = self._create_path_from_retroarch_setting(configuration['libretro_directory'], parent_dir)
        log_debug("get_available_retroarch_cores() scanning path '{0}'".format(cores_folder.getPath()))

        if not info_folder.exists():
            log_warning('Retroarch info folder not found {}'.format(info_folder.getPath()))
            kodi_notify_error('Retroarch info folder not found {}. Read documentation'.format(info_folder.getPath()))
            return cores_sorted

        # scan based on info folder and files since Retroarch on Android has it's core files in
        # the app folder which is not readable without root privileges. Changing the cores folder
        # will not work since Retroarch won't be able to load cores from a different folder due
        # to security reasons. Changing that setting under Android will only result in a reset
        # of that value after restarting Retroarch ( https://forums.libretro.com/t/directory-settings-wont-save/12753/3 )
        # So we will scan based on info files (which setting path can be changed) and guess that
        # the core files will be available.
        cores = {}
        files = info_folder.scanFilesInPath('*.info')
        for info_file in files:
            # Skip the template info file shipped with Retroarch.
            if info_file.getBaseNoExt() == '00_example_libretro':
                continue
            log_debug("get_available_retroarch_cores() adding core using info '{0}'".format(info_file.getPath()))
            # check if core exists, if android just skip and guess it exists
            if not is_android():
                core_file = self._switch_info_to_core_file(info_file, cores_folder, cores_ext)
                if not core_file.exists():
                    log_warning('get_available_retroarch_cores() Cannot find "{}". Skipping info "{}"'.format(core_file.getPath(), info_file.getBase()))
                    continue
                log_debug("get_available_retroarch_cores() using core '{0}'".format(core_file.getPath()))
            core_info = info_file.readPropertyFile()
            cores[info_file.getPath()] = core_info['display_name']

        # Manual-entry option first, then the cores sorted by display name.
        cores_sorted['BROWSE'] = 'Manual enter path to core'
        for core_item in sorted(cores.items(), key=lambda x: x[1]):
            cores_sorted[core_item[0]] = core_item[1]
        return cores_sorted

    # Wizard callback: resolve the selected core (info file path or direct
    # core file path) and copy its metadata into the launcher dictionary.
    # When ask_overwrite is True the user is asked before metadata
    # (platform, developer, name, extensions) is overwritten.
    def _builder_load_selected_core_info(self, input, item_key, launcher, ask_overwrite=False):
        if input == 'BROWSE':
            return input

        if is_windows():
            cores_ext = 'dll'
        else:
            cores_ext = 'so'

        # The user entered a core file directly: store it and keep metadata as-is.
        if input.endswith(cores_ext):
            core_file = FileName(input)
            launcher['retro_core'] = core_file.getPath()
            return input

        # Otherwise 'input' is an .info file: derive the core file from it.
        config_file = FileName(launcher['retro_config'])
        parent_dir = FileName(config_file.getDir())
        configuration = config_file.readPropertyFile()
        cores_folder = self._create_path_from_retroarch_setting(configuration['libretro_directory'], parent_dir)
        info_file = FileName(input)

        core_file = self._switch_info_to_core_file(info_file, cores_folder, cores_ext)
        core_info = info_file.readPropertyFile()
        launcher[item_key] = info_file.getPath()
        launcher['retro_core'] = core_file.getPath()

        # Declining the overwrite keeps the previously stored metadata.
        if ask_overwrite and not kodi_dialog_yesno('Do you also want to overwrite previous settings for platform, developer etc.'):
            return input

        launcher['romext'] = core_info['supported_extensions']
        launcher['platform'] = core_info['systemname']
        launcher['m_developer'] = core_info['manufacturer']
        launcher['m_name'] = core_info['systemname']
        return input

    # Default extra arguments for the 'args' setting (Android needs IME and
    # refresh-rate hints for the Retroarch intent).
    def _builder_get_default_retroarch_arguments(self):
        args = ''
        if is_android():
            args += '-e IME com.android.inputmethod.latin/.LatinIME -e REFRESH 60'
        return args

    # --------------------------------------------------------------------------------------------
    # Launcher edit methods
    # --------------------------------------------------------------------------------------------
    # Top-level 'Edit launcher' context-menu options.
    def get_main_edit_options(self, category):
        log_debug('RetroarchLauncher::get_main_edit_options() Returning edit options')
        options = collections.OrderedDict()
        options['EDIT_METADATA'] = 'Edit Metadata ...'
        options['EDIT_ASSETS'] = 'Edit Assets/Artwork ...'
        options['EDIT_DEFAULT_ASSETS'] = 'Choose default Assets/Artwork ...'
        options['EDIT_LAUNCHER_CATEGORY'] = "Change Category: '{0}'".format(category.get_name())
        options['EDIT_LAUNCHER_STATUS'] = 'Launcher status: {0}'.format(self.get_finished_str())
        options['LAUNCHER_ADVANCED_MODS'] = 'Advanced Modifications ...'
        options['LAUNCHER_MANAGE_ROMS'] = 'Manage ROMs ...'
        options['LAUNCHER_AUDIT_ROMS'] = 'Audit ROMs / Launcher view mode ...'
        options['EXPORT_LAUNCHER_XML'] = 'Export Launcher XML configuration ...'
        options['DELETE_LAUNCHER'] = 'Delete Launcher'
        return options

    # 'Advanced Modifications' context-menu options; toggles show current state.
    def get_advanced_modification_options(self):
        log_debug('RetroarchLauncher::get_advanced_modification_options() Returning edit options')
        toggle_window_str = 'ON' if self.entity_data['toggle_window'] else 'OFF'
        non_blocking_str = 'ON' if self.entity_data['non_blocking'] else 'OFF'
        multidisc_str = 'ON' if self.entity_data['multidisc'] else 'OFF'
        options = collections.OrderedDict()
        options['EDIT_APPLICATION'] = "Change Retroarch App path: '{0}'".format(self.entity_data['application'])
        options['CHANGE_RETROARCH_CONF']= "Change config: '{0}'".format(self.entity_data['retro_config'])
        options['CHANGE_RETROARCH_CORE']= "Change core: '{0}'".format(self.entity_data['retro_core'])
        options['EDIT_ARGS'] = "Modify Arguments: '{0}'".format(self.entity_data['args'])
        options['EDIT_ADDITIONAL_ARGS'] = "Modify aditional arguments ..."
        options['EDIT_ROMPATH'] = "Change ROM path: '{0}'".format(self.entity_data['rompath'])
        options['EDIT_ROMEXT'] = "Modify ROM extensions: '{0}'".format(self.entity_data['romext'])
        options['TOGGLE_WINDOWED'] = "Toggle Kodi into windowed mode (now {0})".format(toggle_window_str)
        options['TOGGLE_NONBLOCKING'] = "Non-blocking launcher (now {0})".format(non_blocking_str)
        options['TOGGLE_MULTIDISC'] = "Multidisc ROM support (now {0})".format(multidisc_str)
        return options

    def get_available_cores(self):
        return self._builder_get_available_retroarch_cores('retro_core_info', self.get_data_dic())

    def get_available_configs(self):
        return self._builder_get_available_retroarch_configurations('retro_config', self.get_data_dic())

    # Ask the user for a new Retroarch application path.
    # Returns True when the path changed, False otherwise.
    def change_application(self):
        current_application = self.entity_data['application']
        # NOTE: .decode('utf-8') is a Python 2 idiom (Kodi Leia-era addon API).
        selected_application = xbmcgui.Dialog().browse(0, 'Select the Retroarch App path', 'files',
            '', False, False, current_application).decode('utf-8')
        if selected_application is None or selected_application == current_application:
            return False
        self.entity_data['application'] = selected_application
        return True

    def change_config(self, config_path):
        self.entity_data['retro_config'] = config_path

    def change_core(self, selected_core_file):
        # ask_overwrite=True: user confirms before metadata is replaced.
        self._builder_load_selected_core_info(selected_core_file, 'retro_core_info', self.entity_data, True)

    # ---------------------------------------------------------------------------------------------
    # Execution methods
    # ---------------------------------------------------------------------------------------------
    # Resolve the application to execute: retroarch.exe on Windows, the 'am'
    # activity manager binary on Android. Returns False on unsupported OSes.
    def _launch_selectApplicationToUse(self):
        if is_windows():
            self.application = FileName(self.entity_data['application'])
            self.application = self.application.append('retroarch.exe')
            return True

        if is_android():
            self.application = FileName('/system/bin/am')
            return True

        # TODO other os
        self.application = ''
        return False

    # Build the command-line/intent arguments. $retro_core$, $retro_config$
    # and $rom$ placeholders are substituted later by the executor.
    def _launch_selectArgumentsToUse(self):
        if is_windows() or is_linux():
            self.arguments = '-L "$retro_core$" '
            self.arguments += '-c "$retro_config$" '
            self.arguments += '"$rom$"'
            # NOTE(review): extra args are appended without a separating space
            # after "$rom$" — confirm this is handled downstream.
            self.arguments += self.entity_data['args']
            return True

        if is_android():
            # Start the Retroarch activity with ROM/core/config as intent extras.
            android_app_path = self.entity_data['application']
            android_app = next(s for s in reversed(android_app_path.split('/')) if s)
            self.arguments = 'start --user 0 -a android.intent.action.MAIN -c android.intent.category.LAUNCHER '
            self.arguments += '-n {}/com.retroarch.browser.retroactivity.RetroActivityFuture '.format(android_app)
            self.arguments += '-e ROM \'$rom$\' '
            self.arguments += '-e LIBRETRO $retro_core$ '
            self.arguments += '-e CONFIGFILE $retro_config$ '
            self.arguments += self.entity_data['args'] if 'args' in self.entity_data else ''
            return True

        # TODO: other OSes
        return False

    # ---------------------------------------------------------------------------------------------
    # Misc methods
    # ---------------------------------------------------------------------------------------------
    # Turn a path value from retroarch.cfg into a FileName. Retroarch uses the
    # ':\' prefix to mean 'relative to the config file directory'.
    def _create_path_from_retroarch_setting(self, path_from_setting, parent_dir):
        if path_from_setting.startswith(':\\'):
            path_from_setting = path_from_setting[2:]
            return parent_dir.pjoin(path_from_setting, isdir=True)
        else:
            folder = FileName(path_from_setting, isdir=True)
            # if '/data/user/0/' in folder.getPath():
            #     alternative_folder = folder.getPath()
            #     alternative_folder = alternative_folder.replace('/data/user/0/', '/data/data/')
            #     folder = FileName(alternative_folder, isdir=True)
            return folder

    # Map a core file to its matching .info file. On Android core files carry
    # an '_android' suffix that the info files do not have.
    def _switch_core_to_info_file(self, core_file, info_folder):
        info_file = core_file.changeExtension('info')
        if is_android():
            info_file = info_folder.pjoin(info_file.getBase().replace('_android', ''))
        else:
            info_file = info_folder.pjoin(info_file.getBase())
        return info_file

    # Inverse of _switch_core_to_info_file(): map an .info file to the
    # expected core file (adding the '_android' suffix on Android).
    def _switch_info_to_core_file(self, info_file, cores_folder, cores_ext):
        core_file = info_file.changeExtension(cores_ext)
        if is_android():
            core_file = cores_folder.pjoin(core_file.getBase().replace('.', '_android.'))
        else:
            core_file = cores_folder.pjoin(core_file.getBase())
        return core_file
# -------------------------------------------------------------------------------------------------
# Launcher for .lnk files (windows)
# -------------------------------------------------------------------------------------------------
class LnkLauncher(StandardRomLauncher):
    """Launcher for Windows .lnk shortcut files.

    Behaves like a standard ROM launcher whose 'ROMs' are .lnk shortcuts:
    the ROM extension is fixed to 'lnk' and the shortcut file itself is the
    launch argument ($rom$).
    """
    #
    # Handle in this constructor the creation of a new empty ROM Launcher.
    # Concrete classes are responsible of creating a default entity_data dictionary
    # with sensible defaults.
    #
    def __init__(self, PATHS, settings, launcher_dic, objectRepository,
                 executorFactory, romsetRepository, statsStrategy):
        if launcher_dic is None:
            launcher_dic = fs_new_launcher()
            launcher_dic['id'] = misc_generate_random_SID()
            # BUG FIX: the original set OBJ_LAUNCHER_RETROARCH here (copy-paste
            # from RetroarchLauncher). New LNK launchers must be typed
            # OBJ_LAUNCHER_LNK, consistent with get_launcher_type() below.
            launcher_dic['type'] = OBJ_LAUNCHER_LNK
        super(LnkLauncher, self).__init__(
            PATHS, settings, launcher_dic, objectRepository, executorFactory, romsetRepository, statsStrategy
        )

    # --------------------------------------------------------------------------------------------
    # Core functions
    # --------------------------------------------------------------------------------------------
    def get_object_name(self): return 'LNK launcher'

    def get_assets_kind(self): return KIND_ASSET_LAUNCHER

    def get_launcher_type(self): return OBJ_LAUNCHER_LNK

    # Persist this launcher's data through the object repository.
    # NOTE(review): unlike the sibling launchers there is no delete_from_disk()
    # override here; confirm StandardRomLauncher provides deletion.
    def save_to_disk(self): self.objectRepository.save_launcher(self.entity_data)

    # --------------------------------------------------------------------------------------------
    # Launcher build wizard methods
    # --------------------------------------------------------------------------------------------
    #
    # Creates a new launcher using a wizard of dialogs.
    #
    def _builder_get_wizard(self, wizard):
        log_debug('LnkLauncher::_builder_get_wizard() Returning edit options')
        wizard = WizardDialog_FileBrowse(wizard, 'rompath', 'Select the LNKs path',
            0, '')
        wizard = WizardDialog_Dummy(wizard, 'romext', 'lnk')
        wizard = WizardDialog_Dummy(wizard, 'args', '$rom$')
        # BUG FIX: the original referenced self._get_title_from_app_path and
        # self._get_value_from_rompath, names not defined in this hierarchy.
        # The sibling launchers (Retroplayer/Retroarch) use the _builder_*
        # helpers, so use those here too.
        wizard = WizardDialog_Dummy(wizard, 'm_name', '',
            self._builder_get_title_from_app_path)
        wizard = WizardDialog_Keyboard(wizard, 'm_name','Set the title of the launcher',
            self._builder_get_title_from_app_path)
        wizard = WizardDialog_Selection(wizard, 'platform', 'Select the platform',
            AEL_platform_list)
        wizard = WizardDialog_Dummy(wizard, 'assets_path', '',
            self._builder_get_value_from_rompath)
        wizard = WizardDialog_FileBrowse(wizard, 'assets_path', 'Select asset/artwork directory',
            0, '')
        return wizard

    # --------------------------------------------------------------------------------------------
    # Launcher edit methods
    # --------------------------------------------------------------------------------------------
    # Top-level 'Edit launcher' context-menu options. LNK launchers have no
    # ROM-audit entry (no-intro audits do not apply to shortcuts).
    def get_main_edit_options(self, category):
        log_debug('LnkLauncher::get_main_edit_options() Returning edit options')
        options = collections.OrderedDict()
        options['EDIT_METADATA'] = 'Edit Metadata ...'
        options['EDIT_ASSETS'] = 'Edit Assets/Artwork ...'
        options['EDIT_DEFAULT_ASSETS'] = 'Choose default Assets/Artwork ...'
        options['EDIT_LAUNCHER_CATEGORY'] = "Change Category: '{0}'".format(category.get_name())
        options['EDIT_LAUNCHER_STATUS'] = 'Launcher status: {0}'.format(self.get_finished_str())
        options['LAUNCHER_ADVANCED_MODS'] = 'Advanced Modifications ...'
        options['LAUNCHER_MANAGE_ROMS'] = 'Manage ROMs ...'
        options['EXPORT_LAUNCHER_XML'] = 'Export Launcher XML configuration ...'
        options['DELETE_LAUNCHER'] = 'Delete Launcher'
        return options

    # 'Advanced Modifications' context-menu options; toggles show current state.
    def get_advanced_modification_options(self):
        log_debug('LnkLauncher::get_advanced_modification_options() Returning edit options')
        toggle_window_str = 'ON' if self.entity_data['toggle_window'] else 'OFF'
        non_blocking_str = 'ON' if self.entity_data['non_blocking'] else 'OFF'
        multidisc_str = 'ON' if self.entity_data['multidisc'] else 'OFF'
        options = collections.OrderedDict()
        options['EDIT_ROMPATH'] = "Change ROM path: '{0}'".format(self.entity_data['rompath'])
        options['EDIT_ROMEXT'] = "Modify ROM extensions: '{0}'".format(self.entity_data['romext'])
        options['EDIT_TOGGLE_WINDOWED'] = "Toggle Kodi into windowed mode (now {0})".format(toggle_window_str)
        options['EDIT_TOGGLE_NONBLOCKING'] = "Non-blocking launcher (now {0})".format(non_blocking_str)
        options['EDIT_TOGGLE_MULTIDISC'] = "Multidisc ROM support (now {0})".format(multidisc_str)
        return options
# -------------------------------------------------------------------------------------------------
# Launcher to use with a local Steam application and account.
# -------------------------------------------------------------------------------------------------
class SteamLauncher(ROMLauncherABC):
    """Launcher that starts games in a locally installed Steam client.

    Games are launched through the steam:// protocol handler using the
    game's Steam ID ('steamid' attribute) instead of a ROM file on disk.
    """
    # NOTE(review): this class still uses the older ROMLauncherABC constructor
    # signature, unlike the sibling launchers above; confirm it is
    # instantiated accordingly.
    def __init__(self, launcher_data, settings, executorFactory, romsetRepository, statsStrategy):
        super(SteamLauncher, self).__init__(
            launcher_data, settings, executorFactory, romsetRepository, statsStrategy, False
        )

    def get_launcher_type(self): return OBJ_LAUNCHER_STEAM

    def get_launcher_type_name(self): return 'Steam launcher'

    # --------------------------------------------------------------------------------------------
    # Launcher specific functions
    # --------------------------------------------------------------------------------------------
    def get_steam_id(self): return self.entity_data['steamid']

    def get_edit_options(self):
        # Steam libraries cannot be audited like file-based ROM sets, so drop
        # the audit entry from the inherited options.
        options = super(SteamLauncher, self).get_edit_options()
        del options['AUDIT_ROMS']
        return options

    # 'Advanced Modifications' options; toggles show current state.
    def get_advanced_modification_options(self):
        log_debug('SteamLauncher::get_advanced_modification_options() Returning edit options')
        toggle_window_str = 'ON' if self.entity_data['toggle_window'] else 'OFF'
        non_blocking_str = 'ON' if self.entity_data['non_blocking'] else 'OFF'
        options = super(SteamLauncher, self).get_advanced_modification_options()
        options['TOGGLE_WINDOWED'] = "Toggle Kodi into windowed mode (now {0})".format(toggle_window_str)
        options['TOGGLE_NONBLOCKING'] = "Non-blocking launcher (now {0})".format(non_blocking_str)
        return options

    def _selectApplicationToUse(self):
        # The 'application' is the steam:// protocol handler; the game id is
        # supplied via the arguments.
        self.application = FileName('steam://rungameid/')
        return True

    def _selectArgumentsToUse(self):
        # $steamid$ is substituted with the ROM's Steam ID by the executor.
        self.arguments = '$steamid$'
        return True

    def _selectRomFileToUse(self):
        # Steam 'ROMs' are identified by Steam ID, not by a file on disk.
        steam_id = self.rom.get_custom_attribute('steamid', '')
        log_info('SteamLauncher._selectRomFileToUse() ROM ID {0}: @{1}"'.format(steam_id, self.title))
        return True

    # Launcher-creation wizard for Steam launchers.
    def _get_builder_wizard(self, wizard):
        wizard = WizardDialog_Dummy(wizard, 'application', 'Steam')
        wizard = WizardDialog_Keyboard(wizard, 'steamid','Steam ID')
        wizard = WizardDialog_Dummy(wizard, 'm_name', 'Steam')
        wizard = WizardDialog_Keyboard(wizard, 'm_name','Set the title of the launcher',
            self._builder_get_title_from_app_path)
        # BUG FIX: the original passed a stray fifth argument 'wizard' here;
        # every other WizardDialog_Selection call site uses four arguments.
        wizard = WizardDialog_Selection(wizard, 'platform', 'Select the platform',
            AEL_platform_list)
        wizard = WizardDialog_FileBrowse(wizard, 'assets_path', 'Select asset/artwork directory',
            0, '')
        wizard = WizardDialog_Dummy(wizard, 'rompath', '',
            self._get_value_from_assetpath)
        return wizard

    # Default the (virtual) ROM path to <assets_path>/games when unset.
    def _get_value_from_assetpath(self, input, item_key, launcher):
        if input: return input
        romPath = FileName(launcher['assets_path'])
        romPath = romPath.pjoin('games')
        return romPath.getPath()
# -------------------------------------------------------------------------------------------------
# Launcher to use with Nvidia Gamestream servers.
# -------------------------------------------------------------------------------------------------
class NvidiaGameStreamLauncher(ROMLauncherABC):
#
# Handle in this constructor the creation of a new empty ROM Launcher.
# Concrete classes are responsible of creating a default entity_data dictionary
# with sensible defaults.
#
def __init__(self, PATHS, settings, launcher_dic, objectRepository,
executorFactory, romsetRepository, statsStrategy):
if launcher_dic is None:
launcher_dic = fs_new_launcher()
launcher_dic['id'] = misc_generate_random_SID()
launcher_dic['type'] = OBJ_LAUNCHER_NVGAMESTREAM
super(NvidiaGameStreamLauncher, self).__init__(
PATHS, settings, launcher_dic, objectRepository, executorFactory, romsetRepository, statsStrategy
)
# --------------------------------------------------------------------------------------------
# Core functions
# --------------------------------------------------------------------------------------------
    def get_object_name(self): return 'NVIDIA GameStream launcher'
    def get_assets_kind(self): return KIND_ASSET_LAUNCHER
    def get_launcher_type(self): return OBJ_LAUNCHER_NVGAMESTREAM
    # Persist this launcher's data through the object repository.
    def save_to_disk(self): self.objectRepository.save_launcher(self.entity_data)
    def delete_from_disk(self):
        # Object becomes invalid after deletion.
        self.objectRepository.delete_launcher(self.entity_data)
        self.entity_data = None
        self.objectRepository = None
# --------------------------------------------------------------------------------------------
# Launcher specific functions
# --------------------------------------------------------------------------------------------
def get_server(self): return self.entity_data['server']
def set_server(self, value): self.entity_data['server'] = value
def get_certificates_path(self): return self._get_value_as_filename('certificates_path')
def get_server_id(self): return self.entity_data['server_id'] if 'server_id' in self.entity_data else 0
def set_server_id(self, value): self.entity_data['server_id'] = value
# --------------------------------------------------------------------------------------------
# Launcher build wizard methods
# --------------------------------------------------------------------------------------------
#
# Creates a new launcher using a wizard of dialogs.
#
def _builder_get_wizard(self, wizard):
#UTILS_OPENSSL_AVAILABLE
log_debug('NvidiaGameStreamLauncher::_builder_get_wizard() SSL: "{0}"'.format(UTILS_OPENSSL_AVAILABLE))
log_debug('NvidiaGameStreamLauncher::_builder_get_wizard() Crypto: "{0}"'.format(UTILS_CRYPTOGRAPHY_AVAILABLE))
log_debug('NvidiaGameStreamLauncher::_builder_get_wizard() PyCrypto: "{0}"'.format(UTILS_PYCRYPTO_AVAILABLE))
info_txt = 'To pair with your Geforce Experience Computer we need to make use of valid certificates. '
info_txt += 'Unfortunately at this moment we cannot create these certificates directly from within Kodi.'
info_txt += 'Please read the wiki for details how to create them before you go further.'
wizard = WizardDialog_FormattedMessage(wizard, 'certificates_path', 'Pairing with Gamestream PC',
info_txt)
wizard = WizardDialog_DictionarySelection(wizard, 'application', 'Select the client',
{'NVIDIA': 'Nvidia', 'MOONLIGHT': 'Moonlight'},
self._builder_check_if_selected_gamestream_client_exists, lambda pk, p: is_android())
wizard = WizardDialog_DictionarySelection(wizard, 'application', 'Select the client',
{'JAVA': 'Moonlight-PC (java)', 'EXE': 'Moonlight-Chrome (not supported yet)'},
None, lambda pk,p: not is_android())
wizard = WizardDialog_FileBrowse(wizard, 'application', 'Select the Gamestream client jar',
1, self._builder_get_appbrowser_filter, None, lambda pk, p: not is_android())
wizard = WizardDialog_Keyboard(wizard, 'args', 'Additional arguments',
None, lambda pk, p: not is_android())
wizard = WizardDialog_Input(wizard, 'server', 'Gamestream Server',
xbmcgui.INPUT_IPADDRESS, self._builder_validate_gamestream_server_connection)
wizard = WizardDialog_Keyboard(wizard, 'm_name','Set the title of the launcher',
self._builder_get_title_from_app_path)
wizard = WizardDialog_FileBrowse(wizard, 'assets_path', 'Select asset/artwork directory', 0, '')
wizard = WizardDialog_Dummy(wizard, 'rompath', '',
self._builder_get_value_from_assetpath)
# Pairing with pin code will be postponed untill crypto and certificate support in kodi
# wizard = WizardDialog_Dummy(wizard, 'pincode', None, _builder_generatePairPinCode)
wizard = WizardDialog_Dummy(wizard, 'certificates_path', None,
self._builder_try_to_resolve_path_to_nvidia_certificates)
wizard = WizardDialog_FileBrowse(wizard, 'certificates_path', 'Select the path with valid certificates',
0, '', self._builder_validate_nvidia_certificates)
wizard = WizardDialog_Selection(wizard, 'platform', 'Select the platform',
AEL_platform_list)
return wizard
def _build_pre_wizard_hook(self):
log_debug('NvidiaGameStreamLauncher::_build_pre_wizard_hook() Starting ...')
return True
    def _build_post_wizard_hook(self):
        # Runs after the creation wizard finishes; delegates to the parent hook
        # and, on success, sets the default artwork box sizing.
        log_debug('NvidiaGameStreamLauncher::_build_post_wizard_hook() Starting ...')
        success = super(NvidiaGameStreamLauncher, self)._build_post_wizard_hook()
        if not success:
            return success
        # NOTE(review): BOX_SIZE_STEAM is used for GameStream launchers as
        # well — presumably because GameStream covers are Steam-like; confirm.
        self.set_box_sizing(BOX_SIZE_STEAM)
        return success
    # Generate a PIN code for pairing with the GameStream server.
    # Currently unused: PIN pairing is postponed until Kodi ships crypto /
    # certificate support (the wizard step that would use it is commented out).
    def _builder_generatePairPinCode(self, input, item_key, launcher):
        return GameStreamServer(None, None).generatePincode()
def _builder_check_if_selected_gamestream_client_exists(self, input, item_key, launcher):
if input == 'NVIDIA':
nvidiaDataFolder = FileName('/data/data/com.nvidia.tegrazone3/', isdir = True)
nvidiaAppFolder = FileName('/storage/emulated/0/Android/data/com.nvidia.tegrazone3/')
if not nvidiaAppFolder.exists() and not nvidiaDataFolder.exists():
kodi_notify_warn("Could not find Nvidia Gamestream client. Make sure it's installed.")
elif input == 'MOONLIGHT':
moonlightDataFolder = FileName('/data/data/com.limelight/', isdir = True)
moonlightAppFolder = FileName('/storage/emulated/0/Android/data/com.limelight/')
if not moonlightAppFolder.exists() and not moonlightDataFolder.exists():
kodi_notify_warn("Could not find Moonlight Gamestream client. Make sure it's installed.")
return input
def _builder_try_to_resolve_path_to_nvidia_certificates(self, input, item_key, launcher):
    # Ignore the wizard input and let GameStreamServer auto-detect the
    # location of the pairing certificates.
    return GameStreamServer.try_to_resolve_path_to_nvidia_certificates()
def _builder_validate_nvidia_certificates(self, input, item_key, launcher):
    # Check that the given directory holds valid pairing certificates.
    # Only warns on failure; the (normalized) path is returned either way.
    certificates_path = FileName(input)
    server = GameStreamServer(input, certificates_path)
    if not server.validate_certificates():
        kodi_notify_warn(
            'Could not find certificates to validate. Make sure you already paired with '
            'the server with the Shield or Moonlight applications.')
    return certificates_path.getPath()
def _builder_validate_gamestream_server_connection(self, input, item_key, launcher):
    # Try to reach the GameStream host; on success persist its identity
    # fields on the launcher dictionary. Always returns the input host string.
    server = GameStreamServer(input, None)
    if not server.connect():
        kodi_notify_warn('Could not connect to gamestream server')
        return input
    launcher['server_id'] = 4 # not yet known what the origin is
    launcher['server_uuid'] = server.get_uniqueid()
    launcher['server_hostname'] = server.get_hostname()
    log_debug('validate_gamestream_server_connection() Found correct gamestream server with id "{}" and hostname "{}"'.format(launcher['server_uuid'],launcher['server_hostname']))
    return input
# --------------------------------------------------------------------------------------------
# Launcher edit methods
# --------------------------------------------------------------------------------------------
def get_main_edit_options(self, category):
    # Top-level context-menu entries for editing this launcher.
    # An OrderedDict is returned so the menu order is deterministic.
    log_debug('NvidiaGameStreamLauncher::get_main_edit_options() Returning edit options')
    entries = [
        ('EDIT_METADATA', 'Edit Metadata ...'),
        ('EDIT_ASSETS', 'Edit Assets/Artwork ...'),
        ('EDIT_DEFAULT_ASSETS', 'Choose default Assets/Artwork ...'),
        ('EDIT_LAUNCHER_CATEGORY', "Change Category: '{0}'".format(category.get_name())),
        ('EDIT_LAUNCHER_STATUS', 'Launcher status: {0}'.format(self.get_finished_str())),
        ('LAUNCHER_ADVANCED_MODS', 'Advanced Modifications ...'),
        ('LAUNCHER_MANAGE_ROMS', 'Manage ROMs ...'),
        ('EXPORT_LAUNCHER_XML', 'Export Launcher XML configuration ...'),
        ('DELETE_LAUNCHER', 'Delete Launcher'),
    ]
    return collections.OrderedDict(entries)
#
# get_advanced_modification_options() is custom for every concrete launcher class.
#
def get_advanced_modification_options(self):
    # Context-menu entries for the "Advanced Modifications" submenu.
    log_debug('NvidiaGameStreamLauncher::get_advanced_modification_options() Returning edit options')
    toggle_window_str = 'ON' if self.entity_data['toggle_window'] else 'OFF'
    non_blocking_str = 'ON' if self.entity_data['non_blocking'] else 'OFF'
    # Map internal client IDs to their display names; unknown IDs pass through.
    client_display_names = {'NVIDIA': 'Nvidia', 'MOONLIGHT': 'Moonlight'}
    streamClient = self.entity_data['application']
    streamClient = client_display_names.get(streamClient, streamClient)
    options = collections.OrderedDict()
    options['EDIT_APPLICATION'] = "Change Application: '{0}'".format(streamClient)
    options['CHANGE_NVGS_SERVER_ID'] = "Change server ID: '{}'".format(self.get_server_id())
    options['CHANGE_NVGS_HOST'] = "Change host: '{}'".format(self.entity_data['server'])
    options['CHANGE_NVGS_CERTS'] = "Change certificates: '{}'".format(self.get_certificates_path().getPath())
    options['UPDATE_NVGS_SERVER'] = "Update server info"
    options['TOGGLE_WINDOWED'] = "Toggle Kodi into windowed mode (now {0})".format(toggle_window_str)
    options['TOGGLE_NONBLOCKING'] = "Non-blocking launcher (now {0})".format(non_blocking_str)
    return options
def change_application(self):
    # Interactive dialog to change the GameStream client application.
    # Returns True when the setting changed, False on cancel/no-change.
    current_application = self.entity_data['application']
    on_android = is_android()
    if on_android:
        options = {'NVIDIA': 'Nvidia', 'MOONLIGHT': 'Moonlight'}
    else:
        options = {'JAVA': 'Moonlight-PC (java)', 'EXE': 'Moonlight-Chrome (not supported yet)'}
    dialog = KodiOrdDictionaryDialog()
    selected_application = dialog.select('Select the client', options)
    # On Android verify the selected client app is actually installed.
    if on_android and not self._builder_check_if_selected_gamestream_client_exists(selected_application, None, None):
        return False
    # On desktop the java client needs the path to its jar file.
    if not on_android and selected_application == 'JAVA':
        selected_application = xbmcgui.Dialog().browse(1, 'Select the Gamestream client jar', 'files',
            self._builder_get_appbrowser_filter('application', self.entity_data),
            False, False, current_application).decode('utf-8')
    if selected_application is None or selected_application == current_application:
        return False
    self.entity_data['application'] = selected_application
    return True
def change_certificate_path(self, path_str):
    # Validate the certificates directory first, then persist the
    # (possibly normalized) path on the launcher data.
    self.entity_data['certificates_path'] = self._builder_validate_nvidia_certificates(
        path_str, 'certificates_path', self.entity_data)
def update_server_info(self):
    # Re-query the configured host and refresh the stored server identity fields.
    host = self.entity_data['server']
    self._builder_validate_gamestream_server_connection(host, 'server', self.entity_data)
# ---------------------------------------------------------------------------------------------
# Execution methods
# ---------------------------------------------------------------------------------------------
def _launch_selectApplicationToUse(self):
    # Resolve self.application (the executable to run) from the configured
    # stream client. Always returns True.
    streamClient = self.entity_data['application']
    if '.jar' in streamClient:
        # Moonlight-PC: launch through the JVM found under JAVA_HOME.
        # NOTE(review): assumes JAVA_HOME is set in the environment — FileName(None)
        # otherwise; confirm expected behavior when it is missing.
        java_home = FileName(os.getenv("JAVA_HOME"))
        java_relpath = 'bin\\java.exe' if is_windows() else 'bin/java'
        self.application = java_home.pjoin(java_relpath)
        return True
    if is_windows():
        self.application = FileName(streamClient)
        return True
    if is_android():
        # Android clients are started through the activity manager.
        self.application = FileName('/system/bin/am')
        return True
    return True
def _launch_selectArgumentsToUse(self):
    # Build self.arguments for the configured stream client. The $...$ tokens
    # are placeholders substituted later by the launch machinery.
    streamClient = self.entity_data['application']
    # java application selected (moonlight-pc)
    if '.jar' in streamClient:
        parts = [
            '-jar "$application$" ',
            '-host $server$ ',
            '-fs ',
            '-app "$gamestream_name$" ',
            self.entity_data['args'],
        ]
        self.arguments = ''.join(parts)
        return True
    if is_android():
        if streamClient == 'NVIDIA':
            parts = [
                'start --user 0 -a android.intent.action.VIEW ',
                '-n com.nvidia.tegrazone3/com.nvidia.grid.UnifiedLaunchActivity ',
                '-d nvidia://stream/target/$server_id$/$streamid$',
            ]
            self.arguments = ''.join(parts)
            return True
        if streamClient == 'MOONLIGHT':
            parts = [
                'start --user 0 -a android.intent.action.MAIN ',
                '-c android.intent.category.LAUNCHER ',
                '-n com.limelight/com.limelight.ShortcutTrampoline ',
                '-e Host $server$ ',
                '-e AppId $streamid$ ',
                '-e AppName "$gamestream_name$" ',
                '-e PcName "$server_hostname$" ',
                '-e UUID $server_uuid$ ',
                '-e UniqueId {} '.format(misc_generate_random_SID()),
            ]
            self.arguments = ''.join(parts)
            return True
    # Any other client: pass the stored arguments through unchanged.
    self.arguments = self.entity_data['args']
    return True
def _launch_selectRomFileToUse(self): return True
# ------------------------------------------------------------------------------------------------
# --- AEL Object Factory -------------------------------------------------------------------------
#
# * Used to create an AEL object that is a child of MetaDataItemACB().
#
# * A global and unique instance of AELObjectFactory is created in main.py to create all
# required objects in the addon.
#
# * For performance reasons the ListItem renderers access the databases directly using functions
# from disk_IO.py.
#
# * Only Launcher objects can create ROM objects.
#
# * Every object (Category, Launcher, ROM) must be able to save itself to disk. This is required
# to simplify the recursive Edit Object menu.
# For example:
# launcher.save_to_disk() Saves Launcher to disk
# rom.save_to_disk() Saves ROM to disk.
#
# * Abstract Factory Pattern
# See https://www.oreilly.com/library/view/head-first-design/0596007124/ch04.html
#
# --- Category creation and edition --------------------------------------------------------------
#
# 1. Create a new Category:
#       category = AELObjectFactory.create_new(OBJ_CATEGORY)
#       category.save_to_disk()
#
# 2. Retrieve a list of all Categories from disk, sorted alphabetically by Name:
#       categories_list = AELObjectFactory.find_category_all()
#
# 3. Retrieve a Category from disk (for example, in Edit Category context menu):
#       category = AELObjectFactory.find_category(category_id)
#       category.save_to_disk()
#
# --- Launcher creation and edition --------------------------------------------------------------
#
# 4. Create a new real Launcher:
# launcher = AELObjectFactory.create_new(OBJ_LAUNCHER_ROM)
# OR
# launcher = AELObjectFactory.create_new(OBJ_LAUNCHER_ROM)
# launcher.build(category)
# launcher.save_to_disk()
#
# 5. Retrieve a list of all real Launchers in a Category, sorted alphabetically by Name:
# launcher_list = AELObjectFactory.find_launchers_in_cat(category_id)
#
# 6. Retrieve a real Launcher from disk (real launcher can be edited):
# launcher = AELObjectFactory.find_launcher(category_id, launcher_id)
# launcher.save_to_disk()
#
# --- ROM Collection creation and edition --------------------------------------------------------
#
# 7. Create a new ROM Collection. Category is implicit:
# collection = AELObjectFactory.create_new(OBJ_LAUNCHER_COLLECTION)
# collection.set_name('Sonic')
# collection.save_to_disk()
#
# 8. Retrieve a list of all ROM Collection launchers from disk:
# collections_list = AELObjectFactory.find_launchers_in_cat(VCATEGORY_COLLECTIONS_ID)
#
# 9. Retrieve a ROM Collection from disk (for example, in Edit Collection context menu):
# collection = AELObjectFactory.find_launcher(VCATEGORY_COLLECTIONS_ID, launcher_id)
# collection.save_to_disk()
#
# --- Virtual Launcher related functions ---------------------------------------------------------
#
# 10. Create a new Virtual Launcher:
# vlauncher = AELObjectFactory.create_new(OBJ_LAUNCHER_VIRTUAL, VCATEGORY_TITLE_ID)
# vlauncher.set_name('A')
# vlauncher.save_to_disk()
#
# 11. Retrieve a list of all Virtual Launchers of a given type:
# vlauncher_list = AELObjectFactory.find_launchers_in_cat(VCATEGORY_TITLE_ID)
#
# 12. Retrieve a Virtual Launcher from disk:
# vlauncher = AELObjectFactory.find_launcher(VCATEGORY_TITLE_ID, launcher_id)
#
# --- Creation of ROMs ---------------------------------------------------------------------------
#
# 13. Create a new ROM:
# launcher = AELObjectFactory.create_new(OBJ_LAUNCHER_ROM, category_id)
# ROM = launcher.create_new_ROM()
#
# 14. Retrieve a ROM in a Launcher for edition:
# launcher = AELObjectFactory.find_launcher(VCATEGORY_ACTUAL_LAUN_ID, launcher_id)
# launcher.load_ROMs()
# ROM = launcher.find_ROM(rom_id)
# launcher.save_ROMs_disk()
#
# --- Favourite ROM creation ---------------------------------------------------------------------
#
# 16. Add ROM to Favourites:
# launcher = AELObjectFactory.find_launcher(VCATEGORY_ACTUAL_LAUN_ID, launcher_id)
# ROM = launcher.create_new_ROM()
# favourites = AELObjectFactory.find_launcher(VCATEGORY_FAVOURITES_ID) # Launcher ID implicit
# favourites.add_ROM(ROM)
# favourites.save_to_disk()
#
# 17. Add ROM to Collection:
# launcher = AELObjectFactory.find_launcher(VCATEGORY_ACTUAL_LAUN_ID, launcher_id)
# ROM = launcher.create_new_ROM()
# collection = AELObjectFactory.find_launcher(VCATEGORY_COLLECTIONS_ID, launcher_id)
# collection.add_ROM(ROM)
# collection.save_to_disk()
#
# 18. Build Virtual Launchers:
# launcher_list = ...
# all_ROMs = []
# for launcher in launcher_list:
# roms = launcher...
# for rom in roms:
# all_ROMs.insert(rom)
# for name in names:
# vlauncher = AELObjectFactory.create_new(OBJ_LAUNCHER_VIRTUAL, VCATEGORY_TITLE_ID)
# vlauncher.set_name(name)
# vlauncher.add_ROM(ROM)
# vlauncher.save_to_disk()
#
# --- Render of ROMs in a Standard ROM Launcher --------------------------------------------------
#
# 19. Render ROMs in a Launcher:
# launcher = AELObjectFactory.find_launcher(VCATEGORY_ACTUAL_LAUN_ID, launcher_id)
# for rom in launcher.find_ROMs_all():
# rom.get_name()
#
# 20. Render ROMs in a Launcher (filtered):
#
# 21. Render ROMs in a Collection:
# collection = AELObjectFactory.find_launcher(VCATEGORY_COLLECTIONS_ID, launcher_id)
# for rom in collection.find_ROMs_all():
# rom.get_name()
#
# --- View context menu --------------------------------------------------------------------------
#
# 22. Render Category database information:
# category = AELObjectFactory.find_category(category_id)
#
# 23. Render Launcher database information (Category is required):
# launcher = AELObjectFactory.find_launcher(VCATEGORY_ACTUAL_LAUN_ID, launcher_id)
# category = AELObjectFactory.find_category(launcher.get_category_ID())
#
# 24. Render ROM database information (Category and Launcher required)
# launcher = AELObjectFactory.find_launcher(VCATEGORY_ACTUAL_LAUN_ID, launcher_id)
# category = AELObjectFactory.find_category(launcher.get_category_ID())
# ROM = launcher.find_ROM(rom_id)
#
class AELObjectFactory(object):
    #
    # Creates all AEL objects derived from MetaDataItemACB (Categories and Launchers),
    # either empty (create_new) or from database dictionaries (find_* methods).
    # PATHS and settings are used in the creation of all objects.
    # executorFactory is used in the creation of Launcher objects only.
    #
    def __init__(self, PATHS, settings, objectRepository, executorFactory):
        self.PATHS = PATHS
        self.settings = settings
        self.objectRepository = objectRepository
        self.executorFactory = executorFactory

        # --- Pool of skeleton dictionaries to create virtual categories/launchers ---
        self.category_addon_root_dic = {
            'id' : VCATEGORY_ADDONROOT_ID,
            'type' : OBJ_CATEGORY_VIRTUAL,
            'm_name' : 'Root category',
        }

        self.recently_played_roms_dic = {
            'id' : VLAUNCHER_RECENT_ID,
            'type' : OBJ_CATEGORY_VIRTUAL,
            'm_name': 'Recently played',
            'roms_base_noext': 'history',
            'default_icon' : 's_icon',
            'default_fanart' : 's_fanart',
            'default_banner' : 's_banner',
            'default_poster' : 's_poster',
            'default_clearlogo' : 's_clearlogo',
            'default_controller' : 's_controller',
            'roms_default_icon' : 's_boxfront',
            'roms_default_fanart' : 's_fanart',
            'roms_default_banner' : 's_banner',
            'roms_default_poster' : 's_flyer',
            'roms_default_clearlogo' : 's_clearlogo'
        }

        self.most_played_roms_dic = {
            'id' : VLAUNCHER_MOST_PLAYED_ID,
            'type' : OBJ_CATEGORY_VIRTUAL,
            'm_name': 'Most played',
            'roms_base_noext': 'most_played',
            'default_icon' : 's_icon',
            'default_fanart' : 's_fanart',
            'default_banner' : 's_banner',
            'default_poster' : 's_poster',
            'default_clearlogo' : 's_clearlogo',
            'default_controller' : 's_controller',
            'roms_default_icon' : 's_boxfront',
            'roms_default_fanart' : 's_fanart',
            'roms_default_banner' : 's_banner',
            'roms_default_poster' : 's_flyer',
            'roms_default_clearlogo' : 's_clearlogo'
        }

        self.favourites_roms_dic = {
            'id' : VLAUNCHER_FAVOURITES_ID,
            'type' : OBJ_CATEGORY_VIRTUAL,
            'm_name': 'Favourites',
            'roms_base_noext': 'favourites',
            'default_icon' : 's_icon',
            'default_fanart' : 's_fanart',
            'default_banner' : 's_banner',
            'default_poster' : 's_poster',
            'default_clearlogo' : 's_clearlogo',
            'default_controller' : 's_controller',
            'roms_default_icon' : 's_boxfront',
            'roms_default_fanart' : 's_fanart',
            'roms_default_banner' : 's_banner',
            'roms_default_poster' : 's_flyer',
            'roms_default_clearlogo' : 's_clearlogo'
        }

    #
    # Creates an empty Launcher derived object with default values when only the launcher type
    # is available, for example, when creating a new launcher in the context menu.
    #
    def create_new(self, obj_type):
        log_debug('AELObjectFactory::create_new() Creating empty {0}'.format(obj_type))
        return self._load(obj_type)

    #
    # DEPRECATED: this function should only be called internally.
    # Creates a Launcher derived object when the data dictionary is available.
    # The type of object is the 'type' field in the dictionary.
    #
    def create_from_dic(self, obj_dic):
        id = obj_dic['id']
        obj_type = obj_dic['type']
        # BUG FIX: log message previously named create_new() instead of this method.
        log_debug('AELObjectFactory::create_from_dic() Creating {0} ID {1}'.format(obj_type, id))
        return self._load(obj_type, obj_dic)

    #
    # Retrieves a Category object from the database.
    # This method also creates Virtual Category objects.
    # Returns a Category object or None.
    #
    def find_category(self, category_id):
        category_dic = self.objectRepository.find_category(category_id)
        if category_dic is not None:
            category_obj = self._load(OBJ_CATEGORY, category_dic)
        elif category_id == VCATEGORY_ADDONROOT_ID:
            # The addon root is not in the database; build it from the skeleton dic.
            category_obj = self.create_from_dic(self.category_addon_root_dic)
        else:
            category_obj = None
        return category_obj

    #
    # Retrieves a list of Category objects, sorted alphabetically by Name.
    #
    def find_category_all(self):
        category_obj_list = []
        category_dic_list = self.objectRepository.find_category_all()
        # dump_object_to_log('category_dic_list', category_dic_list)
        for category_dic in category_dic_list:
            category_obj_list.append(self._load(OBJ_CATEGORY, category_dic))
        return category_obj_list

    #
    # Retrieves a Launcher object from the database.
    # This method also creates Virtual Launchers (Favourites, ROM Collection, etc.)
    # category_id is not used for Standard Launchers, but it is important for Virtual Launchers.
    # Returns a Launcher object or None.
    #
    def find_launcher(self, category_id, launcher_id):
        if launcher_id in VLAUNCHERS:
            return self._load(launcher_id)
        launcher_dic = self.objectRepository.find_launcher(launcher_id)
        if launcher_dic is None:
            return None
        return self._load(launcher_dic['type'], launcher_dic)

    #
    # Retrieves a list of Launcher objects in a category.
    # This method also works for Virtual Categories (ROM Collections, Browse by Title, etc.)
    #
    def find_launchers_in_cat(self, category_id):
        launcher_obj_list = []
        launcher_dic_list = self.objectRepository.find_launchers_by_category_id(category_id)
        # dump_object_to_log('launcher_dic_list', launcher_dic_list)
        for launcher_dic in launcher_dic_list:
            launcher_obj_list.append(self._load(launcher_dic['type'], launcher_dic))
        return launcher_obj_list

    #
    # To show "Select Launcher type" dialog. Only return real Launcher and not Virtual Launchers.
    #
    def get_launcher_types_odict(self):
        typeOptions = collections.OrderedDict()
        typeOptions[OBJ_LAUNCHER_STANDALONE] = 'Standalone launcher (Game/Application)'
        typeOptions[OBJ_LAUNCHER_ROM] = 'ROM launcher (Emulator)'
        typeOptions[OBJ_LAUNCHER_RETROPLAYER] = 'ROM launcher (Kodi Retroplayer)'
        typeOptions[OBJ_LAUNCHER_RETROARCH] = 'ROM launcher (Retroarch)'
        if is_windows():
            typeOptions[OBJ_LAUNCHER_LNK] = 'LNK launcher (Windows only)'
        typeOptions[OBJ_LAUNCHER_NVGAMESTREAM] = 'Nvidia GameStream'
        if not is_android():
            typeOptions[OBJ_LAUNCHER_STEAM] = 'Steam launcher'
        # --- Disabled. AEL must not access favourites.xml ---
        # typeOptions[OBJ_LAUNCHER_KODI_FAVOURITES] = 'Kodi favourite launcher'
        return typeOptions

    #
    # Builds a VirtualLauncher from one of the skeleton dictionaries.
    # Favourites use a different ROMSetRepository flavour (third argument True).
    #
    def _new_virtual_launcher(self, launcher_dic, favourites = False):
        if favourites:
            ROMRepository = ROMSetRepository(self.PATHS, self.settings, True)
        else:
            ROMRepository = ROMSetRepository(self.PATHS, self.settings)
        statsStrategy = VirtualROMStatisticsStrategy(self.PATHS, self.settings)
        return VirtualLauncher(self.PATHS, self.settings, launcher_dic,
                               None, ROMRepository, statsStrategy)

    #
    # Builds the (ROMRepository, statsStrategy) pair shared by all real ROM launchers.
    # The statistics strategy updates the "Recently played" and "Most played"
    # virtual launchers when a ROM is launched.
    #
    def _make_ROM_repository_and_stats(self):
        ROMRepository = ROMSetRepository(self.PATHS, self.settings)
        recent_played_launcher = self._load(VLAUNCHER_RECENT_ID)
        most_played_launcher = self._load(VLAUNCHER_MOST_PLAYED_ID)
        statsStrategy = ROMStatisticsStrategy(self.PATHS, self.settings,
                                              recent_played_launcher, most_played_launcher)
        return ROMRepository, statsStrategy

    #
    # obj_type is mandatory.
    # obj_dic may be a dictionary (database objects) or None (new objects).
    # Object constructor is responsible for filling the database dictionary with sensible
    # defaults if obj_dic = None.
    # ROM objects are created by Launcher objects and NOT here.
    #
    def _load(self, obj_type, obj_dic = None):
        # --- Categories ---
        if obj_type == OBJ_CATEGORY:
            return Category(self.PATHS, self.settings, obj_dic, self.objectRepository)
        elif obj_type == OBJ_CATEGORY_VIRTUAL:
            return VirtualCategory(self.PATHS, self.settings, obj_dic, self.objectRepository)

        # --- Virtual launchers ---
        elif obj_type == VLAUNCHER_RECENT_ID:
            return self._new_virtual_launcher(self.recently_played_roms_dic)
        elif obj_type == VLAUNCHER_MOST_PLAYED_ID:
            return self._new_virtual_launcher(self.most_played_roms_dic)
        elif obj_type == VLAUNCHER_FAVOURITES_ID:
            return self._new_virtual_launcher(self.favourites_roms_dic, favourites = True)

        # --- Real launchers ---
        elif obj_type == OBJ_LAUNCHER_STANDALONE:
            return StandaloneLauncher(self.PATHS, self.settings, obj_dic, self.objectRepository,
                                      self.executorFactory)
        elif obj_type == OBJ_LAUNCHER_COLLECTION:
            # BUG FIX: the original code passed the undefined name 'romsetRepository'
            # (NameError at runtime). Pass the repository actually built here.
            ROMRepository, statsStrategy = self._make_ROM_repository_and_stats()
            return CollectionLauncher(self.PATHS, self.settings, obj_dic, ROMRepository)
        elif obj_type == OBJ_LAUNCHER_ROM:
            ROMRepository, statsStrategy = self._make_ROM_repository_and_stats()
            return StandardRomLauncher(self.PATHS, self.settings, obj_dic, self.objectRepository,
                                       self.executorFactory, ROMRepository, statsStrategy)
        elif obj_type == OBJ_LAUNCHER_RETROPLAYER:
            ROMRepository, statsStrategy = self._make_ROM_repository_and_stats()
            return RetroplayerLauncher(self.PATHS, self.settings, obj_dic, self.objectRepository,
                                       self.executorFactory, ROMRepository, statsStrategy)
        elif obj_type == OBJ_LAUNCHER_RETROARCH:
            ROMRepository, statsStrategy = self._make_ROM_repository_and_stats()
            return RetroarchLauncher(self.PATHS, self.settings, obj_dic, self.objectRepository,
                                     self.executorFactory, ROMRepository, statsStrategy)
        # LNK launchers available only on Windows
        elif obj_type == OBJ_LAUNCHER_LNK:
            ROMRepository, statsStrategy = self._make_ROM_repository_and_stats()
            return LnkLauncher(self.PATHS, self.settings, obj_dic, self.objectRepository,
                               self.executorFactory, ROMRepository, statsStrategy)
        elif obj_type == OBJ_LAUNCHER_STEAM:
            ROMRepository, statsStrategy = self._make_ROM_repository_and_stats()
            return SteamLauncher(self.PATHS, self.settings, obj_dic, self.objectRepository,
                                 self.executorFactory, ROMRepository, statsStrategy)
        elif obj_type == OBJ_LAUNCHER_NVGAMESTREAM:
            ROMRepository, statsStrategy = self._make_ROM_repository_and_stats()
            return NvidiaGameStreamLauncher(self.PATHS, self.settings, obj_dic, self.objectRepository,
                                            self.executorFactory, ROMRepository, statsStrategy)
        # --- Disabled. AEL must not access favourites.xml ---
        # elif obj_type == OBJ_LAUNCHER_KODI_FAVOURITES:
        #     return KodiLauncher(launcher_data, self.settings, self.executorFactory)
        else:
            log_error('Unsupported requested type "{0}"'.format(obj_type))
            return None
# #################################################################################################
# #################################################################################################
# Executors
# #################################################################################################
# #################################################################################################
class ExecutorABC():
    # Abstract base class for all process executors. A concrete subclass
    # implements execute() for one platform / launch mechanism.
    # NOTE(review): Python 2 style metaclass declaration; under Python 3 the
    # __metaclass__ attribute is ignored — confirm the addon targets Python 2.
    __metaclass__ = abc.ABCMeta
    def __init__(self, logFile):
        # logFile: file object path wrapper where child process output is
        # recorded by subclasses (may be None, e.g. AndroidExecutor).
        self.logFile = logFile
    @abc.abstractmethod
    def execute(self, application, arguments, non_blocking): pass
class XbmcExecutor(ExecutorABC):
    # Runs a Kodi built-in function; the application path is ignored.
    def execute(self, application, arguments, non_blocking):
        builtin_str = 'XBMC.{0}'.format(arguments)
        xbmc.executebuiltin(builtin_str)
#
# --- Linux ---
# New in AEL 0.9.7: always close all file descriptions except 0, 1 and 2 on the child
# process. This is to avoid Kodi opens sockets be inherited by the child process. A
# wrapper script may terminate Kodi using JSON RPC and if file descriptors are not
# closed Kodi will complain that the remote interfacte cannot be initialised. I believe
# the cause is that the socket is kept open by the wrapper script.
#
class LinuxExecutor(ExecutorABC):
    # Launches native Linux processes with subprocess, optionally stopping
    # LIRC while the child runs. close_fds = True keeps Kodi's sockets from
    # being inherited by the child process.
    def __init__(self, logFile, lirc_state):
        self.lirc_state = lirc_state
        super(LinuxExecutor, self).__init__(logFile)

    def execute(self, application, arguments, non_blocking):
        log_debug('LinuxExecutor::execute() Starting ...')
        command = [application.getPath()] + shlex.split(arguments, posix = True)
        if non_blocking:
            # stdout/stderr of the child cannot be recorded in a non-blocking launch.
            log_info('Launching non-blocking process subprocess.Popen()')
            subprocess.Popen(command, close_fds = True)
        else:
            if self.lirc_state: xbmc.executebuiltin('LIRC.stop')
            with open(self.logFile.getPath(), 'w') as f:
                retcode = subprocess.call(
                    command, stdout = f, stderr = subprocess.STDOUT, close_fds = True)
            log_info('Process retcode = {0}'.format(retcode))
            if self.lirc_state: xbmc.executebuiltin('LIRC.start')
        log_debug('LinuxExecutor::execute() function ENDS')
class AndroidExecutor(ExecutorABC):
    # Launches Android intents via os.system(); no log file is kept.
    def __init__(self):
        super(AndroidExecutor, self).__init__(None)

    def execute(self, application, arguments, non_blocking):
        log_debug('AndroidExecutor::execute() Starting ...')
        command_str = "{0} {1}".format(application.getPath(), arguments)
        retcode = os.system(command_str.encode('utf-8'))
        log_info('Process retcode = {0}'.format(retcode))
        log_debug('AndroidExecutor::execute() function ENDS')
class OSXExecutor(ExecutorABC):
    # Launches macOS processes with subprocess, capturing output to the log file.
    def execute(self, application, arguments, non_blocking):
        log_debug('OSXExecutor::execute() Starting ...')
        command = [application.getPath()] + shlex.split(arguments, posix = True)
        with open(self.logFile.getPath(), 'w') as f:
            retcode = subprocess.call(command, stdout = f, stderr = subprocess.STDOUT)
        log_info('Process retcode = {0}'.format(retcode))
        log_debug('OSXExecutor::execute() function ENDS')
class WindowsLnkFileExecutor(ExecutorABC):
    # Launches Windows .lnk shortcut files via the shell's "start" command.
    def execute(self, application, arguments, non_blocking):
        log_debug('WindowsLnkFileExecutor::execute() Starting ...')
        log_debug('Launching LNK application')
        command_str = 'start "AEL" /b "{0}"'.format(application.getPath())
        retcode = subprocess.call(command_str.encode('utf-8'), shell = True)
        log_info('LNK app retcode = {0}'.format(retcode))
        log_debug('WindowsLnkFileExecutor::execute() function ENDS')
#
# CMD/BAT files in Windows
#
#
# CMD/BAT files in Windows
#
class WindowsBatchFileExecutor(ExecutorABC):
    # Executes Windows BAT/CMD files, optionally showing the console window.
    def __init__(self, logFile, show_batch_window):
        # show_batch_window: when True the console window of the batch file is visible.
        self.show_batch_window = show_batch_window
        super(WindowsBatchFileExecutor, self).__init__(logFile)

    def execute(self, application, arguments, non_blocking):
        log_debug('WindowsBatchFileExecutor::execute() Starting ...')
        arg_list = shlex.split(arguments, posix = True)
        command = [application.getPath()] + arg_list
        apppath = application.getDir()

        # --- Workaround to run UNC paths in Windows ---
        # >> Retroarch now support ROMs in UNC paths (Samba remotes)
        # BUG FIX: use startswith() instead of indexing command[i][0] so an
        # empty argument no longer raises IndexError.
        new_command = list(command)
        for i, _ in enumerate(command):
            if command[i].startswith('\\'):
                new_command[i] = '\\' + command[i]
                log_debug('Executor (Windows BatchFile): Before arg #{0} = "{1}"'.format(i, command[i]))
                log_debug('Executor (Windows BatchFile): Now arg #{0} = "{1}"'.format(i, new_command[i]))
        command = list(new_command)
        log_debug('Executor (Windows BatchFile): command = {0}'.format(command))

        log_debug('Executor (Windows BatchFile) Launching BAT application')
        log_debug('Executor (Windows BatchFile) Ignoring setting windows_cd_apppath')
        log_debug('Executor (Windows BatchFile) Ignoring setting windows_close_fds')
        log_debug('Executor (Windows BatchFile) show_batch_window = {0}'.format(self.show_batch_window))
        # STARTUPINFO controls the console window visibility:
        # dwFlags = 1 is STARTF_USESHOWWINDOW; wShowWindow 5 = SW_SHOW, 0 = SW_HIDE.
        info = subprocess.STARTUPINFO()
        info.dwFlags = 1
        info.wShowWindow = 5 if self.show_batch_window else 0
        retcode = subprocess.call(command, cwd = apppath.encode('utf-8'), close_fds = True, startupinfo = info)
        # BUG FIX: log message said "BAR" instead of "BAT".
        log_info('Executor (Windows BatchFile) Process BAT retcode = {0}'.format(retcode))
        log_debug('WindowsBatchFileExecutor::execute() function ENDS')
#
# --- Windoze ---
# NOTE subprocess24_hack.py was hacked to always set CreateProcess() bInheritHandles to 0.
# bInheritHandles [in] If this parameter TRUE, each inheritable handle in the calling
# process is inherited by the new process. If the parameter is FALSE, the handles are not
# inherited. Note that inherited handles have the same value and access rights as the original handles.
# See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682425(v=vs.85).aspx
#
# Same behaviour can be achieved in current version of subprocess with close_fds.
# If close_fds is true, all file descriptors except 0, 1 and 2 will be closed before the
# child process is executed. (Unix only). Or, on Windows, if close_fds is true then no handles
# will be inherited by the child process. Note that on Windows, you cannot set close_fds to
# true and also redirect the standard handles by setting stdin, stdout or stderr.
#
# If I keep old launcher behaviour in Windows (close_fds = True) then program output cannot
# be redirected to a file.
#
class WindowsExecutor(ExecutorABC):
    # Standard Windows executable launcher.
    # On Windows close_fds = True means no handles are inherited by the child, but
    # then stdout/stderr cannot be redirected — hence the four launch combinations.
    def __init__(self, logFile, cd_apppath, close_fds):
        # cd_apppath: change the working directory to the application dir before launch.
        # close_fds: do not let the child inherit handles (disables output capture).
        self.windows_cd_apppath = cd_apppath
        self.windows_close_fds = close_fds
        super(WindowsExecutor, self).__init__(logFile)

    def execute(self, application, arguments, non_blocking):
        log_debug('WindowsExecutor::execute() Starting ...')
        arg_list = shlex.split(arguments, posix = True)
        command = [application.getPath()] + arg_list
        apppath = application.getDir()

        # --- Workaround to run UNC paths in Windows ---
        # >> Retroarch now support ROMs in UNC paths (Samba remotes)
        # BUG FIX: use startswith() instead of indexing command[i][0] so an
        # empty argument no longer raises IndexError.
        new_command = list(command)
        for i, _ in enumerate(command):
            if command[i].startswith('\\'):
                new_command[i] = '\\' + command[i]
                log_debug('WindowsExecutor: Before arg #{0} = "{1}"'.format(i, command[i]))
                log_debug('WindowsExecutor: Now arg #{0} = "{1}"'.format(i, new_command[i]))
        command = list(new_command)
        log_debug('WindowsExecutor: command = {0}'.format(command))

        # >> cwd = apppath.encode('utf-8') fails if application path has Unicode on Windows
        # >> A workaraound is to use cwd = apppath.encode(sys.getfilesystemencoding()) --> DOES NOT WORK
        # >> For the moment AEL cannot launch executables on Windows having Unicode paths.
        log_debug('Launching regular application')
        log_debug('windows_cd_apppath = {0}'.format(self.windows_cd_apppath))
        log_debug('windows_close_fds = {0}'.format(self.windows_close_fds))
        # >> Note that on Windows, you cannot set close_fds to true and also redirect the
        # >> standard handles by setting stdin, stdout or stderr.
        if self.windows_cd_apppath and self.windows_close_fds:
            retcode = subprocess.call(command, cwd = apppath.encode('utf-8'), close_fds = True)
        elif self.windows_cd_apppath and not self.windows_close_fds:
            with open(self.logFile.getPath(), 'w') as f:
                retcode = subprocess.call(command, cwd = apppath.encode('utf-8'), close_fds = False,
                                          stdout = f, stderr = subprocess.STDOUT)
        elif not self.windows_cd_apppath and self.windows_close_fds:
            retcode = subprocess.call(command, close_fds = True)
        elif not self.windows_cd_apppath and not self.windows_close_fds:
            with open(self.logFile.getPath(), 'w') as f:
                retcode = subprocess.call(command, close_fds = False, stdout = f, stderr = subprocess.STDOUT)
        else:
            raise AddonError('Logical error')
        log_info('Process retcode = {0}'.format(retcode))
        log_debug('WindowsExecutor::execute() function ENDS')
class WebBrowserExecutor(ExecutorABC):
    """Executor that treats application path + arguments as a URL and opens it in the default browser."""
    def execute(self, application, arguments, non_blocking):
        log_debug('WebBrowserExecutor::execute() Starting ...')
        # The full URL is the application path with the arguments appended verbatim.
        url = application.getPath() + arguments
        log_debug('Launching URL "{0}"'.format(url))
        webbrowser.open(url)
        log_debug('WebBrowserExecutor::execute() function ENDS')
# -------------------------------------------------------------------------------------------------
# Abstract Factory Pattern
# See https://www.oreilly.com/library/view/head-first-design/0596007124/ch04.html
# -------------------------------------------------------------------------------------------------
class ExecutorFactory(object):
    """Builds the appropriate ExecutorABC subclass for a given application path."""
    def __init__(self, g_PATHS, settings):
        self.settings = settings
        self.logFile = g_PATHS.LAUNCHER_REPORT_FILE_PATH

    def create_from_pathstring(self, application_string):
        # Convenience wrapper: wrap the plain path string into a FileName object.
        return self.create(FileName(application_string))

    def create(self, application):
        app_path = application.getPath()
        app_base_noexe = application.getBase().lower().replace('.exe' , '')
        # Kodi itself (or the xbmc-fav-/xbmc-sea- pseudo applications) runs in-process.
        if app_base_noexe == 'xbmc' or 'xbmc-fav-' in app_path or 'xbmc-sea-' in app_path:
            return XbmcExecutor(self.logFile)
        # Anything that looks like a URL goes to the web browser executor.
        if re.search('.*://.*', app_path):
            return WebBrowserExecutor(self.logFile)
        if is_windows():
            app_ext = application.getExt().lower()
            # >> BAT/CMD file.
            if app_ext == '.bat' or app_ext == '.cmd':
                return WindowsBatchFileExecutor(self.logFile, self.settings['show_batch_window'])
            # >> Standalone launcher where application is a LNK file
            if app_ext == '.lnk':
                return WindowsLnkFileExecutor(self.logFile)
            # >> Standard Windows executor
            return WindowsExecutor(self.logFile,
                self.settings['windows_cd_apppath'], self.settings['windows_close_fds'])
        if is_android():
            return AndroidExecutor()
        if is_linux():
            return LinuxExecutor(self.logFile, self.settings['lirc_state'])
        if is_osx():
            return OSXExecutor(self.logFile)
        log_error('ExecutorFactory::create() Cannot determine the running platform')
        kodi_notify_warn('Cannot determine the running platform')
        return None
# #################################################################################################
# #################################################################################################
# ROM scanners
# #################################################################################################
# #################################################################################################
class RomScannersFactory(object):
    """Builds the ROM scanner strategy matching a launcher's type."""
    def __init__(self, PATHS, settings):
        self.settings = settings
        self.reports_dir = PATHS.REPORTS_DIR
        self.addon_dir = PATHS.ADDON_DATA_DIR

    def create(self, launcher, scraping_strategy, progress_dialog):
        launcherType = launcher.get_launcher_type()
        log_info('RomScannersFactory: Creating romscanner for {}'.format(launcherType))
        # Launchers that cannot hold ROMs get a do-nothing scanner.
        if not launcher.supports_launching_roms():
            return NullScanner(launcher, self.settings, progress_dialog)
        scanner_args = (self.reports_dir, self.addon_dir, launcher, self.settings, scraping_strategy, progress_dialog)
        if launcherType == OBJ_LAUNCHER_STEAM:
            return SteamScanner(*scanner_args)
        if launcherType == OBJ_LAUNCHER_NVGAMESTREAM:
            return NvidiaStreamScanner(*scanner_args)
        # Default: scan ROM files on the filesystem.
        return RomFolderScanner(*scanner_args)
class ScannerStrategyABC(object):
    """Abstract base class for all ROM scanner strategies."""
    __metaclass__ = abc.ABCMeta

    def __init__(self, launcher, settings, progress_dialog):
        self.launcher = launcher
        self.settings = settings
        self.progress_dialog = progress_dialog
        super(ScannerStrategyABC, self).__init__()

    @abc.abstractmethod
    def scan(self):
        """Scan for new ROMs based on the type of launcher."""
        return {}

    @abc.abstractmethod
    def cleanup(self):
        """Clean up the ROM collection by removing dead/missing ROMs."""
        return {}
class NullScanner(ScannerStrategyABC):
    """No-op scanner used for launchers that do not support ROMs."""
    def scan(self):
        return {}

    def cleanup(self):
        return {}
class RomScannerStrategy(ScannerStrategyABC):
    """Template-method base class implementing the common scan/cleanup workflow.

    Concrete scanners implement _getCandidates(), _removeDeadRoms() and
    _processFoundItems() for their particular ROM source (folder, Steam, Gamestream).
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self, reports_dir, addon_dir, launcher, settings, scraping_strategy, progress_dialog):
        self.reports_dir = reports_dir
        self.addon_dir = addon_dir
        self.scraping_strategy = scraping_strategy
        super(RomScannerStrategy, self).__init__(launcher, settings, progress_dialog)

    def scan(self):
        """Scan for new ROMs, remove dead ones and return the updated ROM list.

        Returns None when the scan was aborted, cancelled by the user or yielded
        no usable result.
        """
        # --- Open ROM scanner report file ---
        launcher_report = FileReporter(self.reports_dir, self.launcher.get_data_dic(), LogReporter(self.launcher.get_data_dic()))
        launcher_report.open('RomScanner() Starting ROM scanner')
        # >> Check if there is an XML for this launcher. If so, load it.
        # >> If file does not exist or is empty then return an empty dictionary.
        launcher_report.write('Loading launcher ROMs ...')
        roms = self.launcher.get_roms()
        if roms is None:
            roms = []
        num_roms = len(roms)
        launcher_report.write('{} ROMs currently in database'.format(num_roms))
        launcher_report.write('Collecting candidates ...')
        candidates = self._getCandidates(launcher_report)
        # BUGFIX: _getCandidates() may return None when the candidate source is
        # unavailable (e.g. the Gamestream server cannot be reached). Abort the scan
        # instead of crashing on len(None) — and never treat "source unreachable" as
        # "no candidates", which would delete all existing ROMs as dead.
        if candidates is None:
            launcher_report.write('Cannot collect candidates. Aborting scan.')
            launcher_report.close()
            return None
        num_candidates = len(candidates)
        launcher_report.write('{} candidates found'.format(num_candidates))
        # --- Scan all files in extra ROM path ---------------------------------------------------
        if self.launcher.has_extra_rompath():
            log_info('Scanning candidates in extra ROM path.')
            extra_candidates = self._getCandidates(launcher_report, self.launcher.get_extra_rompath())
            # Same None guard as above for the extra ROM path.
            if extra_candidates is None: extra_candidates = []
            log_info('{} extra candidate files found'.format(len(extra_candidates)))
        else:
            log_info('Extra ROM path empty. Skipping scanning.')
            extra_candidates = []
        launcher_report.write('Removing dead ROMs ...')
        num_removed_roms = self._removeDeadRoms(candidates, roms)
        if num_removed_roms > 0:
            kodi_notify('{0} dead ROMs removed successfully'.format(num_removed_roms))
            log_info('{0} dead ROMs removed successfully'.format(num_removed_roms))
        else:
            log_info('No dead ROMs found')
        # --- Prepare list of candidates to be processed ----------------------------------------------
        # List has tuples (candidate, extra_ROM_flag). List already sorted alphabetically.
        candidates_combined = []
        for candidate in sorted(candidates): candidates_combined.append((candidate, False))
        for candidate in sorted(extra_candidates): candidates_combined.append((candidate, True))
        new_roms = self._processFoundItems(candidates_combined, roms, launcher_report)
        # >> _processFoundItems() returns None when the user cancelled; an empty list
        # >> is treated the same way (nothing to add).
        if not new_roms:
            return None
        num_new_roms = len(new_roms)
        roms = roms + new_roms
        launcher_report.write('******************** ROM scanner finished. Report ********************')
        launcher_report.write('Removed dead ROMs {0:6d}'.format(num_removed_roms))
        launcher_report.write('Files checked {0:6d}'.format(num_candidates))
        launcher_report.write('Extra files checked {0:6d}'.format(len(extra_candidates)))
        launcher_report.write('New added ROMs {0:6d}'.format(num_new_roms))
        if len(roms) == 0:
            launcher_report.write('WARNING Launcher has no ROMs!')
            launcher_report.close()
            kodi_dialog_OK('No ROMs found! Make sure launcher directory and file extensions are correct.')
            return None
        if num_new_roms == 0:
            kodi_notify('Added no new ROMs. Launcher has {0} ROMs'.format(len(roms)))
        else:
            kodi_notify('Added {0} new ROMs'.format(num_new_roms))
        # --- Close ROM scanner report file ---
        launcher_report.write('*** END of the ROM scanner report ***')
        launcher_report.close()
        return roms

    def cleanup(self):
        """Remove dead/missing ROMs from the collection and return the updated list."""
        launcher_report = LogReporter(self.launcher.get_data_dic())
        launcher_report.open('RomScanner() Starting Dead ROM cleaning')
        log_debug('RomScanner() Starting Dead ROM cleaning')
        roms = self.launcher.get_roms()
        if roms is None:
            launcher_report.close()
            log_info('RomScanner() No roms available to cleanup')
            return {}
        num_roms = len(roms)
        launcher_report.write('{0} ROMs currently in database'.format(num_roms))
        launcher_report.write('Collecting candidates ...')
        candidates = self._getCandidates(launcher_report)
        # BUGFIX: abort gracefully when candidates cannot be collected (see scan()).
        # Returning the unmodified ROM list avoids deleting everything as "dead".
        if candidates is None:
            log_info('RomScanner() Cannot collect candidates. Aborting cleanup.')
            launcher_report.close()
            return roms
        num_candidates = len(candidates)
        log_info('{0} candidates found'.format(num_candidates))
        launcher_report.write('Removing dead ROMs ...')
        num_removed_roms = self._removeDeadRoms(candidates, roms)
        if num_removed_roms > 0:
            kodi_notify('{0} dead ROMs removed successfully'.format(num_removed_roms))
            log_info('{0} dead ROMs removed successfully'.format(num_removed_roms))
        else:
            log_info('No dead ROMs found')
        launcher_report.close()
        return roms

    # ~~~ Scan for new files (*.*) and put them in a list ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    @abc.abstractmethod
    def _getCandidates(self, launcher_report, rom_path = None):
        """Return the list of candidate items to process (source-specific)."""
        return []

    # --- Remove dead entries -----------------------------------------------------------------
    @abc.abstractmethod
    def _removeDeadRoms(self, candidates, roms):
        """Remove (in place) ROMs no longer present among candidates; return count removed."""
        return 0

    # ~~~ Now go processing item by item ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    @abc.abstractmethod
    def _processFoundItems(self, items, roms, launcher_report):
        """Create and return new ROM objects from candidate items (None if cancelled)."""
        return []
class RomFolderScanner(RomScannerStrategy):
    """Scans a filesystem folder for ROM files and adds them to the launcher collection."""

    # ~~~ Scan for new files (*.*) and put them in a list ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    def _getCandidates(self, launcher_report, rom_path = None):
        # Returns the list of file objects found in the launcher ROM path, or in the
        # explicit rom_path when scanning the extra ROM path.
        self.progress_dialog.startProgress('Scanning and caching files in ROM path ...')
        files = []
        if rom_path is None: rom_path = self.launcher.get_rom_path()
        launcher_report.write('Scanning files in {}'.format(rom_path.getPath()))
        if self.settings['scan_recursive']:
            log_info('Recursive scan activated')
            files = rom_path.recursiveScanFilesInPath('*.*')
        else:
            log_info('Recursive scan not activated')
            files = rom_path.scanFilesInPath('*.*')
        num_files = len(files)
        launcher_report.write(' File scanner found {} files'.format(num_files))
        self.progress_dialog.endProgress()
        return files

    # --- Remove dead entries -----------------------------------------------------------------
    def _removeDeadRoms(self, candidates, roms):
        # Removes (in place) ROMs whose file no longer exists on disk.
        # Returns the number of ROMs removed.
        num_roms = len(roms)
        num_removed_roms = 0
        if num_roms == 0:
            log_info('Launcher is empty. No dead ROM check.')
            return num_removed_roms
        log_debug('Starting dead items scan')
        i = 0
        self.progress_dialog.startProgress('Checking for dead ROMs ...', num_roms)
        # Iterate a reversed view so removing the current element does not skip entries.
        for rom in reversed(roms):
            fileName = rom.get_file()
            log_debug('Searching {0}'.format(fileName.getPath()))
            self.progress_dialog.updateProgress(i)
            if not fileName.exists():
                log_debug('Not found')
                log_debug('Deleting from DB {0}'.format(fileName.getPath()))
                roms.remove(rom)
                num_removed_roms += 1
            i += 1
        self.progress_dialog.endProgress()
        return num_removed_roms

    # ~~~ Now go processing item by item ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    def _processFoundItems(self, items, roms, launcher_report):
        # items is a list of (ROM_file, extra_ROM_flag) tuples, one per candidate file.
        # Returns the list of newly created ROM objects, or None if the user cancelled.
        num_items = len(items)
        new_roms = []
        self.progress_dialog.startProgress('Scanning found items', num_items)
        log_debug('============================== Processing ROMs ==============================')
        launcher_report.write('Processing files ...')
        num_items_checked = 0
        allowedExtensions = self.launcher.get_rom_extensions()
        launcher_multidisc = self.launcher.supports_multidisc()
        skip_if_scraping_failed = self.settings['scan_skip_on_scraping_failure']
        for ROM_file, extra_ROM_flag in sorted(items):
            self.progress_dialog.updateProgress(num_items_checked)
            # --- Get all file name combinations ---
            launcher_report.write('>>> {0}'.format(ROM_file.getPath()).encode('utf-8'))
            # ~~~ Update progress dialog ~~~
            file_text = 'ROM {0}'.format(ROM_file.getBase())
            self.progress_dialog.updateMessages(file_text, 'Checking if has ROM extension ...')
            # --- Check if filename matchs ROM extensions ---
            # The recursive scan has scanned all files. Check if this file matches some of
            # the ROM extensions. If this file isn't a ROM skip it and go for next one in the list.
            processROM = False
            for ext in allowedExtensions:
                if ROM_file.getExt() == '.' + ext:
                    launcher_report.write(" Expected '{0}' extension detected".format(ext))
                    processROM = True
                    break
            if not processROM:
                launcher_report.write(' File has not an expected extension. Skipping file.')
                continue
            # --- Check if ROM belongs to a multidisc set ---
            self.progress_dialog.updateMessages(file_text, 'Checking if ROM belongs to multidisc set..')
            MultiDiscInROMs = False
            MDSet = text_get_multidisc_info(ROM_file)
            if MDSet.isMultiDisc and launcher_multidisc:
                log_info('ROM belongs to a multidisc set.')
                log_info('isMultiDisc "{0}"'.format(MDSet.isMultiDisc))
                log_info('setName "{0}"'.format(MDSet.setName))
                log_info('discName "{0}"'.format(MDSet.discName))
                log_info('extension "{0}"'.format(MDSet.extension))
                log_info('order "{0}"'.format(MDSet.order))
                launcher_report.write(' ROM belongs to a multidisc set.')
                # >> Check if the set is already in launcher ROMs.
                MultiDisc_rom_id = None
                for new_rom in new_roms:
                    temp_FN = new_rom.get_file()
                    if temp_FN.getBase() == MDSet.setName:
                        MultiDiscInROMs = True
                        MultiDisc_rom = new_rom
                        break
                log_info('MultiDiscInROMs is {0}'.format(MultiDiscInROMs))
                # >> If the set is not in the ROMs then this ROM is the first of the set.
                # >> Add the set
                if not MultiDiscInROMs:
                    log_info('First ROM in the set. Adding to ROMs ...')
                    # >> Manipulate ROM so filename is the name of the set
                    ROM_dir = FileName(ROM_file.getDir())
                    ROM_file_original = ROM_file
                    ROM_temp = ROM_dir.pjoin(MDSet.setName)
                    log_info('ROM_temp P "{0}"'.format(ROM_temp.getPath()))
                    ROM_file = ROM_temp
                # >> If set already in ROMs, just add this disk into the set disks field.
                else:
                    log_info('Adding additional disk "{0}"'.format(MDSet.discName))
                    MultiDisc_rom.add_disk(MDSet.discName)
                    # >> Reorder disks like Disk 1, Disk 2, ...
                    # >> Process next file
                    log_info('Processing next file ...')
                    continue
            elif MDSet.isMultiDisc and not launcher_multidisc:
                launcher_report.write(' ROM belongs to a multidisc set but Multidisc support is disabled.')
            else:
                launcher_report.write(' ROM does not belong to a multidisc set.')
            # --- Check that ROM is not already in the list of ROMs ---
            # >> If file already in ROM list skip it
            self.progress_dialog.updateMessages(file_text, 'Checking if ROM is not already in collection...')
            repeatedROM = False
            for rom in roms:
                rpath = rom.get_file()
                if rpath == ROM_file:
                    repeatedROM = True
            if repeatedROM:
                launcher_report.write(' File already into launcher ROM list. Skipping file.')
                continue
            else:
                launcher_report.write(' File not in launcher ROM list. Processing it ...')
            # --- Ignore BIOS ROMs ---
            # Name of bios is: '[BIOS] Rom name example (Rev A).zip'
            if self.settings['scan_ignore_bios']:
                BIOS_re = re.findall('\[BIOS\]', ROM_file.getBase())
                if len(BIOS_re) > 0:
                    log_info("BIOS detected. Skipping ROM '{0}'".format(ROM_file.getPath()))
                    continue
            # ~~~~~ Process new ROM and add to the list ~~~~~
            # --- Create new rom dictionary ---
            # >> Database always stores the original (non transformed/manipulated) path
            new_rom = ROM()
            new_rom.set_file(ROM_file)
            if extra_ROM_flag: new_rom.set_as_extra_ROM()
            # checksums
            # For the first disk of a multidisc set ROM_file was renamed to the set name
            # above, so checksums are computed from the original (real) file.
            ROM_checksums = ROM_file_original if MDSet.isMultiDisc and launcher_multidisc else ROM_file
            scraping_succeeded = True
            self.progress_dialog.updateMessages(file_text, 'Scraping {0}...'.format(ROM_file.getBaseNoExt()))
            try:
                self.scraping_strategy.scanner_process_ROM(new_rom, ROM_checksums)
            except Exception as ex:
                scraping_succeeded = False
                log_error('(Exception) Object type "{}"'.format(type(ex)))
                log_error('(Exception) Message "{}"'.format(str(ex)))
                log_warning('Could not scrape "{}"'.format(ROM_file.getBaseNoExt()))
                #log_debug(traceback.format_exc())
            if not scraping_succeeded and skip_if_scraping_failed:
                kodi_display_user_message({
                    'dialog': KODI_MESSAGE_NOTIFY_WARN,
                    'msg': 'Scraping "{}" failed. Skipping.'.format(ROM_file.getBaseNoExt())
                })
            else:
                # --- This was the first ROM in a multidisc set ---
                if launcher_multidisc and MDSet.isMultiDisc and not MultiDiscInROMs:
                    log_info('Adding to ROMs dic first disk "{0}"'.format(MDSet.discName))
                    new_rom.add_disk(MDSet.discName)
                new_roms.append(new_rom)
            # ~~~ Check if user pressed the cancel button ~~~
            if self.progress_dialog.isCanceled():
                self.progress_dialog.endProgress()
                kodi_dialog_OK('Stopping ROM scanning. No changes have been made.')
                log_info('User pressed Cancel button when scanning ROMs. ROM scanning stopped.')
                return None
            num_items_checked += 1
        self.progress_dialog.endProgress()
        return new_roms
class SteamScanner(RomScannerStrategy):
    """Scans the games owned by a Steam account using the Steam Web API."""

    # ~~~ Scan for new items not yet in the rom collection ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    def _getCandidates(self, launcher_report, rom_path = None):
        # Returns the list of owned-game dictionaries from the Steam Web API.
        log_debug('Reading Steam account')
        self.progress_dialog.startProgress('Reading Steam account...')
        apikey = self.settings['steam-api-key']
        steamid = self.launcher.get_steam_id()
        url = 'http://api.steampowered.com/IPlayerService/GetOwnedGames/v0001/?key={}&steamid={}&include_appinfo=1'.format(apikey, steamid)
        self.progress_dialog.updateProgress(70)
        body = net_get_URL_original(url)
        self.progress_dialog.updateProgress(80)
        steamJson = json.loads(body)
        games = steamJson['response']['games']
        self.progress_dialog.endProgress()
        return games

    # --- Remove dead entries -----------------------------------------------------------------
    def _removeDeadRoms(self, candidates, roms):
        # Removes (in place) ROMs whose Steam appid is no longer in the account library.
        # Returns the number of ROMs removed.
        if roms is None or len(roms) == 0:
            log_info('Launcher is empty. No dead ROM check.')
            return 0
        log_debug('Starting dead items scan')
        num_roms = len(roms)
        num_removed_roms = 0
        i = 0
        self.progress_dialog.startProgress('Checking for dead ROMs ...', num_roms)
        steamGameIds = set(steamGame['appid'] for steamGame in candidates)
        # Iterate a reversed view so removing the current element does not skip entries.
        for rom in reversed(roms):
            romSteamId = rom.get_custom_attribute('steamid')
            log_debug('Searching {0}'.format(romSteamId))
            self.progress_dialog.updateProgress(i)
            i += 1
            if romSteamId not in steamGameIds:
                log_debug('Not found. Deleting from DB: "{0}"'.format(rom.get_name()))
                roms.remove(rom)
                num_removed_roms += 1
        self.progress_dialog.endProgress()
        return num_removed_roms

    def _processFoundItems(self, items, roms, launcher_report):
        # items is a list of (steamGame_dict, extra_ROM_flag) tuples.
        # Returns the list of newly created ROM objects, or None if the user cancelled.
        if items is None or len(items) == 0:
            log_info('No steam games available.')
            return []
        new_roms = []
        num_games = len(items)
        num_items_checked = 0
        self.progress_dialog.startProgress('Checking for new ROMs ...', num_games)
        steamIdsAlreadyInCollection = set(rom.get_custom_attribute('steamid') for rom in roms)
        # BUGFIX: this flag was read below but never defined in this method, raising a
        # NameError on the first scraping failure. Read it from the settings exactly
        # like RomFolderScanner and NvidiaStreamScanner do.
        skip_if_scraping_failed = self.settings['scan_skip_on_scraping_failure']
        for steamGame, extra_ROM_flag in items:
            steamId = steamGame['appid']
            log_debug('Searching {} with #{}'.format(steamGame['name'], steamId))
            self.progress_dialog.updateProgress(num_items_checked, steamGame['name'])
            if steamId not in steamIdsAlreadyInCollection:
                log_debug('========== Processing Steam game ==========')
                launcher_report.write('>>> title: {}'.format(steamGame['name']))
                launcher_report.write('>>> ID: {}'.format(steamGame['appid']))
                log_debug('Not found. Item {} is new'.format(steamGame['name']))
                launcher_path = self.launcher.get_rom_path()
                fake_file_name = text_str_to_filename_str(steamGame['name'])
                # Steam games have no local file; a fake .rom path is used as database key.
                romPath = launcher_path.pjoin('{0}.rom'.format(fake_file_name))
                # ~~~~~ Process new ROM and add to the list ~~~~~
                # --- Create new rom dictionary ---
                # >> Database always stores the original (non transformed/manipulated) path
                new_rom = ROM()
                new_rom.set_file(romPath)
                if extra_ROM_flag: new_rom.set_as_extra_ROM()
                new_rom.set_custom_attribute('steamid', steamGame['appid'])
                new_rom.set_custom_attribute('steam_name', steamGame['name'])  # so that we always have the original name
                new_rom.set_name(steamGame['name'])
                scraping_succeeded = True
                self.progress_dialog.updateMessages(steamGame['name'], 'Scraping {}...'.format(steamGame['name']))
                try:
                    self.scraping_strategy.scanner_process_ROM(new_rom, None)
                except Exception as ex:
                    scraping_succeeded = False
                    log_error('(Exception) Object type "{}"'.format(type(ex)))
                    log_error('(Exception) Message "{}"'.format(str(ex)))
                    log_warning('Could not scrape "{}"'.format(steamGame['name']))
                    #log_debug(traceback.format_exc())
                if not scraping_succeeded and skip_if_scraping_failed:
                    kodi_display_user_message({
                        'dialog': KODI_MESSAGE_NOTIFY_WARN,
                        'msg': 'Scraping "{}" failed. Skipping.'.format(steamGame['name'])
                    })
                else:
                    new_roms.append(new_rom)
            # ~~~ Check if user pressed the cancel button ~~~
            # BUGFIX: was self._isProgressCanceled(), a method that does not exist;
            # query the progress dialog directly like the other scanners.
            if self.progress_dialog.isCanceled():
                self.progress_dialog.endProgress()
                kodi_dialog_OK('Stopping ROM scanning. No changes have been made.')
                log_info('User pressed Cancel button when scanning ROMs. ROM scanning stopped.')
                return None
            num_items_checked += 1
        self.progress_dialog.endProgress()
        return new_roms
class NvidiaStreamScanner(RomScannerStrategy):
    """Scans the games published by a Nvidia GameStream server."""

    # ~~~ Scan for new items not yet in the rom collection ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    def _getCandidates(self, launcher_report, rom_path = None):
        # Returns the list of streamable game dictionaries reported by the server.
        # NOTE(review): returns None when the connection fails — confirm the caller
        # (base class scan()/cleanup()) handles a None return before len() is applied.
        log_debug('Reading Nvidia GameStream server')
        self.progress_dialog.startProgress('Reading Nvidia GameStream server...')
        server_host = self.launcher.get_server()
        certificates_path = self.launcher.get_certificates_path()
        streamServer = GameStreamServer(server_host, certificates_path, True)
        connected = streamServer.connect()
        if not connected:
            kodi_notify_error('Unable to connect to gamestream server')
            return None
        self.progress_dialog.updateProgress(50)
        games = streamServer.getApps()
        self.progress_dialog.endProgress()
        return games

    # --- Remove dead entries -----------------------------------------------------------------
    def _removeDeadRoms(self, candidates, roms):
        # Removes (in place) ROMs whose stream ID is no longer published by the server.
        # Returns the number of ROMs removed.
        if roms is None or len(roms) == 0:
            log_info('Launcher is empty. No dead ROM check.')
            return 0
        log_debug('Starting dead items scan')
        num_roms = len(roms)
        num_removed_roms = 0
        i = 0
        self.progress_dialog.startProgress('Checking for dead ROMs ...', num_roms)
        streamIds = set(streamableGame['ID'] for streamableGame in candidates)
        # Iterate a reversed view so removing the current element does not skip entries.
        for rom in reversed(roms):
            romStreamId = rom.get_custom_attribute('streamid')
            log_debug('Searching {0}'.format(romStreamId))
            self.progress_dialog.updateProgress(i)
            i += 1
            if romStreamId not in streamIds:
                log_debug('Not found. Deleting from DB {0}'.format(rom.get_name()))
                roms.remove(rom)
                num_removed_roms += 1
        self.progress_dialog.endProgress()
        return num_removed_roms

    def _processFoundItems(self, items, roms, launcher_report):
        # items is a list of (streamableGame_dict, extra_ROM_flag) tuples.
        # Returns the list of newly created ROM objects, or None if the user cancelled.
        if items is None or len(items) == 0:
            log_info('No Nvidia Gamestream games available.')
            return []
        new_roms = []
        num_games = len(items)
        num_items_checked = 0
        self.progress_dialog.startProgress('Checking for new ROMs ...', num_games)
        streamIdsAlreadyInCollection = set(rom.get_custom_attribute('streamid') for rom in roms)
        skip_if_scraping_failed = self.settings['scan_skip_on_scraping_failure']
        for streamableGame, extra_ROM_flag in items:
            streamId = streamableGame['ID']
            log_debug('Searching {} with #{}'.format(streamableGame['AppTitle'], streamId))
            self.progress_dialog.updateProgress(num_items_checked, streamableGame['AppTitle'])
            if streamId in streamIdsAlreadyInCollection:
                log_debug('Game "{}" with #{} already in collection'.format(streamableGame['AppTitle'], streamId))
                continue
            log_debug('========== Processing Nvidia Gamestream game ==========')
            launcher_report.write('>>> title: {0}'.format(streamableGame['AppTitle']))
            launcher_report.write('>>> ID: {0}'.format(streamableGame['ID']))
            log_debug('Not found. Item {0} is new'.format(streamableGame['AppTitle']))
            launcher_path = self.launcher.get_rom_path()
            fake_file_name = text_str_to_filename_str(streamableGame['AppTitle'])
            # Gamestream games have no local file; a fake .rom path is used as database key.
            romPath = launcher_path.pjoin('{0}.rom'.format(fake_file_name))
            # ~~~~~ Process new ROM and add to the list ~~~~~
            # --- Create new rom dictionary ---
            # >> Database always stores the original (non transformed/manipulated) path
            new_rom = ROM()
            new_rom.set_file(romPath)
            if extra_ROM_flag: new_rom.set_as_extra_ROM()
            new_rom.set_custom_attribute('streamid', streamableGame['ID'])
            new_rom.set_custom_attribute('gamestream_name', streamableGame['AppTitle'])  # so that we always have the original name
            new_rom.set_name(streamableGame['AppTitle'])
            scraping_succeeded = True
            self.progress_dialog.updateMessages(streamableGame['AppTitle'], 'Scraping {0}...'.format(streamableGame['AppTitle']))
            try:
                self.scraping_strategy.scanner_process_ROM(new_rom, None)
            except Exception as ex:
                scraping_succeeded = False
                log_error('(Exception) Object type "{}"'.format(type(ex)))
                log_error('(Exception) Message "{}"'.format(str(ex)))
                log_warning('Could not scrape "{}"'.format(streamableGame['AppTitle']))
                #log_debug(traceback.format_exc())
            if not scraping_succeeded and skip_if_scraping_failed:
                kodi_display_user_message({
                    'dialog': KODI_MESSAGE_NOTIFY_WARN,
                    'msg': 'Scraping "{}" failed. Skipping.'.format(streamableGame['AppTitle'])
                })
            else:
                new_roms.append(new_rom)
            # ~~~ Check if user pressed the cancel button ~~~
            if self.progress_dialog.isCanceled():
                self.progress_dialog.endProgress()
                kodi_dialog_OK('Stopping ROM scanning. No changes have been made.')
                log_info('User pressed Cancel button when scanning ROMs. ROM scanning stopped.')
                return None
            num_items_checked += 1
        self.progress_dialog.endProgress()
        return new_roms
# #################################################################################################
# #################################################################################################
# DAT files and ROM audit
# #################################################################################################
# #################################################################################################
class RomDatFileScanner(object):
    """Audits launcher ROMs against a No-Intro/Redump DAT file and builds Parent/Clone indices."""

    def __init__(self, settings):
        self.settings = settings
        super(RomDatFileScanner, self).__init__()

    #
    # Helper function to update ROMs No-Intro status if user configured a No-Intro DAT file.
    # Dictionaries are mutable, so roms can be changed because passed by assigment.
    # This function also creates the Parent/Clone indices:
    #   1) ADDON_DATA_DIR/db_ROMs/roms_base_noext_PClone_index.json
    #   2) ADDON_DATA_DIR/db_ROMs/roms_base_noext_parents.json
    #
    # A) If there are Unkown ROMs, a fake rom with name [Unknown ROMs] and id UNKNOWN_ROMS_PARENT_ID
    #    is created. This fake ROM is the parent of all Unknown ROMs.
    #    This fake ROM is added to roms_base_noext_parents.json database.
    #    This fake ROM is not present in the main JSON ROM database.
    #
    # Returns:
    #   True  -> ROM audit was OK
    #   False -> There was a problem with the audit.
    #
    #def _roms_update_NoIntro_status(self, roms, nointro_xml_file_FileName):
    def update_roms_NoIntro_status(self, launcher, roms):
        # NOTE(review): self._startProgressPhase() / self._updateProgress() /
        # self._endProgressPhase() are not defined in this class — presumably
        # provided by a mixin or monkey-patched elsewhere; confirm before using
        # this class standalone.
        __debug_progress_dialogs = False
        __debug_time_step = 0.0005
        # --- Reset the No-Intro status and removed No-Intro missing ROMs ---
        audit_have = audit_miss = audit_unknown = 0
        self._startProgressPhase('Advanced Emulator Launcher', 'Deleting Missing/Dead ROMs and clearing flags ...')
        self.roms_reset_NoIntro_status(launcher, roms)
        self._updateProgress(100)
        if __debug_progress_dialogs: time.sleep(0.5)
        # --- Check if DAT file exists ---
        nointro_xml_file_FileName = launcher.get_nointro_xml_filepath()
        if not nointro_xml_file_FileName.exists():
            log_warning('_roms_update_NoIntro_status() Not found {0}'.format(nointro_xml_file_FileName.getPath()))
            return False
        self._updateProgress(0, 'Loading No-Intro/Redump XML DAT file ...')
        roms_nointro = audit_load_NoIntro_XML_file(nointro_xml_file_FileName)
        self._updateProgress(100)
        if __debug_progress_dialogs: time.sleep(0.5)
        if not roms_nointro:
            log_warning('_roms_update_NoIntro_status() Error loading {0}'.format(nointro_xml_file_FileName.getPath()))
            return False
        # --- Remove BIOSes from No-Intro ROMs ---
        if self.settings['scan_ignore_bios']:
            log_info('_roms_update_NoIntro_status() Removing BIOSes from No-Intro ROMs ...')
            self._updateProgress(0, 'Removing BIOSes from No-Intro ROMs ...')
            num_items = len(roms_nointro)
            item_counter = 0
            filtered_roms_nointro = {}
            for rom_id in roms_nointro:
                rom_data = roms_nointro[rom_id]
                BIOS_str_list = re.findall('\[BIOS\]', rom_data['name'])
                if not BIOS_str_list:
                    filtered_roms_nointro[rom_id] = rom_data
                else:
                    log_debug('_roms_update_NoIntro_status() Removed BIOS "{0}"'.format(rom_data['name']))
                item_counter += 1
                self._updateProgress((item_counter*100)/num_items)
                if __debug_progress_dialogs: time.sleep(__debug_time_step)
            roms_nointro = filtered_roms_nointro
            self._updateProgress(100)
        else:
            log_info('_roms_update_NoIntro_status() User wants to include BIOSes.')
        # --- Put No-Intro ROM names in a set ---
        # >> Set is the fastest Python container for searching elements (implements hashed search).
        # >> No-Intro names include tags
        self._updateProgress(0, 'Creating No-Intro and ROM sets ...')
        roms_nointro_set = set(roms_nointro.keys())
        roms_set = set()
        for rom in roms:
            # >> Use the ROM basename.
            ROMFileName = rom.get_file()
            roms_set.add(ROMFileName.getBaseNoExt())
        self._updateProgress(100)
        if __debug_progress_dialogs: time.sleep(0.5)
        # --- Traverse Launcher ROMs and check if they are in the No-Intro ROMs list ---
        self._updateProgress(0, 'Audit Step 1/4: Checking Have and Unknown ROMs ...')
        num_items = len(roms)
        item_counter = 0
        for rom in roms:
            ROMFileName = rom.get_file()
            if ROMFileName.getBaseNoExt() in roms_nointro_set:
                rom.set_nointro_status(NOINTRO_STATUS_HAVE)
                audit_have += 1
                log_debug('_roms_update_NoIntro_status() HAVE "{0}"'.format(ROMFileName.getBaseNoExt()))
            else:
                rom.set_nointro_status(NOINTRO_STATUS_UNKNOWN)
                audit_unknown += 1
                log_debug('_roms_update_NoIntro_status() UNKNOWN "{0}"'.format(ROMFileName.getBaseNoExt()))
            item_counter += 1
            self._updateProgress((item_counter*100)/num_items)
            if __debug_progress_dialogs: time.sleep(__debug_time_step)
        self._updateProgress(100)
        # --- Mark Launcher dead ROMs as missing ---
        self._updateProgress(0, 'Audit Step 2/4: Checking Missing ROMs ...')
        num_items = len(roms)
        item_counter = 0
        for rom in roms:
            ROMFileName = rom.get_file()
            if not ROMFileName.exists():
                rom.set_nointro_status(NOINTRO_STATUS_MISS)
                audit_miss += 1
                log_debug('_roms_update_NoIntro_status() MISSING "{0}"'.format(ROMFileName.getBaseNoExt()))
            item_counter += 1
            self._updateProgress((item_counter*100)/num_items)
            if __debug_progress_dialogs: time.sleep(__debug_time_step)
        self._updateProgress(100)
        # --- Now add missing ROMs to Launcher ---
        # >> Traverse the No-Intro set and add the No-Intro ROM if it's not in the Launcher
        # >> Added/Missing ROMs have their own romID.
        self._updateProgress(0, 'Audit Step 3/4: Adding Missing ROMs ...')
        num_items = len(roms_nointro_set)
        item_counter = 0
        ROMPath = launcher.get_rom_path()
        for nointro_rom in sorted(roms_nointro_set):
            # log_debug('_roms_update_NoIntro_status() Checking "{0}"'.format(nointro_rom))
            if nointro_rom not in roms_set:
                # Add new "fake" missing ROM. This ROM cannot be launched!
                # Added ROMs have special extension .nointro
                rom = ROM()
                rom.set_file(ROMPath.pjoin(nointro_rom + '.nointro'))
                rom.set_name(nointro_rom)
                rom.set_nointro_status(NOINTRO_STATUS_MISS)
                roms.append(rom)
                audit_miss += 1
                log_debug('_roms_update_NoIntro_status() ADDED "{0}"'.format(rom.get_name()))
                # log_debug('_roms_update_NoIntro_status() OP "{0}"'.format(rom['filename']))
            item_counter += 1
            self._updateProgress((item_counter*100)/num_items)
            if __debug_progress_dialogs: time.sleep(__debug_time_step)
        self._updateProgress(100)
        # --- Detect if the DAT file has PClone information or not ---
        dat_pclone_dic = audit_make_NoIntro_PClone_dic(roms_nointro)
        num_dat_clones = 0
        for parent_name in dat_pclone_dic: num_dat_clones += len(dat_pclone_dic[parent_name])
        log_verb('No-Intro/Redump DAT has {0} clone ROMs'.format(num_dat_clones))
        # --- Generate main pclone dictionary ---
        # >> audit_unknown_roms is an int of list = ['Parents', 'Clones']
        # log_debug("settings['audit_unknown_roms'] = {0}".format(self.settings['audit_unknown_roms']))
        unknown_ROMs_are_parents = True if self.settings['audit_unknown_roms'] == 0 else False
        log_debug('unknown_ROMs_are_parents = {0}'.format(unknown_ROMs_are_parents))
        # if num_dat_clones == 0 and self.settings['audit_create_pclone_groups']:
        #     # --- If DAT has no PClone information and user want then generate filename-based PClone groups ---
        #     # >> This feature is taken from NARS (NARS Advanced ROM Sorting)
        #     log_verb('Generating filename-based Parent/Clone groups')
        #     pDialog(0, 'Building filename-based Parent/Clone index ...')
        #     roms_pclone_index = audit_generate_filename_PClone_index(roms, roms_nointro, unknown_ROMs_are_parents)
        #     pDialog(100)
        #     if __debug_progress_dialogs: time.sleep(0.5)
        # else:
        #     # --- Make a DAT-based Parent/Clone index ---
        #     # >> Here we build a roms_pclone_index with info from the DAT file. 2 issues:
        #     # >> A) Redump DATs do not have cloneof information.
        #     # >> B) Also, it is at this point where a region custom parent may be chosen instead of
        #     # >>    the default one.
        #     log_verb('Generating DAT-based Parent/Clone groups')
        #     pDialog(0, 'Building DAT-based Parent/Clone index ...')
        #     roms_pclone_index = audit_generate_DAT_PClone_index(roms, roms_nointro, unknown_ROMs_are_parents)
        #     pDialog(100)
        #     if __debug_progress_dialogs: time.sleep(0.5)
        # --- Make a DAT-based Parent/Clone index ---
        # >> For 0.9.7 only use the DAT to make the PClone groups. In 0.9.8 decouple the audit
        # >> code from the PClone generation code.
        log_verb('Generating DAT-based Parent/Clone groups')
        self._updateProgress(0, 'Building DAT-based Parent/Clone index ...')
        roms_pclone_index = audit_generate_DAT_PClone_index(roms, roms_nointro, unknown_ROMs_are_parents)
        self._updateProgress(100)
        if __debug_progress_dialogs: time.sleep(0.5)
        # --- Make a Clone/Parent index ---
        # >> This is made exclusively from the Parent/Clone index
        self._updateProgress(0, 'Building Clone/Parent index ...')
        clone_parent_dic = {}
        for parent_id in roms_pclone_index:
            for clone_id in roms_pclone_index[parent_id]:
                clone_parent_dic[clone_id] = parent_id
        self._updateProgress(100)
        if __debug_progress_dialogs: time.sleep(0.5)
        # --- Set ROMs pclone_status flag and update launcher statistics ---
        self._updateProgress(0, 'Audit Step 4/4: Setting Parent/Clone status and cloneof fields...')
        num_items = len(roms)
        item_counter = 0
        audit_parents = audit_clones = 0
        for rom in roms:
            rom_id = rom.get_id()
            if rom_id in roms_pclone_index:
                rom.set_pclone_status(PCLONE_STATUS_PARENT)
                audit_parents += 1
            else:
                rom.set_clone(clone_parent_dic[rom_id])
                rom.set_pclone_status(PCLONE_STATUS_CLONE)
                audit_clones += 1
            item_counter += 1
            self._updateProgress((item_counter*100)/num_items)
            if __debug_progress_dialogs: time.sleep(__debug_time_step)
        self._updateProgress(100)
        launcher.set_audit_stats(len(roms), audit_parents, audit_clones, audit_have, audit_miss, audit_unknown)
        # --- Make a Parent only ROM list and save JSON ---
        # >> This is to speed up rendering of launchers in 1G1R display mode
        self._updateProgress(0, 'Building Parent/Clone index and Parent dictionary ...')
        parent_roms = audit_generate_parent_ROMs_dic(roms, roms_pclone_index)
        self._updateProgress(100)
        if __debug_progress_dialogs: time.sleep(0.5)
        # --- Save JSON databases ---
        self._updateProgress(0, 'Saving NO-Intro/Redump JSON databases ...')
        # CONSISTENCY FIX: the first call used 'ROMs_dir' while the next two used
        # 'ROMS_DIR'; unified on ROMS_DIR.
        # NOTE(review): neither ROMS_DIR nor the launcher['roms_base_noext']
        # subscription is defined/verified in this scope — confirm against the module
        # globals and the launcher object API (elsewhere launcher is accessed via methods).
        fs_write_JSON_file(ROMS_DIR, launcher['roms_base_noext'] + '_index_PClone', roms_pclone_index)
        self._updateProgress(30)
        fs_write_JSON_file(ROMS_DIR, launcher['roms_base_noext'] + '_index_CParent', clone_parent_dic)
        self._updateProgress(60)
        fs_write_JSON_file(ROMS_DIR, launcher['roms_base_noext'] + '_parents', parent_roms)
        self._updateProgress(100)
        self._endProgressPhase()
        # --- Update launcher number of ROMs ---
        self.audit_have = audit_have
        self.audit_miss = audit_miss
        self.audit_unknown = audit_unknown
        self.audit_total = len(roms)
        self.audit_parents = audit_parents
        self.audit_clones = audit_clones
        # --- Report ---
        log_info('********** No-Intro/Redump audit finished. Report ***********')
        log_info('Have ROMs {0:6d}'.format(self.audit_have))
        log_info('Miss ROMs {0:6d}'.format(self.audit_miss))
        log_info('Unknown ROMs {0:6d}'.format(self.audit_unknown))
        log_info('Total ROMs {0:6d}'.format(self.audit_total))
        log_info('Parent ROMs {0:6d}'.format(self.audit_parents))
        log_info('Clone ROMs {0:6d}'.format(self.audit_clones))
        return True

    #
    # Resets the No-Intro status
    # 1) Remove all ROMs which does not exist.
    # 2) Set status of remaining ROMs to nointro_status = AUDIT_STATUS_NONE
    #
    # BUGFIX: this method was defined with a duplicated name
    # (roms_reset_NoIntro_status_roms_reset_NoIntro_status), so the call
    # self.roms_reset_NoIntro_status() in update_roms_NoIntro_status() raised
    # AttributeError. Renamed; an alias below keeps the old name working.
    def roms_reset_NoIntro_status(self, launcher, roms):
        log_info('roms_reset_NoIntro_status() Launcher has {0} ROMs'.format(len(roms)))
        if len(roms) < 1: return
        # >> Step 1) Delete missing/dead ROMs
        num_removed_roms = self._roms_delete_missing_ROMs(roms)
        log_info('roms_reset_NoIntro_status() Removed {0} dead/missing ROMs'.format(num_removed_roms))
        # >> Step 2) Set No-Intro status to AUDIT_STATUS_NONE and
        #    set PClone status to PCLONE_STATUS_NONE
        log_info('roms_reset_NoIntro_status() Resetting No-Intro status of all ROMs to None')
        for rom in roms:
            rom.set_nointro_status(AUDIT_STATUS_NONE)
            rom.set_pclone_status(PCLONE_STATUS_NONE)
        log_info('roms_reset_NoIntro_status() Now launcher has {0} ROMs'.format(len(roms)))
        # >> Step 3) Delete PClone index and Parent ROM list.
        launcher.reset_parent_and_clone_roms()

    # Backwards-compatible alias for the old (typo) method name.
    roms_reset_NoIntro_status_roms_reset_NoIntro_status = roms_reset_NoIntro_status

    #
    # Deletes missing ROMs
    #
    def _roms_delete_missing_ROMs(self, roms):
        # Removes (in place) ROMs whose file is unset or no longer exists.
        # Returns the number of ROMs removed.
        num_removed_roms = 0
        num_roms = len(roms)
        log_info('_roms_delete_missing_ROMs() Launcher has {0} ROMs'.format(num_roms))
        if num_roms > 0:
            log_verb('_roms_delete_missing_ROMs() Starting dead items scan')
            # Iterate a reversed view so removing the current element does not skip entries.
            for rom in reversed(roms):
                ROMFileName = rom.get_file()
                if not ROMFileName:
                    log_debug('_roms_delete_missing_ROMs() Skip "{0}"'.format(rom.get_name()))
                    continue
                log_debug('_roms_delete_missing_ROMs() Test "{0}"'.format(ROMFileName.getBase()))
                # --- Remove missing ROMs ---
                if not ROMFileName.exists():
                    log_debug('_roms_delete_missing_ROMs() RM "{0}"'.format(ROMFileName.getBase()))
                    roms.remove(rom)
                    num_removed_roms += 1
            if num_removed_roms > 0:
                log_info('_roms_delete_missing_ROMs() {0} dead ROMs removed successfully'.format(num_removed_roms))
            else:
                log_info('_roms_delete_missing_ROMs() No dead ROMs found.')
        else:
            log_info('_roms_delete_missing_ROMs() Launcher is empty. No dead ROM check.')
        return num_removed_roms
# #################################################################################################
# #################################################################################################
# Gamestream
# #################################################################################################
# #################################################################################################
class GameStreamServer(object):
    """Minimal client for the NVIDIA GameStream protocol.

    Supports server discovery ('serverinfo'), the multi-step PIN pairing
    handshake and application listing. The client certificate pair is
    stored as 'nvidia.crt'/'nvidia.key' inside *certificates_path*.
    """
    def __init__(self, host, certificates_path, debug_mode = False):
        """host: server address. certificates_path: FileName directory or falsy."""
        self.host = host
        self.unique_id = random.getrandbits(16)
        self.debug_mode = debug_mode

        if certificates_path:
            self.certificates_path = certificates_path
            self.certificate_file_path = self.certificates_path.pjoin('nvidia.crt')
            self.certificate_key_file_path = self.certificates_path.pjoin('nvidia.key')
        else:
            self.certificates_path = FileName('')
            self.certificate_file_path = FileName('')
            self.certificate_key_file_path = FileName('')
        log_debug('GameStreamServer() Using certificate key file {}'.format(self.certificate_key_file_path.getPath()))
        log_debug('GameStreamServer() Using certificate file {}'.format(self.certificate_file_path.getPath()))

        # BUG FIX: initialise server_info so that is_connected()/is_paired()
        # called before connect() report 'not connected' instead of raising
        # AttributeError (is_connected() already tests for None).
        self.server_info = None
        self.pem_cert_data = None
        self.key_cert_data = None

    def _perform_server_request(self, end_point, useHttps=True, parameters = None):
        """GET *end_point* on the server and return the parsed XML root.

        Returns None when the request failed. https uses port 47984,
        plain http uses port 47989 (GameStream protocol ports).
        """
        if useHttps:
            url = "https://{0}:47984/{1}?uniqueid={2}&uuid={3}".format(self.host, end_point, self.unique_id, uuid.uuid4().hex)
        else:
            url = "http://{0}:47989/{1}?uniqueid={2}&uuid={3}".format(self.host, end_point, self.unique_id, uuid.uuid4().hex)

        if parameters:
            for key, value in parameters.iteritems():
                url = url + "&{0}={1}".format(key, value)

        handler = HTTPSClientAuthHandler(self.certificate_key_file_path.getPath(), self.certificate_file_path.getPath())
        page_data = net_get_URL_using_handler(url, handler)

        if page_data is None:
            return None

        root = ET.fromstring(page_data)
        if self.debug_mode:
            log_debug(ET.tostring(root, encoding='utf8', method='xml'))
        return root

    def connect(self):
        """Query serverinfo (https first, falling back to http). Returns bool."""
        log_debug('Connecting to gamestream server {}'.format(self.host))
        self.server_info = self._perform_server_request("serverinfo")
        if not self.is_connected():
            self.server_info = self._perform_server_request("serverinfo", False)
        return self.is_connected()

    def is_connected(self):
        """Return True if the last serverinfo request succeeded (status 200)."""
        if self.server_info is None:
            log_debug('No successful connection to the server has been made')
            return False

        if self.server_info.find('state') is None:
            log_debug('Server state {0}'.format(self.server_info.attrib['status_code']))
        else:
            log_debug('Server state {0}'.format(self.server_info.find('state').text))

        return self.server_info.attrib['status_code'] == '200'

    def get_server_version(self):
        """Return the server application version as a VersionNumber."""
        appVersion = self.server_info.find('appversion')
        return VersionNumber(appVersion.text)

    def get_uniqueid(self):
        """Return the server's unique id string."""
        uniqueid = self.server_info.find('uniqueid').text
        return uniqueid

    def get_hostname(self):
        """Return the server's hostname."""
        hostname = self.server_info.find('hostname').text
        return hostname

    def generatePincode(self):
        """Return a random 4-digit PIN string (each digit 1-9)."""
        i1 = random.randint(1, 9)
        i2 = random.randint(1, 9)
        i3 = random.randint(1, 9)
        i4 = random.randint(1, 9)
        return '{0}{1}{2}{3}'.format(i1, i2, i3, i4)

    def is_paired(self):
        """Return True if the server reports this client as already paired."""
        if not self.is_connected():
            log_warning('Connect first')
            return False
        pairStatus = self.server_info.find('PairStatus')
        return pairStatus.text == '1'

    def pairServer(self, pincode):
        """Perform the multi-step GameStream pairing handshake using *pincode*.

        Returns True on success. On failure the pairing session is cancelled
        with an 'unpair' request where the protocol requires it.
        """
        if not self.is_connected():
            log_warning('Connect first')
            return False

        version = self.get_server_version()
        log_info("Pairing with server generation: {0}".format(version.getFullString()))

        majorVersion = version.getMajor()
        if majorVersion >= 7:
            # Gen 7+ uses SHA-256 hashing
            hashAlgorithm = HashAlgorithm(256)
        else:
            # Prior to Gen 7, SHA-1 is used
            hashAlgorithm = HashAlgorithm(1)
        log_debug('Pin {0}'.format(pincode))

        # Generate a salt for hashing the PIN
        salt = randomBytes(16)
        # Combine the salt and pin
        saltAndPin = salt + bytearray(pincode, 'utf-8')
        # Create an AES key from them
        aes_cypher = AESCipher(saltAndPin, hashAlgorithm)

        # get certificates ready
        log_debug('Getting local certificate files')
        client_certificate = self.getCertificateBytes()
        client_key_certificate = self.getCertificateKeyBytes()
        certificate_signature = getCertificateSignature(client_certificate)

        # Start pairing with server
        log_debug('Start pairing with server')
        pairing_result = self._perform_server_request('pair', False, {
            'devicename': 'ael',
            'updateState': 1,
            'phrase': 'getservercert',
            'salt': binascii.hexlify(salt),
            'clientcert': binascii.hexlify(client_certificate)
        })

        if pairing_result is None:
            log_error('Failed to pair with server. No XML received.')
            return False

        isPaired = pairing_result.find('paired').text
        if isPaired != '1':
            log_error('Failed to pair with server. Server returned failed state.')
            return False

        server_cert_data = pairing_result.find('plaincert').text
        if server_cert_data is None:
            log_error('Failed to pair with server. A different pairing session might be in progress.')
            return False

        # Generate a random challenge and encrypt it with our AES key
        challenge = randomBytes(16)
        encrypted_challenge = aes_cypher.encryptToHex(challenge)

        # Send the encrypted challenge to the server
        log_debug('Sending encrypted challenge to the server')
        pairing_challenge_result = self._perform_server_request('pair', False, {
            'devicename': 'ael',
            'updateState': 1,
            'clientchallenge': encrypted_challenge })

        if pairing_challenge_result is None:
            log_error('Failed to pair with server. No XML received.')
            return False

        isPaired = pairing_challenge_result.find('paired').text
        if isPaired != '1':
            log_error('Failed to pair with server. Server returned failed state.')
            self._perform_server_request('unpair', False)
            return False

        # Decode the server's response and subsequent challenge
        log_debug('Decoding server\'s response and challenge response')
        server_challenge_hex = pairing_challenge_result.find('challengeresponse').text
        server_challenge_bytes = bytearray.fromhex(server_challenge_hex)
        server_challenge_decrypted = aes_cypher.decrypt(server_challenge_bytes)
        server_challenge_firstbytes = server_challenge_decrypted[:hashAlgorithm.digest_size()]
        server_challenge_lastbytes = server_challenge_decrypted[hashAlgorithm.digest_size():hashAlgorithm.digest_size()+16]

        # Using another 16 bytes secret, compute a challenge response hash using
        # the secret, our cert sig, and the challenge
        client_secret = randomBytes(16)
        challenge_response = server_challenge_lastbytes + certificate_signature + client_secret
        challenge_response_hashed = hashAlgorithm.hash(challenge_response)
        challenge_response_encrypted = aes_cypher.encryptToHex(challenge_response_hashed)

        # Send the challenge response to the server
        log_debug('Sending the challenge response to the server')
        pairing_secret_response = self._perform_server_request('pair', False, {
            'devicename': 'ael',
            'updateState': 1,
            'serverchallengeresp': challenge_response_encrypted })

        if pairing_secret_response is None:
            log_error('Failed to pair with server. No XML received.')
            return False

        isPaired = pairing_secret_response.find('paired').text
        if isPaired != '1':
            log_error('Failed to pair with server. Server returned failed state.')
            self._perform_server_request('unpair', False)
            return False

        # Get the server's signed secret
        log_debug('Verifiying server signature')
        server_secret_response = bytearray.fromhex(pairing_secret_response.find('pairingsecret').text)
        server_secret = server_secret_response[:16]
        server_signature = server_secret_response[16:272]

        server_cert = server_cert_data.decode('hex')
        is_verified = verify_signature(str(server_secret), server_signature, server_cert)

        if not is_verified:
            # Looks like a MITM, Cancel the pairing process
            log_error('Failed to verify signature. (MITM warning)')
            self._perform_server_request('unpair', False)
            return False

        # Ensure the server challenge matched what we expected (aka the PIN was correct)
        log_debug('Confirming PIN with entered value')
        server_cert_signature = getCertificateSignature(server_cert)
        server_secret_combination = challenge + server_cert_signature + server_secret
        server_secret_hashed = hashAlgorithm.hash(server_secret_combination)

        if server_secret_hashed != server_challenge_firstbytes:
            # Probably got the wrong PIN
            log_error("Wrong PIN entered")
            self._perform_server_request('unpair', False)
            return False

        log_debug('Pin is confirmed')

        # Send the server our signed secret
        log_debug('Sending server our signed secret')
        signed_client_secret = sign_data(client_secret, client_key_certificate)
        client_pairing_secret = client_secret + signed_client_secret

        client_pairing_secret_response = self._perform_server_request('pair', False, {
            'devicename': 'ael',
            'updateState': 1,
            'clientpairingsecret': binascii.hexlify(client_pairing_secret)})

        isPaired = client_pairing_secret_response.find('paired').text
        if isPaired != '1':
            log_error('Failed to pair with server. Server returned failed state.')
            self._perform_server_request('unpair', False)
            return False

        # Do the initial challenge over https
        log_debug('Initial challenge again')
        pair_challenge_response = self._perform_server_request('pair', True, {
            'devicename': 'ael',
            'updateState': 1,
            'phrase': 'pairchallenge'})

        isPaired = pair_challenge_response.find('paired').text
        if isPaired != '1':
            log_error('Failed to pair with server. Server returned failed state.')
            self._perform_server_request('unpair', False)
            return False

        return True

    def getApps(self):
        """Return the server's application list as a list of plain dicts."""
        apps_response = self._perform_server_request('applist', True)
        if apps_response is None:
            kodi_notify_error('Failure to connect to GameStream server')
            return []

        appnodes = apps_response.findall('App')
        apps = []
        for appnode in appnodes:
            app = {}
            for appnode_attr in appnode:
                # Only collect leaf elements; skip nested structures.
                if len(list(appnode_attr)) > 1:
                    continue

                xml_text = appnode_attr.text if appnode_attr.text is not None else ''
                xml_text = text_unescape_XML(xml_text)
                xml_tag = appnode_attr.tag
                app[xml_tag] = xml_text
            apps.append(app)
        return apps

    def getCertificateBytes(self):
        """Return the client certificate (PEM), creating it on first use."""
        if self.pem_cert_data:
            return self.pem_cert_data

        if not self.certificate_file_path.exists():
            log_info('Client certificate file does not exist. Creating')
            create_self_signed_cert("NVIDIA GameStream Client", self.certificate_file_path, self.certificate_key_file_path)

        log_info('Loading client certificate data from {0}'.format(self.certificate_file_path.getPath()))
        self.pem_cert_data = self.certificate_file_path.loadFileToStr('ascii')
        return str(self.pem_cert_data)

    def getCertificateKeyBytes(self):
        """Return the client private key (PEM), creating it on first use."""
        if self.key_cert_data:
            return self.key_cert_data

        if not self.certificate_key_file_path.exists():
            log_info('Client certificate file does not exist. Creating')
            create_self_signed_cert("NVIDIA GameStream Client", self.certificate_file_path, self.certificate_key_file_path)

        log_info('Loading client certificate data from {0}'.format(self.certificate_key_file_path.getPath()))
        self.key_cert_data = self.certificate_key_file_path.loadFileToStr('ascii')
        return str(self.key_cert_data)

    def validate_certificates(self):
        """Ensure nvidia.crt/nvidia.key exist, copying any found *.crt/*.key."""
        if self.certificate_file_path.exists() and self.certificate_key_file_path.exists():
            log_debug('validate_certificates(): Certificate files exist. Done')
            return True

        certificate_files = self.certificates_path.scanFilesInPath('*.crt')
        key_files = self.certificates_path.scanFilesInPath('*.key')

        if len(certificate_files) < 1:
            log_warning('validate_certificates(): No .crt files found at given location.')
            return False

        if not self.certificate_file_path.exists():
            log_debug('validate_certificates(): Copying .crt file to nvidia.crt')
            certificate_files[0].copy(self.certificate_file_path)

        if len(key_files) < 1:
            log_warning('validate_certificates(): No .key files found at given location.')
            return False

        if not self.certificate_key_file_path.exists():
            log_debug('validate_certificates(): Copying .key file to nvidia.key')
            # BUG FIX: 'certificate_key_file_path' was referenced without
            # 'self.', which raised NameError whenever the key file had to
            # actually be copied.
            key_files[0].copy(self.certificate_key_file_path)

        return True

    @staticmethod
    def try_to_resolve_path_to_nvidia_certificates():
        """Return the most likely directory holding Moonlight/Limelight certs."""
        home = expanduser("~")
        homePath = FileName(home)

        possiblePath = homePath.pjoin('Moonlight/')
        if possiblePath.exists():
            return possiblePath.getPath()

        possiblePath = homePath.pjoin('Limelight/')
        if possiblePath.exists():
            return possiblePath.getPath()

        return homePath.getPath()
| Wintermute0110/advanced-emulator-launcher | resources/objects.py | Python | gpl-2.0 | 305,546 |
"""
Support for RFXtrx sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.rfxtrx/
"""
import logging
import voluptuous as vol
import homeassistant.components.rfxtrx as rfxtrx
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import slugify
from homeassistant.components.rfxtrx import (
ATTR_AUTOMATIC_ADD, ATTR_NAME, ATTR_FIREEVENT,
CONF_DEVICES, ATTR_DATA_TYPE, DATA_TYPES, ATTR_ENTITY_ID)
DEPENDENCIES = ['rfxtrx']

_LOGGER = logging.getLogger(__name__)

# Platform config: a 'devices' mapping of packet id -> sensor config plus an
# 'automatic_add' flag. extra=ALLOW_EXTRA lets the base rfxtrx component keys
# pass through without failing validation.
PLATFORM_SCHEMA = vol.Schema({
    vol.Required("platform"): rfxtrx.DOMAIN,
    vol.Optional(CONF_DEVICES, default={}): vol.All(dict, rfxtrx.valid_sensor),
    vol.Optional(ATTR_AUTOMATIC_ADD, default=False): cv.boolean,
}, extra=vol.ALLOW_EXTRA)
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
    """Setup the RFXtrx platform.

    Creates one RfxtrxSensor entity per configured (device, data type) pair
    and registers a gateway callback that pushes new events into those
    entities. Shares state with other rfxtrx platforms through the
    module-level rfxtrx.RFX_DEVICES registry.
    """
    # pylint: disable=too-many-locals
    from RFXtrx import SensorEvent
    sensors = []
    for packet_id, entity_info in config['devices'].items():
        event = rfxtrx.get_rfx_object(packet_id)
        device_id = "sensor_" + slugify(event.device.id_string.lower())
        # Skip devices already registered (possibly by another platform load).
        if device_id in rfxtrx.RFX_DEVICES:
            continue
        _LOGGER.info("Add %s rfxtrx.sensor", entity_info[ATTR_NAME])

        sub_sensors = {}
        data_types = entity_info[ATTR_DATA_TYPE]
        if len(data_types) == 0:
            # No data types configured: auto-detect the first known data type
            # present in the example packet's decoded values.
            data_type = "Unknown"
            for data_type in DATA_TYPES:
                if data_type in event.values:
                    data_types = [data_type]
                    break
        # One entity per data type; all of them share the same physical device.
        for _data_type in data_types:
            new_sensor = RfxtrxSensor(None, entity_info[ATTR_NAME],
                                      _data_type, entity_info[ATTR_FIREEVENT])
            sensors.append(new_sensor)
            sub_sensors[_data_type] = new_sensor
        rfxtrx.RFX_DEVICES[device_id] = sub_sensors
    add_devices_callback(sensors)

    def sensor_update(event):
        """Callback for sensor updates from the RFXtrx gateway."""
        if not isinstance(event, SensorEvent):
            return

        device_id = "sensor_" + slugify(event.device.id_string.lower())

        if device_id in rfxtrx.RFX_DEVICES:
            # Known device: update every sub-sensor with the new event.
            sensors = rfxtrx.RFX_DEVICES[device_id]
            for key in sensors:
                sensor = sensors[key]
                sensor.event = event
                # Fire event
                if sensors[key].should_fire_event:
                    sensor.hass.bus.fire(
                        "signal_received", {
                            ATTR_ENTITY_ID:
                                sensors[key].entity_id,
                        }
                    )
            return

        # Add entity if not exist and the automatic_add is True
        if not config[ATTR_AUTOMATIC_ADD]:
            return

        # Name the auto-added sensor after the raw packet bytes (hex).
        pkt_id = "".join("{0:02x}".format(x) for x in event.data)
        _LOGGER.info("Automatic add rfxtrx.sensor: %s",
                     device_id)

        # Pick the first known data type found in the event's decoded values.
        data_type = "Unknown"
        for _data_type in DATA_TYPES:
            if _data_type in event.values:
                data_type = _data_type
                break
        new_sensor = RfxtrxSensor(event, pkt_id, data_type)
        sub_sensors = {}
        sub_sensors[new_sensor.data_type] = new_sensor
        rfxtrx.RFX_DEVICES[device_id] = sub_sensors
        add_devices_callback([new_sensor])

    # Register the callback once; guard against duplicate platform setup.
    if sensor_update not in rfxtrx.RECEIVED_EVT_SUBSCRIBERS:
        rfxtrx.RECEIVED_EVT_SUBSCRIBERS.append(sensor_update)
class RfxtrxSensor(Entity):
    """Representation of a single RFXtrx sensor value.

    One instance exposes one data type (e.g. temperature) of a physical
    RFXtrx device; the latest SensorEvent is pushed into ``self.event``
    by the platform's gateway callback.
    """

    def __init__(self, event, name, data_type, should_fire_event=False):
        """Initialize the sensor with an optional initial event."""
        self.event = event
        self._name = name
        self.should_fire_event = should_fire_event
        # Fall back to "Unknown" for unrecognized data types so the
        # unit lookup below always succeeds.
        self.data_type = data_type if data_type in DATA_TYPES else "Unknown"
        self._unit_of_measurement = DATA_TYPES[self.data_type]

    def __str__(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the current value for this sensor's data type."""
        return self.event.values[self.data_type] if self.event else None

    @property
    def name(self):
        """Get the name of the sensor."""
        return self._name

    @property
    def device_state_attributes(self):
        """Return all decoded values of the latest event as attributes."""
        return self.event.values if self.event else None

    @property
    def unit_of_measurement(self):
        """Return the unit this state is expressed in."""
        return self._unit_of_measurement
| devdelay/home-assistant | homeassistant/components/sensor/rfxtrx.py | Python | mit | 4,779 |
'''Compares a simulation of a ball sliding down a plane to the analytical solution.'''
import os
import sys
import argparse
import math
import h5py
parser = argparse.ArgumentParser(description='Compares a simulation of a ball sliding down a plane to the analytical solution.')
parser.add_argument('-i', metavar='input_hdf5_file', type=str, nargs=1, help='input HDF5 file name', required=True)
parser.add_argument('-t', metavar='tolerances', type=float, nargs=4, help='tolerances for x, y, vx, and vy', required=True)
args = parser.parse_args()
if not all([t >= 0.0 for t in args.t]):
sys.exit('Error, all tolerances must be non-negative.')
input_file_name = args.i[0]
if not os.path.isfile(input_file_name):
sys.exit('Error, input file \'' + input_file_name + '\' does not exist.')
print 'Validating:', input_file_name
try:
with h5py.File(input_file_name, 'r') as h5_file:
q = h5_file['q'][:]
v = h5_file['v'][:]
pln_nrmls = h5_file['static_plane_normals'][:]
iteration = h5_file['iteration'][0, 0]
timestep = h5_file['timestep'][0, 0]
except IOError as io_exception:
sys.exit('HDF5 IO Error: ' + io_exception.message)
except KeyError as key_exception:
sys.exit('HDF5 Key Error: ' + key_exception.message)
time = iteration * timestep
# Note: Gravity and friction are hardcoded, for now
gravity_magnitude = 10.0
mu = 0.5
# Compute the angle of the plane
theta = abs(math.atan2(pln_nrmls[0, 0], pln_nrmls[1, 0]))
# Compute the analytical position of the ball
dplane = 0.5 * time * time * (-gravity_magnitude * math.sin(theta) + gravity_magnitude * mu * math.cos(theta))
dx = dplane * math.cos(theta)
if pln_nrmls[0, 0] > 0:
dx *= -1.0
dy = dplane * math.sin(theta)
# Compute the analytical velocity of the ball
vplane = time * (-gravity_magnitude * math.sin(theta) + gravity_magnitude * mu * math.cos(theta))
vx = vplane * math.cos(theta)
if pln_nrmls[0, 0] > 0:
vx *= -1.0
vy = vplane * math.sin(theta)
succeeded = True
if abs(q[0] - dx) > args.t[0]:
print 'q[0] residual', abs(q[0] - dx)
print 'First q component incorect'
succeeded = False
if abs(q[1] - dy) > args.t[1]:
print 'q[1] residual', abs(q[1] - dy)
print 'Second q component incorect'
succeeded = False
if abs(vx - v[0]) > args.t[2]:
print 'v[0] residual', abs(vx - v[0])
print 'First v component incorrect'
succeeded = False
if abs(vy - v[1]) > args.t[3]:
print 'v[1] residual', abs(vy - v[1])
print 'Second v component incorrect'
succeeded = False
if not succeeded:
sys.exit(1)
sys.exit(0)
| breannansmith/scisim | assets/ball2d/shell_scripts/sliding_particle_test.py | Python | apache-2.0 | 2,585 |
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2010 Doug Hellmann. All rights reserved.
#
"""Example setting the locale using environment variable(s).
"""
#end_pymotw_header
import locale
import os
import pprint
import codecs
import sys

# Wrap stdout so unicode currency symbols can be printed safely (Python 2).
sys.stdout = codecs.getwriter('UTF-8')(sys.stdout)

# Default settings based on the user's environment.
locale.setlocale(locale.LC_ALL, '')

# Show which environment variables drive locale selection.
print 'Environment settings:'
for env_name in [ 'LC_ALL', 'LC_CTYPE', 'LANG', 'LANGUAGE' ]:
    print '\t%s = %s' % (env_name, os.environ.get(env_name, ''))

# What is the locale?
print
print 'Locale from environment:', locale.getlocale()

template = """
Numeric formatting:
Decimal point : "%(decimal_point)s"
Grouping positions : %(grouping)s
Thousands separator: "%(thousands_sep)s"
Monetary formatting:
International currency symbol : "%(int_curr_symbol)r"
Local currency symbol : %(currency_symbol)r (%(currency_symbol_u)s)
Symbol precedes positive value : %(p_cs_precedes)s
Symbol precedes negative value : %(n_cs_precedes)s
Decimal point : "%(mon_decimal_point)s"
Digits in fractional values : %(frac_digits)s
Digits in fractional values, international: %(int_frac_digits)s
Grouping positions : %(mon_grouping)s
Thousands separator : "%(mon_thousands_sep)s"
Positive sign : "%(positive_sign)s"
Positive sign position : %(p_sign_posn)s
Negative sign : "%(negative_sign)s"
Negative sign position : %(n_sign_posn)s
"""

# Human-readable names for localeconv()'s numeric sign-position codes.
sign_positions = {
    0 : 'Surrounded by parentheses',
    1 : 'Before value and symbol',
    2 : 'After value and symbol',
    3 : 'Before value',
    4 : 'After value',
    locale.CHAR_MAX : 'Unspecified',
    }

info = {}
info.update(locale.localeconv())
# Replace the raw integer codes with their descriptions before formatting.
info['p_sign_posn'] = sign_positions[info['p_sign_posn']]
info['n_sign_posn'] = sign_positions[info['n_sign_posn']]
# convert the currency symbol to unicode
info['currency_symbol_u'] = info['currency_symbol'].decode('utf-8')

print (template % info)
| qilicun/python | python2/PyMOTW-1.132/PyMOTW/locale/locale_env_example.py | Python | gpl-3.0 | 2,201 |
"""
Created on Fri Apr 7 21:49:16 2017
@author: cpkmanchee
"""
import numpy as np
import os
import csv
import pickle
import warnings
from beamtools.file_formats import file_formats
from beamtools.common import DataObj
__all__ = ['import_data_file', 'list_atr','list_filetypes']
def list_filetypes():
    '''Display all filetypes in dictionary.'''
    # Idiom fix: the original used a list comprehension purely for its
    # print side effect, building a throwaway list of Nones. A plain loop
    # expresses the intent directly.
    for name, definition in file_formats['filetype'].items():
        print(name, definition)
    return
def list_atr(given_filetype):
    '''List the attributes of resultant object from data import.'''
    # Resolve the alias to its canonical file type key, then show the
    # column labels that the imported data object will expose.
    canonical = filetype_lookup(file_formats, given_filetype.lower())
    labels = file_formats.get(canonical).get('column_labels')
    print(labels)
    return
def filetype_lookup(file_dict, given_type):
    '''Return the canonical file type key for *given_type*.

    Each value in *file_dict* is a dict whose 'alias' entry lists the
    accepted names for that file type. Only the first found match is
    returned.

    Raises RuntimeError when no file type matches.
    '''
    for name, props in file_dict.items():
        # Default to an empty tuple so a missing/None 'alias' entry cannot
        # raise TypeError ("x in None").
        if given_type in props.get('alias', ()):
            return name
    # The original had an unreachable `return(None)` after this raise; it
    # has been removed.
    raise RuntimeError('File type lookup failed. File type "%s" not found' % (given_type))
def import_data_file(file, given_filetype):
    '''Imports data of given filetype.

    Reads the delimited file according to the format definition found in
    file_formats, converts columns to float arrays where possible, drops
    rows containing NaNs in any numeric column, and returns the data as a
    DataObj with one attribute per column label (plus 'header' and
    'filetype').
    '''
    filetype = filetype_lookup(file_formats, given_filetype.lower())
    header_lines = file_formats.get(filetype).get('header_lines')
    delimiter = file_formats.get(filetype).get('delimiter')
    column_labels = file_formats.get(filetype).get('column_labels')

    # Initialize header and output dictionary (one list per column).
    header = []
    output = {c: [] for c in column_labels}

    with open(file, 'r') as f:
        data = csv.reader(f, delimiter=delimiter)
        # Extract header information only.
        for _ in range(header_lines):
            header.append(next(data))
        # Write rest of data to dictionary: keys are column labels.
        for row in data:
            for c_ind, c in enumerate(column_labels):
                output[c].append(row[c_ind].strip())

    # Convert data to float where possible, collecting per-column validity
    # masks for the NaN correction below.
    v = []
    for c in output.keys():
        try:
            # BUG FIX: np.float was a deprecated alias of the builtin float
            # and was removed in NumPy 1.24; use float directly.
            output[c] = np.asarray(output[c], dtype=float)
            v.append(~np.isnan(output[c]))
        except ValueError:
            warnings.warn('Unable to convert to float')
            try:
                output[c] = np.asarray(output[c])
            except ValueError:
                # BUG FIX: this branch called the undefined name
                # 'warning.warn', raising NameError instead of warning.
                warnings.warn('Unable to cast as array')

    # NaN correction: keep only rows valid in every numeric column.
    try:
        v = np.asarray(v)
        if not (v.size == 0):
            valid = np.prod(v, 0).astype(bool)
            for c in output.keys():
                output[c] = output[c][valid]
    except Exception:
        warnings.warn('Nan processing failure')

    output.update({'header': header})
    output.update({'filetype': filetype})

    output_obj = DataObj(output)

    return output_obj
"""
WSGI config for sporteasy_breaking_mvc project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os

# Must be set BEFORE importing get_wsgi_application below, so Django can
# locate the settings module when the application object is built.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sporteasy_breaking_mvc.settings")

# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()

# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| sporteasy/sporteasy_breaking_mvc | sporteasy_breaking_mvc/wsgi.py | Python | mit | 1,166 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Replace the MembersAdmin model with a plain Members model.

    Auto-generated Django migration; operation order is significant.
    """

    dependencies = [
        ('members', '0002_auto_20150908_2259'),
    ]

    operations = [
        migrations.CreateModel(
            name='Members',
            fields=[
                # 'id' is a free-form 10-character primary key, not an
                # auto-incrementing AutoField.
                ('id', models.CharField(max_length=10, serialize=False, primary_key=True)),
                ('last_name', models.CharField(max_length=50)),
                ('first_name', models.CharField(max_length=50)),
                ('email', models.EmailField(max_length=254)),
            ],
        ),
        migrations.DeleteModel(
            name='MembersAdmin',
        ),
    ]
| RocketSoftware/PythonBetaProject | Django/src/members/migrations/0003_auto_20150908_2304.py | Python | mit | 715 |
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
)
class InstagramIE(InfoExtractor):
    """Extractor for single Instagram video posts."""
    # Generalized (backward-compatibly): accept http or https and an optional
    # 'www.' subdomain -- the old pattern only matched 'http://instagram.com/'
    # and required a trailing slash after the post id.
    _VALID_URL = r'https?://(?:www\.)?instagram\.com/p/(?P<id>[^/?#&]+)'
    _TEST = {
        'url': 'http://instagram.com/p/aye83DjauH/?foo=bar#abc',
        'md5': '0d2da106a9d2631273e192b372806516',
        'info_dict': {
            'id': 'aye83DjauH',
            'ext': 'mp4',
            'uploader_id': 'naomipq',
            'title': 'Video by naomipq',
            'description': 'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
        }
    }

    def _real_extract(self, url):
        """Download the post page and scrape the video URL and metadata."""
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        webpage = self._download_webpage(url, video_id)
        # Both regexes scrape embedded JSON; non-fatal since the fields are
        # optional metadata.
        uploader_id = self._search_regex(r'"owner":{"username":"(.+?)"',
                                         webpage, 'uploader id', fatal=False)
        desc = self._search_regex(r'"caption":"(.*?)"', webpage, 'description',
                                  fatal=False)

        return {
            'id': video_id,
            'url': self._og_search_video_url(webpage, secure=False),
            'ext': 'mp4',
            'title': 'Video by %s' % uploader_id,
            'thumbnail': self._og_search_thumbnail(webpage),
            'uploader_id': uploader_id,
            'description': desc,
        }
class InstagramUserIE(InfoExtractor):
    # Generalized (backward-compatibly): accept http or https and an optional
    # 'www.' subdomain, matching InstagramIE above.
    _VALID_URL = r'https?://(?:www\.)?instagram\.com/(?P<username>[^/]{2,})/?(?:$|[?#])'
    IE_DESC = 'Instagram user profile'
    IE_NAME = 'instagram:user'

    def _real_extract(self, url):
        """Page through the user's /media feed and collect all video entries."""
        mobj = re.match(self._VALID_URL, url)
        uploader_id = mobj.group('username')

        entries = []
        page_count = 0
        media_url = 'http://instagram.com/%s/media' % uploader_id
        while True:
            page = self._download_json(
                media_url, uploader_id,
                note='Downloading page %d ' % (page_count + 1),
            )
            page_count += 1
            for it in page['items']:
                if it.get('type') != 'video':
                    continue
                like_count = int_or_none(it.get('likes', {}).get('count'))
                user = it.get('user', {})

                formats = [{
                    'format_id': k,
                    'height': v.get('height'),
                    'width': v.get('width'),
                    'url': v['url'],
                } for k, v in it['videos'].items()]
                self._sort_formats(formats)

                thumbnails_el = it.get('images', {})
                thumbnail = thumbnails_el.get('thumbnail', {}).get('url')

                # BUG FIX: 'caption' can be present with value None, in which
                # case .get('caption', {}) returned None and the chained
                # .get() raised AttributeError.
                title = (it.get('caption') or {}).get('text', it['id'])

                entries.append({
                    'id': it['id'],
                    'title': title,
                    'formats': formats,
                    'thumbnail': thumbnail,
                    'webpage_url': it.get('link'),
                    'uploader': user.get('full_name'),
                    'uploader_id': user.get('username'),
                    'like_count': like_count,
                    'timestamp': int_or_none(it.get('created_time')),
                })

            if not page['items']:
                break
            # Continue from the last item seen on this page.
            max_id = page['items'][-1]['id']
            media_url = (
                'http://instagram.com/%s/media?max_id=%s' % (
                    uploader_id, max_id))

        return {
            '_type': 'playlist',
            'entries': entries,
            'id': uploader_id,
            'title': uploader_id,
        }
| MiLk/youtube-dl | youtube_dl/extractor/instagram.py | Python | unlicense | 3,554 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.util.contextutil import temporary_dir
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class PythonRunIntegrationTest(PantsRunIntegrationTest):
  """Checks that `pants run` executes binaries under the requested interpreter."""

  testproject = 'testprojects/src/python/interpreter_selection'

  def test_run_26(self):
    self._maybe_run_version('2.6')

  def test_run_27(self):
    self._maybe_run_version('2.7')

  def test_run_27_and_then_26(self):
    """Run under 2.7 then 2.6 while sharing one interpreter cache dir."""
    with temporary_dir() as interpreters_cache:
      pants_ini_config = {'python-setup': {'interpreter_cache_dir': interpreters_cache}}
      pants_run_27 = self.run_pants(
        command=['run2', '{}:echo_interpreter_version_2.7'.format(self.testproject)],
        config=pants_ini_config
      )
      self.assert_success(pants_run_27)
      pants_run_26 = self.run_pants(
        command=['run2', '{}:echo_interpreter_version_2.6'.format(self.testproject),
                 '--pyprep-interpreter-constraints=CPython>=2.6,<3',
                 '--pyprep-interpreter-constraints=CPython>=3.3'],
        config=pants_ini_config
      )
      self.assert_success(pants_run_26)

  def test_die(self):
    """The binary's non-zero exit code must propagate through `pants run`."""
    command = ['run2',
               '{}:die'.format(self.testproject),
               '--pyprep-interpreter-constraints=CPython>=2.6,<3',
               '--pyprep-interpreter-constraints=CPython>=3.3',
               '--quiet']
    pants_run = self.run_pants(command=command)
    assert pants_run.returncode == 57

  def _maybe_run_version(self, version):
    """Run the echo binary under *version* if that python exists, else skip."""
    if self.has_python_version(version):
      print('Found python {}. Testing running on it.'.format(version))
      echo = self._run_echo_version(version)
      v = echo.split('.')  # E.g., 2.6.8.
      self.assertTrue(len(v) > 2, 'Not a valid version string: {}'.format(v))
      # assertEquals is a deprecated alias of assertEqual; use the canonical name.
      self.assertEqual(version, '{}.{}'.format(v[0], v[1]))
    else:
      print('No python {} found. Skipping.'.format(version))
      self.skipTest('No python {} on system'.format(version))

  def _run_echo_version(self, version):
    """Run the echo binary and return the last line of its stdout."""
    binary_name = 'echo_interpreter_version_{}'.format(version)
    binary_target = '{}:{}'.format(self.testproject, binary_name)
    # Run the target, avoiding some known-to-choke-on interpreters.
    command = ['run2',
               binary_target,
               '--pyprep-interpreter-constraints=CPython>=2.6,<3',
               '--pyprep-interpreter-constraints=CPython>=3.3',
               '--quiet']
    pants_run = self.run_pants(command=command)
    return pants_run.stdout_data.rstrip().split('\n')[-1]
| mateor/pants | tests/python/pants_test/backend/python/tasks2/test_python_run_integration.py | Python | apache-2.0 | 2,791 |
__author__ = 'phili'
| SNET-Entrance/Entrance-UM | src/viewer/__init__.py | Python | apache-2.0 | 21 |
import cv2

# Capture ~10 seconds of webcam video to an AVI file using the legacy
# OpenCV 2.x (cv2.cv) constant names that this script targets.
cameraCapture = cv2.VideoCapture(0)
fps = 30  # an assumption; CAP_PROP_FPS is often unreliable for live cameras
size = (int(cameraCapture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)),
        int(cameraCapture.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)))
videoWriter = cv2.VideoWriter(
    'MyOutputVid.avi', cv2.cv.CV_FOURCC('I', '4', '2', '0'), fps, size)

success, frame = cameraCapture.read()
numFramesRemaining = 10 * fps - 1  # 10 seconds' worth of frames
while success and numFramesRemaining > 0:
    videoWriter.write(frame)
    success, frame = cameraCapture.read()
    numFramesRemaining -= 1

# Release the capture device so other processes can use the camera; the
# original script leaked the handle.
cameraCapture.release()
from pulp.client.commands import options
from pulp.client.commands.repo import cudl, importer_config
from pulp.client.commands.repo.importer_config import ImporterConfigMixin
from pulp.client.extensions.core import TAG_SUCCESS
from pulp.common.compat import json
from pulp.common.plugins import importer_constants as constants
from pulp_win.common import ids
from pulp_win.extensions.admin import repo_options
from pulp_win.extensions.admin import repo_create_update
from ...testbase import PulpClientTests
class RepoCreateCommandTests(PulpClientTests):
    """Tests for pulp_win's repository `create` CLI command."""

    def setUp(self):
        super(RepoCreateCommandTests, self).setUp()
        self.options_bundle = importer_config.OptionsBundle()

    def test_create_structure(self):
        """The command mixes in importer config and exposes the expected metadata."""
        command = repo_create_update.PkgRepoCreateCommand(self.context)
        self.assertTrue(isinstance(command, ImporterConfigMixin))

        # Ensure the required option groups
        found_group_names = set([o.name for o in command.option_groups])
        self.assertTrue(repo_options.NAME_PUBLISHING in found_group_names)

        # Ensure the correct method is wired up
        self.assertEqual(command.method, command.run)

        # Ensure the correct metadata
        self.assertEqual(command.name, 'create')
        self.assertEqual(command.description, cudl.DESC_CREATE)

    def test_run(self):
        """A full create call produces the expected importer/distributor request body."""
        # Setup
        data = {
            options.OPTION_REPO_ID.keyword: 'test-repo',
            options.OPTION_NAME.keyword: 'Test Name',
            options.OPTION_DESCRIPTION.keyword: 'Test Description',
            options.OPTION_NOTES.keyword: {'a': 'a'},
            self.options_bundle.opt_feed.keyword: 'http://localhost',
            self.options_bundle.opt_validate.keyword: True,
            self.options_bundle.opt_remove_missing.keyword: True,
            repo_options.OPT_SKIP.keyword: [ids.TYPE_ID_MSM],
            repo_options.OPT_RELATIVE_URL.keyword: '/repo',
            repo_options.OPT_SERVE_HTTP.keyword: True,
            repo_options.OPT_SERVE_HTTPS.keyword: True,
        }
        self.server_mock.request.return_value = 201, {}

        # Test
        command = repo_create_update.PkgRepoCreateCommand(self.context)
        command.run(**data)

        # Verify
        self.assertEqual(1, self.server_mock.request.call_count)
        body = self.server_mock.request.call_args[0][2]
        body = json.loads(body)
        self.assertEqual(body['display_name'], 'Test Name')
        self.assertEqual(body['description'], 'Test Description')
        self.assertEqual(body['notes'], {'_repo-type': 'win-repo', 'a': 'a'})

        self.assertEqual(ids.TYPE_ID_IMPORTER_WIN, body['importer_type_id'])
        # Local name intentionally shadows the module import inside this test.
        importer_config = body['importer_config']
        self.assertEqual(importer_config[constants.KEY_FEED], 'http://localhost')
        self.assertEqual(importer_config[repo_create_update.CONFIG_KEY_SKIP], [ids.TYPE_ID_MSM])
        self.assertEqual(importer_config[constants.KEY_UNITS_REMOVE_MISSING], True)

        # The API will be changing to be a dict for each distributor, not a
        # list. This code will have to change to look up the parts by key
        # instead of index.
        yum_distributor = body['distributors'][0]
        self.assertEqual(ids.TYPE_ID_DISTRIBUTOR_WIN, yum_distributor['distributor_type_id'])
        self.assertEqual(True, yum_distributor['auto_publish'])
        self.assertEqual(ids.TYPE_ID_DISTRIBUTOR_WIN, yum_distributor['distributor_id'])

        yum_config = yum_distributor['distributor_config']
        self.assertEqual(yum_config['relative_url'], '/repo')
        self.assertEqual(yum_config['http'], True)
        self.assertEqual(yum_config['https'], True)
        self.assertEqual(yum_config['skip'], [ids.TYPE_ID_MSM])

        self.assertEqual([TAG_SUCCESS], self.prompt.get_write_tags())

    def test_run_through_cli(self):
        """Running via the CLI parser coerces option strings (e.g. 'true' -> True)."""
        # Setup
        self.server_mock.request.return_value = 201, {}

        # Test
        command = repo_create_update.PkgRepoCreateCommand(self.context)
        self.cli.add_command(command)
        cmd = ["create", "--repo-id", "r", "--validate", "true"]
        self.cli.run(cmd)

        # Verify
        self.assertEqual(1, self.server_mock.request.call_count)
        body = self.server_mock.request.call_args[0][2]
        body = json.loads(body)
        self.assertEqual(body['id'], 'r')
        self.assertEqual(body['importer_config'][constants.KEY_VALIDATE],
                         True)  # not the string "true"
        # (An unused `dconfig` local assignment was removed here.)

    def test_process_relative_url_with_feed(self):
        """With a feed URL, relative_url is derived from the feed's path."""
        # Setup
        repo_id = 'feed-repo'
        importer_config = {constants.KEY_FEED: 'http://localhost/foo/bar/baz'}
        distributor_config = {}  # will be populated in this call
        command = repo_create_update.PkgRepoCreateCommand(self.context)

        # Test
        command.process_relative_url(repo_id, importer_config, distributor_config)

        # Verify
        self.assertTrue('relative_url' in distributor_config)
        self.assertEqual(distributor_config['relative_url'], '/foo/bar/baz')

    def test_process_relative_url_no_feed(self):
        """Without a feed, relative_url falls back to the repo id."""
        # Setup
        repo_id = 'no-feed-repo'
        importer_config = {}
        distributor_config = {}  # will be populated in this call
        command = repo_create_update.PkgRepoCreateCommand(self.context)

        # Test
        command.process_relative_url(repo_id, importer_config, distributor_config)

        # Verify
        self.assertTrue('relative_url' in distributor_config)
        self.assertEqual(distributor_config['relative_url'], repo_id)

    def test_process_relative_url_specified(self):
        """An explicitly provided relative_url is left untouched."""
        # Setup
        repo_id = 'specified'
        importer_config = {}
        distributor_config = {'relative_url': 'wombat'}
        command = repo_create_update.PkgRepoCreateCommand(self.context)

        # Test
        command.process_relative_url(repo_id, importer_config, distributor_config)

        # Verify
        self.assertTrue('relative_url' in distributor_config)
        self.assertEqual(distributor_config['relative_url'], 'wombat')

    def test_process_yum_distributor_serve_protocol_defaults(self):
        """Defaults: serve over https only."""
        # Setup
        distributor_config = {}  # will be populated in this call
        command = repo_create_update.PkgRepoCreateCommand(self.context)

        # Test
        command.process_distributor_serve_protocol(distributor_config)

        # Verify
        self.assertEqual(distributor_config['http'], False)
        self.assertEqual(distributor_config['https'], True)

    def test_process_distributor_serve_protocol_new_values(self):
        """Explicit http/https flags are preserved."""
        # Setup
        distributor_config = {'http': True, 'https': False}
        command = repo_create_update.PkgRepoCreateCommand(self.context)

        # Test
        command.process_distributor_serve_protocol(distributor_config)

        # Verify
        self.assertEqual(distributor_config['http'], True)
        self.assertEqual(distributor_config['https'], False)
class RepoUpdateCommandTests(PulpClientTests):
    """Tests for pulp_win's repository `update` CLI command."""

    def setUp(self):
        super(RepoUpdateCommandTests, self).setUp()
        self.options_bundle = importer_config.OptionsBundle()

    def test_create_structure(self):
        # The update command shares the importer-config mixin and option
        # groups with the create command; only name/description differ.
        command = repo_create_update.PkgRepoUpdateCommand(self.context)
        self.assertTrue(isinstance(command, ImporterConfigMixin))

        # Ensure the required option groups
        found_group_names = set([o.name for o in command.option_groups])
        self.assertTrue(repo_options.NAME_PUBLISHING in found_group_names)

        # Ensure the correct method is wired up
        self.assertEqual(command.method, command.run)

        # Ensure the correct metadata
        self.assertEqual(command.name, 'update')
        self.assertEqual(command.description, cudl.DESC_UPDATE)

    def test_run_202(self):
        """A 202 (async task) response must not blow up the client (BZ 1096931)."""
        # Setup
        data = {
            options.OPTION_REPO_ID.keyword: 'test-repo',
            options.OPTION_NAME.keyword: 'Test Name',
            options.OPTION_DESCRIPTION.keyword: 'Test Description',
            options.OPTION_NOTES.keyword: {'b': 'b'},
            self.options_bundle.opt_feed.keyword: 'http://localhost',
            repo_options.OPT_SERVE_HTTP.keyword: True,
            repo_options.OPT_SERVE_HTTPS.keyword: True,
            repo_options.OPT_SKIP.keyword: [ids.TYPE_ID_MSM],
        }
        self.server_mock.request.return_value = 202, {}

        # Test
        command = repo_create_update.PkgRepoUpdateCommand(self.context)
        command.run(**data)

        # Verify that things at least didn't blow up, which they were for BZ 1096931
        self.assertEqual(1, self.server_mock.request.call_count)

    def test_run(self):
        """A successful update sends a delta plus importer/distributor configs."""
        # Setup
        data = {
            options.OPTION_REPO_ID.keyword: 'test-repo',
            options.OPTION_NAME.keyword: 'Test Name',
            options.OPTION_DESCRIPTION.keyword: 'Test Description',
            options.OPTION_NOTES.keyword: {'b': 'b'},
            self.options_bundle.opt_feed.keyword: 'http://localhost',
            repo_options.OPT_SERVE_HTTP.keyword: True,
            repo_options.OPT_SERVE_HTTPS.keyword: True,
            repo_options.OPT_SKIP.keyword: [ids.TYPE_ID_MSM],
        }
        self.server_mock.request.return_value = 200, {}

        # Test
        command = repo_create_update.PkgRepoUpdateCommand(self.context)
        command.run(**data)

        # Verify
        self.assertEqual(1, self.server_mock.request.call_count)
        body = self.server_mock.request.call_args[0][2]
        body = json.loads(body)

        delta = body['delta']
        self.assertEqual(delta['display_name'], 'Test Name')
        self.assertEqual(delta['description'], 'Test Description')
        self.assertEqual(delta['notes'], {'b': 'b'})

        yum_imp_config = body['importer_config']
        self.assertEqual(yum_imp_config[constants.KEY_FEED], 'http://localhost')
        self.assertEqual(yum_imp_config[repo_create_update.CONFIG_KEY_SKIP], [ids.TYPE_ID_MSM])

        yum_dist_config = body['distributor_configs'][ids.TYPE_ID_DISTRIBUTOR_WIN]
        self.assertEqual(yum_dist_config['http'], True)
        self.assertEqual(yum_dist_config['https'], True)
        self.assertEqual(yum_dist_config['skip'], [ids.TYPE_ID_MSM])

    def test_run_through_cli(self):
        """
        See the note in test_run_through_cli under the create tests for
        more info.
        """
        # Setup
        self.server_mock.request.return_value = 201, {}

        # Test
        command = repo_create_update.PkgRepoUpdateCommand(self.context)
        self.cli.add_command(command)
        cmd = ("update --repo-id r --validate true")
        self.cli.run(cmd.split())

        # Verify
        self.assertEqual(1, self.server_mock.request.call_count)
        body = self.server_mock.request.call_args[0][2]
        body = json.loads(body)
        self.assertEqual(body['importer_config'][constants.KEY_VALIDATE],
                         True)  # not the string "true"

    def test_remove_skip_types(self):
        """Passing an empty --skip clears the skip list on importer and distributor."""
        # Setup
        self.server_mock.request.return_value = 201, {}

        # Test
        command = repo_create_update.PkgRepoUpdateCommand(self.context)
        self.cli.add_command(command)
        cmd = ("update --repo-id r --skip")
        self.cli.run(cmd.split() + [''])

        # Verify
        self.assertEqual(1, self.server_mock.request.call_count)
        body = self.server_mock.request.call_args[0][2]
        body = json.loads(body)
        self.assertEqual(body['importer_config']['type_skip_list'], None)
        self.assertEqual(body['distributor_configs'][ids.TYPE_ID_DISTRIBUTOR_WIN]['skip'], None)
| lsjostro/pulp_win | extensions_admin/test/unit/extensions/admin/test_repo_create_update.py | Python | gpl-2.0 | 11,724 |
# test plugin
from __future__ import absolute_import
from bot.pluginDespatch import Plugin
from django.conf import settings
from django.contrib.auth.models import User
from bot.logos_decorators import login_required
from .models import BotNetGroups
import logging
logger = logging.getLogger(__name__)
# NOTE(review): this accesses `logging.config` even though only `logging` is
# imported above; it works only because another import pulls in
# logging.config as a side effect — confirm.
logging.config.dictConfig(settings.LOGGING)
class BotNetPlugin(Plugin):
    """Relay plugin: forwards messages between IRC rooms that belong to the
    same BotNet group, possibly across different networks."""

    # (command prefix, human-readable plugin name) expected by the Plugin base.
    plugin = ("botnet", "BotNet Plugin")

    def __init__(self, *args, **kwargs):
        Plugin.__init__(self, *args, **kwargs)
        self.relay_groups_init()
        self.commands = None
        # self.commands = (\
        #     (r'activate', self.activate, "Activate BotNet"),
        # )

    def relay_groups_init(self):
        """Build self.relay_groups from the BotNetGroups database rows.

        Each entry is {'channels': [(network, room), ...],
                       'factories': [factory, ...]} where the factories are
        the bot's connection factories serving those networks.
        """
        self.relay_groups = []
        for group in BotNetGroups.objects.all():
            relay_group = {'channels': [], 'factories': []}
            for room in group.botnetrooms_set.all():
                network = room.network
                room = room.room
                relay_group['channels'].append((network, room))
                for f in self.factory.factories:
                    if f.network == network:
                        if f not in relay_group['factories']:
                            relay_group['factories'].append(f)
            self.relay_groups.append(relay_group)

    def nicklist(self, channel):
        """Yield (network, room, nick) for users in the other rooms of the
        first group that contains (self.network, channel)."""
        # Just choose first matching group at this point
        for group in self.relay_groups:
            if (self.network, channel.lower()) in group['channels']:
                userlist = []  # NOTE(review): unused local, left as-is
                for f in group['factories']:
                    for network, chan in group['channels']:
                        # Skip the network and channel the nick is already on
                        # (They already have the irc nick list for their own
                        # network)
                        if self.network == network and chan == channel.lower():
                            continue
                        if network == f.network:
                            # NOTE(review): this queries nicks for the *local*
                            # room name `channel`, not the remote room `chan`;
                            # confirm whether `chan` was intended here.
                            nicks = f.conn.nicks_db.get_room_nicks(channel)
                            for nick1 in nicks:
                                yield (f.network, chan, nick1)
                break  # Just do for first matching group

    def privmsg(self, user, channel, message):
        """Relay a channel message to every other room in the same group."""
        nick, _ = user.split('!')
        # NOTE(review): `username` is looked up but never used — confirm intent.
        username = self.get_auth().get_username(nick)
        for group in self.relay_groups:
            if (self.network, channel.lower()) in group['channels']:
                for f in group['factories']:
                    for net, ch in group['channels']:
                        # Don't echo back to the originating network/room.
                        if f.network == net and (net != self.network or ch != channel.lower()):
                            msg = "{}/{}/{} ** {}".format(self.network, channel, nick, message)
                            f.conn.say(str(ch), msg)
        pass  # NOTE(review): dead statement, left as-is

    def userJoined(self, nick, channel):
        """ Notify nick of who is in room when they join room """
        for network, chan, nnick in self.nicklist(channel):
            print (network, chan, nnick)  # NOTE(review): debug print left in
            msg = "{}/{}/{}".format(network, chan, nnick)
            self.notice(nick, msg)

    def userLeft(self, nick, channel):
        # No-op: nothing is relayed when a user leaves.
        pass
| kiwiheretic/logos-v2 | botnet/bot_plugin.py | Python | apache-2.0 | 3,218 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
    cx_setup
    ~~~~~~~~

    Converts example scripts to Windows executables.
"""
import glob
import os
import sys

from cx_Freeze import setup, Executable

# Default to the `install_exe` command when invoked with no arguments.
if not len(sys.argv[1:]):
    sys.argv.append('install_exe')

install_exe_options = {'install_dir': './Autocad tools'}
build_exe_options = {'excludes': ['bz2', '_hashlib', 'unittest', 'tests']}

# Compare by basename so the exclusion works whatever path separator
# glob.glob() returns on this platform (the original compared raw
# backslash-separated strings, which only matched on Windows).
exclude_names = {'__init__.py', 'dev_get_table_info.py'}
scripts_to_build = [name for name in glob.glob('examples/*.py')
                    if os.path.basename(name) not in exclude_names]

setup(name="Autocad tools",
      version="0.1",
      description="Generate cable list, get drawing names etc.",
      options=dict(install_exe=install_exe_options,
                   build_exe=build_exe_options),
      executables=[Executable(script=script) for script in scripts_to_build])
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import sys
import inspect
from warnings import warn
import logging
def _hush_llvm():
# Necessary for current stable release 0.11.
# Not necessary (and unimplemented) in numba >= 0.12 (February 2014)
# See http://stackoverflow.com/a/20663852/1221924
try:
import numba.codegen.debug
llvmlogger = logging.getLogger('numba.codegen.debug')
llvmlogger.setLevel(logging.INFO)
except ImportError:
pass
# Whether @try_numba_autojit compiles functions as soon as they are decorated.
ENABLE_NUMBA_ON_IMPORT = True
_registered_functions = list()  # functions that can be numba-compiled

# Probe for numba once at import time: NUMBA_AVAILABLE records the outcome and
# `message` (set only on failure paths) explains why numba is disabled.
NUMBA_AVAILABLE = False
try:
    import numba
except ImportError:
    message = ("To use numba-accelerated variants of core "
               "functions, you must install numba.")
else:
    v = numba.__version__
    major, minor, micro = v.split('.')
    if major == '0' and minor == '12' and micro == '0':
        # Importing numba code will take forever. Disable numba.
        message = ("Trackpy does not support numba 0.12.0. "
                   "Version {0} is currently installed. Trackpy will run "
                   "with numba disabled. Please downgrade numba to version "
                   "0.11, or update to latest version.".format(v))
        warn(message)
    else:
        NUMBA_AVAILABLE = True
        _hush_llvm()
class RegisteredFunction(object):
    "Enable toggling between original function and numba-compiled one."

    def __init__(self, func, fallback=None, autojit_kw=None):
        # func: the function to (lazily) numba-compile.
        # fallback: pure-Python implementation used when numba is off;
        #           defaults to func itself.
        # autojit_kw: keyword args forwarded to numba.autojit().
        self.func = func
        # This covers a Python 2/3 change not covered by six
        try:
            self.func_name = func.__name__
        except AttributeError:
            self.func_name = func.func_name
        # NOTE(review): inspect.getmoduleinfo was removed in Python 3.6 —
        # confirm this code path is only exercised on older interpreters.
        module_name = inspect.getmoduleinfo(
            six.get_function_globals(func)['__file__']).name
        module_name = '.'.join(['trackpy', module_name])
        self.module_name = module_name
        self.autojit_kw = autojit_kw
        if fallback is not None:
            self.ordinary = fallback
        else:
            self.ordinary = func

    @property
    def compiled(self):
        # Compile it if this is the first time.
        # NOTE(review): if NUMBA_AVAILABLE is False this raises
        # AttributeError (_compiled never set); callers only use it when
        # numba is available.
        if (not hasattr(self, '_compiled')) and NUMBA_AVAILABLE:
            if self.autojit_kw is not None:
                self._compiled = numba.autojit(**self.autojit_kw)(self.func)
            else:
                self._compiled = numba.autojit(self.func)
        return self._compiled

    def point_to_compiled_func(self):
        # Rebind the module-level name to the numba-compiled variant.
        setattr(sys.modules[self.module_name], self.func_name, self.compiled)

    def point_to_ordinary_func(self):
        # Rebind the module-level name back to the pure-Python variant.
        setattr(sys.modules[self.module_name], self.func_name, self.ordinary)
def try_numba_autojit(func=None, **kw):
    """Wrapper for numba.autojit() that treats the function as pure Python if numba is missing.

    Usage is as with autojit(): Either as a bare decorator (no parentheses), or with keyword
    arguments.

    The resulting compiled numba function can subsequently be turned on or off with
    enable_numba() and disable_numba(). It will be on by default."""
    def return_decorator(func):
        # Register the function with a global list of numba-enabled functions.
        f = RegisteredFunction(func, autojit_kw=kw)
        _registered_functions.append(f)

        if ENABLE_NUMBA_ON_IMPORT and NUMBA_AVAILABLE:
            # Overwrite the function's reference with a numba-compiled function.
            # This can be undone by calling disable_numba()
            return f.compiled
        else:
            return f.ordinary

    # Bare-decorator form passes the function directly; parenthesized form
    # returns the decorator to be applied.
    if func is None:
        return return_decorator
    else:
        return return_decorator(func)
def disable_numba():
    "Do not use numba-accelerated functions, even if numba is available."
    for registered in _registered_functions:
        registered.point_to_ordinary_func()
def enable_numba():
    "Use numba-accelerated variants of core functions."
    if not NUMBA_AVAILABLE:
        # `message` was recorded at import time and explains why numba is off.
        raise ImportError(message)
    for registered in _registered_functions:
        registered.point_to_compiled_func()
| daniorerio/trackpy | trackpy/try_numba.py | Python | bsd-3-clause | 4,135 |
from flask import Flask, render_template, request, redirect, Response, url_for, session, abort
import time, os, json, base64, hmac, urllib, random, string, datetime
from hashlib import sha1
from ConfigParser import SafeConfigParser
from logging import Formatter
import requests
import boto3
application = Flask(__name__)

##
## Set debugging and logging
# application.debug = True
application.debug = False

# File-based logging is configured only when not running in debug mode.
if application.debug is not True:
    import logging
    # NOTE(review): RotatingFileHandler is imported but never attached.
    from logging.handlers import RotatingFileHandler
    loglevel = logging.INFO
    # loglevel = logging.DEBUG
    logFormatStr = '%(asctime)s %(levelname)s: %(message)s'
    # if loglevel == logging.DEBUG:
    #     logFormatStr += ' [in %(pathname)s:%(lineno)d]'
    logFilename = 'filedrop.log'
    logging.basicConfig(format=logFormatStr, filename=logFilename, level=loglevel)
    application.logger.info("Logging started.")

##
## Read configuration (filedrop-config.ini next to this file)
config = SafeConfigParser()
config_file_path = os.path.join(os.path.dirname(__file__), 'filedrop-config.ini')
application.logger.info("Reading config from: {}".format(config_file_path))
config.read(config_file_path)
application.secret_key = config.get('flask', 'secret_key')
## Process Flask routes
@application.route("/reCAPTCHA", methods=['GET', 'POST'])
def reCAPTCHA():
    """Show / verify the Google reCAPTCHA gate.

    POST: verify the submitted token with Google; on success mark the session
    as human, reset its signature counter and redirect to the index.
    GET (or failed POST): render the captcha page unless already verified.
    """
    application.logger.debug("in reCAPTCHA")
    if request.method == 'POST':
        application.logger.debug("checking capcha.")
        r = requests.post(
            "https://www.google.com/recaptcha/api/siteverify",
            data={"secret": config.get('reCAPTCHA', 'secret_key'),
                  "response": request.form['g-recaptcha-response']})
        # Parse the verification response once instead of calling r.json() twice.
        result = r.json()
        if result.get('success'):
            session['reCAPTCHA'] = True
            session['s3_numsigs'] = 0
            application.logger.debug("capcha successful.")
            return redirect(url_for('index'))
    if 'reCAPTCHA' not in session or not session['reCAPTCHA']:
        application.logger.debug("reCAPTCHA: 'reCAPTCHA' not in session.")
        return (render_template('reCAPTCHA.html',
                                reCAPTCHA_site_key=config.get('reCAPTCHA', 'site_key')))
    application.logger.debug("reCAPTCHA: 'reCAPTCHA' IS in session.")
    return (redirect(url_for('index')))
@application.route("/", methods=['GET'])
def index():
    """Render the upload page, gated behind the reCAPTCHA check.

    (A commented-out environment-dumping debug block was removed here.)
    """
    application.logger.debug("in index")
    if 'reCAPTCHA' not in session or not session['reCAPTCHA']:
        application.logger.debug("index: 'reCAPTCHA' not in session.")
        return (redirect(url_for('reCAPTCHA')))
    application.logger.debug("index: not a robot")
    return (render_template('index.html'))
# Listen for GET requests to yourdomain.com/sign_s3/
@application.route('/sign_s3/', methods=['GET', 'POST'])
def sign_s3():
    """Hand the browser temporary AWS credentials scoped to a single upload folder.

    Rejects callers that have not passed the reCAPTCHA gate or that exceeded
    the per-session signature quota. The folder name encodes the client IP
    and a timestamp so uploads are traceable.
    """
    application.logger.debug("in sign_s3")
    if 'reCAPTCHA' not in session or not session['reCAPTCHA']:
        application.logger.debug("sign_s3: 'reCAPTCHA' not in session.")
        abort(401)
    if session['s3_numsigs'] > int(config.get('flask', 'max_session_sigs')):
        # Quota exhausted: drop the session flags so the client must pass the
        # captcha again.
        application.logger.debug("sign_s3: max_session_sigs exceeded")
        session.pop('reCAPTCHA', None)
        session.pop('s3_numsigs', None)
        abort(401)
    application.logger.debug("sign_s3: not a robot: {}/{} sigs in this session".format(session['s3_numsigs'], config.get('flask', 'max_session_sigs')))
    application.logger.info("sign_s3: request remote addr: {}".format(request.remote_addr))
    # Per-request upload folder: "<client ip> <timestamp>".
    folder = "{} {}".format(str(request.remote_addr), datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))

    # Get the application credentials and set a policy for the S3 bucket
    sts = boto3.client(
        'sts',
        aws_access_key_id=config.get('S3', 'aws_access_key'),
        aws_secret_access_key=config.get('S3', 'aws_secret_access_key'),
        region_name=config.get('S3', 'region')
    )
    # Policy restricts the temporary credentials to this request's folder only.
    policy = json.dumps({"Version": "2012-10-17",
                         "Statement": [{
                             "Effect": "Allow",
                             "Action": "s3:*",
                             "Resource": ["arn:aws:s3:::{}/{}/*".format(config.get('S3', 'bucket'), folder)]
                         }]
                         })
    # Get a federation token (temporary credentials based on application credentials + policy)
    credentials = sts.get_federation_token(
        Name='filedrop',
        Policy=policy,
        DurationSeconds=60 * 60,
    )
    application.logger.debug("sign_s3: sts credentials")
    application.logger.debug("   AccessKeyId     = {}".format(credentials['Credentials']['AccessKeyId']))
    # application.logger.debug("   SessionToken    = {}".format(credentials['Credentials']['SessionToken']))
    # application.logger.debug("   SecretAccessKey = {}".format(credentials['Credentials']['SecretAccessKey']))
    application.logger.debug("Bucket = {}".format(config.get('S3', 'bucket')))
    application.logger.debug("Folder = {}".format(folder))

    # Send the federation token back to the caller:
    content = json.dumps({
        'AccessKeyId': credentials['Credentials']['AccessKeyId'],
        'SessionToken': credentials['Credentials']['SessionToken'],
        'SecretAccessKey': credentials['Credentials']['SecretAccessKey'],
        'Region': config.get('S3', 'region'),
        'Bucket': config.get('S3', 'bucket'),
        'Folder': folder,
    })
    # Count this signature against the session quota.
    if 's3_numsigs' in session:
        session['s3_numsigs'] += 1
    else:
        session['s3_numsigs'] = 1
    return (content)
@application.errorhandler(Exception)
def internal_error(error):
    """Log any unhandled exception and report it as an HTTP 500.

    The original handler returned repr(error) with Flask's implicit 200
    status; an error handler should signal failure to the client.
    """
    application.logger.error(error)
    return (repr(error), 500)
if __name__ == "__main__":
    # Development entry point: bind on all interfaces.
    application.run(host='0.0.0.0')
| igg/filedrop | filedrop/filedrop.py | Python | lgpl-2.1 | 5,365 |
#coding:UTF-8
#Lib
#
#[Beautiful Soup 4]
#(https://www.crummy.com/software/BeautifulSoup/bs4/doc/index.zh.html)
#[Requests]
#(http://docs.python-requests.org/en/master/)
#[lxml]
#(http://lxml.de/)
#[selenium]
#(http://selenium-python.readthedocs.io/index.html)
import os
import urllib
import urllib.parse

import bs4
import requests
from selenium import webdriver
class UrlCorrector:
    """Completes relative URLs using a default scheme and network location.

    Fixes over the original: the zero-argument ``__init__`` overload was dead
    code (Python keeps only the last ``def``, so ``UrlCorrector()`` raised
    TypeError anyway) and has been removed; a local named ``type`` shadowed
    the builtin; extension extraction no longer returns the last character of
    dot-less file names.
    """

    def __init__(self, url: str):
        """Adopt the scheme and netloc of `url` as defaults for later calls."""
        self.set_default(url)

    def set_default(self, url: str):
        """Remember the scheme and netloc of `url` as the defaults."""
        scheme, netloc, *_rest = urllib.parse.urlparse(url)
        self.__scheme__ = scheme
        self.__netloc__ = netloc

    def url(self, url: str):
        """Return `url` with any missing scheme/netloc filled from the defaults."""
        scheme, netloc, *rest = urllib.parse.urlparse(url)
        scheme = scheme or self.__scheme__
        netloc = netloc or self.__netloc__
        return urllib.parse.urlunparse((scheme, netloc, *rest))

    def res(self, url: str):
        """Resolve a resource link.

        :returns: ``(absolute_link, file_name, extension)`` where extension
                  keeps its leading dot, or is '' for dot-less names.
        """
        scheme, netloc, path, *rest = urllib.parse.urlparse(url)
        scheme = scheme or self.__scheme__
        netloc = netloc or self.__netloc__
        link = urllib.parse.urlunparse((scheme, netloc, path, *rest))
        name = path[path.rfind('/') + 1:]
        dot = name.rfind('.')
        ext = name[dot:] if dot != -1 else ''
        return (link, name, ext)
class Site:
    """A crawl target: a URL plus its query parameters and request headers."""
    # Target page URL.
    url = ""
    # Query-string parameters sent with each request.
    payload = {}
    # Present a desktop Chrome user agent instead of the default requests one.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.48'
    }
if __name__ == "__main__":
    # Candidate crawl targets; only `test` is actually used below.
    sogou = Site()
    sogou.url = r"http://pic.sogou.com/pics"
    sogou.payload = {"query": "lena"}

    baidu = Site()
    baidu.url = r"https://image.baidu.com/search/index"
    baidu.payload = {"tn": "baiduimage", "word": "lena"}

    test = Site()
    test.url = r"http://www.bilibili.com"

    dir = os.path.join("crawler")  # NOTE(review): shadows the builtin `dir`
    site = test

    with requests.session() as sess:
        sess.params.update(site.payload)
        sess.headers.update(site.headers)

        # Fetch the page; bail out on timeout or any non-200 status.
        try:
            resp = sess.get(site.url, timeout=4.)
        except requests.ReadTimeout:
            print("Access: timeout")
            exit()
        if resp.status_code != 200:
            print("Access:", resp.status_code)
            exit()
        else:
            print("Access:", resp.url, "\n")

        # Collect every <img> tag and download each one that has a src.
        soup = bs4.BeautifulSoup(resp.text, "lxml")
        imgs = soup.find_all("img")
        if not imgs:
            print("No Images")
            exit()
        if not os.path.exists(dir):
            os.makedirs(dir)

        msg = "GET [{0:^6}]: {1}\n" + 14 * " " + "{2}"
        corr = UrlCorrector(site.url)  # completes scheme-/host-less src links
        for img in imgs:
            if not img.has_attr("src"):
                continue
            link, name, ext = corr.res(img["src"])
            if not link or not name:
                print(msg.format("Failed", link, name))
                continue
            try:
                resp = sess.get(link, timeout=4.)
            except requests.ReadTimeout:
                print(msg.format("Failed", link, "timeout"))
                continue
            if resp.status_code != 200:
                print(msg.format("Failed", link, resp.status_code))
                continue
            # Save the image bytes under the crawler output directory.
            name = os.path.join(dir, name)
            with open(name, 'wb') as f:
                f.write(resp.content)
            print(msg.format("Done", link, name))
# note:
#
# [Best way to check if a list is empty]
# (https://stackoverflow.com/a/53522)
# [AttributeError: 'module' object has no attribute 'webdriver']
# (https://stackoverflow.com/q/37801823)
| wiryls/HomeworkCollectionOfMYLS | 2017.DigitalImageProcessing/src/module/crawler/old/crawler.py | Python | mit | 3,534 |
import os
import json
import asyncio
from unittest import mock
import pytest
from tests import utils as test_utils
from boto.glacier.exceptions import UnexpectedHTTPResponseError
from waterbutler.core.path import WaterButlerPath
from waterbutler.providers.osfstorage import settings
from waterbutler.providers.osfstorage.tasks import utils
from waterbutler.providers.osfstorage.tasks import backup
from waterbutler.providers.osfstorage.tasks import parity
from waterbutler.providers.osfstorage.tasks import exceptions
from waterbutler.providers.osfstorage import settings as osf_settings
@pytest.fixture
def credentials():
    # Empty credentials dict; sufficient for these task-level tests.
    return {
    }
@pytest.fixture
def settings():
    # Minimal provider settings: a cloud storage provider and a container.
    # NOTE(review): this fixture shadows the `settings` module imported above;
    # module-level constants are reached via the `osf_settings` alias instead.
    return {
        'storage': {
            'provider': 'cloud',
            'container': 'butt',
        },
    }
@pytest.fixture
def mock_provider(monkeypatch):
    """A MockProvider1 whose I/O coroutines are all stubbed out.

    Also patches parity.make_provider so the tasks under test receive this
    mock instead of constructing a real provider.
    """
    mock_provider = test_utils.MockProvider1({}, {}, {})
    mock_provider.copy = test_utils.MockCoroutine()
    mock_provider.move = test_utils.MockCoroutine()
    mock_provider.delete = test_utils.MockCoroutine()
    mock_provider.upload = test_utils.MockCoroutine()
    mock_provider.download = test_utils.MockCoroutine()
    mock_provider.metadata = test_utils.MockCoroutine()
    mock_make_provider = mock.Mock(return_value=mock_provider)
    monkeypatch.setattr(parity, 'make_provider', mock_make_provider)
    return mock_provider
class TestParityTask:
    """Tests for the parity-file creation/upload celery tasks."""

    def test_main_delays(self, monkeypatch, event_loop, credentials, settings):
        """main() schedules the create-files task rather than running it inline."""
        task = mock.Mock()
        monkeypatch.setattr(parity, '_parity_create_files', task)
        fut = parity.main('The Best', credentials, settings)
        event_loop.run_until_complete(fut)
        task.delay.assert_called_once_with('The Best', credentials, settings)

    def test_creates_upload_futures(self, monkeypatch, event_loop, credentials, settings):
        """Every parity file produced gets its own upload invocation."""
        paths = range(10)
        future = asyncio.Future()
        future.set_result(None)
        mock_upload_parity = mock.Mock()
        mock_upload_parity.return_value = future
        mock_create_parity = mock.Mock(return_value=paths)
        monkeypatch.setattr(parity, '_upload_parity', mock_upload_parity)
        monkeypatch.setattr(parity.utils, 'create_parity_files', mock_create_parity)

        parity._parity_create_files('Triangles', credentials, settings)

        mock_create_parity.assert_called_once_with(
            os.path.join(osf_settings.FILE_PATH_COMPLETE, 'Triangles'),
            redundancy=osf_settings.PARITY_REDUNDANCY,
        )
        for num in reversed(range(10)):
            mock_upload_parity.assert_any_call(num, credentials, settings)

    @pytest.mark.asyncio
    async def test_uploads(self, monkeypatch, tmpdir, mock_provider):
        """_upload_parity streams the file to the provider at its basename path."""
        tempfile = tmpdir.join('test.file')
        stream = parity.streams.FileStreamReader(tempfile)
        monkeypatch.setattr(parity.streams, 'FileStreamReader', lambda x: stream)

        tempfile.write('foo')
        path = tempfile.strpath

        await parity._upload_parity(path, {}, {})

        assert mock_provider.upload.called
        mock_provider.upload.assert_called_once_with(
            stream,
            WaterButlerPath('/' + os.path.split(path)[1])
        )

    def test_exceptions_get_raised(self, monkeypatch):
        """A non-zero par2 exit code raises ParchiveError naming the command."""
        mock_sp_call = mock.Mock(return_value=7)
        monkeypatch.setattr(utils.subprocess, 'call', mock_sp_call)
        path = 'foo/bar/baz'
        args = ['par2', 'c', '-r5', 'baz.par2', path]

        with pytest.raises(exceptions.ParchiveError) as e:
            utils.create_parity_files(path)

        # The assertion used to sit *inside* the pytest.raises block after the
        # raising call, so it never executed; it also compared the exception
        # object itself against a string. Compare the message text instead.
        # TODO(review): confirm this matches utils' exact message format.
        assert str(e.value) == '{0} failed with code {1}'.format(' '.join(args), 7)

        with open(os.devnull, 'wb') as DEVNULL:
            mock_sp_call.assert_called_once_with(args, stdout=DEVNULL, stderr=DEVNULL)

    def test_skip_empty_files(self, monkeypatch):
        """Zero-byte files are skipped entirely — no par2 invocation at all."""
        mock_stat = mock.Mock(return_value=mock.Mock(st_size=0))
        mock_sp_call = mock.Mock()
        monkeypatch.setattr(os, 'stat', mock_stat)
        monkeypatch.setattr(utils.subprocess, 'call', mock_sp_call)
        path = 'foo/bar/baz'

        paths = utils.create_parity_files(path)

        assert paths == []
        assert not mock_sp_call.called
class TestBackUpTask:
    """Tests for the Glacier backup celery tasks."""

    def test_main_delays(self, monkeypatch, event_loop):
        # main() should only schedule the push task, not run it inline.
        task = mock.Mock()
        monkeypatch.setattr(backup, '_push_file_archive', task)
        fut = backup.main('The Best', 0, None, {}, {})
        event_loop.run_until_complete(fut)
        task.delay.assert_called_once_with('The Best', 0, None, {}, {})

    def test_tries_upload(self, monkeypatch):
        # The archive upload is attempted with the file path as description.
        mock_vault = mock.Mock()
        mock_vault.name = 'ThreePoint'
        mock_vault.upload_archive.return_value = 3
        mock_get_vault = mock.Mock()
        mock_get_vault.return_value = mock_vault
        mock_complete = mock.Mock()
        monkeypatch.setattr(backup, 'get_vault', mock_get_vault)
        monkeypatch.setattr(backup, '_push_archive_complete', mock_complete)

        backup._push_file_archive('Triangles', None, None, {}, {})
        mock_vault.upload_archive.assert_called_once_with('Triangles', description='Triangles')

    def test_calls_complete(self, monkeypatch, credentials, settings):
        # On success the completion task is scheduled with vault + archive id.
        mock_vault = mock.Mock()
        mock_complete = mock.Mock()
        mock_vault.name = 'ThreePoint'
        mock_vault.upload_archive.return_value = 3
        mock_get_vault = mock.Mock()
        mock_get_vault.return_value = mock_vault
        monkeypatch.setattr(backup, 'get_vault', mock_get_vault)
        monkeypatch.setattr(backup, '_push_archive_complete', mock_complete)

        backup._push_file_archive('Triangles', 0, None, credentials, settings)

        mock_complete.delay.assert_called_once_with(
            0,
            None,
            {
                'vault': 'ThreePoint',
                'archive': 3
            },
        )

    def test_upload_error_empty_file(self, monkeypatch):
        # Glacier rejects zero-length archives with a 400 "Invalid
        # Content-Length: 0"; the task should swallow that specific error and
        # simply not schedule completion.
        mock_vault = mock.Mock()
        mock_vault.name = 'ThreePoint'
        mock_response = mock.Mock()
        mock_response.status = 400
        mock_response.read.return_value = json.dumps({
            'status': 400,
            'message': 'Invalid Content-Length: 0',
        }).encode('utf-8')
        error = UnexpectedHTTPResponseError(200, mock_response)
        mock_vault.upload_archive.side_effect = error
        mock_get_vault = mock.Mock()
        mock_get_vault.return_value = mock_vault
        mock_complete = mock.Mock()
        monkeypatch.setattr(backup, 'get_vault', mock_get_vault)
        monkeypatch.setattr(backup, '_push_archive_complete', mock_complete)

        backup._push_file_archive('Triangles', None, None, {}, {})

        mock_vault.upload_archive.assert_called_once_with('Triangles', description='Triangles')
        assert not mock_complete.called

    def test_upload_error(self, monkeypatch):
        # Any other Glacier error must propagate to the caller.
        mock_vault = mock.Mock()
        mock_vault.name = 'ThreePoint'
        mock_response = mock.Mock()
        mock_response.status = 400
        mock_response.read.return_value = json.dumps({
            'status': 400,
            'message': 'Jean Valjean means nothing now',
        }).encode('utf-8')
        error = UnexpectedHTTPResponseError(200, mock_response)
        mock_vault.upload_archive.side_effect = error
        mock_get_vault = mock.Mock()
        mock_get_vault.return_value = mock_vault
        mock_complete = mock.Mock()
        monkeypatch.setattr(backup, 'get_vault', mock_get_vault)
        monkeypatch.setattr(backup, '_push_archive_complete', mock_complete)

        with pytest.raises(UnexpectedHTTPResponseError):
            backup._push_file_archive('Triangles', None, None, {}, {})

        assert not mock_complete.called
| Johnetordoff/waterbutler | tests/providers/osfstorage/test_tasks.py | Python | apache-2.0 | 7,757 |
from openupgradelib import openupgrade
from odoo import SUPERUSER_ID, api, tools
from odoo.modules.module import get_module_resource
def _load_demo(env):
    """Load the module's demo XML files into the database.

    Called from post_init_hook when the generic chart of accounts is
    installed and demo data is enabled for this module.
    """
    def load_file(env, module, *args):
        # convert_file runs the XML in "init" mode with the "demo" data
        # kind, so the created records are flagged as demo data.
        tools.convert_file(
            env.cr,
            "account_wallet",
            get_module_resource(module, *args),
            {},
            "init",
            False,
            "demo",
        )
    load_file(env, "account_wallet", "demo", "ir_sequence.xml")
    load_file(env, "account_wallet", "demo", "account_journal.xml")
    load_file(env, "account_wallet", "demo", "account_account.xml")
    load_file(env, "account_wallet", "demo", "account_wallet_type.xml")
# TODO: remove when migrating to next version
def _rename_cagnotte(env):
    """
    Rename legacy "cagnotte" tables/columns to their "wallet" names.

    The module was previously named "cagnotte"; this migrates an existing
    database in place. No-op on a fresh install (legacy table absent).

    :param env: Odoo environment bound to the current database cursor
    """
    if not openupgrade.table_exists(env.cr, "cagnotte_type"):
        return
    tables = [
        ("cagnotte_type", "account_wallet_type"),
        ("account_cagnotte", "account_wallet"),
    ]
    openupgrade.rename_tables(env.cr, tables)
    columns = {
        "account_wallet": [
            ("cagnotte_type_id", "wallet_type_id"),
            ("solde_cagnotte", "balance"),
        ],
        # Field on account_move coming from former account_invoice should
        # be migrated manually - Keep for record
        # "account_move": [
        #     ("cagnotte_type_id", "account_wallet_type_id"),
        # ],
        "account_move_line": [
            ("account_cagnotte_id", "account_wallet_id"),
        ],
    }
    openupgrade.rename_columns(env.cr, columns)
def pre_init_hook(cr):
    """Before installation: migrate legacy cagnotte tables, if any."""
    with api.Environment.manage():
        env = api.Environment(cr, SUPERUSER_ID, {})
        _rename_cagnotte(env)
def post_init_hook(cr, registry):
    """After installation: load demo data when appropriate.

    Demo XML is only loaded when l10n_generic_coa is installed and this
    module itself was installed with demo data enabled.
    """
    with api.Environment.manage():
        env = api.Environment(cr, SUPERUSER_ID, {})
        wallet = env["ir.module.module"].search([("name", "=", "account_wallet")])
        l10n_generic = env["ir.module.module"].search(
            [("name", "=", "l10n_generic_coa"), ("state", "=", "installed")]
        )
        if l10n_generic and wallet.demo:
            _load_demo(env)
| acsone/acsone-addons | account_wallet/hooks.py | Python | agpl-3.0 | 2,218 |
__author__ = 'calvin'
import os
import time
import datetime
from anonymoususage import AnonymousUsageTracker
# Manual smoke test: build a tracker and continuously increment one
# statistic so submissions to the configured server can be observed.
usage_tracker = AnonymousUsageTracker(config='./anonymoususage.cfg',
                                      uuid="ASDFGH",
                                      filepath='./test.db',
                                      submit_interval_s=datetime.timedelta(seconds=10),
                                      check_interval_s=datetime.timedelta(minutes=2))
usage_tracker.track_statistic('grids')
usage_tracker.track_statistic('lines')
# Bump the 'grids' counter forever; stop with Ctrl-C.
while 1:
    usage_tracker['grids'] += 1
    time.sleep(2)
# Auto-generated fixture module: placeholder names used to exercise
# star-import propagation in completion tests.
name1_0_1_0_1_0_0 = None
name1_0_1_0_1_0_1 = None
name1_0_1_0_1_0_2 = None
name1_0_1_0_1_0_3 = None
name1_0_1_0_1_0_4 = None
# Generated by Django 1.9.13 on 2017-05-11 10:59
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: widens the organization role choices
    # (adds the 'publisher' role) on both the live and historical models.

    dependencies = [
        ('publisher', '0047_auto_20170413_1010'),
    ]

    operations = [
        migrations.AlterField(
            model_name='historicalorganizationuserrole',
            name='role',
            field=models.CharField(choices=[('partner_manager', 'Partner Manager'), ('project_coordinator', 'Project Coordinator'), ('marketing_reviewer', 'Marketing Reviewer'), ('publisher', 'Publisher')], max_length=63, verbose_name='Organization Role'),
        ),
        migrations.AlterField(
            model_name='organizationuserrole',
            name='role',
            field=models.CharField(choices=[('partner_manager', 'Partner Manager'), ('project_coordinator', 'Project Coordinator'), ('marketing_reviewer', 'Marketing Reviewer'), ('publisher', 'Publisher')], max_length=63, verbose_name='Organization Role'),
        ),
    ]
| edx/course-discovery | course_discovery/apps/publisher/migrations/0048_auto_20170511_1059.py | Python | agpl-3.0 | 989 |
"""SCons.Tool.sgiar
Tool-specific initialization for SGI ar (library archive). If CC
exists, static libraries should be built with it, so the prelinker has
a chance to resolve C++ template instantiations.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sgiar.py 4369 2009/09/19 15:58:29 scons"
import SCons.Defaults
import SCons.Tool
import SCons.Util
def generate(env):
    """Add Builders and construction variables for ar to an Environment."""
    SCons.Tool.createStaticLibBuilder(env)
    # Prefer the SGI 'CC' compiler driver as archiver when present: it runs
    # the prelinker so C++ template instantiations are resolved before the
    # archive is written (see the module docstring).
    if env.Detect('CC'):
        env['AR']          = 'CC'
        env['ARFLAGS']     = SCons.Util.CLVar('-ar')
        env['ARCOM']       = '$AR $ARFLAGS -o $TARGET $SOURCES'
    else:
        # Fall back to plain ar (note: no -o flag in this command form).
        env['AR']          = 'ar'
        env['ARFLAGS']     = SCons.Util.CLVar('r')
        env['ARCOM']       = '$AR $ARFLAGS $TARGET $SOURCES'
    # Shared libraries go through the linker with -shared.
    env['SHLINK']      = '$LINK'
    env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -shared')
    env['SHLINKCOM']   = '$SHLINK $SHLINKFLAGS -o $TARGET $SOURCES $_LIBDIRFLAGS $_LIBFLAGS'
    env['LIBPREFIX']   = 'lib'
    env['LIBSUFFIX']   = '.a'
def exists(env):
    """Report whether this tool applies: either the SGI 'CC' driver or a
    plain 'ar' must be detectable in the environment."""
    for candidate in ('CC', 'ar'):
        found = env.Detect(candidate)
        if found:
            return found
    return None
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| looooo/pivy | scons/scons-local-1.2.0.d20090919/SCons/Tool/sgiar.py | Python | isc | 2,570 |
import pytest
import dcos_installer.config
from dcos_installer import cli
def test_default_arg_parser():
    """With no CLI arguments, defaults apply and no action is selected."""
    parser = cli.parse_args([])
    assert parser.verbose is False
    assert parser.port == 9000
    assert parser.action is None
def test_set_arg_parser():
    """Every flag maps to the expected attribute/action on the namespace."""
    parser = cli.parse_args(['-v', '-p 12345'])
    assert parser.verbose is True
    assert parser.port == 12345
    # each mode flag selects exactly one action
    parser = cli.parse_args(['--web'])
    assert parser.action == 'web'
    parser = cli.parse_args(['--genconf'])
    assert parser.action == 'genconf'
    parser = cli.parse_args(['--preflight'])
    assert parser.action == 'preflight'
    parser = cli.parse_args(['--postflight'])
    assert parser.action == 'postflight'
    parser = cli.parse_args(['--deploy'])
    assert parser.action == 'deploy'
    parser = cli.parse_args(['--validate-config'])
    assert parser.action == 'validate-config'
    parser = cli.parse_args(['--uninstall'])
    assert parser.action == 'uninstall'
    # --hash-password takes a value and is not an action
    parser = cli.parse_args(['--hash-password', 'foo'])
    assert parser.hash_password == 'foo'
    assert parser.action is None
    # Can't do two at once
    with pytest.raises(SystemExit):
        cli.parse_args(['--validate', '--hash-password', 'foo'])
def test_stringify_config():
    """stringify_configuration turns every config value into a string:
    bools lowercase, ints via str(), lists/dicts via JSON; anything else
    is an error."""
    stringify = dcos_installer.config.stringify_configuration
    # Basic cases pass right through
    assert dict() == stringify(dict())
    assert {"foo": "bar"} == stringify({"foo": "bar"})
    assert {"a": "b", "c": "d"} == stringify({"a": "b", "c": "d"})
    # booleans are converted to lower case true / false
    assert {"a": "true"} == stringify({"a": True})
    assert {"a": "false"} == stringify({"a": False})
    assert {"a": "b", "c": "false"} == stringify({"a": "b", "c": False})
    # integers are made into strings
    assert {"a": "1"} == stringify({"a": 1})
    assert {"a": "4123"} == stringify({"a": 4123})
    assert {"a": "b", "c": "9999"} == stringify({"a": "b", "c": 9999})
    # Dict and list are converted to JSON
    assert {"a": '["b"]'} == stringify({"a": ['b']})
    assert {"a": '["b\\"a"]'} == stringify({"a": ['b"a']})
    assert {"a": '[1]'} == stringify({"a": [1]})
    assert {"a": '[1, 2, 3, 4]'} == stringify({"a": [1, 2, 3, 4]})
    assert {"a": '[true, false]'} == stringify({"a": [True, False]})
    assert {"a": '{"b": "c"}'} == stringify({"a": {"b": "c"}})
    assert {"a": '{"b": 1}'} == stringify({"a": {"b": 1}})
    assert {"a": '{"b": true}'} == stringify({"a": {"b": True}})
    assert {"a": '{"b": null}'} == stringify({"a": {"b": None}})
    # Random types produce an error.
    with pytest.raises(Exception):
        stringify({"a": set()})
    # All the handled types at once
    assert {
        "a": "b",
        "c": "true",
        "d": "1",
        "e": "[1]",
        "f": '{"g": "h"}'
    } == stringify({"a": "b", "c": True, "d": 1, "e": [1], "f": {"g": "h"}})
| movicha/dcos | pytest/test_installer_init.py | Python | apache-2.0 | 2,886 |
#!/usr/bin/env python
import re
# Python 2 script: translate a subset of Texinfo markup (sicp.texi) into
# HTML and write the result to stdout. Order matters: structural tags are
# rewritten before the generic inline commands.
with open("sicp.texi") as sicp:
    r = sicp.read()
    r = re.sub("@dots{}", "...", r)
    # sectioning -> headings
    r = re.sub("@section (.*?)\n", "<h2> \\1 </h2> \n", r)
    r = re.sub("@subsection (.*?)\n", "<h3> \\1 </h3> \n", r)
    r = re.sub("@subsubheading (.*?)\n", "<h4> \\1 </h4> \n", r)
    # literal blocks
    r = re.sub("@example", "<pre>", r)
    r = re.sub("@end example", "</pre>", r)
    # inline commands (DOTALL: arguments may span lines)
    r = re.sub("@anchor{(.*?)}", "", r, flags=re.DOTALL)
    r = re.sub("@ref{(.*?)}", "\\1", r, flags=re.DOTALL)
    r = re.sub("@newterm{(.*?)}", "<tt> \\1 </tt> \n", r, flags=re.DOTALL)
    r = re.sub("@code{(.*?)}", "<tt> \\1 </tt>", r, flags=re.DOTALL)
    r = re.sub("@var{(.*?)}", "\\1", r, flags=re.DOTALL)
    r = re.sub("@i{(.*?)}", "\\1", r, flags=re.DOTALL)
    r = re.sub("@emph{(.*?)}", "<em>\\1</em>", r, flags=re.DOTALL)
    r = re.sub("@r{(.*?)}", "\\1", r, flags=re.DOTALL)
    r = re.sub("@strong{(.*?)}", "<b>\\1</b>", r, flags=re.DOTALL)
    r = re.sub("@footnote{(.*?)}", "<div class='footnote'> \\1 </div>", r, flags=re.DOTALL)
    # lisp blocks get an interactive prompt hook
    r = re.sub("@lisp", '<div id="">', r)
    r = re.sub("@end lisp", "</div>\n<script>\nprompt();\n</script>", r)
    r = re.sub("@noindent", "", r)
    r = re.sub("@quotation", '<div class="exercise">', r)
    r = re.sub("@end quotation", '</div>', r)
    # lists
    r = re.sub("@itemize @bullet", "<ul>", r)
    r = re.sub("@end itemize", "</ul>", r)
    r = re.sub("@item", "<li>", r)
    r = re.sub("@enumerate .*?\n", "<ul>", r)
    r = re.sub("@end enumerate", "</ul>", r)
    r = re.sub("@node(.*?)\n", "", r)
    print r
| sliz1/isicp | sicp_raw/texi.py | Python | mit | 1,550 |
#! /usr/bin/env python
"""
edits mothur taxonomy summary file
transfers last name that is not "unclassified" or "uncultured" to "unclassified" or "uncultured" assignment
make sure that the file has default sorting (by rankID)
Copyright:
tax_summary_edit edits mothur taxonomy summary file
Copyright (C) 2016 William Brazelton
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
# Rewrite a mothur taxonomy summary: rows whose taxon is "unclassified"
# or "uncultured" get the last informative taxon name appended, so the
# assignment stays interpretable. Input must be in default rankID order.
infilename = sys.argv[1]
outfilename = infilename + '.renamed.txt'


def _write_renamed(outfile, line, lasttax):
    """Write one tab-separated row with `lasttax` appended to column 3.

    The two original branches ("unclassified" / "uncultured") were
    byte-identical; they are merged here.
    """
    columns = line.split('\t')
    newtax = columns[2] + ' ' + lasttax
    outfile.write(columns[0])
    outfile.write('\t')
    outfile.write(columns[1])
    outfile.write('\t')
    outfile.write(newtax)
    for tab in columns[3:]:
        outfile.write('\t')
        outfile.write(tab)


# `with` guarantees both files are closed even on error; output stays in
# append mode as before.
with open(outfilename, 'a') as outfile, open(infilename) as infile:
    # Initialise lasttax so a file whose very first row is unclassified/
    # uncultured no longer raises NameError (it previously did).
    lasttax = ''
    for line in infile:
        if "unclassified" in line or "uncultured" in line:
            _write_renamed(outfile, line, lasttax)
        else:
            outfile.write(line)
            # remember the most recent informative taxon name
            lasttax = line.split('\t')[2]
| Brazelton-Lab/lab_scripts | 16S/tax_summary_edit.py | Python | gpl-2.0 | 1,875 |
import unittest
from ddt import ddt, data, file_data, unpack
from test.mycode import larger_than_two, has_three_elements, is_a_greeting
class Mylist(list):
    """list subclass: plain lists cannot carry extra attributes, this can."""
    pass


def annotated(a, b):
    """Pack (a, b) into a Mylist whose __name__ produces a readable
    ddt-generated test id."""
    fixture = Mylist((a, b))
    fixture.__name__ = "test_%d_greater_than_%d" % (a, b)
    return fixture
@ddt
class FooTestCase(unittest.TestCase):
    """Example suite demonstrating the ddt decorators:
    @data, @file_data and @unpack (in both decorator orders)."""

    def test_undecorated(self):
        # ddt leaves undecorated tests untouched
        self.assertTrue(larger_than_two(24))

    @data(3, 4, 12, 23)
    def test_larger_than_two(self, value):
        self.assertTrue(larger_than_two(value))

    @data(1, -3, 2, 0)
    def test_not_larger_than_two(self, value):
        self.assertFalse(larger_than_two(value))

    @data(annotated(2, 1), annotated(10, 5))
    def test_greater(self, value):
        # annotated() pairs carry a __name__ used for the generated test id
        a, b = value
        self.assertGreater(a, b)

    @file_data('test_data_dict.json')
    def test_file_data_dict(self, value):
        self.assertTrue(has_three_elements(value))

    @file_data('test_data_list.json')
    def test_file_data_list(self, value):
        self.assertTrue(is_a_greeting(value))

    @data((3, 2), (4, 3), (5, 3))
    @unpack
    def test_tuples_extracted_into_arguments(self, first_value, second_value):
        self.assertTrue(first_value > second_value)

    @data([3, 2], [4, 3], [5, 3])
    @unpack
    def test_list_extracted_into_arguments(self, first_value, second_value):
        self.assertTrue(first_value > second_value)

    @unpack
    @data({'first': 1, 'second': 3, 'third': 2},
          {'first': 4, 'second': 6, 'third': 5})
    def test_dicts_extracted_into_kwargs(self, first, second, third):
        # dict values arrive as keyword arguments
        self.assertTrue(first < third < second)

    @data(u'ascii', u'non-ascii-\N{SNOWMAN}')
    def test_unicode(self, value):
        self.assertIn(value, (u'ascii', u'non-ascii-\N{SNOWMAN}'))
| edx/ddt | test/test_example.py | Python | mit | 1,773 |
import pylab as pl
import matplotlib.cm as cm
import numpy.ma as ma
import numpy as np
# utility functions
from mpl_toolkits.axes_grid1 import make_axes_locatable
def nice_imshow(ax, data, vmin=None, vmax=None, cmap=None):
    """Wrapper around pl.imshow that adds a matched colorbar.

    vmin/vmax default to the data range; cmap defaults to jet.
    """
    if cmap is None:
        cmap = cm.jet
    if vmin is None:
        vmin = data.min()
    if vmax is None:
        vmax = data.max()
    # reserve a strip right of `ax` so the colorbar does not distort the
    # image's aspect ratio
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    im = ax.imshow(data, vmin=vmin, vmax=vmax, interpolation='nearest', cmap=cmap)
    pl.colorbar(im, cax=cax)
def make_mosaic(imgs, nrows, ncols, border=1):
    """
    Tile a stack of equally-shaped images into one masked mosaic array.

    Parameters
    ----------
    imgs : array of shape (height, width, n_images)
    nrows, ncols : grid dimensions of the mosaic
    border : width in pixels of the gap between tiles

    Returns
    -------
    numpy masked array of shape
    (nrows*height + (nrows-1)*border, ncols*width + (ncols-1)*border);
    the border pixels between tiles remain masked.
    """
    nimgs = imgs.shape[2]
    imshape = imgs.shape[:2]
    mosaic = ma.masked_all((nrows * imshape[0] + (nrows - 1) * border,
                            ncols * imshape[1] + (ncols - 1) * border),
                           dtype=np.float32)
    paddedh = imshape[0] + border
    paddedw = imshape[1] + border
    # range instead of Python-2-only xrange keeps the function usable on
    # Python 3; divmod replaces int(np.floor(i / ncols)), which gives the
    # same (row, col) for non-negative indices.
    for i in range(nimgs):
        row, col = divmod(i, ncols)
        mosaic[row * paddedh:row * paddedh + imshape[0],
               col * paddedw:col * paddedw + imshape[1]] = imgs[:, :, i]
    return mosaic
def make_activation_mosaic(imgs, nrows, ncols, border=1):
    """
    Tile a batch of activation maps into one masked mosaic array.

    Parameters
    ----------
    imgs : array of shape (1, height, width, n_channels); only the first
        batch element is used.
    nrows, ncols : grid dimensions of the mosaic
    border : width in pixels of the gap between tiles

    Returns
    -------
    numpy masked array with one tile per channel; border pixels between
    tiles remain masked.
    """
    nimgs = imgs.shape[3]
    imshape = imgs.shape[1:3]
    mosaic = ma.masked_all((nrows * imshape[0] + (nrows - 1) * border,
                            ncols * imshape[1] + (ncols - 1) * border),
                           dtype=np.float32)
    paddedh = imshape[0] + border
    paddedw = imshape[1] + border
    # The leftover Python-2 debug `print` statements were removed: they
    # spammed stdout on every tile and were syntax errors on Python 3.
    # range replaces xrange and divmod replaces int(np.floor(i / ncols)).
    for i in range(nimgs):
        row, col = divmod(i, ncols)
        mosaic[row * paddedh:row * paddedh + imshape[0],
               col * paddedw:col * paddedw + imshape[1]] = imgs[0, :, :, i]
    return mosaic
#!/usr/bin/env python3
import os
import repl
import sys
from vm import VM
if __name__ == '__main__':
    # Bug fix: the original fell through to sys.argv[1] after repl.run()
    # returned, raising IndexError when no file argument was given. The
    # REPL branch and the file-evaluation branch are now exclusive.
    if len(sys.argv) == 1:
        # no argument: interactive mode
        repl.run()
    else:
        filepath = sys.argv[1]
        if not os.path.exists(filepath):
            raise FileNotFoundError(filepath)
        vm = VM()
        vm.evalFile(filepath)
| hachibu/setack | setack.py | Python | mit | 317 |
# -*- coding: utf-8 -*-
import jft
# Convert the tab-separated chichi.csv into food.json: a JSON array of
# buildings, each holding restaurants, each holding food entries. The
# loop is a small state machine — preb/prer remember the previous row's
# building/restaurant so the proper brackets are opened and closed when
# either changes. Rows must therefore be pre-sorted by building, then
# restaurant.
fin = open("chichi.csv", "r")
fout = open("food.json", "w")
fout.write('[')
preb = ""   # building of the previous row ("" = first row)
prer = ""   # restaurant of the previous row
for line in fin.readlines():
    # strip trailing newline/CR, then escape backslashes for JSON
    while line[-1] == '\n' or line[-1] == '\r':
        line = line[0:-1]
    line = line.replace('\\', '\\\\')
    arr = line.split('\t')
    photographer = arr[0]
    name = arr[1]
    taste = arr[2]
    restaurant = arr[3]
    # column 4 is "<building(6 chars)><restaurant>", except for the
    # special-cased cafeteria name below
    if (restaurant == '哈乐餐厅'):
        building = "哈乐"
    else:
        building = restaurant[0:6]
        restaurant = restaurant[6:]
    price = arr[4]
    description = arr[5]
    url = arr[6]
    genre = ""
    if (preb == ''):
        # very first row: open the first building and restaurant objects
        fout.write('{'
                   + '"building":"' + building + '",'
                   + '"restaurants":[{'
                   '"restaurant":"' + restaurant + '",'
                   + '"foods":['
                   )
    else:
        if (prer != restaurant):
            # restaurant changed: close the previous foods/restaurant
            fout.write(']}')
            if (preb != building):
                # building changed too: close it and open the next one
                fout.write(']},'
                           + '{'
                           + '"building":"' + building + '",'
                           + '"restaurants":['
                           )
            else:
                fout.write(',')
            fout.write('{' +
                       '"restaurant":"' + restaurant + '",'
                       + '"foods":['
                       )
        else:
            # same restaurant: just separate the food entries
            fout.write(',')
    # emit the food entry itself (name is run through the jft transliterator)
    fout.write('{'
               + '"name":"' + jft.j2f("utf8", "utf8", name) + '",'
               + '"url":"' + url + '",'
               + '"genre":"' + genre + '",'
               + '"price":"¥' + price + '",'
               + '"taste":"' + taste + '",'
               + '"description":"' + description + '",'
               + '"photographer":"' + photographer + '"'
               + '}'
               )
    preb = building
    prer = restaurant
# close the last foods array, restaurant list and the outer array
fout.write(']}]}]')
from unittest import TestCase
class TestApp(TestCase):
    """Smoke test for the WSGI application factory."""

    def test_make_app(self):
        # make_app must return a configured Pyramid Router when given the
        # minimal required settings.
        from pyramid.router import Router
        from brouz.app import make_app
        global_settings = {}
        settings = {'brouz.db_url': 'sqlite://',
                    'brouz.secret': 'secret'}
        wsgi_app = make_app(global_settings, **settings)
        self.assertIsInstance(wsgi_app, Router)
| dbaty/Brouz | brouz/tests/test_app.py | Python | bsd-3-clause | 397 |
# Author: Philippe Katz <philippe.katz@gmail.com>,
# Sylvain Takerkart <Sylvain.Takerkart@incm.cnrs-mrs.fr>
# License: BSD Style.
# Frame0_pipe. This pipeline is a component of blankbased_pipeline. It is used to choice in GUI between frame0_division and frame0_subtraction.
# Makes the link between blankbased_pipeline and those processes.
from neuroProcesses import *
# BrainVISA process declaration: metadata and parameter signature for the
# Frame0 selection pipeline.
name=_t_('Frame0 Pipe')
category = _t_('Blank Based Denoising')
userLevel=3 # Never visible
# Parameters
signature=Signature(
    "data", ReadDiskItem( 'OI 2D+t Image' , ['NIFTI-1 image','gz compressed NIFTI-1 image'] ), # Input image path
    "frame0", String(), # The datas of a time serie used to subtract the global time serie (time1,time2)
    "f0_data", WriteDiskItem( 'OI BkSD' , ['NIFTI-1 image','gz compressed NIFTI-1 image'] ),
    ) # The path of the image after frame0 subtraction
def selected( self,subproc ):
    """Controls the selection between the two processes: rewires the
    pipeline's f0_data link to whichever child process is selected.

    Parameters
    ----------
    subproc : bool
        The choice box value
    """
    if subproc._selected: # If frame0_subtraction is selected
        self.f0_data=self._executionNode.frame0_subtraction.f0_data # Initialization of pipeline's f0_data with frame0_subtraction process's f0_data
        self.frame0=self._executionNode.frame0_subtraction.frame0 # Initialization of pipeline's frame0 with frame0_subtraction process' f0_data
        self._executionNode.addLink('f0_data','frame0_subtraction.f0_data' ) # Link between pipeline's f0_data and frame0_subtraction process' frame0
        self._executionNode.removeLink( 'f0_data','frame0_division.f0_data' ) # Remove link between pipeline's f0_data and frame0_division process' frame0
    else: # If frame0_division is selected
        self.f0_data=self._executionNode.frame0_division.f0_data # Initialization of pipeline's f0_data with frame0_division process' f0_data
        self.frame0=self._executionNode.frame0_division.frame0 # Initialization of pipeline's frame0 with frame0_division process' f0_data
        self._executionNode.addLink('f0_data','frame0_division.f0_data' ) # Link between pipeline's f0_data and frame0_division process' frame0
        self._executionNode.removeLink('f0_data','frame0_subtraction.f0_data' )# Remove link between pipeline's f0_data and frame0_subtraction process' frame0
def initialization( self ):
    """Parameters values initialization: builds the selection node with
    the two alternative child processes, narrows their signatures, wires
    the data/frame0/f0_data links and registers the selection callback.
    """
    self.frame0='[,]' # frame0 initialization
    eNode = SelectionExecutionNode( self.name, parameterized = self ) # Declaration of a new selection pipeline
    eNode.addChild( 'frame0_subtraction',ProcessExecutionNode('frame0_subtraction', optional = 1 ) ) # Add frame0_subtraction to pipeline
    eNode.addChild( 'frame0_division',ProcessExecutionNode('frame0_division', optional = 1 )) # Add frame0_division to pipeline
    # Change of frame0_subtraction signature
    f0sSignature=[ "data", ReadDiskItem( 'OI 2D+t Image' , ['NIFTI-1 image','gz compressed NIFTI-1 image'] ),
        "frame0", String(),
        "f0_data", WriteDiskItem( 'OI BkSD' , ['NIFTI-1 image','gz compressed NIFTI-1 image'] ),
        ]
    # Change of frame0_division signature
    f0dSignature=[ "data", ReadDiskItem( 'OI 2D+t Image' , ['NIFTI-1 image','gz compressed NIFTI-1 image'] ),
        "frame0", String(),
        "f0_data", WriteDiskItem( 'OI BkSD' , ['NIFTI-1 image','gz compressed NIFTI-1 image'] ),
        ]
    # Applies changes on signatures
    f0s=Signature( *f0sSignature )
    eNode.frame0_subtraction.changeSignature( f0s )
    f0d=Signature( *f0dSignature )
    eNode.frame0_division.changeSignature( f0d )
    self.signature['data'].browseUserLevel=2 # Browse only visible for expert user
    self.signature['f0_data'].browseUserLevel=2 # Browse only visible for expert user
    eNode.frame0_subtraction.signature['data'].browseUserLevel=3 # Browse never visible
    eNode.frame0_subtraction.signature['data'].databaseUserLevel=3 # Database never visible
    eNode.frame0_division.signature['data'].browseUserLevel=3 # Browse never visible
    eNode.frame0_division.signature['data'].databaseUserLevel=3 # Database never visible
    eNode.frame0_subtraction.signature['f0_data'].browseUserLevel=2 # Browse only visible for expert user
    eNode.frame0_subtraction.signature['f0_data'].databaseUserLevel=2 # Database only visible for expert user
    eNode.frame0_division.signature['f0_data'].browseUserLevel=2 # Browse only visible for expert user
    eNode.frame0_division.signature['f0_data'].databaseUserLevel=2 # Database only visible for expert user
    eNode.addLink( 'frame0_subtraction.data','data' ) # Link between frame0_subtraction process' data and pipeline's data
    eNode.addLink( 'frame0_division.data','data' ) # Link between data frame0_division process and data pipeline
    eNode.addLink( 'data','frame0_subtraction.data' ) # Link between pipeline's data and frame0_subtraction process' data
    eNode.addLink( 'data','frame0_division.data' ) # Link between pipeline's data and frame0_division process' data
    # Initialization of pipeline
    if eNode.frame0_subtraction.isSelected(): # If frame0_subtraction is selected
        eNode.addLink('f0_data','frame0_subtraction.f0_data' ) # Link between pipeline's f0_data and frame0_division process' f0_data
    else: # If frame0_division is selected
        eNode.addLink('f0_data','frame0_division.f0_data' ) # Link between pipeline's f0_data and frame0_division process' f0_data
    eNode.frame0_subtraction._selectionChange.add( self.selected ) # Add a track of change on selection
    eNode.addLink( 'frame0_subtraction.frame0','frame0') # Link between frame0_subtraction process' frame0 and pipeline's frame0
    eNode.addLink( 'frame0_division.frame0','frame0' ) # Link between frame0_division process' frame0 and pipeline's frame0
    self.setExecutionNode( eNode ) # Pipeline creation
| SylvainTakerkart/vobi_one | brainvisa/toolboxes/vobi_one/processes/Trial Analysis/Blank Based Denoising/frame0_pipe.py | Python | gpl-3.0 | 5,962 |
#
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# BitBake Toaster Implementation
#
# Copyright (C) 2015 Intel Corporation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from django.views.generic import View, TemplateView
from django.shortcuts import HttpResponse
from django.http import HttpResponseBadRequest
from django.core import serializers
from django.core.cache import cache
from django.core.paginator import Paginator, EmptyPage
from django.db.models import Q
from orm.models import Project, ProjectLayer, Layer_Version
from django.template import Context, Template
from django.core.serializers.json import DjangoJSONEncoder
from django.core.exceptions import FieldError
from django.conf.urls import url, patterns
import types
import json
import collections
import operator
from toastergui.views import objtojson
class ToasterTable(TemplateView):
    """Base class for server-side data tables (Python 2 / old Django).

    Subclasses implement setup_queryset/setup_columns/setup_filters; the
    GET handler then serves either the HTML template or, when
    ?format=json is passed, the filtered/sorted/paginated table data.
    """

    def __init__(self, *args, **kwargs):
        super(ToasterTable, self).__init__()
        if 'template_name' in kwargs:
            self.template_name = kwargs['template_name']
        self.title = None
        self.queryset = None
        self.columns = []
        self.filters = {}
        self.total_count = 0
        self.static_context_extra = {}
        self.filter_actions = {}
        self.empty_state = "Sorry - no data found"
        self.default_orderby = ""
        # add the "id" column, undisplayable, by default
        self.add_column(title="Id",
                        displayable=False,
                        orderable=True,
                        field_name="id")

    def get(self, request, *args, **kwargs):
        """Dispatch: JSON (table data or filter info) when ?format=json,
        otherwise fall through to the normal template rendering."""
        if request.GET.get('format', None) == 'json':
            self.setup_queryset(*args, **kwargs)
            # Put the project id into the context for the static_data_template
            if 'pid' in kwargs:
                self.static_context_extra['pid'] = kwargs['pid']
            cmd = request.GET.get('cmd', None)
            if cmd and 'filterinfo' in cmd:
                data = self.get_filter_info(request)
            else:
                # If no cmd is specified we give you the table data
                data = self.get_data(request, **kwargs)
            return HttpResponse(data, content_type="application/json")
        return super(ToasterTable, self).get(request, *args, **kwargs)

    def get_filter_info(self, request):
        """Return JSON describing either all filters or, when ?name= is
        given, that one filter with per-action counts (plus an "All"
        action)."""
        data = None
        self.setup_filters()
        search = request.GET.get("search", None)
        if search:
            self.apply_search(search)
        name = request.GET.get("name", None)
        if name is None:
            data = json.dumps(self.filters,
                              indent=2,
                              cls=DjangoJSONEncoder)
        else:
            for actions in self.filters[name]['filter_actions']:
                # count_only=True: the action callback reports how many
                # rows it would keep, without mutating the queryset
                actions['count'] = self.filter_actions[actions['name']](count_only=True)
            # Add the "All" items filter action
            self.filters[name]['filter_actions'].insert(0, {
                'name' : 'all',
                'title' : 'All',
                'count' : self.queryset.count(),
            })
            data = json.dumps(self.filters[name],
                              indent=2,
                              cls=DjangoJSONEncoder)
        return data

    def setup_columns(self, *args, **kwargs):
        """ function to implement in the subclass which sets up the columns """
        pass
    def setup_filters(self, *args, **kwargs):
        """ function to implement in the subclass which sets up the filters """
        pass
    def setup_queryset(self, *args, **kwargs):
        """ function to implement in the subclass which sets up the queryset"""
        pass

    def add_filter(self, name, title, filter_actions):
        """Add a filter to the table.

        Args:
            name (str): Unique identifier of the filter.
            title (str): Title of the filter.
            filter_actions: Actions for all the filters.
        """
        self.filters[name] = {
            'title' : title,
            'filter_actions' : filter_actions,
        }

    def make_filter_action(self, name, title, action_function):
        """ Utility to make a filter_action """
        action = {
            'title' : title,
            'name' : name,
        }
        self.filter_actions[name] = action_function
        return action

    def add_column(self, title="", help_text="",
                   orderable=False, hideable=True, hidden=False,
                   field_name="", filter_name=None, static_data_name=None,
                   displayable=True, computation=None,
                   static_data_template=None):
        """Add a column to the table.

        Args:
            title (str): Title for the table header
            help_text (str): Optional help text to describe the column
            orderable (bool): Whether the column can be ordered.
              We order on the field_name.
            hideable (bool): Whether the user can hide the column
            hidden (bool): Whether the column is default hidden
            field_name (str or list): field(s) required for this column's data
            static_data_name (str, optional): The column's main identifier
              which will replace the field_name.
            static_data_template(str, optional): The template to be rendered
              as data
        """
        self.columns.append({'title' : title,
                             'help_text' : help_text,
                             'orderable' : orderable,
                             'hideable' : hideable,
                             'hidden' : hidden,
                             'field_name' : field_name,
                             'filter_name' : filter_name,
                             'static_data_name': static_data_name,
                             'static_data_template': static_data_template,
                             'displayable': displayable,
                             'computation': computation,
                            })

    def render_static_data(self, template, row):
        """Utility function to render the static data template"""
        context = {
            'extra' : self.static_context_extra,
            'data' : row,
        }
        context = Context(context)
        template = Template(template)
        return template.render(context)

    def apply_filter(self, filters):
        """Apply one "name:action" filter string to the queryset; "all"
        and malformed strings are no-ops."""
        self.setup_filters()
        try:
            filter_name, filter_action = filters.split(':')
        except ValueError:
            return
        if "all" in filter_action:
            return
        try:
            self.filter_actions[filter_action]()
        except KeyError:
            # pass it to the user - programming error here
            raise

    def apply_orderby(self, orderby):
        # Note that django will execute this when we try to retrieve the data
        self.queryset = self.queryset.order_by(orderby)

    def apply_search(self, search_term):
        """Creates a query based on the model's search_allowed_fields"""
        if not hasattr(self.queryset.model, 'search_allowed_fields'):
            raise Exception("Err Search fields aren't defined in the model")
        search_queries = []
        # each whitespace-separated term must match at least one allowed
        # field (OR within a term, AND across terms)
        for st in search_term.split(" "):
            q_map = [Q(**{field + '__icontains': st})
                     for field in self.queryset.model.search_allowed_fields]
            search_queries.append(reduce(operator.or_, q_map))
        search_queries = reduce(operator.and_, search_queries)
        self.queryset = self.queryset.filter(search_queries)

    def get_data(self, request, **kwargs):
        """Returns the data for the page requested with the specified
        parameters applied"""
        page_num = request.GET.get("page", 1)
        limit = request.GET.get("limit", 10)
        search = request.GET.get("search", None)
        filters = request.GET.get("filter", None)
        orderby = request.GET.get("orderby", None)
        # Make a unique cache name
        cache_name = self.__class__.__name__
        for key, val in request.GET.iteritems():
            cache_name = cache_name + str(key) + str(val)
        for key, val in kwargs.iteritems():
            cache_name = cache_name + str(key) + str(val)
        data = cache.get(cache_name)
        #if data:
        #    return data
        self.setup_columns(**kwargs)
        if search:
            self.apply_search(search)
        if filters:
            self.apply_filter(filters)
        if orderby:
            self.apply_orderby(orderby)
        paginator = Paginator(self.queryset, limit)
        try:
            page = paginator.page(page_num)
        except EmptyPage:
            # out-of-range page numbers fall back to the first page
            page = paginator.page(1)
        data = {
            'total' : self.queryset.count(),
            'default_orderby' : self.default_orderby,
            'columns' : self.columns,
            'rows' : [],
            'error' : "ok",
        }
        try:
            for row in page.object_list:
                #Use collection to maintain the order
                required_data = collections.OrderedDict()
                for col in self.columns:
                    field = col['field_name']
                    if not field:
                        field = col['static_data_name']
                    if not field:
                        raise Exception("Must supply a field_name or static_data_name for column %s.%s" % (self.__class__.__name__,col))
                    # Check if we need to process some static data
                    if "static_data_name" in col and col['static_data_name']:
                        required_data["static:%s" % col['static_data_name']] = self.render_static_data(col['static_data_template'], row)
                        # Overwrite the field_name with static_data_name
                        # so that this can be used as the html class name
                        col['field_name'] = col['static_data_name']
                    # compute the computation on the raw data if needed
                    model_data = row
                    if col['computation']:
                        model_data = col['computation'](row)
                    else:
                        # Traverse to any foriegn key in the object hierachy
                        for subfield in field.split("__"):
                            if hasattr(model_data, subfield):
                                model_data = getattr(model_data, subfield)
                        # The field could be a function on the model so check
                        # If it is then call it
                        if isinstance(model_data, types.MethodType):
                            model_data = model_data()
                    required_data[col['field_name']] = model_data
                data['rows'].append(required_data)
        except FieldError:
            # pass it to the user - programming-error here
            raise
        data = json.dumps(data, indent=2, default=objtojson)
        # cache the serialized page for 30 minutes
        cache.set(cache_name, data, 60*30)
        return data
class ToasterTemplateView(TemplateView):
    # Renders an instance in a template, or returns the context as JSON:
    # the class-based equivalent of the _template_renderer decorator used
    # by the function views.
    def get(self, *args, **kwargs):
        if self.request.GET.get('format', None) == 'json':
            # Late imports: this machinery is only needed on the JSON
            # code path (and keeps import cycles at bay).
            from django.core.urlresolvers import reverse
            from django.shortcuts import HttpResponse
            from views import objtojson
            from toastergui.templatetags.projecttags import json as jsonfilter
            context = self.get_context_data(**kwargs)
            # Keep only the whitelisted entries. Iterate over a snapshot
            # of the keys: deleting from a dict while iterating its live
            # key view raises RuntimeError on Python 3.
            for x in list(context.keys()):
                if x not in self.context_entries:
                    del context[x]
            context["error"] = "ok"
            return HttpResponse(jsonfilter(context, default=objtojson),
                                content_type="application/json; charset=utf-8")
        return super(ToasterTemplateView, self).get(*args, **kwargs)
| Brainbuster/openpli-buildumgebung | bitbake/lib/toaster/toastergui/widgets.py | Python | gpl-2.0 | 12,716 |
"""Utilities for making internal assertions."""
from .collections import ( # noqa
assert_all_valid_keys,
assert_iterable_contains_all_expr_symbols)
| welchbj/tt | tt/_assertions/__init__.py | Python | mit | 158 |
import time
import json
import re
from slackclient import SlackClient # https://github.com/slackhq/python-slackclient
# token found at https://api.slack.com/web#authentication
def connect_to_slack(access_token):
    """Build a SlackClient for *access_token*, prompting for one if empty."""
    if not access_token:
        # Python 2 prompt; the token lives at https://api.slack.com/web#authentication
        access_token = str(raw_input("Enter your developer access token: "))
    client = SlackClient(access_token)
    if not client:
        raise RuntimeError("connection failed! invalid token?")
    return client
def map_user_id_to_names(slack_client):
    """Return {user_id: {'full_name', 'first_name', 'nickname'}} for all
    human (non-bot) members reported by the Slack ``users.list`` call."""
    member_data = json.loads(slack_client.api_call('users.list'))
    if not member_data['ok']:
        raise RuntimeError("bad connection to slack api! tried to hit users.list")
    all_members = {}
    for member in member_data['members']:
        # Only keep members that explicitly carry is_bot == False.
        if 'is_bot' not in member or member['is_bot']:
            continue
        try:
            full_name = member['profile']['real_name']
            first_name = member['profile']['first_name']
            nickname = member['name']
        except KeyError:
            # Some users carry no name information; the best we can do is
            # fall back to the local part of their email address.
            email_name = member['profile']['email'].split('@')[0]
            full_name = first_name = nickname = email_name
        all_members[member['id']] = {'full_name': full_name,
                                     'first_name': first_name,
                                     'nickname': nickname}
    return all_members
def get_messages(user_id_to_name_map, channel_id, channel_name, slack_client, limit):
    """Fetch up to *limit* messages from a channel via ``channels.history``.

    Returns {'channel_name', 'channel_id', 'messages': [...]}, where each
    message dict carries 'text' (with ``<@Uxxx>`` mentions replaced by
    ``@nickname`` and a trailing period appended), 'emojis', 'sender'
    (the full name-info dict for the author) and 'timestamp'.

    Raises RuntimeError when the API reports failure.
    """
    messages_data = {'channel_name': channel_name, 'channel_id': channel_id, 'messages': []}
    response = slack_client.api_call('channels.history', count=limit, channel=channel_id)
    channel_data = json.loads(response)
    if not channel_data['ok']:
        raise RuntimeError('bad connection to slack api! tried to hit channels.history')
    for message in channel_data['messages']:
        # Skip join/leave/etc. notices, which carry a 'subtype'.
        if message['type'] == 'message' and 'subtype' not in message:
            text = message['text'] + '.'
            # replace user ids in text with user names!
            for user_id in re.findall(r'<@([^\s\.]*)>', text):
                user = user_id_to_name_map[user_id]['nickname']
                text = text.replace('<@%s>' % user_id, '@' + user)
            sender = user_id_to_name_map[message['user']]
            timestamp = message['ts']
            # BUG FIX: the original comprehension referenced the undefined
            # names `name`/`count`, and the malformed "except: KeyError"
            # (a bare except followed by a no-op expression) silently
            # swallowed the resulting NameError -- so emojis were always
            # empty. Read the fields from each reaction dict instead.
            try:
                emojis = [{'name': reaction['name'], 'count': reaction['count']}
                          for reaction in message['reactions']]
            except KeyError:  # no emojis in the message
                emojis = []
            message_data = {'text': text, 'emojis': emojis,
                            'sender': sender, 'timestamp': timestamp}
            messages_data['messages'].append(message_data)
    return messages_data
| andrewsy97/Treehacks | slack_listener.py | Python | mit | 2,978 |
"""
WSGI config for django_mturk_minimalistic project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os

# Point Django at the project settings before the application object is built.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "minimalistic_mturk.settings")

from django.core.wsgi import get_wsgi_application

# Module-level WSGI entry point picked up by servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| raffienficiaud/django_mturk_minimalistic | minimalistic_mturk/wsgi.py | Python | mit | 418 |
# coding: utf-8
"""
EVE Swagger Interface
An OpenAPI for EVE Online
OpenAPI spec version: 0.4.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class GetStatusOk(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, players=None, server_version=None, start_time=None, vip=None):
        """
        GetStatusOk - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        # Maps attribute name -> declared swagger type (consumed by to_dict()).
        self.swagger_types = {
            'players': 'int',
            'server_version': 'str',
            'start_time': 'datetime',
            'vip': 'bool'
        }

        # Maps attribute name -> key used in the JSON representation.
        self.attribute_map = {
            'players': 'players',
            'server_version': 'server_version',
            'start_time': 'start_time',
            'vip': 'vip'
        }

        self._players = players
        self._server_version = server_version
        self._start_time = start_time
        self._vip = vip

    @property
    def players(self):
        """
        Gets the players of this GetStatusOk.
        Current online player count

        :return: The players of this GetStatusOk.
        :rtype: int
        """
        return self._players

    @players.setter
    def players(self, players):
        """
        Sets the players of this GetStatusOk.
        Current online player count

        :param players: The players of this GetStatusOk.
        :type: int
        """
        # Required field: assigning None through the property is rejected
        # (the constructor, however, still defaults it to None).
        if players is None:
            raise ValueError("Invalid value for `players`, must not be `None`")

        self._players = players

    @property
    def server_version(self):
        """
        Gets the server_version of this GetStatusOk.
        Running version as string

        :return: The server_version of this GetStatusOk.
        :rtype: str
        """
        return self._server_version

    @server_version.setter
    def server_version(self, server_version):
        """
        Sets the server_version of this GetStatusOk.
        Running version as string

        :param server_version: The server_version of this GetStatusOk.
        :type: str
        """
        # Required field (see note on the players setter).
        if server_version is None:
            raise ValueError("Invalid value for `server_version`, must not be `None`")

        self._server_version = server_version

    @property
    def start_time(self):
        """
        Gets the start_time of this GetStatusOk.
        Server start timestamp

        :return: The start_time of this GetStatusOk.
        :rtype: datetime
        """
        return self._start_time

    @start_time.setter
    def start_time(self, start_time):
        """
        Sets the start_time of this GetStatusOk.
        Server start timestamp

        :param start_time: The start_time of this GetStatusOk.
        :type: datetime
        """
        # Required field (see note on the players setter).
        if start_time is None:
            raise ValueError("Invalid value for `start_time`, must not be `None`")

        self._start_time = start_time

    @property
    def vip(self):
        """
        Gets the vip of this GetStatusOk.
        If the server is in VIP mode

        :return: The vip of this GetStatusOk.
        :rtype: bool
        """
        return self._vip

    @vip.setter
    def vip(self, vip):
        """
        Sets the vip of this GetStatusOk.
        If the server is in VIP mode

        :param vip: The vip of this GetStatusOk.
        :type: bool
        """
        # Optional field: no None check, unlike the other setters.
        self._vip = vip

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # Recursively serialize nested models, lists and dicts by calling
        # to_dict() on any contained value that supports it.
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, GetStatusOk):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| minlexx/pyevemon | esi_client/models/get_status_ok.py | Python | gpl-3.0 | 5,256 |
# Prefer setuptools; fall back to distutils on minimal installs.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# Use the README (converted to reST via pypandoc) as the long description;
# fall back to a short blurb when pypandoc or the file is unavailable.
try:
    import pypandoc
    description = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError):
    description = 'Basic implementation of K-Means clustering algorithm.'

setup(
    name='KMeans',
    version='0.1.0',
    author='Jakub Konka',
    author_email='kubkon@gmail.com',
    packages=['kmeans', 'kmeans.test'],
    url='https://github.com/kubkon/kmeans',
    license='LICENSE.txt',
    description='Basic implementation of K-Means clustering algorithm.',
    long_description=description,
    install_requires=[
        "numpy>=1.7.1",
        "matplotlib>=1.3.0",
    ],
)
        # Load values from the instance config
        url = instance['url']
        default_timeout = self.init_config.get('default_timeout', 5)
        # NOTE(review): 'default_time' is undefined -- this line raises a
        # NameError at runtime; it presumably should read 'default_timeout'.
        timeout = float(instance.get('timeout', default_time))

        # Use a hash of the URL as an aggregation key
        aggregation_key = md5(url).hexdigest()

        # Check the URL
        start_time = time.time()
        try:
            r = requests.get(url, timeout=timeout)
            end_time = time.time()
        except requests.exceptions.Timeout as e:
            # If there's a timeout
            self.timeout_event(url, timeout, aggregation_key)
            # NOTE(review): there is no early return here, so after a timeout
            # 'r' is unbound and the check below raises UnboundLocalError.

        if r.status_code != 200:
            self.status_code_event(url, r, aggregation_key)
# coding: utf-8
# python 2.7
import Orange
import pandas as pd
import numpy as np
import sys
import os
from collections import defaultdict
from itertools import chain
from itertools import combinations
from itertools import compress
from itertools import product
from sklearn.metrics import accuracy_score
from multiprocessing import Pool
from multiprocessing import freeze_support
# Global Setting
DIR_UCI = '/mnt/data/uci'
# ------------------------------------------------------
# Rule Class
# ------------------------------------------------------
class Rule:
    """One association rule: antecedent attribute values, a consequent
    class label, and the rule's support/confidence measures."""

    def __init__(self):
        self.value = []        # antecedent attribute values
        self.consequent = []   # consequent class label
        self.support = 0.0
        self.conf = 0.0

    def setValue(self, values):
        self.value = values

    def setConsequent(self, consequents):
        self.consequent = consequents

    def setSupport(self, supports):
        self.support = supports

    def setConf(self, confidence):
        self.conf = confidence

    def getValue(self):
        return self.value

    def getConsequent(self):
        return self.consequent

    def getSupport(self):
        return self.support

    def getSupportD(self):
        # Support weighted by the number of antecedent values.
        return self.support * len(self.value)

    def getConf(self):
        return self.conf

    def output(self):
        print("value:" + str(self.value))
        print("consequent:" + str(self.consequent))
        print("support:" + str(self.support))
        print("conf:" + str(self.conf))
# ======================================================
# Rules のうち、P個の属性値が分かれば、クラスを推定できるか
# ======================================================
def getPerIdentifiedClass(rules, p):
    """Fraction of p-sized attribute-value combinations that pin down a
    unique class among the rules covering them.

    Combinations matched by no rule are excluded from the denominator;
    returns 0 when nothing could be evaluated.
    """
    all_values = list(set(chain.from_iterable(r.getValue() for r in rules)))
    identified = 0
    evaluated = 0
    for combo in combinations(all_values, p):
        # Rules whose antecedent contains every value of the combination.
        covering = [r for r in rules
                    if len(set(combo) & set(r.getValue())) == len(combo)]
        if not covering:
            continue
        evaluated += 1
        if len(set(r.getConsequent() for r in covering)) == 1:
            identified += 1
    if evaluated == 0:
        return 0
    return float(identified) / float(evaluated)
# ======================================================
# ルールが対象のクラスを説明するかどうか
# ======================================================
def isExplainRule(obj, rule):
    """Return True when *obj* contains every antecedent value of *rule*."""
    values = rule.getValue()
    return len(set(obj) & set(values)) == len(values)
# ======================================================
# ルールが対象のクラスを説明するかどうか
# ======================================================
def getMatchingFactor(obj, rule):
    """Fraction of *rule*'s antecedent values that appear in *obj*.

    BUG FIX: this module targets Python 2.7 (see the header), where the
    original ``int / int`` division truncated the ratio to 0 or 1, so
    partial matches scored 0. Force float division so the factor is a
    real fraction in [0, 1].
    """
    matching_count = len(set(obj) & set(rule.getValue()))
    return float(matching_count) / float(len(rule.getValue()))
# ======================================================
# ルールのsupport P を返す
# ======================================================
def getSupportP(obj, rule):
    """Partial-match support: the rule's weighted support scaled by how
    much of its antecedent *obj* satisfies."""
    return rule.getSupportD() * getMatchingFactor(obj, rule)
# ======================================================
# ルールから対象のクラスを予測
# ======================================================
def estimateClass(obj, rules) :
    """Predict the class of *obj* with the LERS strategy: fully matching
    rules first (weighted support breaks disagreements), falling back to
    the best partially matching rule when nothing matches fully."""
    list_judge = [isExplainRule(obj, r) for r in rules]
    # if at least one rule fully matches obj
    if any(list_judge) :
        consequents = [rules[i].getConsequent() for i, judge in enumerate(list_judge) if judge]
        # if every matching rule infers the same single class, use it
        if len(set(consequents)) == 1 :
            return(consequents[0])
        else :
            # otherwise take the matching rule with the largest weighted
            # support; index() keeps the first maximum, so earlier rules
            # win ties
            rules_match = list(compress(rules,list_judge))
            supportD = [r.getSupportD() for r in rules_match]
            return(rules_match[supportD.index(max(supportD))].getConsequent())
    # if no rule fully matches obj, estimate from partial-match support
    else :
        supportP = [getSupportP(obj, rule) for rule in rules]
        return(rules[supportP.index(max(supportP))].getConsequent())
# ======================================================
# LERS による精度評価
# ======================================================
def predictByLERS(FILENAME, iter1, iter2, rules) :
    """Evaluate *rules* with LERS on one cross-validation test fold and
    return (and print) the classification accuracy."""
    # read test data: last column is the decision class, the rest are
    # attribute values
    filepath = DIR_UCI+'/'+FILENAME+'/alpha/'+FILENAME+'-test'+str(iter1)+'-'+str(iter2)+'.txt'
    decision_table_test = pd.read_csv(filepath, delimiter=' ', header=None)
    decision_table_test = decision_table_test.dropna()
    decision_class = decision_table_test[decision_table_test.columns[-1]].values.tolist()
    decision_table_test = decision_table_test.drop(decision_table_test.columns[len(decision_table_test.columns)-1], axis=1)
    decision_table_test = decision_table_test.values.tolist()
    # predict each test object with LERS
    predictions = []
    for obj in decision_table_test:
        estimated_class = estimateClass(obj, rules)
        predictions.append(estimated_class)
    # compute the accuracy against the true classes
    accuracy = accuracy_score(decision_class, predictions)
    print(accuracy)
    return(accuracy)
# =====================================
# Main 関数
# =====================================
def getRulesByApriori(FILENAME, classes, iter1, iter2, minsup, minconf, sup_ratio = True) :
    """Induce association rules with Orange's Apriori on one training fold
    and convert those whose consequent is a class label in *classes* (and
    whose confidence reaches *minconf*) into Rule objects.

    When sup_ratio is True, *minsup* is already a ratio; otherwise it is a
    count and gets divided by the number of transactions.
    """
    # read data and re-save it in Orange's .basket format
    filepath = DIR_UCI+'/'+FILENAME+'/alpha/'+FILENAME+'-train'+str(iter1)+'-'+str(iter2)+'.txt'
    data_pd = pd.read_csv(filepath, delimiter=' ')
    pd.DataFrame.to_csv(data_pd, DIR_UCI+'/'+FILENAME+'/alpha/'+FILENAME+'-train'+str(iter1)+'-'+str(iter2)+'.basket', index=False, sep=',')
    filepath = DIR_UCI+'/'+FILENAME+'/alpha/'+FILENAME+'-train'+str(iter1)+'-'+str(iter2)+'.basket'
    data_table = Orange.data.Table(filepath)
    #print len(data_table)
    # set parameter
    num_lines = sum(1 for line in open(filepath))
    minsup = float(minsup) if sup_ratio else float(minsup) / float(num_lines)
    #print minsup
    # induce rules; the confidence threshold is applied below instead of
    # being passed to the inducer
    #rules_orange = Orange.associate.AssociationRulesSparseInducer(data_table, support=minsup, confidence=minconf)
    rules_orange = Orange.associate.AssociationRulesSparseInducer(data_table, support = minsup, max_item_sets = 2000)
    # convert Orange rules to this module's Rule class, keeping only rules
    # that predict exactly one known class label with enough confidence
    rules = []
    for rule_orange in rules_orange :
        consequent = rule_orange.right.get_metas(str).keys()
        if len(consequent) == 1 and consequent[0] in classes and rule_orange.confidence >= minconf :
            rule = Rule()
            rule.setValue(rule_orange.left.get_metas(str).keys())
            rule.setConsequent(consequent[0])
            rule.setSupport(rule_orange.support)
            rule.setConf(rule_orange.confidence)
            rules.append(rule)
    # END
    return(rules)
# ======================================================
# Apriori_LERS
# ======================================================
def Apriori_LERS(FILENAME, classes, iter1, iter2, min_sup, min_conf):
    """Run one Apriori+LERS experiment on a single fold, append the result
    to <dataset>/Apriori_LERS.csv and return the accuracy."""
    # extract rules from the training fold
    rules = getRulesByApriori(FILENAME, classes, iter1, iter2, min_sup, min_conf)
    # predict by LERS
    accuracy = predictByLERS(FILENAME, iter1, iter2, rules)
    # append one CSV line per run (method, minsup, dataset, fold, accuracy)
    savepath = DIR_UCI+'/'+FILENAME+'/Apriori_LERS.csv'
    with open(savepath, "a") as f :
        f.writelines('Apriori_LERS,{min_sup},{FILENAME},{iter1},{iter2},{acc}'.format(FILENAME=FILENAME,iter1=iter1,iter2=iter2,acc=accuracy,min_sup=min_sup)+"\n")
    # END
    return(accuracy)
def wrapper_Apriori_LERS(multi_args):
    """Unpack a (function, *args) tuple for Pool.map.

    BUG FIX: the original discarded the wrapped call's return value, so
    ``pool.map`` in multi_main collected only ``None``s; propagate the
    result instead. Also unpack with ``*`` rather than hard-coding six
    positional arguments, so the wrapper works for any arity.
    """
    func = multi_args[0]
    return func(*multi_args[1:])
# ========================================
# listの平均と分散を求める
# ========================================
def getEvalMeanVar(result):
    """Format the mean and (population) standard deviation of *result* as
    'mean±std' with three decimals each.

    BUG FIX: the body referenced the undefined name ``results`` instead of
    the ``result`` parameter, raising a NameError (or silently picking up
    an unrelated module-level global of the same name).
    """
    mean = '%.3f' % round(np.mean(result), 3)
    std = '%.3f' % round(np.std(result), 3)
    return '{mean}±{std}'.format(mean=mean, std=std)
# ========================================
# multi に実行する
# ========================================
def multi_main(proc, FILENAME, FUN, **kargs):
    """Fan one experiment function out over all folds and support values
    with a process pool of size *proc*; returns the list of per-run
    results collected by Pool.map."""
    pool = Pool(proc)
    results = []
    multiargs = []
    classes = kargs['classes']
    # default sweep of support values when the caller gives none
    min_sup_range = kargs['min_sup'] if 'min_sup' in kargs else range(2,11)
    min_conf = kargs['min_conf']
    # for Apriori_LERS: one task per (fold1, fold2, min_sup) combination
    if FUN == Apriori_LERS :
        WRAPPER_FUN = wrapper_Apriori_LERS
        for iter1, iter2, min_sup in product(range(1,11), range(1,11), min_sup_range):
            multiargs.append((FUN, FILENAME, classes, iter1, iter2, min_sup, min_conf))
        #print(multiargs)
        results = pool.map(WRAPPER_FUN, multiargs)
    else :
        print("I dont' know the function.")
    return(results)
# ========================================
# main
# ========================================
if __name__ == "__main__":
    #FILENAME = 'hayes-roth'
    FILENAME = 'german_credit_categorical'
    # number of class
    #classes = ['D1', 'D2', 'D3']
    classes = ['D1', 'D2',]
    iter1 = 10
    iter2 = 3
    # thresholds for support and confidence
    # NOTE(review): the first min_sup_range assignment is immediately
    # overwritten by the second -- confirm which sweep is intended.
    min_sup_range = range(2,11,1)
    min_sup_range = range(2,20,2)
    min_sup = 0.2
    min_conf = 0.9
    # rule induction
    rules = getRulesByApriori(FILENAME, classes, iter1, iter2, min_sup, min_conf, sup_ratio=True)
    #print len(rules)
    for r in rules:
        print(r.output())
    # predict by LERS
    print(predictByLERS(FILENAME, iter1, iter2, rules))
    # NOTE(review): exit(0) here makes the parallel full-dataset run below
    # unreachable -- presumably a debugging gate; confirm before relying on it.
    exit(0)
    # run in parallel and evaluate on the full dataset
    proc=32
    freeze_support()
    FUN = Apriori_LERS
    results = multi_main(proc, FILENAME, FUN, classes = classes, min_sup = min_sup_range, min_conf = min_conf)
import collections
import pytest
from utils import log
#: A dict of tests, and their state at various test phases
test_tracking = collections.defaultdict(dict)
# Expose the cfme logger as a fixture for convenience
# Expose the cfme logger as a fixture for convenience
@pytest.fixture(scope='session')
def logger():
    # Session-scoped: every test shares the same underlying logger object.
    return log.logger
@pytest.mark.hookwrapper
def pytest_runtest_setup(item):
    """Log a marker line (tagged with the test's file and line number)
    before each test's setup phase runs."""
    path, lineno, domaininfo = item.location
    logger().info(log.format_marker(_format_nodeid(item.nodeid), mark="-"),
        extra={'source_file': path, 'source_lineno': lineno})
    # hookwrapper: yield hands control to the actual setup phase
    yield
def pytest_collection_modifyitems(session, config, items):
    """Log the start of the run and the collected item count (plus the
    keyword filter expression, when one was given)."""
    logger().info(log.format_marker('Starting new test run', mark="="))
    expression = config.getvalue('keyword') or False
    if expression:
        suffix = ', will filter with "%s"' % expression
    else:
        suffix = ''
    logger().info('Collected %i items%s' % (len(items), suffix))
@pytest.mark.hookwrapper
def pytest_runtest_logreport(report):
    """Record each phase's outcome in test_tracking and, after teardown,
    log the test's overall status (plus any skip reason)."""
    # e.g. test_tracking['test_name']['setup'] = 'passed'
    #      test_tracking['test_name']['call'] = 'skipped'
    #      test_tracking['test_name']['teardown'] = 'failed'
    yield
    test_tracking[_format_nodeid(report.nodeid, False)][report.when] = report.outcome
    if report.when == 'teardown':
        path, lineno, domaininfo = report.location
        logger().info(log.format_marker('%s result: %s' % (_format_nodeid(report.nodeid),
            _test_status(_format_nodeid(report.nodeid, False)))),
            extra={'source_file': path, 'source_lineno': lineno})
    if report.outcome == "skipped":
        # Usually longrepr's a tuple, other times it isn't... :(
        try:
            longrepr = report.longrepr[-1]
        except AttributeError:
            longrepr = str(report.longrepr)
        logger().info(log.format_marker(longrepr))
def pytest_exception_interact(node, call, report):
    """Log the exception representation at the crash site's file/line."""
    # Despite the name, call.excinfo is a py.code.ExceptionInfo object. Its traceback property
    # is similarly a py.code.TracebackEntry. The following lines, including "entry.lineno+1" are
    # based on the code there, which does unintuitive things with a traceback's line number.
    # This is the same code that powers py.test's output, so we gain py.test's magical ability
    # to get useful AssertionError output by doing it this way, which makes the voodoo worth it.
    entry = call.excinfo.traceback.getcrashentry()
    logger().error(call.excinfo.getrepr(),
        extra={'source_file': entry.path, 'source_lineno': entry.lineno + 1})
def pytest_sessionfinish(session, exitstatus):
    """Log a per-outcome summary of all tracked tests when the session ends."""
    c = collections.Counter()
    for test in test_tracking:
        c[_test_status(test)] += 1
    # Prepend a total, then one "outcome: count" entry per outcome.
    # (A list comprehension instead of map(lambda ...): this also works on
    # Python 3, where map() returns an iterator that cannot be added to a
    # list, and is the idiomatic form on Python 2 as well.)
    results = ['total: %d' % sum(c.values())]
    results += ['%s: %d' % (outcome, count) for outcome, count in c.items()]
    # Then join it with commas
    summary = ', '.join(results)
    logger().info(log.format_marker('Finished test run', mark='='))
    logger().info(log.format_marker(str(summary), mark='='))
def _test_status(test_name):
    """Collapse a test's per-phase outcomes into one reported status."""
    phases = test_tracking[test_name]
    # A failure during setup or teardown is an error, which pytest doesn't
    # report internally; missing phases default to 'failed'.
    if phases.get('setup', 'failed') == 'failed' or \
            phases.get('teardown', 'failed') == 'failed':
        return 'error'
    # A skip during setup means the test never ran.
    if 'skipped' in phases.get('setup', 'skipped'):
        return 'skipped'
    # Otherwise the call-phase outcome stands (passed, skipped, or failed).
    return phases['call']
def _format_nodeid(nodeid, strip_filename=True):
# Remove test class instances and filenames, replace with a dot to impersonate a method call
nodeid = nodeid.replace('::()::', '.')
# Trim double-colons to single
nodeid = nodeid.replace('::', ':')
# Strip filename (everything before and including the first colon)
if strip_filename:
try:
return nodeid.split(':', 1)[1]
except IndexError:
# No colon to split on, return the whole nodeid
return nodeid
else:
return nodeid
| thom-at-redhat/cfme_tests | fixtures/log.py | Python | gpl-2.0 | 4,066 |
"""sportassociation URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from communication.views import (HomeView, AssociationView, InscriptionView,
ContactView, SponsorsView, ForumView, MentionsLegalesView)
from . import settings
from users.views import AdminUserCreateView
urlpatterns = [
    # Comment the next line if you don't want to create users with random passwords.
    url(r'^admin/users/customuser/add/$', AdminUserCreateView.as_view()),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^$', HomeView.as_view(), name='home'),
    url(r'^forum/', ForumView.as_view(), name='forum'),
    url(r'^association$', AssociationView.as_view(), name='association'),
    url(r'^inscription$', InscriptionView.as_view(), name='inscription'),
    url(r'^contact$', ContactView.as_view(), name='contact'),
    url(r'^sponsors$', SponsorsView.as_view(), name='sponsors'),
    url(r'^mentions-legales$', MentionsLegalesView.as_view(), name='mentions-legales'),
    # Namespaced app URL sets
    url(r'^users/', include('users.urls', namespace='users')),
    url(r'^', include('django.contrib.auth.urls')),
    url(r'^communication/', include('communication.urls', namespace='communication')),
    url(r'^activities/', include('activities.urls', namespace='activities')),
    url(r'^sports/', include('sports.urls', namespace='sports')),
    url(r'^captcha/', include('captcha.urls')),
]

# Serve static and media files from Django itself in development only.
if settings.DEBUG:
    from django.conf.urls.static import static
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| QSchulz/sportassociation | sportassociation/urls.py | Python | mit | 2,230 |
#coding:utf-8
#################################
#Copyright(c) 2014 dtysky
#################################
import G2R
class ScSp(G2R.SpSyntax):
    """Syntax handler that emits a line assigning ``store.chapter`` when
    the attribute kind is 'Main'; otherwise emits nothing."""

    def Show(self, Flag, Attrs, US, UT, Tmp, FS):
        name, Attrs = self.Check(Flag, Attrs, UT, FS)
        if Attrs['k'] != 'Main':
            return ''
        return " $ store.chapter='Chapter." + Attrs['cp'] + Attrs['sc'] + "'\n"
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import re
import json
import tempfile
import toml
import os
class RequirementsTXTUpdater(object):
    # Anchored at line start; filled in with the escaped original
    # requirement line and matched with re.MULTILINE in update().
    SUB_REGEX = r"^{}(?=\s*\r?\n?$)"

    @classmethod
    def update(cls, content, dependency, version, spec="==", hashes=()):
        """
        Updates the requirement to the latest version for the given content and adds hashes
        if necessary, preserving environment markers and trailing comments.
        :param content: str, content
        :return: str, updated content
        """
        new_line = "{name}{spec}{version}".format(name=dependency.full_name, spec=spec, version=version)
        appendix = ''
        # leave environment markers intact
        if ";" in dependency.line:
            # condense multiline, split out the env marker, strip comments and --hashes
            new_line += ";" + dependency.line.splitlines()[0].split(";", 1)[1] \
                .split("#")[0].split("--hash")[0].rstrip()
        # add the comment
        if "#" in dependency.line:
            # split the line into parts: requirement and comment
            parts = dependency.line.split("#")
            requirement, comment = parts[0], "#".join(parts[1:])
            # find all whitespaces between the requirement and the comment
            # (compared via hex(ord(c)) -- an unusual but equivalent way of
            # testing membership in (tab, space))
            whitespaces = (hex(ord('\t')), hex(ord(' ')))
            trailing_whitespace = ''
            for c in requirement[::-1]:
                if hex(ord(c)) in whitespaces:
                    trailing_whitespace += c
                else:
                    break
            appendix += trailing_whitespace + "#" + comment
            # if this is a hashed requirement, add a multiline break before the comment
            if dependency.hashes and not new_line.endswith("\\"):
                new_line += " \\"
        # if this is a hashed requirement, add the hashes
        if hashes:
            for n, new_hash in enumerate(hashes):
                new_line += "\n --hash={method}:{hash}".format(
                    method=new_hash['method'],
                    hash=new_hash['hash']
                )
                # append a new multiline break if this is not the last line
                if len(hashes) > n + 1:
                    new_line += " \\"
        new_line += appendix
        # replace only the exact original line, wherever it sits in content
        regex = cls.SUB_REGEX.format(re.escape(dependency.line))
        return re.sub(regex, new_line, content, flags=re.MULTILINE)
class CondaYMLUpdater(RequirementsTXTUpdater):
    # Same rewrite logic, but the anchor regex is not pinned to line start
    # (conda YAML list entries are indented).
    SUB_REGEX = r"{}(?=\s*\r?\n?$)"
class ToxINIUpdater(CondaYMLUpdater):
    # tox.ini deps use the same unanchored line-replacement strategy.
    pass
class SetupCFGUpdater(CondaYMLUpdater):
    # setup.cfg requirements use the same unanchored line-replacement strategy.
    pass
class PipfileUpdater(object):
    @classmethod
    def update(cls, content, dependency, version, spec="==", hashes=()):
        """Pin *dependency* to *version* in TOML Pipfile *content* and
        return the re-serialized file (hashes are accepted for interface
        parity but not written to a Pipfile)."""
        data = toml.loads(content)
        if data:
            # The dependency may live in either the runtime or dev section.
            for package_type in ['packages', 'dev-packages']:
                if package_type in data:
                    if dependency.full_name in data[package_type]:
                        data[package_type][dependency.full_name] = "{spec}{version}".format(
                            spec=spec, version=version
                        )
        # pipenv is an optional extra; import lazily and fail with an
        # actionable message when it is missing.
        try:
            from pipenv.project import Project
        except ImportError:
            raise ImportError("Updating a Pipfile requires the pipenv extra to be installed. Install it with "
                              "pip install dparse[pipenv]")
        # Round-trip through pipenv's own TOML writer (via a temp file) so
        # the output formatting matches what pipenv itself would produce.
        pipfile = tempfile.NamedTemporaryFile(delete=False)
        p = Project(chdir=False)
        p.write_toml(data=data, path=pipfile.name)
        data = open(pipfile.name).read()
        os.remove(pipfile.name)
        return data
class PipfileLockUpdater(object):

    @classmethod
    def update(cls, content, dependency, version, spec="==", hashes=()):
        """Pin *dependency* to *version* (with *hashes*) wherever it occurs
        in the Pipfile.lock JSON *content* and return the re-serialized
        lockfile text (with a trailing newline)."""
        data = json.loads(content)
        if data:
            hash_strings = ["%s:%s" % (h['method'], h['hash']) for h in hashes]
            pinned = {
                'hashes': hash_strings,
                'version': "%s%s" % (spec, version),
            }
            # The package may appear in the runtime and/or dev section.
            for section in ('default', 'develop'):
                packages = data.get(section, {})
                if dependency.full_name in packages:
                    packages[dependency.full_name] = pinned
        return json.dumps(data, indent=4, separators=(',', ': ')) + "\n"
| kennethreitz/pipenv | pipenv/vendor/dparse/updater.py | Python | mit | 4,566 |
# -*- coding:utf-8 -*-
"""
Contest pages: blueprint and view functions for the contest section.
"""
from flask import Blueprint
from flask import render_template

from application.utils.loginHelper import ContestLoginHelper

contest = Blueprint("contest", __name__)


@contest.route("/contest",methods=["GET"])
@ContestLoginHelper.login_required
def contest_index():
    return render_template("contest/index.html")

# contests currently in progress
@contest.route("/contest/timeline", methods=['GET', 'POST'])
@ContestLoginHelper.login_required
def timeline():
    return render_template("contest/timeline.html")

# contest registration (not implemented yet)
@contest.route("/contest/apply", methods=['GET', 'POST'])
def apply():
    pass

# score lookup and display (not implemented yet)
@contest.route("/contest/score", methods=['GET', 'POST'])
def score():
    pass

# knowledge-point management (not implemented yet)
@contest.route("/contest/knowledgepoint", methods=['GET', 'POST'])
def knowledgepoint():
    pass

# student exam-paper management and display (not implemented yet)
# todo: how to avoid the cost of manual data entry
@contest.route("/contest/papers", methods=['GET', 'POST'])
def papers():
    pass
| nature-python/youcai-contest | application/controller/contest.py | Python | apache-2.0 | 1,031 |
# -*- coding: utf-8 -*-
#
# Copyright 2012 James Thornton (http://jamesthornton.com)
# BSD License (see LICENSE for details)
#
"""
Interface for interacting with a graph database through Rexster.
"""
from bulbs.config import Config
from bulbs.gremlin import Gremlin
from bulbs.element import Vertex, Edge
from bulbs.model import Node, Relationship
from bulbs.base.graph import Graph as BaseGraph
# Rexster-specific imports
from .client import TitanClient
from .index import KeyIndex
class Graph(BaseGraph):
    """
    The primary interface to Titan (accessed through a Rexster server).

    Instantiates the database :class:`~bulbs.titan.client.TitanClient` object
    using the specified Config and sets up proxy objects to the database.

    :param config: Optional. Defaults to the default config.
    :type config: bulbs.config.Config

    :cvar client_class: TitanClient class.
    :cvar default_index: Default index class.

    :ivar client: TitanClient object.
    :ivar vertices: VertexProxy object.
    :ivar edges: EdgeProxy object.
    :ivar config: Config object.
    :ivar gremlin: Gremlin object.
    :ivar scripts: GroovyScripts object.

    Example:

    >>> from bulbs.titan import Graph
    >>> g = Graph()
    >>> james = g.vertices.create(name="James")
    >>> julie = g.vertices.create(name="Julie")
    >>> g.edges.create(james, "knows", julie)

    """
    # These two class attributes are what specialize this Graph for Titan;
    # the rest of the behavior comes from bulbs.base.graph.Graph.
    client_class = TitanClient
    default_index = KeyIndex

    def __init__(self, config=None):
        super(Graph, self).__init__(config)

        # Titan (via Rexster) supports Gremlin
        self.gremlin = Gremlin(self.client)
        self.scripts = self.client.scripts  # for convenience

    def load_graphml(self,uri):
        """
        Loads a GraphML file into the database and returns the response.

        :param uri: URI of the GraphML file to load.
        :type uri: str

        :rtype: RexsterResult

        """
        script = self.client.scripts.get('load_graphml')
        params = dict(uri=uri)
        return self.gremlin.command(script, params)

    def get_graphml(self):
        """
        Returns a GraphML file representing the entire database.

        :rtype: RexsterResult

        """
        script = self.client.scripts.get('save_graphml')
        return self.gremlin.command(script, params=None)

    def warm_cache(self):
        """
        Warms the server cache by loading elements into memory.

        :rtype: RexsterResult

        """
        script = self.scripts.get('warm_cache')
        return self.gremlin.command(script, params=None)

    def clear(self):
        """
        Deletes all the elements in the graph.

        :rtype: RexsterResult

        .. admonition:: WARNING

           This will delete all your data!

        """
        script = self.client.scripts.get('clear')
        return self.gremlin.command(script,params=None)
| mudbungie/NetExplorer | env/lib/python3.4/site-packages/bulbs/titan/graph.py | Python | mit | 2,869 |
from flask.ext.script import Manager, Shell
from flask.ext.migrate import Migrate, MigrateCommand
import os

from app import app, db

# The config class to load is named by the APP_SETTINGS environment variable.
app.config.from_object(os.environ['APP_SETTINGS'])

migrate = Migrate(app, db)
manager = Manager(app)
# NOTE(review): this Shell command is created but never registered via
# manager.add_command('shell', shell) -- confirm whether that is intended.
shell = Shell(use_ipython=True)

# Expose Flask-Migrate commands as "python manage.py db <cmd>".
manager.add_command('db', MigrateCommand)

if __name__ == '__main__':
    manager.run()
| hkdsun/qkd | manage.py | Python | mit | 356 |
import sys
import time
import os
import unittest
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
# text_ = os.path.expanduser(os.path.join(TEST_DATA_DIR, 'text'))
seeds_ = os.path.expanduser(os.path.join(TEST_DATA_DIR, 'seeds'))
labelled_data_ = os.path.expanduser(os.path.join(TEST_DATA_DIR, 'labelled_data'))
from wedc.infrastructure import database
from wedc.infrastructure.model.labelled_data import LabelledData
from wedc.infrastructure.model.need_to_label_data import NeedToLabelData
from wedc.infrastructure.model.seed_dict import SeedDict
class TestDatabaseMethods(unittest.TestCase):
    """Smoke tests for creating/dropping the WEDC backing database."""

    def setUp(self):
        pass

    def test_create_database(self):
        # Creates the schema through the infrastructure helper.
        database.create_database()

    def test_drop_database(self):
        # Drops the schema; destructive — intended for a test database only.
        database.drop_database()

    def tearDown(self):
        pass
class TestLabelledDataMethods(unittest.TestCase):
    """Exercises the LabelledData model: inserts, loads, exports.

    These are manual/integration tests against a live database rather than
    isolated unit tests; individual cases are cherry-picked in the suite at
    the bottom of the module.
    """

    def setUp(self):
        pass

    def test_insert_data(self):
        # Insert a single synthetic row.
        LabelledData.insert(content='test_content', label=1, flag=2)

    def test_insert_from_csv(self):
        # Bulk-load the ground-truth CSV fixture.
        csv_ = os.path.expanduser(os.path.join(TEST_DATA_DIR, 'groundtruth.csv'))
        LabelledData.insert_from_csv(csv_)

    def test_load_data(self):
        # Dump each row; ids are 1-based positions in the result set.
        for idx, data in enumerate(LabelledData.load_data()):
            post_id = idx + 1
            # print data.label, data.content
            print post_id, data.label, data.extraction

    def test_clear_data(self):
        print LabelledData.clear_data()

    def test_load_potential_seeds(self):
        # Each value looks like (weight, label); only the weight is persisted
        # into the seed dictionary here.
        potential_seeds = LabelledData.load_potential_seeds()
        for seed, vec in potential_seeds.items():
            weight = vec[0]
            label = vec[1]
            SeedDict.insert(seed, weight)

    def test_generate_labelled_data_file(self):
        LabelledData.generate_labelled_data_file(labelled_data_)

    def test_load_labelled_data_file(self):
        print LabelledData.load_labelled_data_file(labelled_data_)[0]

    def tearDown(self):
        pass
class TestSeedDictMethods(unittest.TestCase):
    """Exercises the SeedDict model: load from text, export, clear."""

    def setUp(self):
        pass

    def test_insert_from_txt(self):
        # Load the weighted seed dictionary fixture into the DB.
        txt_ = os.path.expanduser(os.path.join(TEST_DATA_DIR, 'weighted_seed_dict'))
        SeedDict.insert_from_txt(txt_)

    def test_load_data(self):
        # load_data returns a mapping of seed -> weight.
        seeds = SeedDict.load_data()
        for seed, weight in seeds.items():
            print seed, weight
        print len(seeds)

    def test_clear_data(self):
        print SeedDict.clear_data()

    def test_generate_seed_file(self):
        SeedDict.generate_seed_file(seeds_)

    def test_load_seed_file(self):
        print SeedDict.load_seed_file(seeds_)

    def tearDown(self):
        pass
if __name__ == '__main__':
    # unittest.main()

    def run_main_test():
        # Hand-picked suite: cases are enabled/disabled by (un)commenting
        # lines below instead of running the whole module, because most of
        # them mutate a live database.
        suite = unittest.TestSuite()

        # suite.addTest(TestDatabaseMethods("test_create_database"))
        # suite.addTest(TestDatabaseMethods("test_drop_database"))

        # suite.addTest(TestLabelledDataMethods("test_insert_data"))
        # suite.addTest(TestLabelledDataMethods("test_insert_from_csv"))
        # suite.addTest(TestLabelledDataMethods("test_load_data"))
        # suite.addTest(TestLabelledDataMethods("test_clear_data"))
        # suite.addTest(TestLabelledDataMethods("test_load_potential_seeds"))
        suite.addTest(TestLabelledDataMethods("test_generate_labelled_data_file"))
        # suite.addTest(TestLabelledDataMethods("test_load_labelled_data_file"))

        # suite.addTest(TestSeedDictMethods("test_insert_from_txt"))
        # suite.addTest(TestSeedDictMethods("test_load_data"))
        # suite.addTest(TestSeedDictMethods("test_clear_data"))
        # suite.addTest(TestSeedDictMethods("test_generate_seed_file"))
        # suite.addTest(TestSeedDictMethods("test_load_seed_file"))

        runner = unittest.TextTestRunner()
        runner.run(suite)

    run_main_test()
| usc-isi-i2/WEDC | tests/test_database.py | Python | apache-2.0 | 3,966 |
# Written by Bram Cohen
# see LICENSE.txt for license information
from random import randrange, shuffle
from BitTornado.clock import clock
# Compatibility shim for very old Pythons (<2.3) that lack the boolean
# constants; modern interpreters always define True/False, so the except
# branch is dead there.
try:
    True
except:
    True = 1
    False = 0
class PiecePicker:
    """Chooses which piece to download next.

    Implements rarest-first selection layered on user-set priority bands
    (each band is ``priority_step`` levels wide), prefers finishing
    already-started (partial) pieces, and holds the bookkeeping for
    super-seeding mode.

    Core invariant: ``interests`` is a list of buckets where
    ``interests[level]`` holds the pieces currently at selection level
    ``level`` (lower level == more desirable).  ``level_in_interests[p]``
    records piece ``p``'s bucket and ``pos_in_interests[p]`` its index
    inside that bucket, so moving a piece between buckets is O(1).
    """

    def __init__(self, numpieces,
            rarest_first_cutoff = 1, rarest_first_priority_cutoff = 3,
            priority_step = 20):
        # Selection tuning knobs; see next() for how cutoffs are applied.
        self.rarest_first_cutoff = rarest_first_cutoff
        self.rarest_first_priority_cutoff = rarest_first_priority_cutoff + priority_step
        self.priority_step = priority_step
        self.cutoff = rarest_first_priority_cutoff
        self.numpieces = numpieces
        self.started = []                 # pieces with outstanding requests (partials)
        self.totalcount = 0               # sum of numhaves over all pieces
        self.numhaves = [0] * numpieces   # numhaves[p]: connected peers that have p
        self.priority = [1] * numpieces   # user priority per piece; -1 = don't download
        self.removed_partials = {}        # partials cancelled by setting priority -1
        self.crosscount = [numpieces]     # crosscount[k]: pieces held by exactly k peers
        self.crosscount2 = [numpieces]    # like crosscount, but counts our own copy too
        self.has = [0] * numpieces        # has[p]: 1 once we completed piece p
        self.numgot = 0                   # number of pieces we have completed
        self.done = False                 # True once every piece is complete
        self.seed_connections = {}        # superseed: connection -> piece last offered
        self.past_ips = {}                # superseed: peer ip -> piece last offered
        self.seed_time = None
        self.superseed = False
        self.seeds_connected = 0
        self._init_interests()

    def _init_interests(self):
        # One empty bucket per level of the default priority band, then a
        # final bucket with every piece in random order (level priority_step
        # == "no peer has announced it yet").
        self.interests = [[] for x in xrange(self.priority_step)]
        self.level_in_interests = [self.priority_step] * self.numpieces
        interests = range(self.numpieces)
        shuffle(interests)
        self.pos_in_interests = [0] * self.numpieces
        for i in xrange(self.numpieces):
            self.pos_in_interests[interests[i]] = i
        self.interests.append(interests)

    def got_have(self, piece):
        # A peer announced it has `piece`: bump availability counters and
        # demote the piece one interest level (more copies == less rare).
        self.totalcount+=1
        numint = self.numhaves[piece]
        self.numhaves[piece] += 1
        self.crosscount[numint] -= 1
        if numint+1==len(self.crosscount):
            self.crosscount.append(0)
        self.crosscount[numint+1] += 1
        if not self.done:
            numintplus = numint+self.has[piece]
            self.crosscount2[numintplus] -= 1
            if numintplus+1 == len(self.crosscount2):
                self.crosscount2.append(0)
            self.crosscount2[numintplus+1] += 1
            numint = self.level_in_interests[piece]
            self.level_in_interests[piece] += 1
        if self.superseed:
            # Track how widely the piece we handed out has spread.
            self.seed_got_haves[piece] += 1
            numint = self.level_in_interests[piece]
            self.level_in_interests[piece] += 1
        elif self.has[piece] or self.priority[piece] == -1:
            return
        if numint == len(self.interests) - 1:
            self.interests.append([])
        self._shift_over(piece, self.interests[numint], self.interests[numint + 1])

    def lost_have(self, piece):
        # Inverse of got_have: a peer with `piece` disconnected.
        self.totalcount-=1
        numint = self.numhaves[piece]
        self.numhaves[piece] -= 1
        self.crosscount[numint] -= 1
        self.crosscount[numint-1] += 1
        if not self.done:
            numintplus = numint+self.has[piece]
            self.crosscount2[numintplus] -= 1
            self.crosscount2[numintplus-1] += 1
            numint = self.level_in_interests[piece]
            self.level_in_interests[piece] -= 1
        if self.superseed:
            numint = self.level_in_interests[piece]
            self.level_in_interests[piece] -= 1
        elif self.has[piece] or self.priority[piece] == -1:
            return
        self._shift_over(piece, self.interests[numint], self.interests[numint - 1])

    def _shift_over(self, piece, l1, l2):
        # Move `piece` from bucket l1 to a random position in bucket l2,
        # keeping pos_in_interests consistent.  Random placement gives the
        # random tie-break among equally-rare pieces.
        assert self.superseed or (not self.has[piece] and self.priority[piece] >= 0)
        parray = self.pos_in_interests
        p = parray[piece]
        assert l1[p] == piece
        q = l1[-1]
        l1[p] = q
        parray[q] = p
        del l1[-1]
        newp = randrange(len(l2)+1)
        if newp == len(l2):
            parray[piece] = len(l2)
            l2.append(piece)
        else:
            old = l2[newp]
            parray[old] = len(l2)
            l2.append(old)
            l2[newp] = piece
            parray[piece] = newp

    def got_seed(self):
        # One more seed connected; shrink the rarest-first priority window.
        self.seeds_connected += 1
        self.cutoff = max(self.rarest_first_priority_cutoff-self.seeds_connected,0)

    def became_seed(self):
        # A connected peer turned into a seed: every piece effectively
        # loses one "have" from the rarity standpoint.
        self.got_seed()
        self.totalcount -= self.numpieces
        self.numhaves = [i-1 for i in self.numhaves]
        if self.superseed or not self.done:
            self.level_in_interests = [i-1 for i in self.level_in_interests]
            if self.interests:
                del self.interests[0]
        del self.crosscount[0]
        if not self.done:
            del self.crosscount2[0]

    def lost_seed(self):
        self.seeds_connected -= 1
        self.cutoff = max(self.rarest_first_priority_cutoff-self.seeds_connected,0)

    def requested(self, piece):
        # Remember that a request for `piece` is outstanding (partial piece).
        if piece not in self.started:
            self.started.append(piece)

    def _remove_from_interests(self, piece, keep_partial = False):
        # Drop `piece` from its bucket; optionally remember it was a partial
        # so set_priority can resurrect it later.
        l = self.interests[self.level_in_interests[piece]]
        p = self.pos_in_interests[piece]
        assert l[p] == piece
        q = l[-1]
        l[p] = q
        self.pos_in_interests[q] = p
        del l[-1]
        try:
            self.started.remove(piece)
            if keep_partial:
                self.removed_partials[piece] = 1
        except ValueError:
            pass

    def complete(self, piece):
        # Mark `piece` as fully downloaded and retire it from selection.
        assert not self.has[piece]
        self.has[piece] = 1
        self.numgot += 1
        if self.numgot == self.numpieces:
            self.done = True
            self.crosscount2 = self.crosscount
        else:
            numhaves = self.numhaves[piece]
            self.crosscount2[numhaves] -= 1
            if numhaves+1 == len(self.crosscount2):
                self.crosscount2.append(0)
            self.crosscount2[numhaves+1] += 1
        self._remove_from_interests(piece)

    def next(self, haves, wantfunc, complete_first = False):
        # Pick the next piece to request from a peer whose bitfield is
        # `haves`, restricted to pieces accepted by `wantfunc`.
        # Preference order: best already-started piece (when completing
        # first), then rarest-first across the interest buckets.
        cutoff = self.numgot < self.rarest_first_cutoff
        complete_first = (complete_first or cutoff) and not haves.complete()
        best = None
        bestnum = 2 ** 30
        for i in self.started:
            if haves[i] and wantfunc(i):
                if self.level_in_interests[i] < bestnum:
                    best = i
                    bestnum = self.level_in_interests[i]
        if best is not None:
            if complete_first or (cutoff and len(self.interests) > self.cutoff):
                return best
        if haves.complete():
            r = [ (0, min(bestnum,len(self.interests))) ]
        elif cutoff and len(self.interests) > self.cutoff:
            # During startup against a non-seed, scan the non-priority
            # levels first, then the priority window.
            r = [ (self.cutoff, min(bestnum,len(self.interests))),
                      (0, self.cutoff) ]
        else:
            r = [ (0, min(bestnum,len(self.interests))) ]
        for lo,hi in r:
            for i in xrange(lo,hi):
                for j in self.interests[i]:
                    if haves[j] and wantfunc(j):
                        return j
        if best is not None:
            return best
        return None

    def am_I_complete(self):
        return self.done

    def bump(self, piece):
        # Push `piece` to the back of its bucket (lowest pick preference at
        # its level) and forget it as a partial.
        l = self.interests[self.level_in_interests[piece]]
        pos = self.pos_in_interests[piece]
        del l[pos]
        l.append(piece)
        for i in range(pos,len(l)):
            self.pos_in_interests[l[i]] = i
        try:
            self.started.remove(piece)
        except:
            pass

    def set_priority(self, piece, p):
        # Change the user priority of `piece`.  Returns True when the
        # downloader should be poked to (re)consider requests.
        if self.superseed:
            return False    # don't muck with this if you're a superseed
        oldp = self.priority[piece]
        if oldp == p:
            return False
        self.priority[piece] = p
        if p == -1:
            # when setting priority -1,
            # make sure to cancel any downloads for this piece
            if not self.has[piece]:
                self._remove_from_interests(piece, True)
            return True
        if oldp == -1:
            # Re-enable a previously blocked piece: reinsert it at the level
            # implied by its availability and new priority band.
            level = self.numhaves[piece] + (self.priority_step * p)
            self.level_in_interests[piece] = level
            if self.has[piece]:
                return True
            while len(self.interests) < level+1:
                self.interests.append([])
            l2 = self.interests[level]
            parray = self.pos_in_interests
            newp = randrange(len(l2)+1)
            if newp == len(l2):
                parray[piece] = len(l2)
                l2.append(piece)
            else:
                old = l2[newp]
                parray[old] = len(l2)
                l2.append(old)
                l2[newp] = piece
                parray[piece] = newp
            if self.removed_partials.has_key(piece):
                del self.removed_partials[piece]
                self.started.append(piece)
            # now go to downloader and try requesting more
            return True
        # Plain band change: shift by whole priority_step multiples.
        numint = self.level_in_interests[piece]
        newint = numint + ((p - oldp) * self.priority_step)
        self.level_in_interests[piece] = newint
        if self.has[piece]:
            return False
        while len(self.interests) < newint+1:
            self.interests.append([])
        self._shift_over(piece, self.interests[numint], self.interests[newint])
        return False

    def is_blocked(self, piece):
        return self.priority[piece] < 0

    def set_superseed(self):
        # Enter super-seeding mode: forget peer state and hand out pieces
        # one at a time via next_have().
        assert self.done
        self.superseed = True
        self.seed_got_haves = [0] * self.numpieces
        self._init_interests()  # assume everyone is disconnected

    def next_have(self, connection, looser_upload):
        # Super-seeding: decide which piece (if any) to "have"-announce to
        # this connection next.  Returns None (nothing yet), a piece index,
        # or -1 to signal the connection should be closed.
        if self.seed_time is None:
            self.seed_time = clock()
            return None
        if clock() < self.seed_time+10:  # wait 10 seconds after seeing the first peers
            return None                  # to give time to grab have lists
        if not connection.upload.super_seeding:
            return None
        olddl = self.seed_connections.get(connection)
        if olddl is None:
            ip = connection.get_ip()
            olddl = self.past_ips.get(ip)
            if olddl is not None:  # peer reconnected
                self.seed_connections[connection] = olddl
        if olddl is not None:
            if looser_upload:
                num = 1  # send a new have even if it hasn't spread that piece elsewhere
            else:
                num = 2
            if self.seed_got_haves[olddl] < num:
                return None
            if not connection.upload.was_ever_interested:  # it never downloaded it?
                connection.upload.skipped_count += 1
                if connection.upload.skipped_count >= 3:  # probably another stealthed seed
                    return -1  # signal to close it
        for tier in self.interests:
            for piece in tier:
                if not connection.download.have[piece]:
                    seedint = self.level_in_interests[piece]
                    self.level_in_interests[piece] += 1  # tweak it up one, so you don't duplicate effort
                    if seedint == len(self.interests) - 1:
                        self.interests.append([])
                    self._shift_over(piece,
                                self.interests[seedint], self.interests[seedint + 1])
                    self.seed_got_haves[piece] = 0  # reset this
                    self.seed_connections[connection] = piece
                    connection.upload.seed_have_list.append(piece)
                    return piece
        return -1  # something screwy; terminate connection

    def lost_peer(self, connection):
        # Remember (by ip) which piece this peer was offered, so a reconnect
        # continues where it left off; undo an unspread have count.
        olddl = self.seed_connections.get(connection)
        if olddl is None:
            return
        del self.seed_connections[connection]
        self.past_ips[connection.get_ip()] = olddl
        if self.seed_got_haves[olddl] == 1:
            self.seed_got_haves[olddl] = 0
| lehoanganh/kcsdb | chef-repo/.chef/murder-kcsd/dist/BitTornado/BT1/PiecePicker.py | Python | apache-2.0 | 11,915 |
from flask import Blueprint, request, jsonify
from sqlalchemy.exc import IntegrityError
from api.core import db
from api.utils import delete_item, get_items
from api.decorators import requires_login, requires_roles, requires_keys
from api.models.parts import Manufacturer
blueprint = Blueprint('manufacturers', __name__, url_prefix='/manufacturers')
@blueprint.route('/', methods=['GET'])
def get_all():
    """Return every manufacturer, honouring any query args in the request."""
    items = get_items('manufacturers', Manufacturer, request)
    return jsonify(items)
@blueprint.route('/<int:id>/', methods=['DELETE'])
@requires_login
@requires_roles('webmaster')
def delete(id):
    """Delete the manufacturer with the given primary key (webmaster only)."""
    result = delete_item(id, Manufacturer)
    return jsonify(result)
@blueprint.route('/', methods=['POST'])
@requires_login
@requires_roles('parts_manager')
@requires_keys('name')
def add_new_manufacturer():
    """Create a manufacturer from the JSON body; duplicates are rejected.

    Responds with {"success": bool, "errors": [...]}.
    """
    errors = []
    json = request.get_json(force=True)
    try:
        db.session.add(Manufacturer(json['name']))
        db.session.commit()
    except IntegrityError:
        # Bug fix: a failed flush leaves the session in an invalid state;
        # without rollback() every subsequent use of this session raises.
        db.session.rollback()
        errors.append('Already exists')
    return jsonify(success=not errors, errors=errors)
| CalPolyIEEE/api.calpolyieee.org | api/routes/manufacturers.py | Python | mit | 1,049 |
import os
import re
import sys
import shlex
import subprocess
import time
import json
from operator import xor
from contextlib import contextmanager
# Run relative to this file's directory so the relative paths below resolve
# no matter where the test runner was started from.
dirname = os.path.dirname(os.path.realpath(__file__))
os.chdir(dirname)
sys.path.append("../lib")

from util import cmd

# Root of the mzbench checkout plus frequently used locations inside it.
mzbench_dir = dirname + '/../'
scripts_dir = mzbench_dir + 'acceptance_tests/scripts/'
mzbench_script = mzbench_dir + 'bin/mzbench'
@contextmanager
def start_mzbench_server(custom_data_location=None):
    """Context manager that runs a local mzbench API server for the tests.

    Writes test_server.config (node source taken from MZBENCH_RSYNC or
    MZBENCH_REPO when set), starts the server, yields, and always stops the
    server again.  On any error inside the block, dumps the newest erlang
    server log to stdout before re-raising.
    """
    if 'MZBENCH_RSYNC' in os.environ:
        node_location_param = '{{node_rsync, "{0}"}},'.format(os.environ['MZBENCH_RSYNC'])
    elif 'MZBENCH_REPO' in os.environ:
        node_location_param = '{{node_git, "{0}"}},'.format(os.environ['MZBENCH_REPO'])
    else:
        node_location_param = ''

    if custom_data_location:
        custom_data_location_param = '{{bench_data_dir, "{0}"}},'.format(custom_data_location)
    else:
        custom_data_location_param = ''

    # Render the Erlang config term; port 0 lets the server pick free ports.
    with open(dirname + "/test_server.config", "w") as config:
        config.write('[{{mzbench_api, [{0} {1} {{node_log_port, 0}}, {{node_log_user_port, 0}}, {{node_management_port, 0}}, {{node_interconnect_port, 0}}]}}].'
            .format(node_location_param, custom_data_location_param))

    # Echo the generated config for easier debugging of CI runs.
    with open('{0}/test_server.config'.format(dirname), 'r') as f:
        print(f.read())

    cmd('{0} start_server --config {1}/test_server.config'.format(mzbench_script, dirname))

    try:
        time.sleep(3)  # give server some time to start
        yield
    except:
        # Dump the most recent erlang log so failures are diagnosable.
        print ''
        print '-------------------- >> begin server logs << ---------------------'
        logdir = os.path.join(mzbench_dir + 'server/_build/default/rel/mzbench_api/log')
        logfiles = [logfile for logfile in os.listdir(logdir)]
        logfile = sorted([os.path.join(logdir, l) for l in logfiles if l.startswith('erlang')], key=os.path.getmtime, reverse=True)[0]
        with open(logfile) as f:
            for line in f:
                print line.rstrip().replace('\\n', '\n')
        print '-------------------- >> end server logs << ---------------------'
        print ''
        raise
    finally:
        cmd('{0} stop_server'.format(mzbench_script))
def run_successful_bench(name, nodes=None, workers_per_node=None, env={},
                         email=None, expected_log_message_regex=None,
                         check_log_function=None, check_user_log_function=None, post_start=None):
    """Run a benchmark that is expected to finish successfully.

    Thin wrapper around run_bench with should_fail=False; see run_bench for
    the meaning of the remaining arguments.
    """
    kwargs = dict(
        should_fail=False,
        nodes=nodes,
        workers_per_node=workers_per_node,
        env=env,
        email=email,
        expected_log_message_regex=expected_log_message_regex,
        check_log_function=check_log_function,
        check_user_log_function=check_user_log_function,
        post_start=post_start,
    )
    return run_bench(name, **kwargs)
def run_failing_bench(name, nodes=None, workers_per_node=None, env={},
                      email=None, expected_log_message_regex=None,
                      check_log_function=None, check_user_log_function=None, post_start=None):
    """Run a benchmark that is expected to fail.

    Thin wrapper around run_bench with should_fail=True; see run_bench for
    the meaning of the remaining arguments.

    Bug fix: the ``email`` argument was accepted but never forwarded to
    run_bench (unlike run_successful_bench); it is now passed through.
    """
    return run_bench(name, should_fail=True,
                     nodes=nodes, workers_per_node=workers_per_node, env=env,
                     email=email,
                     expected_log_message_regex=expected_log_message_regex,
                     check_log_function=check_log_function,
                     check_user_log_function=check_user_log_function, post_start=post_start)
def run_bench(name=None, worker_package_with_default_scenario=None, nodes=None,
        workers_per_node=None, env={}, email=None, should_fail=False, max_retries=2,
        expected_log_message_regex=None,
        check_log_function=None, check_user_log_function=None, post_start=None):
    """Start a benchmark through the mzbench CLI and verify its outcome.

    Exactly one of ``name`` (a scenario file) or
    ``worker_package_with_default_scenario`` must be given.  The bench is
    attempted up to ``max_retries`` times; an attempt is good when its exit
    status matches ``should_fail`` and the optional log checks pass.
    Returns the bench id of the good attempt; raises RuntimeError when all
    attempts are exhausted or a log check fails.
    """
    email_option = ('--email=' + email) if email else ''

    if workers_per_node:
        nodes_option = '--workers_per_node ' + str(workers_per_node)
    else:
        if nodes:
            nodes_option = '--nodes ' + ','.join(nodes)
        else:
            nodes_option = '--nodes 1'

    env_option = ' '.join(('--env={0}={1}'.format(k, v)
        for k, v in env.iteritems()))

    def run():
        # Launch a single bench run; returns (bench_id, succeeded).
        if 'worker_branch' in env:
            node_commit_arg = '--node_commit={0}'.format(env['worker_branch'])
        else:
            node_commit_arg = ''

        flags = ' '.join([
            '--host=localhost:4800',
            node_commit_arg,
            nodes_option,
            env_option,
            email_option])

        if name is not None:
            invocation = mzbench_dir + 'bin/mzbench ' + flags + ' start ' + name
        elif worker_package_with_default_scenario is not None:
            invocation = mzbench_dir + 'bin/mzbench ' + flags + ' start_default_scenario_of_worker ' + worker_package_with_default_scenario
        else:
            raise RuntimeError('Neither script filename nor default scenario package provided.')

        start = subprocess.Popen(shlex.split(invocation.encode('ascii')),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        start_out, start_err = start.communicate()

        try:
            bench_id = json.loads(start_out)['id']
        except Exception:
            print 'mzbench returned invalid json: \nCommand: {0}\nOutput: {1}\nStderr: {2}'.format(invocation, start_out, start_err)
            raise

        # Optional hook fired once the bench reaches the 'running' state.
        if (post_start is not None) and wait_status(bench_id, 'running', 240):
            print "Calling post start for {0}".format(bench_id)
            post_start(bench_id)

        # Block until the bench finishes; exit code 0 == bench succeeded.
        wait = subprocess.Popen(shlex.split(
            mzbench_dir + 'bin/mzbench --host=localhost:4800 status --wait {0}'.format(bench_id)),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        wait.communicate()

        return (bench_id, wait.returncode == 0)

    attempt = 0
    while attempt < max_retries:
        print 'Attempt #{0}'.format(attempt)
        try:
            (bench_id, success) = run()
        except Exception as e:
            print "Unexpected error: {0}".format(e)
            bench_id, success = (None, False)

        # xor: the attempt is good when success matches the expectation.
        if xor(success, should_fail):
            if not expected_log_message_regex and not check_log_function and not check_user_log_function:
                # no need to check the log
                return bench_id

            log_cmd = mzbench_dir + 'bin/mzbench --host=localhost:4800 log {0}'.format(bench_id)
            log = cmd(log_cmd)

            if expected_log_message_regex:
                # Accept either a pattern string or a precompiled regex.
                if isinstance(expected_log_message_regex, str) or isinstance(expected_log_message_regex, unicode):
                    regex = re.compile(expected_log_message_regex, re.DOTALL + re.UNICODE)
                else:
                    regex = expected_log_message_regex
                if not regex.search(log):
                    print
                    print u"Log doesn't contain expected log message '{0}':\n".format(regex.pattern)
                    print log
                    raise RuntimeError

            if check_log_function:
                maybe_error = check_log_function(log)
                if maybe_error:
                    print
                    print "Log doesn't pass custom check:\n{0}\n\n".format(maybe_error)
                    print log
                    raise RuntimeError

            if check_user_log_function:
                log_cmd = mzbench_dir + 'bin/mzbench --host=localhost:4800 userlog {0}'.format(bench_id)
                log = cmd(log_cmd)
                maybe_error = check_user_log_function(log)
                if maybe_error:
                    print
                    print "Log doesn't pass custom check:\n{0}\n\n".format(maybe_error)
                    print log
                    raise RuntimeError

            return bench_id

        print 'Attempt #{0} for bench-id {1} unexpectedly {2}, retrying.'.format(attempt, bench_id, 'succeeded' if should_fail else 'failed')
        attempt += 1

    # All attempts exhausted: dump the last attempt's log and give up.
    if (max_retries <= attempt):
        print('All {0} attempts failed'.format(max_retries))
        print('Log of the last attempt (bench {0}):'.format(bench_id))
        if bench_id is not None:
            log_cmd = mzbench_dir + 'bin/mzbench --host=localhost:4800 log {0}'.format(bench_id)
            print cmd(log_cmd).replace('\\n', '\n')
        raise RuntimeError('BenchId {0} for test {1} unexpectedly {2}'.format(
            bench_id, name, 'succeeded' if should_fail else 'failed'))
def restart_bench(bench_id):
restart = subprocess.Popen(
[mzbench_dir + 'bin/mzbench',
'--host=localhost:4800',
'restart',
str(bench_id)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
restart_out, restart_err = restart.communicate()
try:
return json.loads(restart_out)['id']
except Exception:
print 'mzbench restart returned invalid json:\nOutput: {0}\nStderr: {1}'.format(restart_out, restart_err)
raise
def wait_status(bench_id, status, n):
if n <= 0:
print 'ERROR: Wait for status "running" has timed out!'
return False
wait = subprocess.Popen(shlex.split(
mzbench_dir + 'bin/mzbench --host=localhost:4800 status {0}'.format(bench_id)),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = wait.communicate()
try:
current_status = json.loads(out)['status']
except Exception:
print 'mzbench status returned invalid json: \nOutput: {0}\nStderr: {1}'.format(out, err)
raise
print "current_status: {0}".format(current_status)
if current_status == status:
return True
elif current_status == 'failed':
return False
elif current_status == 'complete':
return False
else:
time.sleep(5)
return wait_status(bench_id, status, n - 1)
| MrAlone/mzbench | acceptance_tests/mzb_test_utils.py | Python | bsd-3-clause | 9,638 |
"""
=================================
Gaussian Mixture Model Selection
=================================
This example shows that model selection can be perfomed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
print(__doc__)
import itertools
import numpy as np
from scipy import linalg
import pylab as pl
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500

# Generate random sample, two components: one correlated Gaussian around the
# origin and one isotropic Gaussian centred at (-6, 3).
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
          .7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]

# Grid-search over the number of components and covariance type, keeping the
# model with the lowest BIC (lower BIC == better fit/complexity trade-off).
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
    for n_components in n_components_range:
        # Fit a mixture of gaussians with EM
        gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
        gmm.fit(X)
        bic.append(gmm.bic(X))
        if bic[-1] < lowest_bic:
            lowest_bic = bic[-1]
            best_gmm = gmm

bic = np.array(bic)
color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y'])
clf = best_gmm
bars = []

# Plot the BIC scores: one group of bars per covariance type.
spl = pl.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
    xpos = np.array(n_components_range) + .2 * (i - 2)
    bars.append(pl.bar(xpos, bic[i * len(n_components_range):
                                 (i + 1) * len(n_components_range)],
                       width=.2, color=color))
pl.xticks(n_components_range)
pl.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
pl.title('BIC score per model')
# Place a '*' marker above the bar of the model with the lowest BIC.
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
    .2 * np.floor(bic.argmin() / len(n_components_range))
pl.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)

# Plot the winner: scatter the points coloured by predicted component, with
# one covariance ellipse per component.
splot = pl.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
                                             color_iter)):
    v, w = linalg.eigh(covar)
    if not np.any(Y_ == i):
        continue
    pl.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)

    # Plot an ellipse to show the Gaussian component
    angle = np.arctan2(w[0][1], w[0][0])
    angle = 180 * angle / np.pi  # convert to degrees
    v *= 4
    ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
    ell.set_clip_box(splot.bbox)
    ell.set_alpha(.5)
    splot.add_artist(ell)

pl.xlim(-10, 10)
pl.ylim(-3, 6)
pl.xticks(())
pl.yticks(())
pl.title('Selected GMM: full model, 2 components')
pl.subplots_adjust(hspace=.35, bottom=.02)
pl.show()
| florian-f/sklearn | examples/mixture/plot_gmm_selection.py | Python | bsd-3-clause | 3,192 |
#!/usr/bin/env python2
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=missing-docstring
import unittest
import stream
import make_db_test
import model
class FakeSub(object):
    """Test double for a pub/sub subscription.

    Hands out pre-canned pull batches front-first and logs every API call
    into ``trace`` for later inspection by the test.
    """

    def __init__(self, pulls):
        self.pulls = pulls   # queue of canned pull results, consumed front-first
        self.trace = []      # chronological log of calls made on this fake

    def _record(self, entry):
        # Every public method appends its call signature here.
        self.trace.append(entry)

    def pull(self, return_immediately=False, **_kwargs):
        self._record(['pull', return_immediately])
        return self.pulls.pop(0)

    def acknowledge(self, acks):
        self._record(['ack', acks])

    def modify_ack_deadline(self, acks, time):
        self._record(['modify-ack', acks, time])
class FakeTable(object):
    """Test double for a BigQuery table.

    Remembers its name and schema and logs insert_data calls into a trace
    list, which may be shared with other fakes to get one merged timeline.
    """

    def __init__(self, name, schema, trace=None):
        self.name = name
        self.schema = schema
        self.trace = trace if trace is not None else []

    def insert_data(self, *args, **kwargs):
        self.trace.append(['insert-data', args, kwargs])
        return []  # mimic a successful insert: no per-row errors
class Attrs(object):
    """Minimal stand-in for a pub/sub message: exposes only ``attributes``."""
    def __init__(self, attributes):
        self.attributes = attributes
class FakeSchemaField(object):
    """Accepts arbitrary keyword arguments and exposes each as an attribute,
    mimicking how a BigQuery SchemaField is constructed."""

    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, value)
class StreamTest(unittest.TestCase):
    """End-to-end test of stream.main with all external services faked."""

    def test_main(self):
        # It's easier to run a full integration test with stubbed-out
        # external interfaces and validate the trace than it is to test
        # each individual piece.
        # The components are mostly tested in make_*_test.py.
        db = model.Database(':memory:')
        fakesub = FakeSub([
            # 1st pull: a delete event -> just acked, no insert.
            [
                ('a', Attrs({'eventType': 'OBJECT_DELETE'})),
            ],
            # 2nd pull: finished.json upload -> one row inserted.
            [
                ('b', Attrs({
                    'eventType': 'OBJECT_FINALIZE',
                    'objectId': 'logs/fake/123/finished.json',
                    'bucketId': 'kubernetes-jenkins'})),
            ],
            [],
            # 3rd pull: duplicate finished.json -> acked, not re-inserted.
            [
                ('c', Attrs({
                    'eventType': 'OBJECT_FINALIZE',
                    'objectId': 'logs/fake/123/finished.json',
                    'bucketId': 'kubernetes-jenkins'})),
            ],
            [],
            # 4th pull: started.json -> acked, no insert.
            [
                ('d', Attrs({
                    'eventType': 'OBJECT_FINALIZE',
                    'objectId': 'logs/fake/124/started.json'})),
            ],
            [],
        ])
        # Share fakesub.trace with the table so the assertion below sees a
        # single interleaved timeline of pub/sub and BigQuery calls.
        faketable = FakeTable('day', stream.load_schema(FakeSchemaField), fakesub.trace)
        tables = {'day': (faketable, 'incr')}
        stream.main(
            db, fakesub, tables, make_db_test.MockedClient, [1, 0, 0, 0].pop)

        # uncomment if the trace changes
        # import pprint; pprint.pprint(fakesub.trace)
        # self.maxDiff = 3000

        now = make_db_test.MockedClient.NOW
        self.assertEqual(
            fakesub.trace,
            [['pull', False], ['pull', True], ['pull', True],
             ['ack', ['a']],
             ['modify-ack', ['b'], 180],
             ['ack', ['b']],
             ['insert-data',
              ([[5,
                 now - 5,
                 now,
                 True,
                 u'SUCCESS',
                 None,
                 u'gs://kubernetes-jenkins/logs/fake/123',
                 u'fake',
                 123,
                 [],
                 [{'name': 'Foo', 'time': 3.0},
                  {'failed': True,
                   'failure_text': 'stacktrace',
                   'name': 'Bad',
                   'time': 4.0}],
                 2,
                 1,
                 None]],
               [1]),
              {'skip_invalid_rows': True}],
             ['pull', False], ['pull', True],
             ['modify-ack', ['c'], 180],
             ['ack', ['c']],
             ['pull', False], ['pull', True],
             ['ack', ['d']]])
# Allow running this test module directly: python stream_test.py
if __name__ == '__main__':
    unittest.main()
| abgworrall/test-infra | kettle/stream_test.py | Python | apache-2.0 | 4,290 |
__author__ = 'Axion'
import unittest
import datetime
from work_tracker import _parse_line_into_date_and_comment, _parse_date_string_into_date
from work_tracker import _get_refs_from_comment, _get_nums_from_comment, _get_pertinant_words
class WorkTrackerTests(unittest.TestCase):
    """Unit tests for the work_tracker parsing helpers."""

    def test_parse_line_into_date_and_comment(self):
        # A git-log style line splits into exactly two parts.
        test_line = "Thu Nov 7 12:26:45 2013 +1300 Bug fix in tests.ApiTest due to change in get_client. Refs #4390"
        parts = _parse_line_into_date_and_comment(test_line)
        self.assertEqual(len(parts), 2)

    def test_parse_date_string_into_date(self):
        parsed = _parse_date_string_into_date("Thu Nov 7 12:26:45 2013 +1300")
        self.assertIsInstance(parsed, datetime.date)

    def test_get_refs_from_comment(self):
        refs = _get_refs_from_comment(
            "Bug fix in tests.ApiTest due to change in get_client. Refs #4390")
        self.assertEqual(refs[0], 'Refs #4390')

    def test_get_refs_from_comment_multi(self):
        refs = _get_refs_from_comment(
            "Bug fix in tests.ApiTest due to change in get_client. Refs #4390, Refs #2556")
        for position, expected in enumerate(['Refs #4390', 'Refs #2556']):
            self.assertEqual(refs[position], expected)

    def test_get_refs_from_comment_multi_2(self):
        # A ref embedded mid-word should still be picked up, in order.
        refs = _get_refs_from_comment(
            "Bug fix in Refs #4567tests.ApiTest due to change in get_client. Refs #4390, Refs #2556")
        for position, expected in enumerate(['Refs #4567', 'Refs #4390', 'Refs #2556']):
            self.assertEqual(refs[position], expected)

    def test_get_nums_from_comment(self):
        nums = _get_nums_from_comment(
            "Bug fix in tests.ApiTest due to change in get_client. Refs #4390")
        self.assertEqual(nums[0], '4390')

    def test_get_pertinant_words(self):
        # Proper "Refs #N" tokens come first, then bare numbers.
        words = _get_pertinant_words(
            "Bug fix in Refs 4567tests.ApiTest due to change in get_client. Refs #4390, Refs #2556")
        for position, expected in enumerate(['Refs #4390', 'Refs #2556', '4567']):
            self.assertEqual(words[position], expected)
| devinbarry/yellow-worktracker | tests.py | Python | apache-2.0 | 2,235 |
from __future__ import division, absolute_import, print_function
import re
import sys
import os
import subprocess
__doc__ = """This module generates a DEF file from the symbols in
an MSVC-compiled DLL import library. It correctly discriminates between
data and functions. The data is collected from the output of the program
nm(1).
Usage:
python lib2def.py [libname.lib] [output.def]
or
python lib2def.py [libname.lib] > output.def
libname.lib defaults to python<py_ver>.lib and output.def defaults to stdout
Author: Robert Kern <kernr@mail.ncifcrf.gov>
Last Update: April 30, 1999
"""
__version__ = '0.1a'

# Two-digit Python version string, e.g. "39" for 3.9; used to build the
# default library name (python<ver>.lib) and the DEF header/regexes below.
py_ver = "%d%d" % tuple(sys.version_info[:2])

# Default nm(1) invocation: -C demangles C++ names, -s prints symbols.
DEFAULT_NM = 'nm -Cs'

DEF_HEADER = """LIBRARY python%s.dll
;CODE PRELOAD MOVEABLE DISCARDABLE
;DATA PRELOAD SINGLE
EXPORTS
""" % py_ver
# the header of the DEF file

# FUNC_RE matches any symbol line; DATA_RE matches only the _imp__-prefixed
# (data import) symbols.  parse_nm combines both to tell data from functions.
FUNC_RE = re.compile(r"^(.*) in python%s\.dll" % py_ver, re.MULTILINE)
DATA_RE = re.compile(r"^_imp__(.*) in python%s\.dll" % py_ver, re.MULTILINE)
def parse_cmd():
    """Parses the command-line arguments.

    libfile, deffile = parse_cmd()

    Returns a (libfile, deffile) pair; deffile is None when the DEF output
    should go to stdout.
    """
    if len(sys.argv) == 3:
        if sys.argv[1][-4:] == '.lib' and sys.argv[2][-4:] == '.def':
            libfile, deffile = sys.argv[1:]
        elif sys.argv[1][-4:] == '.def' and sys.argv[2][-4:] == '.lib':
            deffile, libfile = sys.argv[1:]
        else:
            print("I'm assuming that your first argument is the library")
            print("and the second is the DEF file.")
            # Bug fix: this branch previously fell through without binding
            # libfile/deffile, raising UnboundLocalError at the return below.
            libfile, deffile = sys.argv[1:]
    elif len(sys.argv) == 2:
        if sys.argv[1][-4:] == '.def':
            deffile = sys.argv[1]
            libfile = 'python%s.lib' % py_ver
        elif sys.argv[1][-4:] == '.lib':
            deffile = None
            libfile = sys.argv[1]
        else:
            # Bug fix: same latent UnboundLocalError for an unrecognized
            # extension; treat the argument as the library, write to stdout.
            libfile = sys.argv[1]
            deffile = None
    else:
        libfile = 'python%s.lib' % py_ver
        deffile = None
    return libfile, deffile
def getnm(nm_cmd = ['nm', '-Cs', 'python%s.lib' % py_ver]):
    """Return the output of ``nm_cmd`` run through the shell.

    nm_output = getnm(nm_cmd = 'nm -Cs py_lib')

    Raises RuntimeError if nm exits with a non-zero status instead of
    silently returning partial output.
    """
    f = subprocess.Popen(nm_cmd, shell=True, stdout=subprocess.PIPE)
    # Bug fix: communicate() both drains stdout and reaps the child; the
    # old read()/close() pair left a zombie process and never checked the
    # exit status of nm.
    nm_output, _ = f.communicate()
    if f.returncode != 0:
        raise RuntimeError('failed to run "%s"' % ' '.join(nm_cmd))
    return nm_output
def parse_nm(nm_output):
    """Returns a tuple of lists: dlist for the list of data
    symbols and flist for the list of function symbols.

    dlist, flist = parse_nm(nm_output)"""
    data = DATA_RE.findall(nm_output)
    func = FUNC_RE.findall(nm_output)

    # Perf fix: membership tests against plain lists made this quadratic
    # (O(len(data) * len(func))); sets give the same results in O(n).
    func_set = set(func)
    flist = []
    for sym in data:
        if sym in func_set and (sym[:2] == 'Py' or sym[:3] == '_Py' or sym[:4] == 'init'):
            flist.append(sym)

    flist_set = set(flist)
    dlist = []
    for sym in data:
        if sym not in flist_set and (sym[:2] == 'Py' or sym[:3] == '_Py'):
            dlist.append(sym)

    dlist.sort()
    flist.sort()
    return dlist, flist
def output_def(dlist, flist, header, file = sys.stdout):
    """Write the assembled DEF file to *file* (defaults to stdout).

    output_def(dlist, flist, header, file = sys.stdout)

    Data symbols come first, each tagged with DATA, then a blank line,
    then the function symbols; everything is appended to *header* and
    written in a single call.
    """
    data_section = ''.join('\t%s DATA\n' % sym for sym in dlist)
    func_section = ''.join('\t%s\n' % sym for sym in flist)
    file.write(header + data_section + '\n' + func_section)
if __name__ == '__main__':
    libfile, deffile = parse_cmd()
    # deffile of None means "write the DEF file to stdout".
    if deffile is None:
        deffile = sys.stdout
    else:
        deffile = open(deffile, 'w')
    # NOTE(review): DEFAULT_NM already embeds its flags ('nm -Cs'); passing
    # a list together with shell=True is platform-dependent (on POSIX only
    # the first element reaches the shell) -- verify getnm handles this.
    nm_cmd = [str(DEFAULT_NM), str(libfile)]
    nm_output = getnm(nm_cmd)
    dlist, flist = parse_nm(nm_output)
    output_def(dlist, flist, DEF_HEADER, deffile)
| Microvellum/Fluid-Designer | win64-vc/2.78/python/lib/site-packages/numpy/distutils/lib2def.py | Python | gpl-3.0 | 3,487 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-08 09:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a required 'network' CharField to the organism model.

    The field is introduced with a one-off default of '' so existing rows
    can be populated; preserve_default=False drops that default afterwards.
    """
    dependencies = [
        ('neatweb', '0004_auto_20160308_0846'),
    ]
    operations = [
        migrations.AddField(
            model_name='organism',
            name='network',
            field=models.CharField(default='', max_length=256),
            preserve_default=False,
        ),
    ]
| jasonb5/pyneat-web | neatweb/migrations/0005_organism_network.py | Python | apache-2.0 | 500 |
__author__ = 'Irwan Fathurrahman <irwan@kartoza.com>'
__date__ = '10/05/17'
class Version(object):
    """
    Version model for tracking versions.
    """
    # Numeric version identifier (class-level default; 0.0 = unsaved).
    version = 0.0
    # Name/identifier of the last editor (empty until populated).
    edited_by = ''
    # Timestamp of the last edit; stored as a string, empty until populated.
    edited_at = ''
#!/usr/bin/env python
"""
Copyright 2008 Glencoe Software, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
from ez_setup import use_setuptools
# Bootstrap setuptools before importing from it (legacy ez_setup pattern).
use_setuptools()
from setuptools import setup
from omero_version import omero_version as ov
setup(name="OmeroWeb",
      version=ov,
      description="OmeroWeb",
      long_description="""\
OmeroWeb is the container of the web clients for OMERO."
""",
      # NOTE(review): the stray '"' at the end of the description above
      # looks like a typo carried over from the original -- confirm intent.
      author="Aleksandra Tarkowska",
      author_email="",
      url="http://trac.openmicroscopy.org.uk/ome/wiki/OmeroWeb",
      download_url="http://trac.openmicroscopy.org.uk/ome/wiki/OmeroWeb",
      packages=[''],
      test_suite='test.suite'
      )
| hflynn/openmicroscopy | components/tools/OmeroWeb/setup.py | Python | gpl-2.0 | 722 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from glob import glob
try:
from distribute_setup import use_setuptools
use_setuptools()
except ImportError:
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup, find_packages, Extension
#######################
# Pre-install checks #
#######################
def dependCheck(checkers):
    # Run each checker callable from the {name: callable} mapping, echoing
    # per-dependency progress, and return a {name: bool} availability map
    # (every flag starts True and is cleared when its checker fails).
    # NOTE: Python 2 only (print statements, dict.iteritems).
    dflags = dict(zip( checkers.keys(), [True]*len(checkers) ))
    for depName, checker in checkers.iteritems():
        print 'Checking %s ... ' %depName,
        if checker():print 'ok'
        else:
            print 'NOT OK!'
            dflags[depName] = False
    return dflags
def checkPyXML():
    """Return True when the 'xml' package is importable, else False."""
    try:
        import xml  # noqa -- import only probes availability
        return True
    except ImportError:
        return False
def checkPyhrf():
    # Post-install probe: True when the freshly-installed pyhrf package
    # imports cleanly; the import error is echoed to help debugging.
    # NOTE: Python 2 only ('except X,e' syntax).
    try:
        import pyhrf
    except ImportError,e :
        print e
        return False
    return True
# Pre-install dependency probes, keyed by display name.
dependCheckers = {
    'pyxml': checkPyXML,
    }
dependFlags = dependCheck(dependCheckers)
# C extension modules built as part of the package.
cExtensions = [
    Extension('pyhrf.jde.intensivecalc',
              ['src/pyhrf/jde/intensivecalc.c']),
    Extension('pyhrf.boldsynth.pottsfield.pottsfield_c',
              ['src/pyhrf/boldsynth/pottsfield/pottsField.c']),
    Extension('pyhrf.vbjde.UtilsC',
              ['src/pyhrf/vbjde/utilsmodule.c']),
    Extension('pyhrf.cparcellation',
              ['src/pyhrf/cparcellation.c']),
    ## used to sample the GIG (not maintained)
    ## Extension('pyhrf.stats.cRandom',
    ##           ['src/pyhrf/stats/cRandom.c'],
    ##           libraries=['unuran', 'prng'],
    ##           ),
    ]
# numpy must already be present: its include dirs are needed to build the
# C extensions below, so bail out early with a clear message otherwise.
try:
    import numpy as np
except ImportError:
    print 'Numpy should be installed prior to pyhrf installation'
    sys.exit(1)
# Main distutils/setuptools entry point; sources live under ./python,
# C sources under ./src, executables under ./bin.
setup(
    name="pyhrf", author='Thomas VINCENT, Philippe CIUCIU, Solveig BADILLO',
    author_email='thomas.tv.vincent@gmail.com',
    version='0.3',
    setup_requires=['numpy>=1.0'],
    install_requires=['numpy>=1.0','matplotlib>=0.90.1','scipy>=0.7',
                      'nibabel', 'nipy', 'PyXML>=0.8.4'],
    dependency_links = [],
    package_dir = {'' : 'python'},
    packages=find_packages('python'),
    include_package_data=True,
    include_dirs = [np.get_include()],
    package_data={'pyhrf':['datafiles/*']},
    ext_modules=cExtensions,
    scripts=glob('./bin/*'),
    platforms=['linux'],
    zip_safe=False,
    )
# optional deps and description of associated feature:
# mapping of importable module name -> human description of the pyhrf
# feature that becomes available when that module is present.
optional_deps = {
    'sklearn' : '(scikit-learn) -- spatial ward parcellation',
    'joblib' : 'local parallel feature (eg pyhrf_jde_estim -x local)',
    'soma.workflow' : 'cluster parallel feature (eg pyhrf_jde_estim -x cluster)',
    'PIL' : 'loading of image file as simulation maps',
    'munkres' : 'computation of distance between parcellations',
    'pygraphviz' : '(python-graph-core) -- save plot of simulation pipelines',
    'PyQt4': 'viewer and xml editor',
    }
def check_opt_dep(dep_name, dep_descrip):
    """
    Return a message telling if dependency *dep_name* is available
    with an import
    """
    try:
        __import__(dep_name)
        importable = True
    except ImportError:
        importable = False
    if importable:
        return '%s is importable, %s will be available' % (dep_name, dep_descrip)
    return '%s *NOT IMPORTABLE*, %s will *NOT* be available' % (dep_name,
                                                                dep_descrip)
# Post-setup summary: report which optional features will work, then
# re-check that the installed package itself imports.
# NOTE: Python 2 only (print statements).
print 'Optional dependencies:'
print '\n'.join(['- '+ check_opt_dep(dn, dd) for dn, dd in optional_deps.items()])
print '\nIf the installation was successfull, you may run '\
    '"pyhrf_maketests" to run package tests.\n'
print 'Report on installation:'
installCheckers = {
    'Pyhrf main installation' : checkPyhrf,
    }
dependCheck(installCheckers)
| philouc/pyhrf | setup.py | Python | gpl-3.0 | 3,704 |
from socket import *
import threading as thrd
import multiprocessing as mult
import time
#############CLASS############
def setdict(idic, dic):
    """Copy every key/value pair from *dic* into *idic* in place.

    Uses item assignment rather than idic.update() on purpose: idict's
    __getattribute__ shadows dict methods, so .update is not callable on it.
    """
    for key, value in dic.items():
        idic[key] = value
class idict(dict):
    """dict whose attribute access is routed through item access.

    Reading any attribute returns self[key], or None when the key is
    absent -- note this shadows *every* dict method (e.g. idict().update
    evaluates to None), so callers must use item syntax or helpers such
    as setdict().  Writing an attribute stores the value under the key
    with all '-' characters removed ("Content-Length" -> "ContentLength").
    """
    def __getattribute__(self,key):
        # Bare except: any lookup failure (KeyError included) silently
        # falls through, so the method returns None instead of raising.
        try:
            return self[key]
        except:
            pass
    def __setattr__(self,key,value):
        # Strip '-' from the key before storing as a plain dict item.
        self["".join(key.split('-'))]=value
##############TCP##############
# Listening address: "" binds to all interfaces on port 8080.
host=""
port=8080
servaddr=(host,port)
# Backlog passed to listen(); number of worker processes to fork.
maxconn=10
processcount=1#mult.cpu_count()
def tcpConn():
    """Bind the listening socket and fan accepted connections out to workers.

    Creates one shared listener (REUSEADDR + TCP_NODELAY), spawns
    `processcount` worker processes all accepting on it, and blocks until
    every worker exits.
    """
    listener = socket(AF_INET, SOCK_STREAM)
    listener.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
    listener.setsockopt(SOL_TCP, TCP_NODELAY, 1)
    listener.bind(servaddr)
    listener.listen(maxconn)
    workers = [mult.Process(target=tcpProc, args=(listener,))
               for _ in range(processcount)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
def tcpProc(zsock):
    """Worker loop: prime one request-handler coroutine, then feed it every
    connection accepted on *zsock*, forever."""
    handle = reqHandler().send
    handle(None)
    while True:
        handle(zsock.accept())
############REQUEST############
from urllib.request import unquote
import traceback
# Maximum number of bytes read per request (single recv; no re-assembly).
bufsize=1024
def reqHandler():
    # Coroutine: receives (connection, address) pairs from the accept loop,
    # parses a single HTTP request out of one recv() buffer, dispatches it
    # through the router coroutine, then answers with a fixed status line.
    noq=req_noq().send
    rtr=router().send
    # Prime both sub-coroutines so they are parked at their first yield.
    noq(None)
    rtr(None)
    # Headers whose values get the q-parameter-stripping treatment.
    hlist={
        "Accept":noq,
        "Accept-Encoding":noq,
        "Accept-Language":noq,
    }
    while(1):
        try:
            (zconn,useraddr)=yield
            ###firewall###
            ###parse###
            data=zconn.recv(bufsize)
            #print(data,"\n")
            # Split head from body at the blank line; only the head is used.
            data=data.split(b'\r\n\r\n')
            head=data[0].decode("ascii").split('\r\n')
            # Request line: METHOD PATH VERSION.
            startline=head[0].split(" ")
            request=idict()
            request.method=startline[0]
            request.path=startline[1]
            request.version=startline[2]
            headers=idict()
            for i in head[1:]:
                c=i.find(":")
                i=[i[:c],i[c+1:]]
                # Known headers run through their parser; anything else
                # (including parser failures, via the bare except) is kept
                # as the raw left-stripped string.
                try:
                    headers[i[0]]=hlist[i[0]](i[1])
                except:
                    headers[i[0]]=i[1].lstrip()
            request.headers=headers
            #print(request)
            #check#
            rtr(request)
            zconn.send(b"HTTP/1.1 304 OK")
            zconn.close()
        except:
            # Any parse/dispatch failure: log the traceback and answer 500.
            traceback.print_exc()
            zconn.send(b'HTTP/1.1 500 OMG')
            zconn.close()
def req_noq():
    """
    Coroutine: parse a comma-separated header value, dropping any ';q=…'
    (or other ';'-suffixed) parameters and URL-decoding each item.
    Prime with send(None); each subsequent send(raw) yields the parsed list.
    """
    out = []
    while True:
        raw = yield out
        out = [unquote(item.split(";")[0].strip()) for item in raw.split(",")]
###########RESPONSE###########
############ROUTING############
# Registry of routes: maps str(spec_dict) -> handler function (see route()).
routewall=idict()
def router():
    # Coroutine: receives parsed requests and calls every registered handler
    # whose path pattern and extra constraints (method, headers, ...) match.
    pcmp=pathcmp().send
    pcmp(None)
    while(1):
        try:
            request=yield
            for key in routewall:
                # SECURITY/REVIEW NOTE: keys are eval()'d back into dicts.
                # They originate from route()'s str(key) in this process,
                # but eval on registry keys is fragile -- consider storing
                # the spec dict directly instead of its repr.
                r=eval(key)
                result=pcmp((request.path,r["path"]))
                if(result!=None):
                    del r["path"]
                    scs=1
                    # Every remaining spec entry must equal the matching
                    # request attribute for the route to fire.
                    for i in r:
                        if(r[i]!=request[i]):
                            scs=0
                            break
                    if(scs==1):
                        routewall[key](request,**result)
        except:pass
def route(**key):
    """Decorator factory: register a handler in ``routewall``.

    The keyword spec must include ``path`` (a pattern with ``<name>``
    placeholders, pre-split via spliter()); any other keywords are matched
    verbatim against request attributes by router().
    """
    def reg(func):
        key["path"] = spliter(key["path"])
        routewall[str(key)] = func
        # Bug fix: return the function so the decorated name stays bound
        # to it (previously the decorator returned None, rebinding the
        # handler's module-level name to None).
        return func
    return reg
def spliter(path):
    """Split a route pattern into alternating literal and <param> segments.

    Returns a list [lit0, name0, lit1, name1, ..., litN]; a pattern with no
    placeholders yields [path].  An unterminated '<...' keeps the original
    quirk of treating the rest of the string as a trailing literal segment
    (see the "dhjdknxnj<djkd" note in the original -- intentionally kept).
    """
    segments = []
    search_from = 0
    literal_start = 0
    while True:
        open_at = path.find("<", search_from)
        if open_at == -1:
            segments.append(path[literal_start:])
            break
        segments.append(path[literal_start:open_at])
        close_at = path.find(">", open_at + 1)
        if close_at == -1:
            segments.append(path[open_at + 1:])
            break
        segments.append(path[open_at + 1:close_at])
        search_from = close_at + 1
        literal_start = search_from
    return segments
def pathcmp():
    # Coroutine: receives (path, pattern_list) pairs -- pattern_list being
    # spliter() output of alternating literal/param segments -- and yields
    # either a {param_name: matched_substring} dict or None when the path
    # does not fit the pattern.
    out=None
    while(1):
        path,l=yield out
        start=0
        # t collects [start, end) offsets of the literal segments; the gaps
        # between consecutive offsets are the parameter substrings.
        t=[]
        out={}
        scs=False
        if(path.find(l[0])==0):
            scs=True
            t=[0,len(l[0])]
            start=len(l[0])
            # Pure-literal pattern must match the whole path exactly.
            if(len(l)==1):
                if(l[0]!=path):scs=False
        # Locate each subsequent literal segment left-to-right.
        for i in range(2,len(l),2):
            idx=path.find(l[i],start)
            start=idx
            end=idx+len(l[i])
            if(end>len(path)):
                scs=False
                break
            t.append(idx)
            t.append(end)
            # NOTE(review): this overwrites any earlier failure (including a
            # non-matching prefix above) -- confirm that is intended.
            scs=True
        if(scs):
            # Slice the parameter values out of the gaps between literals;
            # the bare except covers the final open-ended parameter.
            for i in range(1,len(l),2):
                try:out[l[i]]=path[t[i]:t[i+1]]
                except:out[l[i]]=path[t[i]:]
        else:out=None
############MAIN#############
# Register a catch-all handler: matches any path as a single <a> parameter
# and does nothing with it (demo/smoke-test route).
@route(path="<a>")
def hey(r,a):
    pass
# Start serving; blocks until the worker processes exit.
tcpConn()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys # for system io
import datetime
import sqlite3 # for DB Activities
import urllib2 # for url parssing et al
import textwrap # To limit o/p characters per line
import subprocess # To invoke espeak
import time
import os
from nltk.corpus import wordnet as wn # Wordnet DB
from nltk.stem.wordnet import WordNetLemmatizer # To Obtain Lemma
from BeautifulSoup import BeautifulSoup
import goslate
import re
from pattern.en import conjugate, pluralize, singularize, comparative, superlative, suggest
rpath = "/home/shingu/workspace/vocab_prep/"
#rpath = "/home/rajkumar.r/backup/workspace/users/raj/vocab_prep/"
#tdef_file = open("/home/shingu/rtrt","w")
def similar_Wrd(word):
    # Collect WordNet-related words for *word* (synset lemma names plus
    # hyponym lemma names), restricted to words that appear in the
    # module-level `sword_list` and have not yet been placed in
    # `completed_list`.  The input word itself is removed from the result.
    # NOTE: relies on module-level globals sword_list / completed_list.
    list_of_sim=[]
    #print "Running similar_Wrd for %s" %(word)
    for wrd in wn.synsets(word):
        #print "%s in lemma_names" %(wrd)
        # Direct lemma names of every synset of the word.
        for sim in wrd.lemma_names:
            if sim not in list_of_sim:
                #print "%s not in list_of_sim" %(sim)
                if sim in sword_list and sim not in completed_list:
                    #print "%s is in sword_list" %(sim)
                    list_of_sim.append(sim)
                else:
                    pass
                    #print "%s is not in sword_list" %(sim)
            else:
                pass
                #print "%s is in list_of_sim" %(sim)
        #print "\n\n\n%s in hyponyms" %(wrd)
        # Lemma names of the more-specific (hyponym) synsets.
        for hypo in wrd.hyponyms():
            for lemma in hypo.lemma_names:
                if lemma not in list_of_sim:
                    #print "%s not in list_of_sim" %(lemma)
                    if lemma in sword_list and lemma not in completed_list:
                        #print "%s is in sword_list" %(lemma)
                        list_of_sim.append(lemma)
                    else:
                        pass
                        #print "%s is not in sword_list" %(lemma)
                else:
                    pass
                    #print "%s is in list_of_sim" %(lemma)
    if word in list_of_sim:
        list_of_sim.remove(word)
    return list_of_sim
# Global working state: rword_list accumulates (word, 0, 0, similars)
# tuples; sword_list is the remaining candidate pool; completed_list holds
# words already assigned to a cluster.  NOTE: Python 2 only (print stmts).
rword_list = []
sword_list = []
completed_list = []
if __name__ == "__main__":
    # Try to get the input
    # The argument is either a path to a word-list file or the word list
    # itself passed inline.
    try:
        fp = open(sys.argv[1],'r')
        word_list = fp.read()
    except:
        word_list = sys.argv[1]
    no_of_word = 0
    # Count the number of words in the word_list
    for word in word_list.split():
        sword_list.append(word)
    # Greedy clustering: each word pulls its WordNet-similar words out of
    # the pool so they are not clustered a second time.
    for word in sword_list:
        list_of_sim = similar_Wrd(word)
        entity = (word, 0, 0, list_of_sim)
        rword_list.append(entity)
        print "%s" %(word)
        for sim in list_of_sim:
            if sim in sword_list:
                sword_list.remove(sim)
                print "%s" %(sim)
                completed_list.append(sim)
        completed_list.append(word)
        #sys.exit();
        no_of_word = no_of_word + 1
    #print "Total number of words : %d" %(no_of_word)
| esotericnomen/vocab_prep | cluster.py | Python | gpl-3.0 | 2,476 |
# -*- coding: utf-8 -*-
"""
WSGI config for myproject project.
It exposes the WSGI callable as a module-level variable named ``application``.
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the dev settings unless the environment already chose one.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.dev")
# Module-level WSGI callable picked up by servers (gunicorn, uWSGI, ...).
application = get_wsgi_application()
| tkjone/django-starters | series_3/p_01/myproject/src/server/config/wsgi.py | Python | mit | 381 |
class Solution:
    """
    @param s: The first string
    @param t: The second string
    @return true or false
    """
    # Two strings are anagrams exactly when their character multisets are
    # equal, i.e. when their sorted character sequences are identical.
    # Time: O(n log n) for the sorts; Space: O(n) for the sorted copies.
    def anagram(self, s, t):
        return sorted(s) == sorted(t)
| rosegun38/LintCode | Two_Strings_Are_Anagrams/Solution.py | Python | gpl-3.0 | 307 |
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2015 Mozilla Corporation
# Reads from papertrail using the API and inserts log data into ES in
# the same manner as esworker_eventtask.py
import json
import kombu
import sys
import socket
import time
from configlib import getConfig, OptionParser
from datetime import datetime, timedelta
import calendar
import requests
from mozdef_util.elasticsearch_client import ElasticsearchClient, ElasticsearchBadServer, ElasticsearchInvalidIndex, ElasticsearchException
from mozdef_util.utilities.toUTC import toUTC
from mozdef_util.utilities.to_unicode import toUnicode
from mozdef_util.utilities.remove_at import removeAt
from mozdef_util.utilities.logger import logger, initLogger
from lib.plugins import sendEventToPlugins, registerPlugins
# running under uwsgi?
# hasUWSGI only affects startup logging in main(); the worker runs the same
# way either way.
try:
    import uwsgi
    hasUWSGI = True
except ImportError as e:
    hasUWSGI = False
class PTRequestor(object):
    """Client for the Papertrail events-search API.

    Pages through /api/v1/events/search.json for a query/time window,
    de-duplicating against the event ids returned by the previous request().
    """
    def __init__(self, apikey, evmax=2000):
        # evmax caps how many events a single request() call will gather.
        self._papertrail_api = 'https://papertrailapp.com/api/v1/events/search.json'
        self._apikey = apikey
        self._events = {}
        self._evmax = evmax
        self._evidcache = []
    def parse_events(self, resp):
        # Merge one API response page into self._events (keyed by event id);
        # returns the min_id to use as the next page cursor, or None when
        # the API reported no further pages.
        for x in resp['events']:
            if x['id'] in self._evidcache:
                # saw this event last time, just ignore it
                continue
            self._events[x['id']] = x
        if 'reached_record_limit' in resp and resp['reached_record_limit']:
            return resp['min_id']
        return None
    def makerequest(self, query, stime, etime, maxid):
        # Issue one search request for [stime, etime] (epoch seconds),
        # optionally anchored at page cursor *maxid*.  Retries non-200
        # responses up to 3 times, then terminates the process.
        payload = {
            'min_time': calendar.timegm(stime.utctimetuple()),
            'max_time': calendar.timegm(etime.utctimetuple()),
            'q': query
        }
        if maxid is not None:
            payload['max_id'] = maxid
        hdrs = {'X-Papertrail-Token': self._apikey}
        max_retries = 3
        total_retries = 0
        while True:
            logger.debug("Sending request to papertrail API")
            resp = requests.get(self._papertrail_api, headers=hdrs, params=payload)
            if resp.status_code == 200:
                break
            else:
                logger.debug("Received invalid status code: {0}: {1}".format(resp.status_code, resp.text))
                total_retries += 1
                if total_retries < max_retries:
                    logger.debug("Sleeping a bit then retrying")
                    time.sleep(2)
                else:
                    logger.error("Received too many error messages...exiting")
                    logger.error("Last malformed response: {0}: {1}".format(resp.status_code, resp.text))
                    sys.exit(1)
        return self.parse_events(resp.json())
    def request(self, query, stime, etime):
        # Gather all events for the window, following page cursors until
        # exhausted or until the evmax soft limit is exceeded.
        self._events = {}
        maxid = None
        while True:
            maxid = self.makerequest(query, stime, etime, maxid)
            if maxid is None:
                break
            if len(self._events.keys()) > self._evmax:
                logger.warning('papertrail esworker hitting event request limit')
                break
        # cache event ids we return to allow for some duplicate filtering checks
        # during next run
        self._evidcache = self._events.keys()
        return self._events
def keyMapping(aDict):
    '''map common key/fields to a normalized structure,
    explicitly typed when possible to avoid schema changes for upsteam consumers
    Special accomodations made for logstash,nxlog, beaver, heka and CEF
    Some shippers attempt to conform to logstash-style @fieldname convention.
    This strips the leading at symbol since it breaks some elastic search
    libraries like elasticutils.
    '''
    # Returns the normalized event dict, or None when normalization raises.
    # NOTE: Python 2 only (dict.iteritems, unicode builtin).
    returndict = dict()
    # uncomment to save the source event for debugging, or chain of custody/forensics
    # returndict['original']=aDict
    # set the timestamp when we received it, i.e. now
    returndict['receivedtimestamp'] = toUTC(datetime.now()).isoformat()
    returndict['mozdefhostname'] = options.mozdefhostname
    returndict['details'] = {}
    try:
        for k, v in aDict.iteritems():
            k = removeAt(k).lower()
            if k in ('message', 'summary'):
                returndict[u'summary'] = toUnicode(v)
            if k in ('payload') and 'summary' not in aDict:
                # special case for heka if it sends payload as well as a summary, keep both but move payload to the details section.
                returndict[u'summary'] = toUnicode(v)
            elif k in ('payload'):
                returndict[u'details']['payload'] = toUnicode(v)
            if k in ('eventtime', 'timestamp', 'utctimestamp'):
                returndict[u'utctimestamp'] = toUTC(v).isoformat()
                returndict[u'timestamp'] = toUTC(v).isoformat()
            if k in ('hostname', 'source_host', 'host'):
                returndict[u'hostname'] = toUnicode(v)
            if k in ('tags'):
                if len(v) > 0:
                    returndict[u'tags'] = v
            # nxlog keeps the severity name in syslogseverity,everyone else should use severity or level.
            if k in ('syslogseverity', 'severity', 'severityvalue', 'level'):
                returndict[u'severity'] = toUnicode(v).upper()
            if k in ('facility', 'syslogfacility','source'):
                returndict[u'source'] = toUnicode(v)
            if k in ('pid', 'processid'):
                returndict[u'processid'] = toUnicode(v)
            # nxlog sets sourcename to the processname (i.e. sshd), everyone else should call it process name or pname
            if k in ('pname', 'processname', 'sourcename'):
                returndict[u'processname'] = toUnicode(v)
            # the file, or source
            if k in ('path', 'logger', 'file'):
                returndict[u'eventsource'] = toUnicode(v)
            if k in ('type', 'eventtype', 'category'):
                returndict[u'category'] = toUnicode(v)
            # custom fields as a list/array
            if k in ('fields', 'details'):
                if type(v) is not dict:
                    returndict[u'details'][u'message'] = v
                else:
                    if len(v) > 0:
                        for details_key, details_value in v.iteritems():
                            returndict[u'details'][details_key] = details_value
            # custom fields/details as a one off, not in an array
            # i.e. fields.something=value or details.something=value
            # move them to a dict for consistency in querying
            if k.startswith('fields.') or k.startswith('details.'):
                newName = k.replace('fields.', '')
                newName = newName.lower().replace('details.', '')
                # add field with a special case for shippers that
                # don't send details
                # in an array as int/floats/strings
                # we let them dictate the data type with field_datatype
                # convention
                if newName.endswith('_int'):
                    returndict[u'details'][unicode(newName)] = int(v)
                elif newName.endswith('_float'):
                    returndict[u'details'][unicode(newName)] = float(v)
                else:
                    returndict[u'details'][unicode(newName)] = toUnicode(v)
            # nxlog windows log handling
            if 'Domain' in aDict and 'SourceModuleType' in aDict:
                # nxlog parses all windows event fields very well
                # copy all fields to details
                returndict[u'details'][k] = v
        if 'utctimestamp' not in returndict:
            # default in case we don't find a reasonable timestamp
            returndict['utctimestamp'] = toUTC(datetime.now()).isoformat()
        if 'type' not in returndict:
            # default replacement for old _type subcategory.
            # to preserve filtering capabilities
            returndict['type'] = 'event'
    except Exception as e:
        # A malformed event is logged and dropped (None) rather than raised.
        logger.exception('Received exception while normalizing message: %r' % e)
        logger.error('Malformed message: %r' % aDict)
        return None
    return returndict
def esConnect():
    '''open or re-open a connection to elastic search'''
    servers = ['{0}'.format(server) for server in options.esservers]
    return ElasticsearchClient(servers, options.esbulksize)
class taskConsumer(object):
    """Polls papertrail via a PTRequestor and indexes each event into ES.

    run() loops forever: it requests the window since the last poll (offset
    by ptbackoff to let papertrail catch up), normalizes each record and
    hands it to on_message() for plugin processing and indexing.
    """
    def __init__(self, ptRequestor, esConnection):
        self.ptrequestor = ptRequestor
        self.esConnection = esConnection
        # calculate our initial request window
        self.lastRequestTime = toUTC(datetime.now()) - timedelta(seconds=options.ptinterval) - \
            timedelta(seconds=options.ptbackoff)
    def run(self):
        while True:
            try:
                curRequestTime = toUTC(datetime.now()) - timedelta(seconds=options.ptbackoff)
                records = self.ptrequestor.request(options.ptquery, self.lastRequestTime, curRequestTime)
                # update last request time for the next request
                self.lastRequestTime = curRequestTime
                for msgid in records:
                    msgdict = records[msgid]
                    # strip any line feeds from the message itself, we just convert them
                    # into spaces
                    msgdict['message'] = msgdict['message'].replace('\n', ' ').replace('\r', '')
                    # Wrap the raw papertrail record in the MozDef event shape,
                    # lifting well-known fields to the top level.
                    event = dict()
                    event['tags'] = ['papertrail', options.ptacctname]
                    event['details'] = msgdict
                    if 'generated_at' in event['details']:
                        event['utctimestamp'] = toUTC(event['details']['generated_at']).isoformat()
                    if 'hostname' in event['details']:
                        event['hostname'] = event['details']['hostname']
                    if 'message' in event['details']:
                        event['summary'] = event['details']['message']
                    if 'severity' in event['details']:
                        event['severity'] = event['details']['severity']
                    if 'source_ip' in event['details']:
                        event['sourceipaddress'] = event['details']['source_ip']
                    else:
                        event['severity'] = 'INFO'
                    event['category'] = 'syslog'
                    # process message
                    self.on_message(event, msgdict)
                time.sleep(options.ptinterval)
            except ValueError as e:
                logger.exception('Exception while handling message: %r' % e)
    def on_message(self, body, message):
        # Normalize one event, run it through the plugin chain and index it.
        # The kombu-style ack/requeue calls are commented out because events
        # come from polling rather than a queue.
        # print("RECEIVED MESSAGE: %r" % (body, ))
        try:
            # default elastic search metadata for an event
            metadata = {
                'index': 'events',
                'id': None
            }
            # just to be safe..check what we were sent.
            if isinstance(body, dict):
                bodyDict = body
            elif isinstance(body, str) or isinstance(body, unicode):
                try:
                    bodyDict = json.loads(body)  # lets assume it's json
                except ValueError as e:
                    # not json..ack but log the message
                    logger.error("esworker exception: unknown body type received %r" % body)
                    # message.ack()
                    return
            else:
                logger.error("esworker exception: unknown body type received %r" % body)
                # message.ack()
                return
            if 'customendpoint' in bodyDict and bodyDict['customendpoint']:
                # custom document
                # send to plugins to allow them to modify it if needed
                (normalizedDict, metadata) = sendEventToPlugins(bodyDict, metadata, pluginList)
            else:
                # normalize the dict
                # to the mozdef events standard
                normalizedDict = keyMapping(bodyDict)
                # send to plugins to allow them to modify it if needed
                if normalizedDict is not None and isinstance(normalizedDict, dict):
                    (normalizedDict, metadata) = sendEventToPlugins(normalizedDict, metadata, pluginList)
            # drop the message if a plug in set it to None
            # signaling a discard
            if normalizedDict is None:
                # message.ack()
                return
            # make a json version for posting to elastic search
            jbody = json.JSONEncoder().encode(normalizedDict)
            try:
                bulk = False
                if options.esbulksize != 0:
                    bulk = True
                self.esConnection.save_event(
                    index=metadata['index'],
                    doc_id=metadata['id'],
                    body=jbody,
                    bulk=bulk
                )
            except (ElasticsearchBadServer, ElasticsearchInvalidIndex) as e:
                # handle loss of server or race condition with index rotation/creation/aliasing
                try:
                    self.esConnection = esConnect()
                    # message.requeue()
                    return
                except kombu.exceptions.MessageStateError:
                    # state may be already set.
                    return
            except ElasticsearchException as e:
                # exception target for queue capacity issues reported by elastic search so catch the error, report it and retry the message
                try:
                    logger.exception('ElasticSearchException: {0} reported while indexing event'.format(e))
                    # message.requeue()
                    return
                except kombu.exceptions.MessageStateError:
                    # state may be already set.
                    return
            # message.ack()
        except Exception as e:
            logger.exception(e)
            logger.error('Malformed message body: %r' % body)
def main():
    # Entry point: log how we were started, build the papertrail client
    # from the parsed options and run the consumer loop forever.
    if hasUWSGI:
        logger.info("started as uwsgi mule {0}".format(uwsgi.mule_id()))
    else:
        logger.info('started without uwsgi')
    # establish api interface with papertrail
    ptRequestor = PTRequestor(options.ptapikey, evmax=options.ptquerymax)
    # consume our queue
    taskConsumer(ptRequestor, es).run()
def initConfig():
    # Populate the module-level `options` namespace from the config file,
    # falling back to the defaults given as the second getConfig argument.
    # capture the hostname
    options.mozdefhostname = getConfig('mozdefhostname', socket.gethostname(), options.configfile)
    # elastic search options. set esbulksize to a non-zero value to enable bulk posting, set timeout to post no matter how many events after X seconds.
    options.esservers = list(getConfig('esservers', 'http://localhost:9200', options.configfile).split(','))
    options.esbulksize = getConfig('esbulksize', 0, options.configfile)
    options.esbulktimeout = getConfig('esbulktimeout', 30, options.configfile)
    # papertrail configuration
    options.ptapikey = getConfig('papertrailapikey', 'none', options.configfile)
    options.ptquery = getConfig('papertrailquery', '', options.configfile)
    options.ptinterval = getConfig('papertrailinterval', 60, options.configfile)
    options.ptbackoff = getConfig('papertrailbackoff', 300, options.configfile)
    options.ptacctname = getConfig('papertrailaccount', 'unset', options.configfile)
    options.ptquerymax = getConfig('papertrailmaxevents', 2000, options.configfile)
    # plugin options
    # secs to pass before checking for new/updated plugins
    # seems to cause memory leaks..
    # regular updates are disabled for now,
    # though we set the frequency anyway.
    options.plugincheckfrequency = getConfig('plugincheckfrequency', 120, options.configfile)
if __name__ == '__main__':
    # configure ourselves
    # Default config file sits next to this script with a .conf extension.
    parser = OptionParser()
    parser.add_option("-c", dest='configfile', default=sys.argv[0].replace('.py', '.conf'), help="configuration file to use")
    (options, args) = parser.parse_args()
    initConfig()
    initLogger(options)
    # open ES connection globally so we don't waste time opening it per message
    es = esConnect()
    pluginList = registerPlugins()
    # Flush any pending bulk-indexed events on shutdown, clean or not.
    try:
        main()
    except KeyboardInterrupt as e:
        logger.info("Exiting worker")
        if options.esbulksize != 0:
            es.finish_bulk()
    except Exception as e:
        if options.esbulksize != 0:
            es.finish_bulk()
        raise
| Phrozyn/MozDef | mq/esworker_papertrail.py | Python | mpl-2.0 | 16,704 |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import github.GithubObject
class Download(github.GithubObject.CompletableGithubObject):
    """
    This class represents Downloads. The reference can be found here https://developer.github.com/v3/repos/downloads/

    Every property funnels through ``_completeIfNotSet`` before returning,
    so a partially-initialised instance can lazily complete itself from the
    API (behaviour inherited from CompletableGithubObject).
    """
    def __repr__(self):
        return self.get__repr__({"id": self._id.value})
    @property
    def accesskeyid(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._accesskeyid)
        return self._accesskeyid.value
    @property
    def acl(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._acl)
        return self._acl.value
    @property
    def bucket(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._bucket)
        return self._bucket.value
    @property
    def content_type(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._content_type)
        return self._content_type.value
    @property
    def created_at(self):
        """
        :type: datetime.datetime
        """
        self._completeIfNotSet(self._created_at)
        return self._created_at.value
    @property
    def description(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._description)
        return self._description.value
    @property
    def download_count(self):
        """
        :type: integer
        """
        self._completeIfNotSet(self._download_count)
        return self._download_count.value
    @property
    def expirationdate(self):
        """
        :type: datetime.datetime
        """
        self._completeIfNotSet(self._expirationdate)
        return self._expirationdate.value
    @property
    def html_url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._html_url)
        return self._html_url.value
    @property
    def id(self):
        """
        :type: integer
        """
        self._completeIfNotSet(self._id)
        return self._id.value
    @property
    def mime_type(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._mime_type)
        return self._mime_type.value
    @property
    def name(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._name)
        return self._name.value
    @property
    def path(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._path)
        return self._path.value
    @property
    def policy(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._policy)
        return self._policy.value
    @property
    def prefix(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._prefix)
        return self._prefix.value
    @property
    def redirect(self):
        """
        :type: bool
        """
        self._completeIfNotSet(self._redirect)
        return self._redirect.value
    @property
    def s3_url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._s3_url)
        return self._s3_url.value
    @property
    def signature(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._signature)
        return self._signature.value
    @property
    def size(self):
        """
        :type: integer
        """
        self._completeIfNotSet(self._size)
        return self._size.value
    @property
    def url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._url)
        return self._url.value
    def delete(self):
        """
        :calls: `DELETE /repos/:owner/:repo/downloads/:id <http://developer.github.com/v3/repos/downloads>`_
        :rtype: None
        """
        # The response payload is intentionally discarded; success is
        # signalled by requestJsonAndCheck not raising.
        headers, data = self._requester.requestJsonAndCheck("DELETE", self.url)
    def _initAttributes(self):
        # Reset every attribute to the NotSet sentinel so property access
        # can distinguish "never fetched" from legitimate falsy values.
        self._accesskeyid = github.GithubObject.NotSet
        self._acl = github.GithubObject.NotSet
        self._bucket = github.GithubObject.NotSet
        self._content_type = github.GithubObject.NotSet
        self._created_at = github.GithubObject.NotSet
        self._description = github.GithubObject.NotSet
        self._download_count = github.GithubObject.NotSet
        self._expirationdate = github.GithubObject.NotSet
        self._html_url = github.GithubObject.NotSet
        self._id = github.GithubObject.NotSet
        self._mime_type = github.GithubObject.NotSet
        self._name = github.GithubObject.NotSet
        self._path = github.GithubObject.NotSet
        self._policy = github.GithubObject.NotSet
        self._prefix = github.GithubObject.NotSet
        self._redirect = github.GithubObject.NotSet
        self._s3_url = github.GithubObject.NotSet
        self._signature = github.GithubObject.NotSet
        self._size = github.GithubObject.NotSet
        self._url = github.GithubObject.NotSet
    def _useAttributes(self, attributes):
        # Copy known keys out of a raw API payload into typed attributes;
        # unknown keys are ignored.  "pragma no branch/cover" markers track
        # coverage of these generated-style branches.
        if "accesskeyid" in attributes:  # pragma no branch
            self._accesskeyid = self._makeStringAttribute(
                attributes["accesskeyid"]
            )  # pragma no cover (was covered only by create_download, which has been removed)
        if "acl" in attributes:  # pragma no branch
            self._acl = self._makeStringAttribute(
                attributes["acl"]
            )  # pragma no cover (was covered only by create_download, which has been removed)
        if "bucket" in attributes:  # pragma no branch
            self._bucket = self._makeStringAttribute(
                attributes["bucket"]
            )  # pragma no cover (was covered only by create_download, which has been removed)
        if "content_type" in attributes:  # pragma no branch
            self._content_type = self._makeStringAttribute(attributes["content_type"])
        if "created_at" in attributes:  # pragma no branch
            self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
        if "description" in attributes:  # pragma no branch
            self._description = self._makeStringAttribute(attributes["description"])
        if "download_count" in attributes:  # pragma no branch
            self._download_count = self._makeIntAttribute(attributes["download_count"])
        if "expirationdate" in attributes:  # pragma no branch
            self._expirationdate = self._makeDatetimeAttribute(
                attributes["expirationdate"]
            )  # pragma no cover (was covered only by create_download, which has been removed)
        if "html_url" in attributes:  # pragma no branch
            self._html_url = self._makeStringAttribute(attributes["html_url"])
        if "id" in attributes:  # pragma no branch
            self._id = self._makeIntAttribute(attributes["id"])
        if "mime_type" in attributes:  # pragma no branch
            self._mime_type = self._makeStringAttribute(
                attributes["mime_type"]
            )  # pragma no cover (was covered only by create_download, which has been removed)
        if "name" in attributes:  # pragma no branch
            self._name = self._makeStringAttribute(attributes["name"])
        if "path" in attributes:  # pragma no branch
            self._path = self._makeStringAttribute(
                attributes["path"]
            )  # pragma no cover (was covered only by create_download, which has been removed)
        if "policy" in attributes:  # pragma no branch
            self._policy = self._makeStringAttribute(
                attributes["policy"]
            )  # pragma no cover (was covered only by create_download, which has been removed)
        if "prefix" in attributes:  # pragma no branch
            self._prefix = self._makeStringAttribute(
                attributes["prefix"]
            )  # pragma no cover (was covered only by create_download, which has been removed)
        if "redirect" in attributes:  # pragma no branch
            self._redirect = self._makeBoolAttribute(
                attributes["redirect"]
            )  # pragma no cover (was covered only by create_download, which has been removed)
        if "s3_url" in attributes:  # pragma no branch
            self._s3_url = self._makeStringAttribute(
                attributes["s3_url"]
            )  # pragma no cover (was covered only by create_download, which has been removed)
        if "signature" in attributes:  # pragma no branch
            self._signature = self._makeStringAttribute(
                attributes["signature"]
            )  # pragma no cover (was covered only by create_download, which has been removed)
        if "size" in attributes:  # pragma no branch
            self._size = self._makeIntAttribute(attributes["size"])
        if "url" in attributes:  # pragma no branch
            self._url = self._makeStringAttribute(attributes["url"])
| ahmad88me/PyGithub | github/Download.py | Python | lgpl-3.0 | 11,274 |
"""
unit tests for clustershell roster
"""
from tests.support.mock import MagicMock, patch
# Import Salt Testing libraries
from tests.support.unit import TestCase, skipIf
try:
from ClusterShell.NodeSet import NodeSet # pylint: disable=unused-import
HAS_CLUSTERSHELL = True
except (ImportError, OSError) as e:
HAS_CLUSTERSHELL = False
@skipIf(
    HAS_CLUSTERSHELL is False,
    "Install Python Clustershell bindings before running these tests.",
)
class ClusterShellTestCase(TestCase):
    """
    Test cases for clustershell roster
    """

    def test_targets(self):
        """Verify targets() expands a nodeset, resolves hosts, and scans ports."""
        # Replace the real socket and ClusterShell modules with mocks so no
        # network access or host resolution happens during the test.
        mock_socket = MagicMock()
        mock_nodeset = MagicMock()
        mock_nodeset.NodeSet.return_value = ["foo"]
        with patch.dict(
            "sys.modules",
            **{"socket": mock_socket, "ClusterShell.NodeSet": mock_nodeset}
        ):
            import salt.roster.clustershell
            salt.roster.clustershell.__opts__ = {}
            with patch.dict(
                salt.roster.clustershell.__opts__,
                {"ssh_scan_ports": [1, 2, 3], "ssh_scan_timeout": 30},
            ):
                # Reimports are necessary to re-init the namespace.
                # pylint: disable=unused-import
                import socket
                from ClusterShell.NodeSet import NodeSet
                # pylint: enable=unused-import
                ret = salt.roster.clustershell.targets("foo")
        # The roster must resolve each expanded node name...
        mock_socket.gethostbyname.assert_any_call("foo")
        self.assertTrue("foo" in ret)
        # ...and report the last scanned port (mocked connects "succeed",
        # so the final entry of ssh_scan_ports wins).
        self.assertTrue(ret["foo"]["port"] == 3)
| saltstack/salt | tests/unit/roster/test_clustershell.py | Python | apache-2.0 | 1,592 |
### Paper RMSD selections ###
# Each entry pairs a short label with an MDAnalysis-style selection string.
sel = [
    ['a2_subdomain1_backbone', 'backbone and resid 57:68 and not name H*'],
    ['motif_2_backbone', 'backbone and resid 117:124 and not name H*'],
    ['aligned_CAs', 'protein and (resid 20:25 50:55 73:75 90:94 112:116 142:147 165:169 190:194 214:218 236:240 253:258 303:307) and name CA'],
    ['aligned_betas', 'protein and (resid 20:25 50:55 73:75 90:94 112:116 142:147 165:169 190:194 214:218 236:240 253:258 303:307) and not name H*'],
]
# Template for adding further selections:
#sel.append(['',''])
from OpenGL.GLUT import *
from OpenGL.GLU import *
from OpenGL.GL import *
import sys
name = 'ball_glut'
def main():
    """Create a GLUT window, configure lighting/camera, and enter the main loop.

    Never returns normally: glutMainLoop() runs until the window is closed.
    """
    glutInit(sys.argv)
    # Double-buffered RGB window with a depth buffer.
    glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH)
    glutInitWindowSize(400,400)
    glutCreateWindow(name)
    glClearColor(0.,0.,0.,1.)
    glShadeModel(GL_SMOOTH)
    glEnable(GL_CULL_FACE)
    glEnable(GL_DEPTH_TEST)
    glEnable(GL_LIGHTING)
    # Single positional light with attenuation.
    lightZeroPosition = [10.,4.,10.,1.]
    lightZeroColor = [0.8,1.0,0.8,1.0] #green tinged
    glLightfv(GL_LIGHT0, GL_POSITION, lightZeroPosition)
    glLightfv(GL_LIGHT0, GL_DIFFUSE, lightZeroColor)
    glLightf(GL_LIGHT0, GL_CONSTANT_ATTENUATION, 0.1)
    glLightf(GL_LIGHT0, GL_LINEAR_ATTENUATION, 0.05)
    glEnable(GL_LIGHT0)
    glutDisplayFunc(display)
    # 40-degree perspective projection, camera at z=10 looking at the origin.
    glMatrixMode(GL_PROJECTION)
    gluPerspective(40.,1.,1.,40.)
    glMatrixMode(GL_MODELVIEW)
    gluLookAt(0,0,10,
              0,0,0,
              0,1,0)
    glPushMatrix()
    glutMainLoop()
    return
def display():
    """GLUT display callback: draw a red sphere and swap buffers."""
    glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
    glPushMatrix()
    # Red diffuse material for the sphere.
    color = [1.0,0.,0.,1.]
    glMaterialfv(GL_FRONT,GL_DIFFUSE,color)
    # Sphere of radius 2 with 20 slices/stacks.
    glutSolidSphere(2,20,20)
    glPopMatrix()
    glutSwapBuffers()
    return
if __name__ == '__main__': main()
| ActiveState/code | recipes/Python/325391_Open_GLUT_window_draw_sphere_using/recipe-325391.py | Python | mit | 1,249 |
from .version import __version__
from dtreeviz.classifiers import clfviz
| parrt/dtreeviz | dtreeviz/__init__.py | Python | mit | 74 |
'''Module to handle unit conversions.
'''
# conversion functions
def identity(x):
    '''Identity conversion: return the value unchanged.

    Used when the source and target units are the same.
    '''
    return x
def m2ft(d):
    '''Convert a distance in meters to feet.
    '''
    return d * 3.28084
def ft2m(d):
    '''Convert a distance in feet to meters.
    '''
    return d * 0.3048
distance_convert_functions = {'m': {'m': identity, 'ft': m2ft},
'ft': {'m': ft2m, 'ft': identity}}
def c2f(t):
    '''Convert a temperature in degrees Celsius to Fahrenheit.
    '''
    return 32.0 + 9.0 * t / 5.0
def f2c(t):
    '''Convert a temperature in degrees Fahrenheit to Celsius.
    '''
    delta = t - 32.0
    return 5.0 / 9.0 * delta
temp_convert_functions = {'c': {'c': identity, 'f': c2f},
'f': {'c': f2c, 'f': identity}}
def get_convert_function(input_units, output_units):
    '''Helper routine to retrieve the appropriate conversion function given
    the input and output units.

    Units are matched case-insensitively against the distance table first,
    then the temperature table. Unknown input units fall back to `identity`.
    NOTE(review): assumes input and output units belong to the same table --
    e.g. ('m', 'f') would raise KeyError on the final lookup; confirm callers
    never mix dimensions.
    '''
    if input_units.lower() in distance_convert_functions:
        cf = distance_convert_functions[input_units.lower()]
    elif input_units.lower() in temp_convert_functions:
        cf = temp_convert_functions[input_units.lower()]
    else:
        return identity
    return cf[output_units.lower()]
def convert(units=None):
    '''Decorator to specify the units a function returns.

    The decorated function gains an optional ``units`` keyword: when given,
    the return value is converted from the declared units to the requested
    ones via `get_convert_function`; otherwise it is returned unchanged.
    The declared units are also exposed as the ``units`` attribute on the
    wrapper.
    '''
    def decorate(func):
        # Bind the decorator argument under a new name so the inner `f`
        # closes over the declared units, not its own `units` parameter.
        input_units = units
        def f(x, units=None):
            if units is None:
                cf = identity
            else:
                cf = get_convert_function(input_units, units)
            return cf(func(x))
        f.units = input_units
        return f
    return decorate
| mgalloy/burin | burin/units.py | Python | bsd-3-clause | 1,673 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from datetime import datetime
from flask.ext.sqlalchemy import BaseQuery
import simplejson
from sqlalchemy import Column, DateTime, Integer
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.types import TEXT, UserDefinedType, TypeDecorator
__all_mixins = ['IdMixin', 'TimestampMixin', 'PermissionMixin', 'UrlForMixin',
'BaseMixin']
class Query(BaseQuery):
    """
    Extends flask.ext.sqlalchemy.BaseQuery to add additional helper methods.
    """

    def one_or_none(self):
        """
        Like :meth:`one` but returns None if no results are found. Raises an exception
        if multiple results are found.
        """
        try:
            return self.one()
        except NoResultFound:
            return None

    def notempty(self):
        """
        Returns the equivalent of ``bool(query.count())`` but using an efficient
        SQL EXISTS function, so the database stops counting after the first result
        is found.
        """
        return self.session.query(self.exists()).first()[0]

    def isempty(self):
        """
        Returns the equivalent of ``not bool(query.count())`` but using an efficient
        SQL EXISTS function, so the database stops counting after the first result
        is found.
        """
        # Reuse notempty() so the EXISTS query is written in exactly one place
        # instead of being duplicated here.
        return not self.notempty()
class IdMixin(object):
    """
    Provides the :attr:`id` primary key column
    """
    # All models using this mixin get the helper methods of the custom Query.
    query_class = Query
    #: Database identity for this model, used for foreign key
    #: references from other models
    id = Column(Integer, primary_key=True)
def make_timestamp_columns():
    """Return a fresh (created_at, updated_at) column pair for use in a table.

    Both default to the current UTC time; updated_at is refreshed on every
    UPDATE via the onupdate hook.
    """
    return (
        Column('created_at', DateTime, default=datetime.utcnow, nullable=False),
        Column('updated_at', DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=False),
    )
class TimestampMixin(object):
    """
    Provides the :attr:`created_at` and :attr:`updated_at` audit timestamps
    """
    #: Timestamp for when this instance was created, in UTC
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
    #: Timestamp for when this instance was last updated (via the app), in UTC
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=False)
class PermissionMixin(object):
    """
    Mixin supplying the :meth:`permissions` hook used by BaseMixin and
    derived classes.
    """
    def permissions(self, user, inherited=None):
        """
        Return the set of permissions available to *user* on this object.

        The base implementation simply passes through any inherited
        permissions (or an empty set when none are supplied).
        """
        if inherited is None:
            return set()
        return set(inherited)
class UrlForMixin(object):
    """
    Placeholder :meth:`url_for` implementation for BaseMixin-derived classes.
    """
    def url_for(self, action='view', **kwargs):
        """
        Return the public URL for *action* on this instance.

        The base implementation knows no routes and always returns None;
        subclasses override this with real URL construction.
        """
        return None
class BaseMixin(IdMixin, TimestampMixin, PermissionMixin, UrlForMixin):
    """
    Base mixin for all tables: combines the id and timestamp columns with
    the stub :meth:`permissions` and :meth:`url_for` methods.
    """
    def _set_fields(self, fields):
        # Assign each keyword onto an existing attribute; reject unknown names
        # so typos fail loudly instead of silently creating new attributes.
        for attr, value in fields.items():
            if not hasattr(self, attr):
                raise TypeError("'{arg}' is an invalid argument for {instance_type}".format(arg=attr, instance_type=self.__class__.__name__))
            setattr(self, attr, value)
# --- Column types ------------------------------------------------------------
__all_columns = ['JsonDict']
class JsonType(UserDefinedType):
    """The PostgreSQL JSON type."""
    def get_col_spec(self):
        # Emitted verbatim into DDL as the column type.
        return "JSON"
class JsonbType(UserDefinedType):
    """The PostgreSQL JSONB type."""
    def get_col_spec(self):
        # Emitted verbatim into DDL as the column type.
        return "JSONB"
# Adapted from http://docs.sqlalchemy.org/en/rel_0_8/orm/extensions/mutable.html#establishing-mutability-on-scalar-column-values
class JsonDict(TypeDecorator):
    """
    Represents a JSON data structure. Usage::

        column = Column(JsonDict)

    Stored as TEXT on most databases; on PostgreSQL 9.2/9.3 the native JSON
    type is used, and on 9.4+ the binary JSONB type.
    """
    impl = TEXT

    def load_dialect_impl(self, dialect):
        # Pick the best native column type the connected database offers.
        if dialect.name == 'postgresql':
            version = tuple(dialect.server_version_info[:2])
            if version in [(9, 2), (9, 3)]:
                return dialect.type_descriptor(JsonType)
            elif version >= (9, 4):
                return dialect.type_descriptor(JsonbType)
        return dialect.type_descriptor(self.impl)

    def process_bind_param(self, value, dialect):
        # Serialize on the way in; unknown objects are coerced via unicode().
        # (Python 2 code: `unicode` is the builtin text type.)
        if value is not None:
            value = simplejson.dumps(value, default=lambda o: unicode(o))
        return value

    def process_result_value(self, value, dialect):
        if value is not None and isinstance(value, basestring):
            # Psycopg2 >= 2.5 will auto-decode JSON columns, so
            # we only attempt decoding if the value is a string.
            # Since this column stores dicts only, processed values
            # can never be strings.
            value = simplejson.loads(value, use_decimal=True)
        return value
__all__ = __all_mixins + __all_columns | amorwilliams/gst | server/apps/common/sqlalchemy.py | Python | mit | 5,210 |
from django.http import HttpResponse
from django.contrib.auth.models import User, Group
from django.utils import simplejson
from ganeti_webmgr.authentication.models import ClusterUser
def search_users(request):
    """ search users and groups and return results as json

    Query string parameters:
      * term   -- optional name prefix to search for
      * pk     -- optional primary key for a direct lookup
      * groups -- 'True' (default) to include groups in the results
    """
    # QueryDict.get returns None for missing keys, replacing the verbose
    # "if key in request.GET" look-before-you-leap checks.
    term = request.GET.get('term')
    pk = request.GET.get('pk')
    limit = 10
    if request.GET.get("groups", 'True') == 'True':
        data = simplejson.dumps(search_users_and_groups(term, pk, limit))
    else:
        data = simplejson.dumps(search_users_only(term, pk, limit))
    return HttpResponse(data, mimetype="application/json")
def search_owners(request):
    """Search ClusterUsers (possible owners) and return results as JSON.

    Query string parameters:
      * term -- optional name prefix to search for
      * pk   -- optional primary key for a direct lookup
    """
    # QueryDict.get returns None for missing keys, replacing the verbose
    # "if key in request.GET" look-before-you-leap checks.
    term = request.GET.get('term')
    pk = request.GET.get('pk')
    limit = 10
    data = simplejson.dumps(search_cluster_users(term, pk, limit))
    return HttpResponse(data, mimetype="application/json")
def search_cluster_users(term=None, pk=None, limit=10):
    """Return up to *limit* ClusterUsers matching *pk* or name-prefix *term*.

    The result is a dict with the echoed 'query' string and 'results', a
    sorted list of (name, kind, pk) tuples where kind is 'user', 'group'
    or 'other' depending on the ClusterUser's real type.
    """
    if pk:
        clusterUsers = ClusterUser.objects.filter(id=int(pk))
    elif term:
        clusterUsers = ClusterUser.objects.filter(name__istartswith=term)
    else:
        clusterUsers = ClusterUser.objects.all()
    clusterUsers = clusterUsers.values('pk', 'name')
    if pk:
        # NOTE(review): raises IndexError if no ClusterUser has this pk --
        # confirm callers only pass pks that exist.
        query = clusterUsers[0]['name']
    elif term:
        query = term
    else:
        query = ""
    if limit:
        clusterUsers = clusterUsers[:limit]
    # lable each item based on its real_type
    labeledUsers = []
    for i in clusterUsers:
        f = 'other'
        # One extra query per row to discover the concrete subclass.
        userType = str(ClusterUser.objects.get(id=i['pk'])
                       .cast()._get_real_type())
        if userType == "profile":
            f = 'user'
        elif userType == "organization":
            f = 'group'
        labeledUsers.append((i['name'], f, i['pk']))
    clusterUsers = labeledUsers
    # sort list and crop out all but the top [limit] results
    clusterUsers = sorted(clusterUsers, key=lambda x: x[0])
    clusterUsers = clusterUsers if len(clusterUsers) \
        < limit else clusterUsers[:limit]
    return {
        'query': query,
        'results': clusterUsers
    }
def search_users_only(term=None, pk=None, limit=10):
    """
    Returns a list of the top N matches from Users with a name
    starting with term

    @param term: the term to search for
    @param limit: the number of results to return
    """
    if pk:
        users = User.objects.filter(id=int(pk))
    elif term:
        users = User.objects.filter(username__istartswith=term)
    else:
        users = User.objects.all()
    users = users.values('pk', 'username')
    if pk:
        # NOTE(review): raises IndexError if no User has this pk.
        query = users[0]['username']
    elif term:
        query = term
    else:
        query = ""
    if limit:
        users = users[:limit]
    # lable each item as a user
    f = 'user'
    users = [(i['username'], f, i['pk']) for i in users]
    # sort list and crop out all but the top [limit] results
    users = sorted(users, key=lambda x: x[0])
    users = users if len(users) \
        < limit else users[:limit]
    return {
        'query': query,
        'results': users
    }
def search_users_and_groups(term=None, pk=None, limit=10):
    """
    Returns a list of the top N matches from Groups and Users with a name
    starting with term.

    Warning: Searching for users and groups using a primary key will return
    a match from both users AND groups

    @param term: the term to search for
    @param pk: the primary key of the user/group to search for
    @param limit: the number of results to return
    """
    if pk:
        users = User.objects.filter(id=int(pk))
        groups = Group.objects.filter(id=int(pk))
    elif term:
        users = User.objects.filter(username__istartswith=term)
        groups = Group.objects.filter(name__istartswith=term)
    else:
        users = User.objects.all()
        groups = Group.objects.all()
    users = users.values('pk', 'username')
    groups = groups.values('pk', 'name')
    # Unlike the single-type searches, pk lookups do not echo a name back.
    if pk:
        query = ""
    elif term:
        query = term
    else:
        query = ""
    if limit:
        users = users[:limit]
        groups = groups[:limit]
    # label each item as either user or a group
    f = 'user'
    users = [(i['username'], f, i['pk']) for i in users]
    f = 'group'
    groups = [(i['name'], f, i['pk']) for i in groups]
    # merge lists together
    # then sort lists and crop out all but the top [limit] results
    merged = users + groups
    merged = sorted(merged, key=lambda x: x[0])
    merged = merged if len(merged) < limit else merged[:limit]
    return {
        'query': query,
        'results': merged
    }
| dannyman/ganeti_webmgr | ganeti_webmgr/ganeti_web/views/user_search.py | Python | gpl-2.0 | 4,845 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Copyright (C) 2010-2012 Cristian Salamea Gnuthink Software Labs Cia. Ltda
{
'name': 'Ecuador - Accounting',
'version': '1.1',
'category': 'Localization',
'description': """
This is the base module to manage the accounting chart for Ecuador in Odoo.
==============================================================================
Accounting chart and localization for Ecuador.
""",
'author': 'Gnuthink Co.Ltd.',
'depends': [
'account',
'base_vat',
'base_iban',
],
'data': [
'data/l10n_ec_chart_data.xml',
'data/account_tax_data.xml',
'data/account_chart_template_data.yml',
],
}
| chienlieu2017/it_management | odoo/addons/l10n_ec/__manifest__.py | Python | gpl-3.0 | 769 |
from . import BaseWordChoice
class WordPreference(BaseWordChoice):
    """Word-choice strategy that favours a remembered 'prefered' word per meaning."""
    # NOTE(review): `context=[]` is a mutable default argument; it is never
    # used in this body, but confirm before relying on it elsewhere.
    def pick_w(self,m,voc,mem,context=[]):
        """Pick a word for meaning *m* from vocabulary *voc* using memory *mem*.

        Order of preference: the remembered word for m (if still known),
        any random known word for m, a brand-new unknown word, and finally
        a random known word chosen with option='min'.
        """
        if m in voc.get_known_meanings():
            if m in list(mem['prefered words'].keys()):
                w = mem['prefered words'][m]
                # The remembered preference may have been forgotten for m;
                # fall back to any known word in that case.
                if w not in voc.get_known_words(m=m):
                    w = voc.get_random_known_w(m=m)
            else:
                w = voc.get_random_known_w(m=m)
        elif voc.get_unknown_words():
            w = voc.get_new_unknown_w()
        else:
            w = voc.get_random_known_w(option='min')
        return w
class PlaySmart(WordPreference):
    # WordPreference wired to the 'wordpreference_smart' memory policy.
    def __init__(self, *args, **kwargs):
        WordPreference.__init__(self,memory_policies=[{'mem_type':'wordpreference_smart'}],*args,**kwargs)
class PlayLast(WordPreference):
    # WordPreference wired to the 'wordpreference_last' memory policy.
    def __init__(self, *args, **kwargs):
        WordPreference.__init__(self,memory_policies=[{'mem_type':'wordpreference_last'}],*args,**kwargs)
class PlayFirst(WordPreference):
    # WordPreference wired to the 'wordpreference_first' memory policy.
    def __init__(self, *args, **kwargs):
        WordPreference.__init__(self,memory_policies=[{'mem_type':'wordpreference_first'}],*args,**kwargs)
| flowersteam/naminggamesal | naminggamesal/ngstrat/word_choice/word_preference.py | Python | agpl-3.0 | 997 |
import os
import CTK
UPLOAD_DIR = "/tmp"
def ok (filename, target_dir, target_file, params):
    """Upload-completion handler: render an HTML summary of the stored file."""
    # NOTE(review): target_dir/target_file are interpolated into a shell
    # command run via os.popen -- a crafted upload filename could inject
    # shell commands. Acceptable in this demo script; do not copy to
    # production code without sanitizing.
    txt = "<h1>It worked!</h1>"
    txt += "<pre>%s</pre>" %(os.popen("ls -l '%s'" %(os.path.join(target_dir, target_file))).read())
    txt += "<p>Params: %s</p>" %(str(params))
    txt += "<p>Filename: %s</p>" %(filename)
    return txt
class default:
    """CTK page with two uploader widgets demonstrating direct and temporal modes."""
    def __init__ (self):
        self.page = CTK.Page ()
        self.page += CTK.RawHTML ("<h1>Direct Upload with params</h1>")
        # Direct upload: file goes straight to UPLOAD_DIR; extra params passed through.
        self.page += CTK.Uploader({'handler': ok, 'target_dir': UPLOAD_DIR}, {'var':'foo'})
        self.page += CTK.RawHTML ("<h1>Temporal Upload without params</h1>")
        # Temporal upload: staged through a temporary location (direct=False).
        self.page += CTK.Uploader({'handler': ok, 'target_dir': UPLOAD_DIR}, direct=False)

    def __call__ (self):
        # CTK invokes the handler instance; return the rendered page.
        return self.page.Render()
CTK.publish ('', default)
CTK.run (port=8000)
| cherokee/pyscgi | tests/test5.py | Python | bsd-3-clause | 850 |
# -*- coding: utf-8 -*-
from cmd_resolution import analysis, words_to_func
def test_analysis():
    # Leading/trailing whitespace must be ignored when tokenizing.
    tokens = analysis(' play 1213 ')
    assert tokens == ['play', '1213']
def test_words_to_func():
    # First token becomes the function name, the rest its arguments.
    tokens = ['play', '1213', 'hello']
    assert words_to_func(tokens) == 'play(1213, hello)'
| JanlizWorldlet/FeelUOwn | history_research/Cli/tests/test_analysis.py | Python | mit | 310 |
class AnonError(Exception):
    """Error type raised by the anon subpackage."""
    pass
__all__ = ('AnonError',)
| tek/amino | amino/anon/error.py | Python | mit | 64 |
# -*- Mode:Python -*-
##########################################################################
# #
# This file is part of AVANGO. #
# #
# Copyright 1997 - 2010 Fraunhofer-Gesellschaft zur Foerderung der #
# angewandten Forschung (FhG), Munich, Germany. #
# #
# AVANGO is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Lesser General Public License as #
# published by the Free Software Foundation, version 3. #
# #
# AVANGO is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public #
# License along with AVANGO. If not, see <http://www.gnu.org/licenses/>. #
# #
##########################################################################
import avango
def dump_fields(script):
    """Return a human-readable dump of *script*'s name and all of its fields.

    The first line reports the script name; each following line has the form
    "Field: <name> of type: <type> = <value>".
    """
    # Accumulate the lines in a list and join once: repeated `buffer +=`
    # string concatenation is quadratic in the number of fields.
    # (range replaces xrange; it behaves identically here and also works
    # on Python 3.)
    parts = ["ScriptName <" + str(script.Name.value) + ">\n"]
    for i in range(script.get_num_fields()):
        field = script._get_field(i)
        field_name = field._get_name()
        field_type = field._get_type()
        parts.append("Field: <" + str(field_name) + "> of type: <" + str(field_type) + "> = <" + str(field.value) + ">\n")
    return "".join(parts)
class ScriptFieldPrinter(avango.script.Script):
    """Field-container script that prints the fields of the script in :attr:`Script`
    on every frame (Python 2 avango code)."""
    # The script whose fields are dumped each evaluation cycle.
    Script = avango.script.SFObject()

    def __init__(self):
        self.super(ScriptFieldPrinter).__init__()
        # Evaluate every frame, not only when inputs change.
        self.always_evaluate(True)
        self.Name.value = "ScriptFieldPrinter"

    def evaluate(self):
        if self.Script.value:
            buffer = dump_fields(self.Script.value)
            print buffer
| vrsys/avangong | avango-utils/python/_field_dump.py | Python | lgpl-3.0 | 2,357 |
"""Low-level AMQP client for Python (fork of amqplib)"""
# Copyright (C) 2007-2008 Barry Pederson <bp@barryp.org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
from __future__ import absolute_import
# Package version as a tuple for programmatic comparison.
VERSION = (1, 0, 13)
# Dotted version string, e.g. "1.0.13"; elements past the third (if any)
# are appended verbatim (e.g. a "rc1" suffix).
__version__ = '.'.join(map(str, VERSION[0:3])) + ''.join(VERSION[3:])
__author__ = 'Barry Pederson'
__maintainer__ = 'Ask Solem'
__contact__ = 'pyamqp@celeryproject.org'
__homepage__ = 'http://github.com/celery/py-amqp'
__docformat__ = 'restructuredtext'
# -eof meta-
#
# Pull in the public items from the various sub-modules
#
from .basic_message import Message
from .channel import Channel
from .connection import Connection
from .exceptions import (
AMQPError,
ConnectionError,
ChannelError,
ConsumerCancel,
)
__all__ = [
'Connection',
'Channel',
'Message',
'AMQPError',
'ConnectionError',
'ChannelError',
'ConsumerCancel',
]
| neumerance/deploy | .venv/lib/python2.7/site-packages/amqp/__init__.py | Python | apache-2.0 | 1,576 |
##### Description of this python file #####
# This is the file to calculate the adjusted elevation and determine whether or not slope/flow directions need to be recalculated
##### VARIABLES - Used in this file#####
# DTM - Digital terrain model of the river catchment
# cell_size - the cell size of the model
# nbr - area of search in ArcGIS
# lowest_cell - raster containing the value of the minimum surrounding elevation
# height_difference - difference between the elevation of each cell and the surrounding cell
# slope - the slope of the river catchment in degrees
# slope_threshold - a value which must be exceeded for mass wasting to take place.
#
#
#---------------------------------------------------------------------#
##### START OF CODE #####
# Import statements
import numpy as np
import arcpy
from arcpy.sa import *
from itertools import izip
import gc
### Import Script Files NJ created ###
import active_inactive_layer_check
import elevation_adjustment
class masswasting_sediment(object):
    ### A function to calculate the lowest cell in a neighbour hood. This is using tanX = Opposite (Height difference) / Adjacent (Cell width) ###
    def calculate_slope_degrees(self, DTM, bottom_left_corner, cell_size, flow_direction_np, save_date):
        """Return the slope (degrees) of each DTM cell relative to its lowest 3x3 neighbour.

        Cells with no flow direction are treated as nodata (-9999).
        Side effect: saves the slope raster as "slope<save_date>".
        """
        # Propagate nodata from the flow-direction grid into the DTM.
        DTM[flow_direction_np == -9999] = -9999
        DTM_ras = arcpy.NumPyArrayToRaster(DTM, bottom_left_corner, cell_size, cell_size, -9999)
        # Set local variables
        nbr = NbrRectangle(3, 3, "CELL")
        # Execute BlockStatistics to find the minimum surrounding cell
        lowest_cell = FocalStatistics(DTM_ras, nbr, "MINIMUM", "DATA")
        # Calculate the difference between the DTM and the lowest surrounding cell
        height_difference = DTM_ras - lowest_cell
        # Calculate the slope between cells
        # First calculate opposite over adjacent
        height_difference /= cell_size
        # Then use inverse tan to calculate the slope
        slope = ATan(height_difference) * (180/np.pi)
        slope.save("slope" + save_date)
        # Convert slope to numpy to check if any cells are greater than 45 degrees
        slope_np = arcpy.RasterToNumPyArray(slope, '#', '#', '#', -9999)
        # Clean up after DTM
        arcpy.Delete_management(DTM_ras)
        arcpy.Delete_management(lowest_cell)
        del slope, height_difference, DTM_ras
        return slope_np
def get_cellsgreater_45degrees(self, slope, active_layer, inactive_layer):
def is_empty(any_structure):
if any_structure:
return False
else:
return True
# The follow code is adapted from the peice of code that moves sediment through the system
slope_mask = np.zeros_like(slope, dtype = float)
slope_threshold = 45
# Get indices with great enough slope to intiate mass wasting
# - this only gets the cells with great enough slope for sediment movemnet to occur by mass wasting
np.putmask(slope_mask, slope >= 45, slope)
# Get the indices where the sediment transport is greater than 0
sort_idx = np.flatnonzero(slope_mask)
# Now return those indices as a list
new_idx = zip(*np.unravel_index(sort_idx[::-1], slope.shape))
# Check that if the slope is greater than 45 degrees that there is availiable sediment to move
final_idx = []
for x in new_idx:
if active_layer[x] != 0 and inactive_layer[x] != 0:
final_idx.append(x)
empty = is_empty(final_idx)
# Now check that there are slopes greater than 45 degrees and that there is availiable sediment to move
if np.any(slope_mask >= 45) and empty == False:
carryout_masswasting = True
arcpy.AddMessage("There are cells with a steep slope therefore mass wasting will be calculated.")
else:
carryout_masswasting = False
arcpy.AddMessage("Mass wasting will not be calculated.")
arcpy.AddMessage("-------------------------")
return carryout_masswasting, final_idx
def sediment_movement_amount(self, active_layer_proportion, new_idx, cell_size):
sediment_entrainment_out = np.zeros_like(active_layer_proportion, dtype = float)
removal_amount = 0.05 * (cell_size * cell_size)
for i, j in new_idx:
sediment_entrainment_out[i, j] = active_layer_proportion[i, j] * removal_amount
return sediment_entrainment_out
    def move_sediment(self, sediment_entrainment_out, new_idx, flow_direction_np):
        """Route the outgoing sediment volumes one cell downstream.

        For every index in *new_idx*, the volume leaving that cell is added
        to its downstream neighbour according to the ArcGIS D8 flow-direction
        code in *flow_direction_np*. Returns the incoming-volume grid with
        nodata (-9999) where flow direction is nodata.
        """
        # Get the rows and columns of the slope file
        nrows, ncols = flow_direction_np.shape
        # Pads the array with zeros to prevent negative indexing
        tmp = np.zeros((nrows+2, ncols+2), dtype = float)
        tmp[1:-1, 1:-1] = sediment_entrainment_out
        sediment_entrainment_out = tmp
        # Create empty array for the moved sediment
        sediment_entrainment_in = np.zeros_like(sediment_entrainment_out, dtype = float)
        # D8 flow-direction code -> (row offset, col offset) of the receiving cell.
        lookup = {32: (-1, -1),
                  16: (0, -1),
                  8:(1, -1),
                  4:(1, 0),
                  64: (-1, 0),
                  128:(-1, 1),
                  1: (0, 1),
                  2: (1, 1)}
        for i, j in new_idx:
            # Need to take into account the offset in the "padded_transport"
            r, c = i + 1, j + 1
            # This also allows for flow_direction values not listed above...
            dr, dc = lookup.get(flow_direction_np[i, j], (0,0)) # Gets the flow direction for that cell
            #if grain_transport_mask[r, c] <= grain_size_active_layer_mask[r, c]: # this adds in a check to make sure that there is sufficient sediment in the active layer to transport it.
            sediment_entrainment_in[r + dr, c + dc] += sediment_entrainment_out[r, c] # move the sediment in the downstream direction by one timestep.
        # Strip the padding off again and re-apply the nodata mask.
        sediment_entrainment_in_fin = np.zeros_like(flow_direction_np, dtype=float)
        sediment_entrainment_in_fin = sediment_entrainment_in[1:-1, 1:-1]
        sediment_entrainment_in_fin[flow_direction_np == -9999] = -9999
        return sediment_entrainment_in_fin
    def masswasting_loop(self, DTM, DTM_MINUS_AL_IAL, active_layer, inactive_layer, bottom_left_corner, cell_size, flow_direction_np,
                            active_layer_GS_P_temp, active_layer_V_temp,
                            inactive_layer_GS_P_temp, inactive_layer_V_temp, recalculate_slope_flow):
        """Iteratively move sediment off over-steep (>=45 degree) cells.

        Each pass moves 5% of the cell area per steep cell one cell downstream
        for every grain size, recomputes proportions, layer depths and the DTM,
        and repeats until no cell with available sediment exceeds 45 degrees.
        Per-grain-size arrays are streamed from/to the .npy files named in the
        *_temp lists to keep memory use down.
        Returns the updated (DTM, DTM_MINUS_AL_IAL, recalculate_slope_flow,
        active_layer, inactive_layer).
        """
        # Couple of counters which are useful for logic checking
        mass_loop_counter = 0
        save_date = "0"
        # First calculate the slope of the cells
        arcpy.AddMessage("Checking if any cells have a slope greater than 45 degrees and sediment available to be transported")
        np.set_printoptions(precision=4)
        DTM[flow_direction_np == -9999] = -9999
        slope = self.calculate_slope_degrees(DTM, bottom_left_corner, cell_size, flow_direction_np, save_date)
        # Check if any of then are greater than 45 degrees
        conduct_masswasting, new_idx = self.get_cellsgreater_45degrees(slope, active_layer, inactive_layer)
        grain_size_counter = 1
        while conduct_masswasting == True:
            mass_loop_counter += 1
            arcpy.AddMessage("Starting loop " + str(mass_loop_counter))
            total_volume = np.zeros_like(slope, dtype = float)
            # Pass 1: per grain size, move volume downstream and accumulate totals.
            for active_layer_proportion_temp, active_layer_volume_temp in izip(active_layer_GS_P_temp, active_layer_V_temp):
                # Locad the arrays from the disk
                active_layer_proportion = np.load(active_layer_proportion_temp)
                active_layer_volume = np.load(active_layer_volume_temp)
                # Calculate the amount of sediment that can be moved out of each cell
                sediment_entrainment_out = self.sediment_movement_amount(active_layer_proportion, new_idx, cell_size)
                # Calculate sediment transport in for that grainsize
                sediment_entrainment_in = self.move_sediment(sediment_entrainment_out, new_idx, flow_direction_np)
                # Calculate the change in sediment volume
                new_grain_volume = active_layer_volume - sediment_entrainment_out + sediment_entrainment_in
                np.save(active_layer_volume_temp, new_grain_volume)
                # Update the total volume
                total_volume += new_grain_volume
                # Increment the grainsize by 1 for the next round of calculations
                grain_size_counter = grain_size_counter + 1
                # Collect garbage
                del sediment_entrainment_out, sediment_entrainment_in, new_grain_volume
                collected = gc.collect()
                arcpy.AddMessage("Garbage collector: collected %d objects." % (collected))
            # Pass 2: recompute each grain size's proportion of the new totals.
            # Count the grainsizes as the model works through them
            grain_size_counter = 1
            for active_layer_proportion_temp, active_layer_volume_temp in izip(active_layer_GS_P_temp, active_layer_V_temp):
                # Locad the arrays from the disk
                active_layer_volume = np.load(active_layer_volume_temp)
                active_layer_proportion = active_layer_volume / total_volume
                arcpy.AddMessage("Calculated new proportions after mass wasting for grainsize " + str(grain_size_counter))
                # Check for nodata and nan values and save to disk
                active_layer_proportion[total_volume == 0] = 0
                active_layer_proportion[flow_direction_np == -9999] = -9999
                np.save(active_layer_proportion_temp, active_layer_proportion)
                # Update the counter
                grain_size_counter += 1
                if grain_size_counter == 8:
                    grain_size_counter = 1
                del active_layer_volume, active_layer_proportion
            # Collect garbage
            collected = gc.collect()
            arcpy.AddMessage("Garbage collector: collected %d objects." % (collected))
            # Need to update active layer and inactive layer depths if required.
            active_layer, inactive_layer = active_inactive_layer_check.active_layer_depth(total_volume, inactive_layer, active_layer_GS_P_temp, active_layer_V_temp,
                                                                                inactive_layer_GS_P_temp, inactive_layer_V_temp, cell_size)
            # Convert DTM back to a raster
            #DTM = arcpy.RasterToNumPyArray(DTM, '#', '#', '#', -9999)
            # Need to recalculate the DTM
            ### Check if elevations need to be recalculated ###
            DTM, DTM_MINUS_AL_IAL, recalculate_slope_flow = elevation_adjustment.update_DTM_elevations(DTM, DTM_MINUS_AL_IAL, active_layer, inactive_layer, cell_size)
            # Depths come back per unit area; convert back to volumes.
            inactive_layer *= (cell_size*cell_size)
            active_layer *= (cell_size*cell_size)
            DTM[flow_direction_np == -9999] = -9999
            slope = self.calculate_slope_degrees(DTM, bottom_left_corner, cell_size, flow_direction_np, save_date)
            save_date = str(mass_loop_counter)
            # Check if any of then are greater than 45 degrees
            conduct_masswasting, new_idx = self.get_cellsgreater_45degrees(slope, active_layer, inactive_layer)
        return DTM, DTM_MINUS_AL_IAL, recalculate_slope_flow, active_layer, inactive_layer
def calculate_slope_fraction_raster_in(self, DTM, bottom_left_corner, cell_size):
# Set local variables
nbr = NbrRectangle(3, 3, "CELL")
# Execute BlockStatistics to find the minimum surrounding cell
lowest_cell = FocalStatistics(DTM, nbr, "MINIMUM", "DATA")
# Calculate the difference between the DTM and the lowest surrounding cell
height_difference = DTM - lowest_cell
# Calculate the slope between cells
# First calculate opposite over adjacent
height_difference /= cell_size
# Then use inverse tan to calculate the slope
slope = ATan(height_difference) * (180/np.pi)
# Convert slope to numpy to check if any cells are greater than 45 degrees
slope = arcpy.RasterToNumPyArray(slope, '#', '#', '#', -9999)
# Convert slope to fraction of slope rather than degrees or radians
np.radians(slope)
np.tan(slope)
# Catch statement for areas with 0 slope
slope[slope == 0] = 0.0001
arcpy.AddMessage("Slope calculated")
arcpy.AddMessage("-------------------------")
return slope
def calculate_slope_fraction(self, DTM, bottom_left_corner, cell_size, save_date):
# Convert the DTM to a raster
DTM_ras = arcpy.NumPyArrayToRaster(DTM, bottom_left_corner, cell_size, cell_size, -9999)
#DTM_ras.save("ele_" + save_date)
# Set local variables
nbr = NbrRectangle(3, 3, "CELL")
# Execute BlockStatistics to find the minimum surrounding cell
lowest_cell = FocalStatistics(DTM_ras, nbr, "MINIMUM", "DATA")
# Calculate the difference between the DTM and the lowest surrounding cell
height_difference = DTM_ras - lowest_cell
# Calculate the slope between cells
# First calculate opposite over adjacent
height_difference /= cell_size
# Then use inverse tan to calculate the slope
slope = ATan(height_difference) * (180/np.pi)
### SAVE A COPY OF THE SLOPE and DTM FOR TESTING PURPOSE ONLY ###
#slope.save("slope" + save_date)
# Convert slope to numpy to check if any cells are greater than 45 degrees
slope_np = arcpy.RasterToNumPyArray(slope, '#', '#', '#', -9999)
# Convert slope to fraction of slope rather than degrees or radians
np.radians(slope_np)
np.tan(slope_np)
# Clean up after DTM
arcpy.Delete_management(DTM_ras)
arcpy.Delete_management(lowest_cell)
del slope, DTM_ras, height_difference, lowest_cell
gc.collect()
# Catch statement for areas with 0 or negative slope
slope_np[slope_np == 0] = 0.000001
slope_np[slope_np < 0] = 0.000001
arcpy.AddMessage("Slope calculated")
arcpy.AddMessage("-------------------------")
return slope_np | nickicejones/ENGAGE | ENGAGE2.0/ENGAGE2.0/Main model code/masswasting.py | Python | gpl-2.0 | 15,037 |
"""
ARCHES - a program developed to inventory and manage immovable cultural heritage.
Copyright (C) 2013 J. Paul Getty Trust and World Monuments Fund
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
from operator import itemgetter, attrgetter
from tests import test_settings
from tests.base_test import ArchesTestCase
from django.core import management
from arches.app.models import models
from arches.app.utils.betterJSONSerializer import JSONSerializer
# these tests can be run from the command line via
# python manage.py test tests/importer/ontology_import_tests.py --pattern="*.py" --settings="tests.test_settings"
class OntologyModelTests(ArchesTestCase):
    """Tests for the ``load_ontology`` management command.

    Verifies that, after importing the CIDOC-CRM fixtures, the computed
    ``target`` graph of E53_Place contains the expected outgoing properties
    and, for P1_is_identified_by, the expected range classes.
    """

    @classmethod
    def setUpClass(cls):
        # Load the ontology fixtures once for the whole test class.
        management.call_command("load_ontology", source=test_settings.ONTOLOGY_FIXTURES)

    @classmethod
    def tearDownClass(cls):
        # Remove the ontology imported in setUpClass so the database is
        # clean for other test classes.
        ontology = models.Ontology.objects.get(pk="11111111-0000-0000-0000-000000000000")
        ontology.delete()

    def test_load_ontology(self):
        """E53_Place exposes the expected properties and identifier classes."""
        ontology_class = models.OntologyClass.objects.get(
            ontology__pk="11111111-0000-0000-0000-000000000000", source="http://www.cidoc-crm.org/cidoc-crm/E53_Place"
        )
        predicted_property_list = {
            "http://www.cidoc-crm.org/cidoc-crm/P1_is_identified_by",
            "http://www.cidoc-crm.org/cidoc-crm/P2_has_type",
            "http://www.cidoc-crm.org/cidoc-crm/P3_has_note",
            "http://www.cidoc-crm.org/cidoc-crm/P48_has_preferred_identifier",
            "http://www.cidoc-crm.org/cidoc-crm/P137_exemplifies",
            "http://www.cidoc-crm.org/cidoc-crm/P15i_influenced",
            "http://www.cidoc-crm.org/cidoc-crm/P17i_motivated",
            "http://www.cidoc-crm.org/cidoc-crm/P136i_supported_type_creation",
            "http://www.cidoc-crm.org/cidoc-crm/P62i_is_depicted_by",
            "http://www.cidoc-crm.org/cidoc-crm/P67i_is_referred_to_by",
            "http://www.cidoc-crm.org/cidoc-crm/P70i_is_documented_in",
            "http://www.cidoc-crm.org/cidoc-crm/P71i_is_listed_in",
            "http://www.cidoc-crm.org/cidoc-crm/P129i_is_subject_of",
            "http://www.cidoc-crm.org/cidoc-crm/P138i_has_representation",
            "http://www.cidoc-crm.org/cidoc-crm/P140i_was_attributed_by",
            "http://www.cidoc-crm.org/cidoc-crm/P39i_was_measured_by",
            "http://www.cidoc-crm.org/cidoc-crm/P41i_was_classified_by",
            "http://www.cidoc-crm.org/cidoc-crm/P141i_was_assigned_by",
            "http://www.cidoc-crm.org/cidoc-crm/P87_is_identified_by",
            "http://www.cidoc-crm.org/cidoc-crm/P89_falls_within",
            "http://www.cidoc-crm.org/cidoc-crm/P121_overlaps_with",
            "http://www.cidoc-crm.org/cidoc-crm/P122_borders_with",
            "http://www.cidoc-crm.org/cidoc-crm/P157_is_at_rest_relative_to",
            "http://www.cidoc-crm.org/cidoc-crm/P168_place_is_defined_by",
            "http://www.cidoc-crm.org/cidoc-crm/P7i_witnessed",
            "http://www.cidoc-crm.org/cidoc-crm/P26i_was_destination_of",
            "http://www.cidoc-crm.org/cidoc-crm/P27i_was_origin_of",
            "http://www.cidoc-crm.org/cidoc-crm/P53i_is_former_or_current_location_of",
            "http://www.cidoc-crm.org/cidoc-crm/P54i_is_current_permanent_location_of",
            "http://www.cidoc-crm.org/cidoc-crm/P55i_currently_holds",
            "http://www.cidoc-crm.org/cidoc-crm/P59i_is_located_on_or_within",
            "http://www.cidoc-crm.org/cidoc-crm/P74i_is_current_or_former_residence_of",
            "http://www.cidoc-crm.org/cidoc-crm/P89i_contains",
            "http://www.cidoc-crm.org/cidoc-crm/P161i_is_spatial_projection_of",
            "http://www.cidoc-crm.org/cidoc-crm/P156i_is_occupied_by",
            "http://www.cidoc-crm.org/cidoc-crm/P167i_was_place_at",
        }
        self.assertEqual(len(ontology_class.target["down"]), len(predicted_property_list))
        result_property_list = {item["ontology_property"] for item in ontology_class.target["down"]}
        self.assertEqual(result_property_list, predicted_property_list)
        predicted_subclass_list = {
            "http://www.cidoc-crm.org/cidoc-crm/E41_Appellation",
            "http://www.cidoc-crm.org/cidoc-crm/E42_Identifier",
            "http://www.cidoc-crm.org/cidoc-crm/E44_Place_Appellation",
            "http://www.cidoc-crm.org/cidoc-crm/E45_Address",
            # BUG FIX: a missing comma previously fused the next two URIs into
            # one nonexistent class via implicit string concatenation.
            "http://www.cidoc-crm.org/cidoc-crm/E46_Section_Definition",
            "http://www.cidoc-crm.org/cidoc-crm/E47_Spatial_Coordinates",
            "http://www.cidoc-crm.org/cidoc-crm/E48_Place_Name",
            "http://www.cidoc-crm.org/cidoc-crm/E49_Time_Appellation",
            "http://www.cidoc-crm.org/cidoc-crm/E50_Date",
            "http://www.cidoc-crm.org/cidoc-crm/E75_Conceptual_Object_Appellation",
            "http://www.cidoc-crm.org/cidoc-crm/E82_Actor_Appellation",
            "http://www.cidoc-crm.org/cidoc-crm/E51_Contact_Point",
            "http://www.cidoc-crm.org/cidoc-crm/E35_Title",
        }
        for item in ontology_class.target["down"]:
            # BUG FIX: the original compared item["ontology_classes"] (a list)
            # against the property URI, so this branch never executed and the
            # assertion below was dead code.
            if item["ontology_property"] == "http://www.cidoc-crm.org/cidoc-crm/P1_is_identified_by":
                self.assertEqual(set(item["ontology_classes"]), predicted_subclass_list)
# {u'ontology_property': u'P89_falls_within', u'ontology_classes': [u'E53_Place']}
# {u'ontology_property': u'P89i_contains', u'ontology_classes': [u'E53_Place']}
# {u'ontology_property': u'P121_overlaps_with', u'ontology_classes': [u'E53_Place']}
# {u'ontology_property': u'P122_borders_with', u'ontology_classes': [u'E53_Place']}
# {u'ontology_property': u'P161i_is_spatial_projection_of', u'ontology_classes': [u'E15_Identifier_Assignment', u'E22_Man-Made_Object', u'E86_Leaving', u'E67_Birth', u'E69_Death', u'E8_Acquisition', u'E19_Physical_Object', u'E7_Activity', u'E4_Period', u'E80_Part_Removal', u'E65_Creation', u'E27_Site', u'E87_Curation_Activity', u'E83_Type_Creation', u'E21_Person', u'E78_Collection', u'E81_Transformation', u'E68_Dissolution', u'E5_Event', u'E20_Biological_Object', u'E6_Destruction', u'E63_Beginning_of_Existence', u'E9_Move', u'E12_Production', u'E92_Spacetime_Volume', u'E79_Part_Addition', u'E18_Physical_Thing', u'E16_Measurement', u'E26_Physical_Feature', u'E94_Space', u'E10_Transfer_of_Custody', u'E84_Information_Carrier', u'E17_Type_Assignment', u'E85_Joining', u'E13_Attribute_Assignment', u'E93_Presence', u'E66_Formation', u'E64_End_of_Existence', u'E11_Modification', u'E24_Physical_Man-Made_Thing', u'E14_Condition_Assessment', u'E25_Man-Made_Feature']}
# {u'ontology_property': u'P87_is_identified_by', u'ontology_classes': [u'E44_Place_Appellation', u'E48_Place_Name', u'E47_Spatial_Coordinates', u'E45_Address', u'E46_Section_Definition']}
# {u'ontology_property': u'P53i_is_former_or_current_location_of', u'ontology_classes': [u'E22_Man-Made_Object', u'E21_Person', u'E18_Physical_Thing', u'E25_Man-Made_Feature', u'E78_Collection', u'E19_Physical_Object', u'E26_Physical_Feature', u'E84_Information_Carrier', u'E20_Biological_Object', u'E24_Physical_Man-Made_Thing', u'E27_Site']}
# {u'ontology_property': u'P59i_is_located_on_or_within', u'ontology_classes': [u'E22_Man-Made_Object', u'E21_Person', u'E18_Physical_Thing', u'E25_Man-Made_Feature', u'E78_Collection', u'E19_Physical_Object', u'E26_Physical_Feature', u'E84_Information_Carrier', u'E20_Biological_Object', u'E24_Physical_Man-Made_Thing', u'E27_Site']}
# {u'ontology_property': u'P156i_is_occupied_by', u'ontology_classes': [u'E22_Man-Made_Object', u'E21_Person', u'E18_Physical_Thing', u'E25_Man-Made_Feature', u'E78_Collection', u'E19_Physical_Object', u'E26_Physical_Feature', u'E84_Information_Carrier', u'E20_Biological_Object', u'E24_Physical_Man-Made_Thing', u'E27_Site']}
# {u'ontology_property': u'P157_is_at_rest_relative_to', u'ontology_classes': [u'E22_Man-Made_Object', u'E21_Person', u'E18_Physical_Thing', u'E25_Man-Made_Feature', u'E78_Collection', u'E19_Physical_Object', u'E26_Physical_Feature', u'E84_Information_Carrier', u'E20_Biological_Object', u'E24_Physical_Man-Made_Thing', u'E27_Site']}
# {u'ontology_property': u'P74i_is_current_or_former_residence_of', u'ontology_classes': [u'E40_Legal_Body', u'E74_Group', u'E39_Actor', u'E21_Person']}
# {u'ontology_property': u'P54i_is_current_permanent_location_of', u'ontology_classes': [u'E84_Information_Carrier', u'E21_Person', u'E22_Man-Made_Object', u'E20_Biological_Object', u'E19_Physical_Object']}
# {u'ontology_property': u'P55i_currently_holds', u'ontology_classes': [u'E84_Information_Carrier', u'E21_Person', u'E22_Man-Made_Object', u'E20_Biological_Object', u'E19_Physical_Object']}
# {u'ontology_property': u'P7i_witnessed', u'ontology_classes': [u'E15_Identifier_Assignment', u'E87_Curation_Activity', u'E67_Birth', u'E69_Death', u'E8_Acquisition', u'E7_Activity', u'E4_Period', u'E80_Part_Removal', u'E65_Creation', u'E86_Leaving', u'E16_Measurement', u'E81_Transformation', u'E68_Dissolution', u'E5_Event', u'E6_Destruction', u'E63_Beginning_of_Existence', u'E9_Move', u'E12_Production', u'E79_Part_Addition', u'E83_Type_Creation', u'E10_Transfer_of_Custody', u'E17_Type_Assignment', u'E85_Joining', u'E13_Attribute_Assignment', u'E66_Formation', u'E64_End_of_Existence', u'E11_Modification', u'E14_Condition_Assessment']}
# {u'ontology_property': u'P168_place_is_defined_by', u'ontology_classes': [u'E94_Space']}
# {u'ontology_property': u'P26i_was_destination_of', u'ontology_classes': [u'E9_Move']}
# {u'ontology_property': u'P27i_was_origin_of', u'ontology_classes': [u'E9_Move']}
# {u'ontology_property': u'P39i_was_measured_by', u'ontology_classes': [u'E16_Measurement']}
# {u'ontology_property': u'P140i_was_attributed_by', u'ontology_classes': [u'E15_Identifier_Assignment', u'E16_Measurement', u'E17_Type_Assignment', u'E13_Attribute_Assignment', u'E14_Condition_Assessment']}
# {u'ontology_property': u'P141i_was_assigned_by', u'ontology_classes': [u'E15_Identifier_Assignment', u'E16_Measurement', u'E17_Type_Assignment', u'E13_Attribute_Assignment', u'E14_Condition_Assessment']}
# {u'ontology_property': u'P48_has_preferred_identifier', u'ontology_classes': [u'E42_Identifier']}
# {u'ontology_property': u'P2_has_type', u'ontology_classes': [u'E57_Material', u'E58_Measurement_Unit', u'E56_Language', u'E55_Type']}
# {u'ontology_property': u'P137_exemplifies', u'ontology_classes': [u'E57_Material', u'E58_Measurement_Unit', u'E56_Language', u'E55_Type']}
# {u'ontology_property': u'P1_is_identified_by', u'ontology_classes': [u'E51_Contact_Point', u'E75_Conceptual_Object_Appellation', u'E42_Identifier', u'E45_Address', u'E41_Appellation', u'E44_Place_Appellation', u'E35_Title', u'E50_Date', u'E82_Actor_Appellation', u'E48_Place_Name', u'E49_Time_Appellation', u'E47_Spatial_Coordinates', u'E46_Section_Definition']}
# {u'ontology_property': u'P67i_is_referred_to_by', u'ontology_classes': [u'E37_Mark', u'E30_Right', u'E89_Propositional_Object', u'E38_Image', u'E29_Design_or_Procedure', u'E36_Visual_Item', u'E32_Authority_Document', u'E31_Document', u'E34_Inscription', u'E73_Information_Object', u'E33_Linguistic_Object', u'E35_Title']}
# {u'ontology_property': u'P129i_is_subject_of', u'ontology_classes': [u'E37_Mark', u'E30_Right', u'E89_Propositional_Object', u'E38_Image', u'E29_Design_or_Procedure', u'E36_Visual_Item', u'E32_Authority_Document', u'E31_Document', u'E34_Inscription', u'E73_Information_Object', u'E33_Linguistic_Object', u'E35_Title']}
# {u'ontology_property': u'P136i_supported_type_creation', u'ontology_classes': [u'E83_Type_Creation']}
# {u'ontology_property': u'P71i_is_listed_in', u'ontology_classes': [u'E32_Authority_Document']}
# {u'ontology_property': u'P15i_influenced', u'ontology_classes': [u'E15_Identifier_Assignment', u'E12_Production', u'E16_Measurement', u'E13_Attribute_Assignment', u'E87_Curation_Activity', u'E79_Part_Addition', u'E66_Formation', u'E11_Modification', u'E8_Acquisition', u'E14_Condition_Assessment', u'E80_Part_Removal', u'E65_Creation', u'E10_Transfer_of_Custody', u'E7_Activity', u'E17_Type_Assignment', u'E83_Type_Creation', u'E9_Move', u'E85_Joining', u'E86_Leaving']}
# {u'ontology_property': u'P17i_motivated', u'ontology_classes': [u'E15_Identifier_Assignment', u'E12_Production', u'E16_Measurement', u'E13_Attribute_Assignment', u'E87_Curation_Activity', u'E79_Part_Addition', u'E66_Formation', u'E11_Modification', u'E8_Acquisition', u'E14_Condition_Assessment', u'E80_Part_Removal', u'E65_Creation', u'E10_Transfer_of_Custody', u'E7_Activity', u'E17_Type_Assignment', u'E83_Type_Creation', u'E9_Move', u'E85_Joining', u'E86_Leaving']}
# {u'ontology_property': u'P70i_is_documented_in', u'ontology_classes': [u'E32_Authority_Document', u'E31_Document']}
# {u'ontology_property': u'P3_has_note', u'ontology_classes': [u'E62_String']}
# {u'ontology_property': u'P138i_has_representation', u'ontology_classes': [u'E34_Inscription', u'E37_Mark', u'E36_Visual_Item', u'E38_Image']}
# {u'ontology_property': u'P62i_is_depicted_by', u'ontology_classes': [u'E84_Information_Carrier', u'E24_Physical_Man-Made_Thing', u'E22_Man-Made_Object', u'E25_Man-Made_Feature', u'E78_Collection']}
# {u'ontology_property': u'P41i_was_classified_by', u'ontology_classes': [u'E17_Type_Assignment']}
| archesproject/arches | tests/importer/ontology_import_tests.py | Python | agpl-3.0 | 14,162 |
#!/usr/bin/env python
import unittest
from mapreduce import parameters
from mapreduce.api import map_job
from mapreduce.api.map_job import sample_input_reader
class MapJobConfigTest(unittest.TestCase):
  """Tests for map_job.JobConfig.

  JobConfig is declarative, so most functional coverage already lives in
  its parent class; these tests only sanity-check construction.
  """

  def testSmoke(self):
    """Defaults are filled in: a job id is generated, shard count set."""
    kwargs = dict(
        job_name="foo",
        mapper=map_job.Mapper,
        input_reader_cls=sample_input_reader.SampleInputReader,
        input_reader_params={"foo": 1})
    config = map_job.JobConfig(**kwargs)
    self.assertEqual("foo", config.job_name)
    self.assertTrue(config.job_id)
    self.assertEqual(map_job.Mapper, config.mapper)
    self.assertEqual(sample_input_reader.SampleInputReader,
                     config.input_reader_cls)
    self.assertEqual({"foo": 1}, config.input_reader_params)
    self.assertEqual(parameters.config.SHARD_COUNT, config.shard_count)

  def testUserProvidesJobID(self):
    """An explicitly supplied job_id is kept instead of auto-generated."""
    config = map_job.JobConfig(
        job_name="foo",
        job_id="id",
        mapper=map_job.Mapper,
        input_reader_cls=sample_input_reader.SampleInputReader,
        input_reader_params={"foo": 1})
    self.assertEqual("id", config.job_id)


if __name__ == "__main__":
  unittest.main()
| potatolondon/potato-mapreduce | test/mapreduce/api/map_job/map_job_config_test.py | Python | apache-2.0 | 1,243 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-01-25 22:52
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.datetime_safe
class Migration(migrations.Migration):
    """Initial schema for the account app: PhotoLogo and UserProfile."""

    # First migration for this app.
    initial = True

    dependencies = [
        # UserProfile (and PhotoLogo, via UserProfile) reference the
        # project's configured user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='PhotoLogo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('photo', models.ImageField(upload_to='')),
                # NOTE(review): upload time is stored as a plain string, not a
                # DateTimeField -- confirm that is intentional.
                ('upload_time', models.CharField(default='', max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('logo', models.ImageField(default='logo_none.jpg', upload_to='')),
                ('gender', models.CharField(blank=True, max_length=6)),
                ('birthday', models.DateField(default=django.utils.datetime_safe.date.today)),
                ('country', models.CharField(blank=True, max_length=100)),
                ('city', models.CharField(blank=True, max_length=50)),
                ('mobile_number', models.CharField(blank=True, max_length=10)),
                ('about', models.CharField(blank=True, max_length=999999)),
                # One profile per user; deleting the user deletes the profile.
                ('user', models.OneToOneField(default=0, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Added after both models exist because PhotoLogo.owner points at
        # UserProfile.
        migrations.AddField(
            model_name='photologo',
            name='owner',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='account.UserProfile'),
        ),
    ]
| YuriyLisovskiy/messenger | account/migrations/0001_initial.py | Python | mit | 2,010 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import setup
setup.process_args()
from email.message import Message
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from google.appengine.api.users import User
from utils import TestCase
from codereview import models, views
class TestIncomingMail(TestCase):
  """Tests for views._process_incoming_mail and the /_ah/mail handler.

  Covers subject parsing, sender bookkeeping, multipart part selection,
  body truncation, and charset/transfer-encoding handling.
  """

  def setUp(self):
    """Create two issues owned by foo@example.com, then log out again."""
    super(TestIncomingMail, self).setUp()
    self.login('foo@example.com')
    self.issue = models.Issue(subject='test')
    self.issue.put()
    self.issue2 = models.Issue(subject='test2')
    self.issue2.put()
    self.logout()

  def test_incoming_mail(self):
    """A well-formed reply is stored as a Message on the named issue only."""
    msg = Message()
    msg['To'] = 'reply@example.com'
    msg['From'] = 'sender@example.com'
    msg['Subject'] = 'subject (issue%s)' % self.issue.key().id()
    msg.set_payload('body')
    response = self.client.post('/_ah/mail/reply@example.com',
                                msg.as_string(), content_type='text/plain')
    self.assertEqual(response.status_code, 200)
    # Only the issue referenced in the subject gains a message.
    self.assertEqual(models.Message.all().ancestor(self.issue).count(), 1)
    self.assertEqual(models.Message.all().ancestor(self.issue2).count(), 0)
    msg = models.Message.all().ancestor(self.issue).get()
    self.assertEqual(msg.text, 'body')
    self.assertEqual(msg.subject,
                     'subject (issue%s)' % self.issue.key().id())
    self.assertEqual(msg.sender, 'sender@example.com')
    self.assertEqual(msg.recipients, ['reply@example.com'])
    self.assert_(msg.date is not None)
    self.assertEqual(msg.draft, False)

  def test_incoming_mail_invalid_subject(self):
    """A subject with no issue id is accepted (HTTP 200) but stores nothing."""
    msg = Message()
    msg['To'] = 'reply@example.com'
    msg['From'] = 'sender@example.com'
    msg['Subject'] = 'invalid'
    msg.set_payload('body')
    # NOTE(review): sibling tests post msg.as_string(); this posts the
    # Message object and relies on str() coercion -- confirm intended.
    response = self.client.post('/_ah/mail/reply@example.com',
                                msg, content_type='text/plain')
    self.assertEqual(response.status_code, 200)
    self.assertEqual(models.Message.all().ancestor(self.issue).count(), 0)

  def test_unknown_issue(self):
    """A subject naming a nonexistent issue id raises the error."""
    msg = Message()
    msg['From'] = 'sender@example.com'
    msg['Subject'] = 'subject (issue99999)'
    msg.set_payload('body')
    self.assertRaises(views.InvalidIncomingEmailError,
                      views._process_incoming_mail, msg.as_string(),
                      'reply@example.com')

  def test_empty_message(self):
    """A message with no payload at all is rejected."""
    msg = Message()
    msg['From'] = 'sender@example.com'
    msg['Subject'] = 'subject (issue%s)\r\n\r\n' % self.issue.key().id()
    self.assertRaises(views.InvalidIncomingEmailError,
                      views._process_incoming_mail, msg.as_string(),
                      'reply@example.com')

  def test_senders_becomes_reviewer(self):
    """The mail sender is appended to the issue's reviewer list."""
    msg = Message()
    msg['From'] ='sender@example.com'
    msg['Subject'] = 'subject (issue%s)' % self.issue.key().id()
    msg.set_payload('body')
    views._process_incoming_mail(msg.as_string(), 'reply@example.com')
    issue = models.Issue.get_by_id(self.issue.key().id())  # re-fetch issue
    self.assertEqual(issue.reviewers, ['sender@example.com'])
    issue.reviewers = []
    issue.put()
    # try again with sender that has an account
    # we do this to handle CamelCase emails correctly
    models.Account.get_account_for_user(User('sender@example.com'))
    views._process_incoming_mail(msg.as_string(), 'reply@example.com')
    issue = models.Issue.get_by_id(self.issue.key().id())
    self.assertEqual(issue.reviewers, ['sender@example.com'])

  def test_long_subjects(self):
    # multi-line subjects should be collapsed into a single line
    msg = Message()
    msg['Subject'] = ('foo '*30)+' (issue%s)' % self.issue.key().id()
    msg['From'] = 'sender@example.com'
    msg.set_payload('body')
    views._process_incoming_mail(msg.as_string(), 'reply@example.com')
    imsg = models.Message.all().ancestor(self.issue).get()
    self.assertEqual(len(imsg.subject.splitlines()), 1)

  def test_multipart(self):
    """Only the text/plain part of a multipart message is stored."""
    # Text first
    msg = MIMEMultipart('alternative')
    msg['Subject'] = 'subject (issue%s)' % self.issue.key().id()
    msg['From'] = 'sender@example.com'
    msg.attach(MIMEText('body', 'plain'))
    msg.attach(MIMEText('ignore', 'html'))
    views._process_incoming_mail(msg.as_string(), 'reply@example.com')
    imsg = models.Message.all().ancestor(self.issue).get()
    self.assertEqual(imsg.text, 'body')
    imsg.delete()
    # HTML first
    msg = MIMEMultipart('alternative')
    msg['Subject'] = 'subject (issue%s)' % self.issue.key().id()
    msg['From'] = 'sender@example.com'
    msg.attach(MIMEText('ignore', 'html'))
    msg.attach(MIMEText('body', 'plain'))
    views._process_incoming_mail(msg.as_string(), 'reply@example.com')
    imsg = models.Message.all().ancestor(self.issue).get()
    self.assertEqual(imsg.text, 'body')
    imsg.delete()
    # no text at all
    msg = MIMEMultipart('alternative')
    msg['Subject'] = 'subject (issue%s)' % self.issue.key().id()
    msg['From'] = 'sender@example.com'
    msg.attach(MIMEText('ignore', 'html'))
    self.assertRaises(views.InvalidIncomingEmailError,
                      views._process_incoming_mail, msg.as_string(),
                      'reply@example.com')

  def test_mails_from_appengine(self):  # bounces
    """Mail stamped with an App Engine app id (e.g. bounces) is rejected."""
    msg = Message()
    msg['Subject'] = 'subject (issue%s)' % self.issue.key().id()
    msg['From'] = 'sender@example.com'
    msg['X-Google-Appengine-App-Id'] = 'foo'
    # NOTE(review): 'reply@exampe.com' looks like a typo for 'example.com';
    # harmless here since the app-id check rejects first -- confirm.
    self.assertRaises(views.InvalidIncomingEmailError,
                      views._process_incoming_mail, msg.as_string(),
                      'reply@exampe.com')

  def test_huge_body_is_truncated(self):  # see issue325
    """Oversized bodies are capped at 500 KiB with a truncation marker."""
    msg = Message()
    msg['subject'] = 'subject (issue%s)' % self.issue.key().id()
    msg['From'] = 'sender@example.com'
    msg.set_payload('1' * 600 * 1024)
    views._process_incoming_mail(msg.as_string(), 'reply@example.com')
    imsg = models.Message.all().ancestor(self.issue).get()
    self.assertEqual(len(imsg.text), 500 * 1024)
    self.assert_(imsg.text.endswith('... (message truncated)'))

  def test_charset(self):
    # make sure that incoming mails with non-ascii chars are handled correctly
    # see related http://code.google.com/p/googleappengine/issues/detail?id=2326
    jtxt = '\x1b$B%O%m!<%o!<%k%I!*\x1b(B'
    jcode = 'iso-2022-jp'
    msg = Message()
    msg.set_payload(jtxt, jcode)
    msg['Subject'] = 'subject (issue%s)' % self.issue.key().id()
    msg['From'] = 'sender@example.com'
    views._process_incoming_mail(msg.as_string(), 'reply@example.com')
    imsg = models.Message.all().ancestor(self.issue).get()
    # Round-trip: stored unicode text re-encodes to the original bytes.
    self.assertEqual(imsg.text.encode(jcode), jtxt)

  def test_encoding(self):
    # make sure that incoming mails with 8bit encoding are handled correctly.
    # see related http://code.google.com/p/googleappengine/issues/detail?id=2383
    jtxt = '\x1b$B%O%m!<%o!<%k%I!*\x1b(B'
    jcode = 'iso-2022-jp'
    msg = Message()
    msg.set_payload(jtxt, jcode)
    msg['Subject'] = 'subject (issue%s)' % self.issue.key().id()
    msg['From'] = 'sender@example.com'
    del msg['Content-Transfer-Encoding']  # replace 7bit encoding
    msg['Content-Transfer-Encoding'] = '8bit'
    views._process_incoming_mail(msg.as_string(), 'reply@example.com')
    imsg = models.Message.all().ancestor(self.issue).get()
    self.assertEqual(imsg.text.encode(jcode), jtxt)

  def test_missing_encoding(self):
    # make sure that incoming mails with missing encoding and
    # charset are handled correctly.
    body = 'Âfoo'
    msg = ('From: sender@example.com',
           'Subject: subject (issue%s)' % self.issue.key().id(),
           '',
           body)
    views._process_incoming_mail('\r\n'.join(msg), 'reply@example.com')
    imsg = models.Message.all().ancestor(self.issue).get()
    self.assertEqual(imsg.text, u'Âfoo')
    imsg.delete()
    # Undecodable bytes become the Unicode replacement character.
    body = '\xf6'
    msg = ('From: sender@example.com',
           'Subject: subject (issue%s)' % self.issue.key().id(),
           '',
           body)
    views._process_incoming_mail('\r\n'.join(msg), 'reply@example.com')
    imsg = models.Message.all().ancestor(self.issue).get()
    self.assertEqual(imsg.text, u'\ufffd')


if __name__ == '__main__':
  unittest.main()
| riannucci/rietveldv2 | tests/test_incomingmail.py | Python | apache-2.0 | 8,777 |
from typing import Optional
from fastapi import FastAPI, Security
from fastapi.security import OAuth2AuthorizationCodeBearer
from fastapi.testclient import TestClient
app = FastAPI()

# Security scheme under test: OAuth2 authorization-code flow carrying a
# custom description that must surface in the generated OpenAPI document.
oauth2_scheme = OAuth2AuthorizationCodeBearer(
    authorizationUrl="authorize",
    tokenUrl="token",
    description="OAuth2 Code Bearer",
    auto_error=True,
)


@app.get("/items/")
async def read_items(token: Optional[str] = Security(oauth2_scheme)):
    """Echo back the bearer token extracted by the security scheme."""
    payload = {"token": token}
    return payload


client = TestClient(app)
# Expected OpenAPI document for the app above, asserted verbatim in
# test_openapi_schema.  Note the security scheme includes the custom
# "OAuth2 Code Bearer" description passed to the constructor.
openapi_schema = {
    "openapi": "3.0.2",
    "info": {"title": "FastAPI", "version": "0.1.0"},
    "paths": {
        "/items/": {
            "get": {
                "responses": {
                    "200": {
                        "description": "Successful Response",
                        "content": {"application/json": {"schema": {}}},
                    }
                },
                "summary": "Read Items",
                "operationId": "read_items_items__get",
                "security": [{"OAuth2AuthorizationCodeBearer": []}],
            }
        }
    },
    "components": {
        "securitySchemes": {
            "OAuth2AuthorizationCodeBearer": {
                "type": "oauth2",
                "flows": {
                    "authorizationCode": {
                        "authorizationUrl": "authorize",
                        "tokenUrl": "token",
                        "scopes": {},
                    }
                },
                "description": "OAuth2 Code Bearer",
            }
        }
    },
}
def test_openapi_schema():
    """The served OpenAPI document must match the expected schema verbatim."""
    resp = client.get("/openapi.json")
    assert resp.status_code == 200, resp.text
    assert resp.json() == openapi_schema
def test_no_token():
    """Requests without an Authorization header are rejected (auto_error=True)."""
    resp = client.get("/items")
    assert resp.status_code == 401, resp.text
    assert resp.json() == {"detail": "Not authenticated"}
def test_incorrect_token():
    """A non-Bearer Authorization scheme counts as unauthenticated."""
    resp = client.get("/items", headers={"Authorization": "Non-existent testtoken"})
    assert resp.status_code == 401, resp.text
    assert resp.json() == {"detail": "Not authenticated"}
def test_token():
    """A Bearer token is extracted by the scheme and echoed by the endpoint."""
    resp = client.get("/items", headers={"Authorization": "Bearer testtoken"})
    assert resp.status_code == 200, resp.text
    assert resp.json() == {"token": "testtoken"}
| tiangolo/fastapi | tests/test_security_oauth2_authorization_code_bearer_description.py | Python | mit | 2,343 |
# All of the other examples directly embed the Javascript and CSS code for
# Bokeh's client-side runtime into the HTML. This leads to the HTML files
# being rather large. An alternative is to ask Bokeh to produce HTML that
# has a relative link to the Bokeh Javascript and CSS. This is easy to
# do; you just pass in a few extra arguments to the output_file() command.
import numpy as np
from bokeh.plotting import *
# Build a simple sine wave and save the plot with relative links to the
# BokehJS/CSS resources instead of embedding them (see mode="relative").
num_points = 100
xs = np.linspace(0, 4 * np.pi, num_points)
ys = np.sin(xs)

output_file("relative_paths.html", title="Relative path example", mode="relative")

fig = figure(tools="pan,wheel_zoom,box_zoom,reset,save")
fig.circle(xs, ys, alpha=0.5, color="tomato", radius=0.1)
show(fig)
# By default, the URLs for the Javascript and CSS will be relative to
# the current directory, i.e. the directory in which the HTML file is
# generated. You can provide a different "root" directory from which
# the relative paths will be computed:
#
# output_file("relative_paths.html", title="Relative path example",
# resources="relative", rootdir="some/other/path")
| zrhans/python | exemplos/Examples.lnk/bokeh/plotting/file/relative_paths.py | Python | gpl-2.0 | 1,061 |
# -*- coding: utf-8 -*-
#
# Copyright (C) Zing contributors.
#
# This file is a part of the Zing project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import pytest
from pootle_app.forms import LanguageForm
@pytest.mark.parametrize(
    "specialchars",
    [" abcde ", " ab cd", " abcde", "abcde ", " a b c d e ", " a b c d e "],
)
@pytest.mark.django_db
def test_clean_specialchars_whitespace(specialchars):
    """Whitespace must survive cleaning of the special-characters field."""
    form = LanguageForm({
        "code": "foo",
        "fullname": "Foo",
        "checkstyle": "foo",
        "nplurals": "2",
        "specialchars": specialchars,
    })
    assert form.is_valid()
    assert " " in form.cleaned_data["specialchars"]
@pytest.mark.parametrize(
    "specialchars, count_char",
    [(" abcde ", " "), (" aaaaaaaaaa", "a"), ("āéĩøøøøøøü", u"ø")],
)
@pytest.mark.django_db
def test_clean_specialchars_unique(specialchars, count_char):
    """Duplicate special characters are collapsed to a single occurrence."""
    form = LanguageForm({
        "code": "foo",
        "fullname": "Foo",
        "checkstyle": "foo",
        "nplurals": "2",
        "specialchars": specialchars,
    })
    assert form.is_valid()
    assert form.cleaned_data["specialchars"].count(count_char) == 1
| evernote/zing | tests/forms/language.py | Python | gpl-3.0 | 1,448 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from openstack_dashboard.dashboards.project.networks.subnets \
import views as project_views
from .workflows import CreateSubnet
from .workflows import UpdateSubnet
LOG = logging.getLogger(__name__)
class CreateView(project_views.CreateView):
    """Admin subnet-creation view; reuses the project view with the
    admin-specific CreateSubnet workflow."""
    workflow_class = CreateSubnet
class UpdateView(project_views.UpdateView):
    """Admin subnet-edit view; reuses the project view with the
    admin-specific UpdateSubnet workflow."""
    workflow_class = UpdateSubnet
| fajoy/horizon-example | openstack_dashboard/dashboards/admin/networks/subnets/views.py | Python | apache-2.0 | 1,035 |
from __future__ import unicode_literals
from django.utils import six
from djblets.testing.decorators import add_fixtures
from djblets.webapi.errors import PERMISSION_DENIED
from reviewboard.attachments.models import FileAttachment
from reviewboard.webapi.resources import resources
from reviewboard.webapi.tests.base import BaseWebAPITestCase
from reviewboard.webapi.tests.mimetypes import (
draft_file_attachment_item_mimetype,
draft_file_attachment_list_mimetype)
from reviewboard.webapi.tests.mixins import BasicTestsMetaclass
from reviewboard.webapi.tests.urls import (get_draft_file_attachment_item_url,
get_draft_file_attachment_list_url)
@six.add_metaclass(BasicTestsMetaclass)
class ResourceListTests(BaseWebAPITestCase):
    """Testing the DraftFileAttachmentResource list APIs."""

    fixtures = ['test_users']
    sample_api_url = 'review-requests/<id>/draft/file-attachments/'
    resource = resources.draft_file_attachment

    def compare_item(self, item_rsp, attachment):
        """Check that a serialized item matches its FileAttachment."""
        self.assertEqual(item_rsp['id'], attachment.pk)
        self.assertEqual(item_rsp['filename'], attachment.filename)

    #
    # HTTP GET tests
    #

    def setup_basic_get_test(self, user, with_local_site, local_site_name,
                             populate_items):
        """Build a review request (optionally with a draft attachment) for
        the metaclass-generated GET tests.
        """
        review_request = self.create_review_request(
            with_local_site=with_local_site,
            submitter=user)
        if populate_items:
            items = [self.create_file_attachment(review_request, draft=True)]
        else:
            items = []
        return (get_draft_file_attachment_list_url(review_request,
                                                   local_site_name),
                draft_file_attachment_list_mimetype,
                items)

    def test_get_with_non_owner_superuser(self):
        """Testing the GET review-requests/<id>/draft/file-attachments/ API
        with non-owner as superuser
        """
        review_request = self.create_review_request(submitter=self.user,
                                                    publish=True)
        attachment = self.create_file_attachment(review_request, draft=True)
        # A superuser who does not own the review request can still list
        # its draft attachments.
        user = self._login_user(admin=True)
        self.assertNotEqual(user, review_request.submitter)
        rsp = self.api_get(
            get_draft_file_attachment_list_url(review_request),
            expected_mimetype=draft_file_attachment_list_mimetype)
        self.assertEqual(rsp['stat'], 'ok')
        attachments = rsp['draft_file_attachments']
        self.assertEqual(len(attachments), 1)
        self.assertEqual(attachments[0]['id'], attachment.pk)

    @add_fixtures(['test_site'])
    def test_get_with_non_owner_local_site_admin(self):
        """Testing the GET review-requests/<id>/draft/file-attachments/ API
        with non-owner as LocalSite admin
        """
        review_request = self.create_review_request(submitter=self.user,
                                                    with_local_site=True,
                                                    publish=True)
        attachment = self.create_file_attachment(review_request, draft=True)
        # A LocalSite admin (not a global superuser) also gets access.
        user = self._login_user(local_site=True, admin=True)
        self.assertNotEqual(user, review_request.submitter)
        self.assertFalse(user.is_superuser)
        rsp = self.api_get(
            get_draft_file_attachment_list_url(review_request,
                                               self.local_site_name),
            expected_mimetype=draft_file_attachment_list_mimetype)
        self.assertEqual(rsp['stat'], 'ok')
        attachments = rsp['draft_file_attachments']
        self.assertEqual(len(attachments), 1)
        self.assertEqual(attachments[0]['id'], attachment.pk)

    #
    # HTTP POST tests
    #

    def setup_basic_post_test(self, user, with_local_site, local_site_name,
                              post_valid_data):
        """Build the review request and POST payload for the
        metaclass-generated POST tests.
        """
        review_request = self.create_review_request(
            with_local_site=with_local_site,
            submitter=user)
        if post_valid_data:
            post_data = {
                'path': open(self.get_sample_image_filename(), 'rb'),
                'caption': 'New caption',
            }
        else:
            post_data = {}
        return (get_draft_file_attachment_list_url(review_request,
                                                   local_site_name),
                draft_file_attachment_item_mimetype,
                post_data,
                [review_request])

    def check_post_result(self, user, rsp, review_request):
        """Verify a POSTed attachment landed on the draft, not the request."""
        draft = review_request.get_draft()
        self.assertIsNotNone(draft)
        self.assertIn('draft_file_attachment', rsp)
        item_rsp = rsp['draft_file_attachment']
        attachment = FileAttachment.objects.get(pk=item_rsp['id'])
        self.assertIn(attachment, draft.file_attachments.all())
        self.assertNotIn(attachment, review_request.file_attachments.all())
        self.compare_item(item_rsp, attachment)

    def test_post_with_permission_denied_error(self):
        """Testing the POST review-requests/<id>/draft/file-attachments/ API
        with Permission Denied error
        """
        # The logged-in user is not the submitter, so posting must fail.
        review_request = self.create_review_request()
        self.assertNotEqual(review_request.submitter, self.user)
        with open(self.get_sample_image_filename(), 'rb') as f:
            rsp = self.api_post(
                get_draft_file_attachment_list_url(review_request),
                {
                    'caption': 'Trophy',
                    'path': f,
                },
                expected_status=403)
        self.assertEqual(rsp['stat'], 'fail')
        self.assertEqual(rsp['err']['code'], PERMISSION_DENIED.code)
@six.add_metaclass(BasicTestsMetaclass)
class ResourceItemTests(BaseWebAPITestCase):
    """Testing the DraftFileAttachmentResource item APIs."""

    # Fixtures loaded for every test in this class.
    fixtures = ['test_users']

    # Consumed by BasicTestsMetaclass when generating the basic
    # GET/PUT/DELETE tests from the setup_basic_* hooks below.
    sample_api_url = 'review-requests/<id>/draft/file-attachments/<id>/'
    resource = resources.draft_file_attachment

    def compare_item(self, item_rsp, attachment):
        # Verify the serialized payload matches the FileAttachment model.
        self.assertEqual(item_rsp['id'], attachment.pk)
        self.assertEqual(item_rsp['filename'], attachment.filename)

    #
    # HTTP DELETE tests
    #

    def setup_basic_delete_test(self, user, with_local_site, local_site_name):
        # Build a draft-only attachment and return its item URL plus the
        # objects handed to check_delete_result().
        review_request = self.create_review_request(
            with_local_site=with_local_site,
            submitter=user)
        file_attachment = self.create_file_attachment(review_request,
                                                      draft=True)

        return (get_draft_file_attachment_item_url(review_request,
                                                   file_attachment.pk,
                                                   local_site_name),
                [review_request, file_attachment])

    def check_delete_result(self, user, review_request, file_attachment):
        # A never-published (draft-only) attachment must be gone entirely:
        # absent from every active/inactive relation and from the database.
        draft = review_request.get_draft()
        self.assertIsNotNone(draft)
        self.assertNotIn(file_attachment,
                         draft.inactive_file_attachments.all())
        self.assertNotIn(file_attachment, draft.file_attachments.all())
        self.assertNotIn(file_attachment,
                         review_request.file_attachments.all())
        self.assertNotIn(file_attachment,
                         review_request.inactive_file_attachments.all())

        with self.assertRaises(FileAttachment.DoesNotExist):
            FileAttachment.objects.get(pk=file_attachment.pk)

    def test_delete_file_with_non_owner_superuser(self):
        """Testing the DELETE review-requests/<id>/draft/file-attachments/<id>/
        API with non-owner as superuser
        """
        # A superuser may delete another user's draft attachment.
        review_request = self.create_review_request(submitter=self.user)
        file_attachment = self.create_file_attachment(review_request,
                                                      draft=True)

        user = self._login_user(admin=True)
        self.api_delete(get_draft_file_attachment_item_url(review_request,
                                                           file_attachment.pk))

        self.check_delete_result(user, review_request, file_attachment)

    @add_fixtures(['test_site'])
    def test_delete_file_with_non_owner_local_site_admin(self):
        """Testing the DELETE review-requests/<id>/draft/file-attachments/<id>/
        API with non-owner as LocalSite admin
        """
        # A LocalSite admin (not a global superuser) may delete another
        # user's draft attachment on that site.
        review_request = self.create_review_request(submitter=self.user,
                                                    with_local_site=True,
                                                    publish=True)
        file_attachment = self.create_file_attachment(review_request,
                                                      draft=True)

        user = self._login_user(local_site=True, admin=True)
        self.assertNotEqual(user, self.user)

        self.api_delete(get_draft_file_attachment_item_url(
            review_request, file_attachment.pk, self.local_site_name))

        self.check_delete_result(user, review_request, file_attachment)

    def test_delete_file_with_publish(self):
        """Testing the DELETE review-requests/<id>/draft/file-attachments/<id>/
        API with published file attachment
        """
        review_request = self.create_review_request(submitter=self.user,
                                                    target_people=[self.user])
        file_attachment = self.create_file_attachment(review_request,
                                                      draft=True)

        review_request.get_draft().publish()

        self.api_delete(get_draft_file_attachment_item_url(review_request,
                                                           file_attachment.pk))

        # Deleting a published attachment through the draft resource must
        # NOT remove the database row; it is only moved to the draft's
        # inactive set so the deletion takes effect on the next publish.
        draft = review_request.get_draft()
        file_attachment = FileAttachment.objects.get(pk=file_attachment.pk)

        self.assertFalse(file_attachment.inactive_review_request.exists())
        self.assertIsNotNone(draft)
        self.assertIn(file_attachment,
                      draft.inactive_file_attachments.all())
        self.assertNotIn(file_attachment, draft.file_attachments.all())

    #
    # HTTP GET tests
    #

    def setup_basic_get_test(self, user, with_local_site, local_site_name):
        # Return the item URL, expected mimetype, and the attachment the
        # generated GET test should find.
        review_request = self.create_review_request(
            with_local_site=with_local_site,
            submitter=user)
        file_attachment = self.create_file_attachment(review_request,
                                                      draft=True)

        return (get_draft_file_attachment_item_url(review_request,
                                                   file_attachment.pk,
                                                   local_site_name),
                draft_file_attachment_item_mimetype,
                file_attachment)

    #
    # HTTP PUT tests
    #

    def setup_basic_put_test(self, user, with_local_site, local_site_name,
                             put_valid_data):
        # The attachment is published here (draft=False default) so the
        # PUT exercises updating the draft caption of an existing one.
        review_request = self.create_review_request(
            with_local_site=with_local_site,
            submitter=user)
        file_attachment = self.create_file_attachment(review_request)

        return (get_draft_file_attachment_item_url(review_request,
                                                   file_attachment.pk,
                                                   local_site_name),
                draft_file_attachment_item_mimetype,
                {'caption': 'My new caption'},
                file_attachment,
                [])

    def check_put_result(self, user, item_rsp, file_attachment):
        # The new caption is stored as draft_caption until publish.
        file_attachment = FileAttachment.objects.get(pk=file_attachment.pk)
        self.assertEqual(item_rsp['id'], file_attachment.pk)
        self.assertEqual(item_rsp['caption'], 'My new caption')
        self.assertEqual(file_attachment.draft_caption, 'My new caption')

    def test_put_with_non_owner_superuser(self):
        """Testing the PUT review-requests/<id>/draft/file-attachments/<id>/
        API with non-owner as superuser
        """
        # A superuser may update another user's draft attachment.
        review_request = self.create_review_request(submitter=self.user)
        file_attachment = self.create_file_attachment(review_request,
                                                      draft=True)

        user = self._login_user(admin=True)
        self.assertNotEqual(user, self.user)

        rsp = self.api_put(
            get_draft_file_attachment_item_url(review_request,
                                               file_attachment.pk),
            {
                'caption': 'My new caption',
            },
            expected_mimetype=draft_file_attachment_item_mimetype)

        self.assertEqual(rsp['stat'], 'ok')
        self.check_put_result(user, rsp['draft_file_attachment'],
                              file_attachment)

    @add_fixtures(['test_site'])
    def test_put_file_with_non_owner_local_site_admin(self):
        """Testing the PUT review-requests/<id>/draft/file-attachments/<id>/
        API with non-owner as LocalSite admin
        """
        # A LocalSite admin may update another user's draft attachment on
        # that site.
        review_request = self.create_review_request(submitter=self.user,
                                                    with_local_site=True,
                                                    publish=True)
        file_attachment = self.create_file_attachment(review_request,
                                                      draft=True)

        user = self._login_user(local_site=True, admin=True)
        self.assertNotEqual(user, self.user)

        rsp = self.api_put(
            get_draft_file_attachment_item_url(review_request,
                                               file_attachment.pk,
                                               self.local_site_name),
            {
                'caption': 'My new caption',
            },
            expected_mimetype=draft_file_attachment_item_mimetype)

        self.assertEqual(rsp['stat'], 'ok')
        self.check_put_result(user, rsp['draft_file_attachment'],
                              file_attachment)
| chipx86/reviewboard | reviewboard/webapi/tests/test_file_attachment_draft.py | Python | mit | 14,062 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.