column     type           values
code       stringlengths  3 to 1.05M
repo_name  stringlengths  5 to 104
path       stringlengths  4 to 251
language   stringclasses  1 value
license    stringclasses  15 values
size       int64          3 to 1.05M
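Each record below pairs one source file (code) with its provenance metadata. A minimal sketch of reading rows with this schema, assuming the dump is serialized as JSON lines; the file name "corpus.jsonl" and the 4096-byte cutoff are hypothetical:

# A minimal sketch, assuming each row of the dump is one JSON object with
# the six columns listed above; "corpus.jsonl" and the size cutoff are
# hypothetical.
import json

def iter_rows(path="corpus.jsonl"):
    """Yield one dict per dataset row."""
    with open(path, "r", encoding="utf-8") as fh:
        for line in fh:
            yield json.loads(line)

# Example: list small Apache-licensed files without touching the code blobs.
for row in iter_rows():
    if row["license"] == "apache-2.0" and row["size"] < 4096:
        print(row["repo_name"], row["path"], row["size"])

Any JSON-lines reader works here; the only schema-specific parts are the six column names.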
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import unittest

from pyspark.resource import ExecutorResourceRequests, ResourceProfile, ResourceProfileBuilder,\
    TaskResourceRequests


class ResourceProfileTests(unittest.TestCase):
    def test_profile_before_sc(self):
        rpb = ResourceProfileBuilder()
        ereqs = ExecutorResourceRequests().cores(2).memory("6g").memoryOverhead("1g")
        ereqs.pysparkMemory("2g").resource("gpu", 2, "testGpus", "nvidia.com")
        treqs = TaskResourceRequests().cpus(2).resource("gpu", 2)

        def assert_request_contents(exec_reqs, task_reqs):
            self.assertEqual(len(exec_reqs), 5)
            self.assertEqual(exec_reqs["cores"].amount, 2)
            self.assertEqual(exec_reqs["memory"].amount, 6144)
            self.assertEqual(exec_reqs["memoryOverhead"].amount, 1024)
            self.assertEqual(exec_reqs["pyspark.memory"].amount, 2048)
            self.assertEqual(exec_reqs["gpu"].amount, 2)
            self.assertEqual(exec_reqs["gpu"].discoveryScript, "testGpus")
            self.assertEqual(exec_reqs["gpu"].resourceName, "gpu")
            self.assertEqual(exec_reqs["gpu"].vendor, "nvidia.com")
            self.assertEqual(len(task_reqs), 2)
            self.assertEqual(task_reqs["cpus"].amount, 2.0)
            self.assertEqual(task_reqs["gpu"].amount, 2.0)

        assert_request_contents(ereqs.requests, treqs.requests)
        rp = rpb.require(ereqs).require(treqs).build
        assert_request_contents(rp.executorResources, rp.taskResources)

        from pyspark import SparkContext, SparkConf
        sc = SparkContext(conf=SparkConf())
        rdd = sc.parallelize(range(10)).withResources(rp)
        return_rp = rdd.getResourceProfile()
        assert_request_contents(return_rp.executorResources, return_rp.taskResources)

        # intermix objects created before SparkContext init and after
        rpb2 = ResourceProfileBuilder()
        # use reqs created before SparkContext with Builder after
        rpb2.require(ereqs)
        rpb2.require(treqs)
        rp2 = rpb2.build
        self.assertTrue(rp2.id > 0)
        rdd2 = sc.parallelize(range(10)).withResources(rp2)
        return_rp2 = rdd2.getResourceProfile()
        assert_request_contents(return_rp2.executorResources, return_rp2.taskResources)

        ereqs2 = ExecutorResourceRequests().cores(2).memory("6g").memoryOverhead("1g")
        ereqs2.pysparkMemory("2g").resource("gpu", 2, "testGpus", "nvidia.com")
        treqs2 = TaskResourceRequests().cpus(2).resource("gpu", 2)
        # use reqs created after SparkContext with Builder before
        rpb.require(ereqs2)
        rpb.require(treqs2)
        rp3 = rpb.build
        assert_request_contents(rp3.executorResources, rp3.taskResources)
        sc.stop()


if __name__ == "__main__":
    from pyspark.resource.tests.test_resources import *

    try:
        import xmlrunner
        testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
    except ImportError:
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
repo_name: spark-test/spark
path: python/pyspark/resource/tests/test_resources.py
language: Python
license: apache-2.0
size: 3,846
from __future__ import division
import math
import os
import sys

import matplotlib
from matplotlib import verbose
from matplotlib.cbook import is_string_like, onetrue
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
    FigureManagerBase, FigureCanvasBase, NavigationToolbar2, IdleEvent, cursors
from matplotlib._pylab_helpers import Gcf
from matplotlib.figure import Figure
from matplotlib.mathtext import MathTextParser
from matplotlib.widgets import SubplotTool

try:
    from PyQt4 import QtCore, QtGui, Qt
except ImportError:
    raise ImportError("Qt4 backend requires that PyQt4 is installed.")

backend_version = "0.9.1"


def fn_name():
    return sys._getframe(1).f_code.co_name

DEBUG = False

cursord = {
    cursors.MOVE: QtCore.Qt.SizeAllCursor,
    cursors.HAND: QtCore.Qt.PointingHandCursor,
    cursors.POINTER: QtCore.Qt.ArrowCursor,
    cursors.SELECT_REGION: QtCore.Qt.CrossCursor,
    }


def draw_if_interactive():
    """
    Is called after every pylab drawing command
    """
    if matplotlib.is_interactive():
        figManager = Gcf.get_active()
        if figManager is not None:
            figManager.canvas.draw()


def _create_qApp():
    """
    Only one qApp can exist at a time, so check before creating one.
    """
    if QtGui.QApplication.startingUp():
        if DEBUG:
            print "Starting up QApplication"
        global qApp
        qApp = QtGui.QApplication([" "])
        QtCore.QObject.connect(qApp, QtCore.SIGNAL("lastWindowClosed()"),
                               qApp, QtCore.SLOT("quit()"))
        # remember that matplotlib created the qApp - will be used by show()
        _create_qApp.qAppCreatedHere = True

_create_qApp.qAppCreatedHere = False


def show():
    """
    Show all the figures and enter the qt main loop
    This should be the last line of your script
    """
    for manager in Gcf.get_all_fig_managers():
        manager.window.show()

    if DEBUG:
        print 'Inside show'

    figManager = Gcf.get_active()
    if figManager is not None:
        figManager.canvas.draw()

    if _create_qApp.qAppCreatedHere:
        QtGui.qApp.exec_()


def new_figure_manager(num, *args, **kwargs):
    """
    Create a new figure manager instance
    """
    thisFig = Figure(*args, **kwargs)
    canvas = FigureCanvasQT(thisFig)
    manager = FigureManagerQT(canvas, num)
    return manager


class FigureCanvasQT(QtGui.QWidget, FigureCanvasBase):
    keyvald = {QtCore.Qt.Key_Control: 'control',
               QtCore.Qt.Key_Shift: 'shift',
               QtCore.Qt.Key_Alt: 'alt',
               }
    # left 1, middle 2, right 3
    buttond = {1: 1, 2: 3, 4: 2}

    def __init__(self, figure):
        if DEBUG:
            print 'FigureCanvasQt: ', figure
        _create_qApp()
        QtGui.QWidget.__init__(self)
        FigureCanvasBase.__init__(self, figure)
        self.figure = figure
        self.setMouseTracking(True)
        # hide until we can test and fix
        #self.startTimer(backend_IdleEvent.milliseconds)

        w, h = self.get_width_height()
        self.resize(w, h)

    def __timerEvent(self, event):
        # hide until we can test and fix
        self.mpl_idle_event(event)

    def enterEvent(self, event):
        FigureCanvasBase.enter_notify_event(self, event)

    def leaveEvent(self, event):
        FigureCanvasBase.leave_notify_event(self, event)

    def mousePressEvent(self, event):
        x = event.pos().x()
        # flipy so y=0 is bottom of canvas
        y = self.figure.bbox.height - event.pos().y()
        button = self.buttond[event.button()]
        FigureCanvasBase.button_press_event(self, x, y, button)
        if DEBUG:
            print 'button pressed:', event.button()

    def mouseMoveEvent(self, event):
        x = event.x()
        # flipy so y=0 is bottom of canvas
        y = self.figure.bbox.height - event.y()
        FigureCanvasBase.motion_notify_event(self, x, y)
        #if DEBUG: print 'mouse move'

    def mouseReleaseEvent(self, event):
        x = event.x()
        # flipy so y=0 is bottom of canvas
        y = self.figure.bbox.height - event.y()
        button = self.buttond[event.button()]
        FigureCanvasBase.button_release_event(self, x, y, button)
        if DEBUG:
            print 'button released'

    def keyPressEvent(self, event):
        key = self._get_key(event)
        FigureCanvasBase.key_press_event(self, key)
        if DEBUG:
            print 'key press', key

    def keyReleaseEvent(self, event):
        key = self._get_key(event)
        FigureCanvasBase.key_release_event(self, key)
        if DEBUG:
            print 'key release', key

    def resizeEvent(self, event):
        if DEBUG:
            print 'resize ({0:d} x {1:d})'.format(event.size().width(),
                                                  event.size().height())
        QtGui.QWidget.resizeEvent(self, event)
        w = event.size().width()
        h = event.size().height()
        if DEBUG:
            print "FigureCanvasQtAgg.resizeEvent(", w, ",", h, ")"
        dpival = self.figure.dpi
        winch = w / dpival
        hinch = h / dpival
        self.figure.set_size_inches(winch, hinch)
        self.draw()

    def resize(self, w, h):
        # Pass through to Qt to resize the widget.
        QtGui.QWidget.resize(self, w, h)
        # Resize the figure by converting pixels to inches.
        pixelPerInch = self.figure.dpi
        wInch = w / pixelPerInch
        hInch = h / pixelPerInch
        self.figure.set_size_inches(wInch, hInch)
        # Redraw everything.
        self.draw()

    def sizeHint(self):
        w, h = self.get_width_height()
        return QtCore.QSize(w, h)

    def minimumSizeHint(self):
        return QtCore.QSize(10, 10)

    def _get_key(self, event):
        if event.key() < 256:
            key = str(event.text())
        elif event.key() in self.keyvald:
            key = self.keyvald[event.key()]
        else:
            key = None
        return key

    def flush_events(self):
        Qt.qApp.processEvents()

    def start_event_loop(self, timeout):
        FigureCanvasBase.start_event_loop_default(self, timeout)
    start_event_loop.__doc__ = FigureCanvasBase.start_event_loop_default.__doc__

    def stop_event_loop(self):
        FigureCanvasBase.stop_event_loop_default(self)
    stop_event_loop.__doc__ = FigureCanvasBase.stop_event_loop_default.__doc__


class FigureManagerQT(FigureManagerBase):
    """
    Public attributes

    canvas      : The FigureCanvas instance
    num         : The Figure number
    toolbar     : The qt.QToolBar
    window      : The qt.QMainWindow
    """

    def __init__(self, canvas, num):
        if DEBUG:
            print 'FigureManagerQT.{0!s}'.format(fn_name())
        FigureManagerBase.__init__(self, canvas, num)
        self.canvas = canvas
        self.window = QtGui.QMainWindow()
        self.window.setAttribute(QtCore.Qt.WA_DeleteOnClose)

        self.window.setWindowTitle("Figure {0:d}".format(num))
        image = os.path.join(matplotlib.rcParams['datapath'],
                             'images', 'matplotlib.png')
        self.window.setWindowIcon(QtGui.QIcon(image))

        # Give the keyboard focus to the figure instead of the manager
        self.canvas.setFocusPolicy(QtCore.Qt.ClickFocus)
        self.canvas.setFocus()

        QtCore.QObject.connect(self.window, QtCore.SIGNAL('destroyed()'),
                               self._widgetclosed)
        self.window._destroying = False

        self.toolbar = self._get_toolbar(self.canvas, self.window)
        self.window.addToolBar(self.toolbar)
        QtCore.QObject.connect(self.toolbar, QtCore.SIGNAL("message"),
                               self.window.statusBar().showMessage)

        self.window.setCentralWidget(self.canvas)

        if matplotlib.is_interactive():
            self.window.show()

        # attach a show method to the figure for pylab ease of use
        self.canvas.figure.show = lambda *args: self.window.show()

        def notify_axes_change(fig):
            # This will be called whenever the current axes is changed
            if self.toolbar is not None:
                self.toolbar.update()
        self.canvas.figure.add_axobserver(notify_axes_change)

    def _widgetclosed(self):
        if self.window._destroying:
            return
        self.window._destroying = True
        Gcf.destroy(self.num)

    def _get_toolbar(self, canvas, parent):
        # must be inited after the window, drawingArea and figure
        # attrs are set
        if matplotlib.rcParams['toolbar'] == 'classic':
            print "Classic toolbar is not supported"
            toolbar = None
        elif matplotlib.rcParams['toolbar'] == 'toolbar2':
            toolbar = NavigationToolbar2QT(canvas, parent, False)
        else:
            toolbar = None
        return toolbar

    def resize(self, width, height):
        'set the canvas size in pixels'
        self.window.resize(width, height)

    def destroy(self, *args):
        if self.window._destroying:
            return
        self.window._destroying = True
        QtCore.QObject.disconnect(self.window, QtCore.SIGNAL('destroyed()'),
                                  self._widgetclosed)
        if self.toolbar:
            self.toolbar.destroy()
        if DEBUG:
            print "destroy figure manager"
        self.window.close()

    def set_window_title(self, title):
        self.window.setWindowTitle(title)


class NavigationToolbar2QT(NavigationToolbar2, QtGui.QToolBar):
    def __init__(self, canvas, parent, coordinates=True):
        """
        coordinates: should we show the coordinates on the right?
        """
        self.canvas = canvas
        self.coordinates = coordinates
        QtGui.QToolBar.__init__(self, parent)
        NavigationToolbar2.__init__(self, canvas)

    def _icon(self, name):
        return QtGui.QIcon(os.path.join(self.basedir, name))

    def _init_toolbar(self):
        self.basedir = os.path.join(matplotlib.rcParams['datapath'], 'images')

        a = self.addAction(self._icon('home.svg'), 'Home', self.home)
        a.setToolTip('Reset original view')
        a = self.addAction(self._icon('back.svg'), 'Back', self.back)
        a.setToolTip('Back to previous view')
        a = self.addAction(self._icon('forward.svg'), 'Forward', self.forward)
        a.setToolTip('Forward to next view')
        self.addSeparator()
        a = self.addAction(self._icon('move.svg'), 'Pan', self.pan)
        a.setToolTip('Pan axes with left mouse, zoom with right')
        a = self.addAction(self._icon('zoom_to_rect.svg'), 'Zoom', self.zoom)
        a.setToolTip('Zoom to rectangle')
        self.addSeparator()
        a = self.addAction(self._icon('subplots.png'), 'Subplots',
                           self.configure_subplots)
        a.setToolTip('Configure subplots')
        a = self.addAction(self._icon('filesave.svg'), 'Save',
                           self.save_figure)
        a.setToolTip('Save the figure')

        self.buttons = {}

        # Add the x,y location widget at the right side of the toolbar
        # The stretch factor is 1 which means any resizing of the toolbar
        # will resize this label instead of the buttons.
        if self.coordinates:
            self.locLabel = QtGui.QLabel("", self)
            self.locLabel.setAlignment(
                QtCore.Qt.AlignRight | QtCore.Qt.AlignTop)
            self.locLabel.setSizePolicy(
                QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding,
                                  QtGui.QSizePolicy.Ignored))
            labelAction = self.addWidget(self.locLabel)
            labelAction.setVisible(True)

        # reference holder for subplots_adjust window
        self.adj_window = None

    def dynamic_update(self):
        self.canvas.draw()

    def set_message(self, s):
        self.emit(QtCore.SIGNAL("message"), s)
        if self.coordinates:
            self.locLabel.setText(s.replace(', ', '\n'))

    def set_cursor(self, cursor):
        if DEBUG:
            print 'Set cursor', cursor
        QtGui.QApplication.restoreOverrideCursor()
        QtGui.QApplication.setOverrideCursor(QtGui.QCursor(cursord[cursor]))

    def draw_rubberband(self, event, x0, y0, x1, y1):
        height = self.canvas.figure.bbox.height
        y1 = height - y1
        y0 = height - y0

        w = abs(x1 - x0)
        h = abs(y1 - y0)

        rect = [int(val) for val in min(x0, x1), min(y0, y1), w, h]
        self.canvas.drawRectangle(rect)

    def configure_subplots(self):
        self.adj_window = QtGui.QMainWindow()
        win = self.adj_window
        win.setAttribute(QtCore.Qt.WA_DeleteOnClose)

        win.setWindowTitle("Subplot Configuration Tool")
        image = os.path.join(matplotlib.rcParams['datapath'],
                             'images', 'matplotlib.png')
        win.setWindowIcon(QtGui.QIcon(image))

        tool = SubplotToolQt(self.canvas.figure, win)
        win.setCentralWidget(tool)
        win.setSizePolicy(QtGui.QSizePolicy.Preferred,
                          QtGui.QSizePolicy.Preferred)
        win.show()

    def _get_canvas(self, fig):
        return FigureCanvasQT(fig)

    def save_figure(self):
        filetypes = self.canvas.get_supported_filetypes_grouped()
        sorted_filetypes = filetypes.items()
        sorted_filetypes.sort()
        default_filetype = self.canvas.get_default_filetype()

        start = "image." + default_filetype
        filters = []
        selectedFilter = None
        for name, exts in sorted_filetypes:
            exts_list = " ".join(['*.{0!s}'.format(ext) for ext in exts])
            filter = '{0!s} ({1!s})'.format(name, exts_list)
            if default_filetype in exts:
                selectedFilter = filter
            filters.append(filter)
        filters = ';;'.join(filters)

        fname = QtGui.QFileDialog.getSaveFileName(
            self, "Choose a filename to save to", start, filters,
            selectedFilter)
        if fname:
            try:
                self.canvas.print_figure(unicode(fname))
            except Exception, e:
                QtGui.QMessageBox.critical(
                    self, "Error saving file", str(e),
                    QtGui.QMessageBox.Ok, QtGui.QMessageBox.NoButton)


class SubplotToolQt(SubplotTool, QtGui.QWidget):
    def __init__(self, targetfig, parent):
        QtGui.QWidget.__init__(self, None)

        self.targetfig = targetfig
        self.parent = parent

        self.sliderleft = QtGui.QSlider(QtCore.Qt.Horizontal)
        self.sliderbottom = QtGui.QSlider(QtCore.Qt.Vertical)
        self.sliderright = QtGui.QSlider(QtCore.Qt.Horizontal)
        self.slidertop = QtGui.QSlider(QtCore.Qt.Vertical)
        self.sliderwspace = QtGui.QSlider(QtCore.Qt.Horizontal)
        self.sliderhspace = QtGui.QSlider(QtCore.Qt.Vertical)

        # constraints
        QtCore.QObject.connect(self.sliderleft,
                               QtCore.SIGNAL("valueChanged(int)"),
                               self.sliderright.setMinimum)
        QtCore.QObject.connect(self.sliderright,
                               QtCore.SIGNAL("valueChanged(int)"),
                               self.sliderleft.setMaximum)
        QtCore.QObject.connect(self.sliderbottom,
                               QtCore.SIGNAL("valueChanged(int)"),
                               self.slidertop.setMinimum)
        QtCore.QObject.connect(self.slidertop,
                               QtCore.SIGNAL("valueChanged(int)"),
                               self.sliderbottom.setMaximum)

        sliders = (self.sliderleft, self.sliderbottom, self.sliderright,
                   self.slidertop, self.sliderwspace, self.sliderhspace,)
        adjustments = ('left:', 'bottom:', 'right:',
                       'top:', 'wspace:', 'hspace:')

        for slider, adjustment in zip(sliders, adjustments):
            slider.setMinimum(0)
            slider.setMaximum(1000)
            slider.setSingleStep(5)

        layout = QtGui.QGridLayout()

        leftlabel = QtGui.QLabel('left')
        layout.addWidget(leftlabel, 2, 0)
        layout.addWidget(self.sliderleft, 2, 1)

        toplabel = QtGui.QLabel('top')
        layout.addWidget(toplabel, 0, 2)
        layout.addWidget(self.slidertop, 1, 2)
        layout.setAlignment(self.slidertop, QtCore.Qt.AlignHCenter)

        bottomlabel = QtGui.QLabel('bottom')
        layout.addWidget(QtGui.QLabel('bottom'), 4, 2)
        layout.addWidget(self.sliderbottom, 3, 2)
        layout.setAlignment(self.sliderbottom, QtCore.Qt.AlignHCenter)

        rightlabel = QtGui.QLabel('right')
        layout.addWidget(rightlabel, 2, 4)
        layout.addWidget(self.sliderright, 2, 3)

        hspacelabel = QtGui.QLabel('hspace')
        layout.addWidget(hspacelabel, 0, 6)
        layout.setAlignment(hspacelabel, QtCore.Qt.AlignHCenter)
        layout.addWidget(self.sliderhspace, 1, 6)
        layout.setAlignment(self.sliderhspace, QtCore.Qt.AlignHCenter)

        wspacelabel = QtGui.QLabel('wspace')
        layout.addWidget(wspacelabel, 4, 6)
        layout.setAlignment(wspacelabel, QtCore.Qt.AlignHCenter)
        layout.addWidget(self.sliderwspace, 3, 6)
        layout.setAlignment(self.sliderwspace, QtCore.Qt.AlignBottom)

        layout.setRowStretch(1, 1)
        layout.setRowStretch(3, 1)
        layout.setColumnStretch(1, 1)
        layout.setColumnStretch(3, 1)
        layout.setColumnStretch(6, 1)

        self.setLayout(layout)

        self.sliderleft.setSliderPosition(int(targetfig.subplotpars.left*1000))
        self.sliderbottom.setSliderPosition(\
            int(targetfig.subplotpars.bottom*1000))
        self.sliderright.setSliderPosition(\
            int(targetfig.subplotpars.right*1000))
        self.slidertop.setSliderPosition(int(targetfig.subplotpars.top*1000))
        self.sliderwspace.setSliderPosition(\
            int(targetfig.subplotpars.wspace*1000))
        self.sliderhspace.setSliderPosition(\
            int(targetfig.subplotpars.hspace*1000))

        QtCore.QObject.connect(self.sliderleft,
                               QtCore.SIGNAL("valueChanged(int)"),
                               self.funcleft)
        QtCore.QObject.connect(self.sliderbottom,
                               QtCore.SIGNAL("valueChanged(int)"),
                               self.funcbottom)
        QtCore.QObject.connect(self.sliderright,
                               QtCore.SIGNAL("valueChanged(int)"),
                               self.funcright)
        QtCore.QObject.connect(self.slidertop,
                               QtCore.SIGNAL("valueChanged(int)"),
                               self.functop)
        QtCore.QObject.connect(self.sliderwspace,
                               QtCore.SIGNAL("valueChanged(int)"),
                               self.funcwspace)
        QtCore.QObject.connect(self.sliderhspace,
                               QtCore.SIGNAL("valueChanged(int)"),
                               self.funchspace)

    def funcleft(self, val):
        if val == self.sliderright.value():
            val -= 1
        self.targetfig.subplots_adjust(left=val/1000.)
        if self.drawon:
            self.targetfig.canvas.draw()

    def funcright(self, val):
        if val == self.sliderleft.value():
            val += 1
        self.targetfig.subplots_adjust(right=val/1000.)
        if self.drawon:
            self.targetfig.canvas.draw()

    def funcbottom(self, val):
        if val == self.slidertop.value():
            val -= 1
        self.targetfig.subplots_adjust(bottom=val/1000.)
        if self.drawon:
            self.targetfig.canvas.draw()

    def functop(self, val):
        if val == self.sliderbottom.value():
            val += 1
        self.targetfig.subplots_adjust(top=val/1000.)
        if self.drawon:
            self.targetfig.canvas.draw()

    def funcwspace(self, val):
        self.targetfig.subplots_adjust(wspace=val/1000.)
        if self.drawon:
            self.targetfig.canvas.draw()

    def funchspace(self, val):
        self.targetfig.subplots_adjust(hspace=val/1000.)
        if self.drawon:
            self.targetfig.canvas.draw()


def error_msg_qt(msg, parent=None):
    if not is_string_like(msg):
        msg = ','.join(map(str, msg))

    QtGui.QMessageBox.warning(None, "Matplotlib", msg, QtGui.QMessageBox.Ok)


def exception_handler(type, value, tb):
    """Handle uncaught exceptions
    It does not catch SystemExit
    """
    msg = ''
    # get the filename attribute if available (for IOError)
    if hasattr(value, 'filename') and value.filename is not None:
        msg = value.filename + ': '
    if hasattr(value, 'strerror') and value.strerror is not None:
        msg += value.strerror
    else:
        msg += str(value)

    if len(msg):
        error_msg_qt(msg)

FigureManager = FigureManagerQT
repo_name: runt18/nupic
path: external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_qt4.py
language: Python
license: agpl-3.0
size: 20,731
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-12 15:43
import collections

import jsonfield.fields
from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('django_netjsonconfig', '0010_basemodel_reorganization'),
    ]

    operations = [
        migrations.AlterField(
            model_name='template',
            name='config',
            field=jsonfield.fields.JSONField(
                blank=True,
                default=dict,
                dump_kwargs={'ensure_ascii': False, 'indent': 4},
                help_text='configuration in NetJSON DeviceConfiguration format',
                load_kwargs={'object_pairs_hook': collections.OrderedDict},
                verbose_name='configuration',
            ),
        ),
    ]
repo_name: openwisp/django-netjsonconfig
path: django_netjsonconfig/migrations/0011_template_config_blank.py
language: Python
license: gpl-3.0
size: 821
""" Django settings for email_auth project. Generated by 'django-admin startproject' using Django 1.11. For more information on this file, see https://docs.djangoproject.com/en/1.11/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.11/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'cw5im3zsqi698c8-348d!01$09=ed2&&x6f6=_hiyy9^(3_#iy' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'account_app', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] # Set new model for user autentication AUTH_USER_MODEL = 'account_app.User' ROOT_URLCONF = 'email_auth.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR, 'templates')], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'email_auth.wsgi.application' # Database # https://docs.djangoproject.com/en/1.11/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'email_user.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.11/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.11/howto/static-files/ STATIC_URL = '/static/'
repo_name: jacruzsdev/django_1.11_auth_email
path: email_auth/email_auth/settings.py
language: Python
license: gpl-3.0
size: 3,245
import datetime
import random
import struct

from zlib import compress


def unpack_legacy_message(self):
    """Unpack a legacy buffer message

    This routine unpacks a buffer message written by the LabVIEW DAQ
    software version 2.1.1 and below. Versions 2.2 and above use a
    version identifier in the message. This way, we can account for
    different message formats.

    """
    self.blob = self.message
    # set the version of this legacy message to 21 (DAQ version 2.1.1)
    self.version = 21

    tmp = struct.unpack("B", self.blob[0:1])[0]
    if tmp == 1:
        self.database = {"local": True, "central": False}
    elif tmp == 2:
        self.database = {"local": False, "central": True}
    elif tmp == 3:
        self.database = {"local": True, "central": True}
    else:
        # Should not happen
        self.database = {"local": False, "central": False}

    # Number of devices
    self.Ndev = struct.unpack("B", self.blob[1:2])[0]
    # Number of bytes per trace
    self.N = struct.unpack(">H", self.blob[2:4])[0]
    # Seconds
    self.second = struct.unpack("B", self.blob[4:5])[0]
    # Minutes
    self.minute = struct.unpack("B", self.blob[5:6])[0]
    # Hour
    self.hour = struct.unpack("B", self.blob[6:7])[0]
    # Day
    self.day = struct.unpack("B", self.blob[7:8])[0]
    # Month
    self.month = struct.unpack("B", self.blob[8:9])[0]
    # Year
    self.year = struct.unpack(">H", self.blob[9:11])[0]
    # date-time object
    self.datetime = datetime.datetime(
        self.year, self.month, self.day,
        self.hour, self.minute, self.second
    )
    # Get the nanoseconds
    self.nanoseconds = struct.unpack(">I", self.blob[11:15])[0]
    # Trigger time of Slave relative to Master in ns
    self.SLVtime = struct.unpack(">i", self.blob[15:19])[0]
    # Trigger pattern
    # TODO: Unwrap trigger pattern
    self.trigger = struct.unpack(">H", self.blob[19:21])[0]
    # Baseline from master detector 1
    self.mas_baseline1 = struct.unpack(">h", self.blob[21:23])[0]
    # Baseline from master detector 2
    self.mas_baseline2 = struct.unpack(">h", self.blob[23:25])[0]
    # Number of peaks from master detector 1
    self.mas_npeaks1 = struct.unpack(">h", self.blob[25:27])[0]
    # Number of peaks from master detector 2
    self.mas_npeaks2 = struct.unpack(">h", self.blob[27:29])[0]
    # Pulse height from master detector 1
    self.mas_pulseheight1 = struct.unpack(">h", self.blob[29:31])[0]
    # Pulse height from master detector 2
    self.mas_pulseheight2 = struct.unpack(">h", self.blob[31:33])[0]
    # Integral from master detector 1
    self.mas_int1 = struct.unpack(">i", self.blob[33:37])[0]
    # Integral from master detector 2
    self.mas_int2 = struct.unpack(">i", self.blob[37:41])[0]
    # Trace from master detector 1
    self.mas_tr1 = compress(self.unpack_trace(self.blob[41:41 + self.N / 2]))
    # Trace from master detector 2
    self.mas_tr2 = compress(self.unpack_trace(self.blob[41 + self.N / 2:41 + self.N]))

    # If slave is attached:
    if self.Ndev == 2:
        o = 41 + self.N  # Offset
        # Baseline from slave detector 1
        self.slv_baseline1 = struct.unpack(">h", self.blob[o:o + 2])[0]
        # Baseline from slave detector 2
        self.slv_baseline2 = struct.unpack(">h", self.blob[o + 2:o + 4])[0]
        # Number of peaks from slave detector 1
        self.slv_npeaks1 = struct.unpack(">h", self.blob[o + 4:o + 6])[0]
        # Number of peaks from slave detector 2
        self.slv_npeaks2 = struct.unpack(">h", self.blob[o + 6:o + 8])[0]
        # Pulse height from slave detector 1
        self.slv_pulseheight1 = struct.unpack(">h", self.blob[o + 8:o + 10])[0]
        # Pulse height from slave detector 2
        self.slv_pulseheight2 = struct.unpack(">h", self.blob[o + 10:o + 12])[0]
        # Integral from slave detector 1
        self.slv_int1 = struct.unpack(">i", self.blob[o + 12:o + 16])[0]
        # Integral from slave detector 2
        self.slv_int2 = struct.unpack(">i", self.blob[o + 16:o + 20])[0]
        # Trace from slave detector 1
        self.slv_tr1 = compress(self.unpack_trace(self.blob[o + 20:o + 20 + self.N / 2]))
        # Trace from slave detector 2
        self.slv_tr2 = compress(self.unpack_trace(self.blob[o + 20 + self.N / 2:o + 20 + self.N]))
repo_name: HiSPARC/sapphire
path: scripts/kascade/read_sqldump/legacy.py
language: Python
license: gpl-3.0
size: 4,556
# -*- coding: utf-8 -*-
"""
    celery.canvas
    ~~~~~~~~~~~~~

    Composing task workflows.

    Documentation for these functions are in :mod:`celery`.
    You should not import from this module directly.

"""
from __future__ import absolute_import

from copy import deepcopy
from functools import partial as _partial
from operator import itemgetter
from itertools import chain as _chain

from kombu.utils import cached_property, fxrange, kwdict, reprcall, uuid

from celery import current_app
from celery.local import Proxy
from celery.utils.compat import chain_from_iterable
from celery.result import AsyncResult, GroupResult
from celery.utils.functional import (
    maybe_list, is_list, regen,
    chunks as _chunks,
)
from celery.utils.text import truncate

Chord = Proxy(lambda: current_app.tasks['celery.chord'])


class _getitem_property(object):

    def __init__(self, key):
        self.key = key

    def __get__(self, obj, type=None):
        if obj is None:
            return type
        return obj.get(self.key)

    def __set__(self, obj, value):
        obj[self.key] = value


class Signature(dict):
    """Class that wraps the arguments and execution options
    for a single task invocation.

    Used as the parts in a :class:`group` or to safely
    pass tasks around as callbacks.

    :param task: Either a task class/instance, or the name of a task.
    :keyword args: Positional arguments to apply.
    :keyword kwargs: Keyword arguments to apply.
    :keyword options: Additional options to :meth:`Task.apply_async`.

    Note that if the first argument is a :class:`dict`, the other
    arguments will be ignored and the values in the dict will be used
    instead.

        >>> s = subtask('tasks.add', args=(2, 2))
        >>> subtask(s)
        {'task': 'tasks.add', args=(2, 2), kwargs={}, options={}}

    """
    TYPES = {}
    _type = None

    @classmethod
    def register_type(cls, subclass, name=None):
        cls.TYPES[name or subclass.__name__] = subclass
        return subclass

    @classmethod
    def from_dict(self, d):
        typ = d.get('subtask_type')
        if typ:
            return self.TYPES[typ].from_dict(kwdict(d))
        return Signature(d)

    def __init__(self, task=None, args=None, kwargs=None, options=None,
                 type=None, subtask_type=None, immutable=False, **ex):
        init = dict.__init__

        if isinstance(task, dict):
            return init(self, task)  # works like dict(d)

        # Also supports using task class/instance instead of string name.
        try:
            task_name = task.name
        except AttributeError:
            task_name = task
        else:
            self._type = task

        init(self, task=task_name, args=tuple(args or ()),
             kwargs=kwargs or {},
             options=dict(options or {}, **ex),
             subtask_type=subtask_type,
             immutable=immutable)

    def __call__(self, *partial_args, **partial_kwargs):
        return self.apply_async(partial_args, partial_kwargs)
    delay = __call__

    def apply(self, args=(), kwargs={}, **options):
        """Apply this task locally."""
        # For callbacks: extra args are prepended to the stored args.
        args, kwargs, options = self._merge(args, kwargs, options)
        return self.type.apply(args, kwargs, **options)

    def _merge(self, args=(), kwargs={}, options={}):
        if self.immutable:
            return self.args, self.kwargs, dict(self.options, **options)
        return (tuple(args) + tuple(self.args) if args else self.args,
                dict(self.kwargs, **kwargs) if kwargs else self.kwargs,
                dict(self.options, **options) if options else self.options)

    def clone(self, args=(), kwargs={}, **opts):
        # need to deepcopy options so origins links etc. is not modified.
        args, kwargs, opts = self._merge(args, kwargs, opts)
        s = Signature.from_dict({'task': self.task, 'args': tuple(args),
                                 'kwargs': kwargs, 'options': deepcopy(opts),
                                 'subtask_type': self.subtask_type,
                                 'immutable': self.immutable})
        s._type = self._type
        return s
    partial = clone

    def _freeze(self, _id=None):
        opts = self.options
        try:
            tid = opts['task_id']
        except KeyError:
            tid = opts['task_id'] = _id or uuid()
        return self.AsyncResult(tid)

    def replace(self, args=None, kwargs=None, options=None):
        s = self.clone()
        if args is not None:
            s.args = args
        if kwargs is not None:
            s.kwargs = kwargs
        if options is not None:
            s.options = options
        return s

    def set(self, immutable=None, **options):
        if immutable is not None:
            self.immutable = immutable
        self.options.update(options)
        return self

    def apply_async(self, args=(), kwargs={}, **options):
        # For callbacks: extra args are prepended to the stored args.
        args, kwargs, options = self._merge(args, kwargs, options)
        return self._apply_async(args, kwargs, **options)

    def append_to_list_option(self, key, value):
        items = self.options.setdefault(key, [])
        if value not in items:
            items.append(value)
        return value

    def link(self, callback):
        return self.append_to_list_option('link', callback)

    def link_error(self, errback):
        return self.append_to_list_option('link_error', errback)

    def flatten_links(self):
        return list(chain_from_iterable(_chain(
            [[self]],
            (link.flatten_links()
                for link in maybe_list(self.options.get('link')) or [])
        )))

    def __or__(self, other):
        if not isinstance(self, chain) and isinstance(other, chain):
            return chain((self,) + other.tasks)
        elif isinstance(other, chain):
            return chain(*self.tasks + other.tasks)
        elif isinstance(other, Signature):
            if isinstance(self, chain):
                return chain(*self.tasks + (other, ))
            return chain(self, other)
        return NotImplemented

    def __invert__(self):
        return self.apply_async().get()

    def __reduce__(self):
        # for serialization, the task type is lazily loaded,
        # and not stored in the dict itself.
        return subtask, (dict(self), )

    def reprcall(self, *args, **kwargs):
        args, kwargs, _ = self._merge(args, kwargs, {})
        return reprcall(self['task'], args, kwargs)

    def __repr__(self):
        return self.reprcall()

    @cached_property
    def type(self):
        return self._type or current_app.tasks[self['task']]

    @cached_property
    def AsyncResult(self):
        try:
            return self.type.AsyncResult
        except KeyError:  # task not registered
            return AsyncResult

    @cached_property
    def _apply_async(self):
        try:
            return self.type.apply_async
        except KeyError:
            return _partial(current_app.send_task, self['task'])

    task = _getitem_property('task')
    args = _getitem_property('args')
    kwargs = _getitem_property('kwargs')
    options = _getitem_property('options')
    subtask_type = _getitem_property('subtask_type')
    immutable = _getitem_property('immutable')


class chain(Signature):

    def __init__(self, *tasks, **options):
        tasks = tasks[0] if len(tasks) == 1 and is_list(tasks[0]) else tasks
        Signature.__init__(
            self, 'celery.chain', (), {'tasks': tasks}, **options
        )
        self.tasks = tasks
        self.subtask_type = 'chain'

    def __call__(self, *args, **kwargs):
        return self.apply_async(args, kwargs)

    @classmethod
    def from_dict(self, d):
        tasks = d['kwargs']['tasks']
        if d['args'] and tasks:
            # partial args passed on to first task in chain (Issue #1057).
            tasks[0]['args'] = d['args'] + tasks[0]['args']
        return chain(*d['kwargs']['tasks'], **kwdict(d['options']))

    def __repr__(self):
        return ' | '.join(map(repr, self.tasks))
Signature.register_type(chain)


class _basemap(Signature):
    _task_name = None
    _unpack_args = itemgetter('task', 'it')

    def __init__(self, task, it, **options):
        Signature.__init__(
            self, self._task_name, (),
            {'task': task, 'it': regen(it)}, immutable=True, **options
        )

    def apply_async(self, args=(), kwargs={}, **opts):
        # need to evaluate generators
        task, it = self._unpack_args(self.kwargs)
        return self.type.apply_async(
            (), {'task': task, 'it': list(it)}, **opts
        )

    @classmethod
    def from_dict(self, d):
        return chunks(*self._unpack_args(d['kwargs']), **d['options'])


class xmap(_basemap):
    _task_name = 'celery.map'

    def __repr__(self):
        task, it = self._unpack_args(self.kwargs)
        return '[%s(x) for x in %s]' % (task.task, truncate(repr(it), 100))
Signature.register_type(xmap)


class xstarmap(_basemap):
    _task_name = 'celery.starmap'

    def __repr__(self):
        task, it = self._unpack_args(self.kwargs)
        return '[%s(*x) for x in %s]' % (task.task, truncate(repr(it), 100))
Signature.register_type(xstarmap)


class chunks(Signature):
    _unpack_args = itemgetter('task', 'it', 'n')

    def __init__(self, task, it, n, **options):
        Signature.__init__(
            self, 'celery.chunks', (),
            {'task': task, 'it': regen(it), 'n': n},
            immutable=True, **options
        )

    @classmethod
    def from_dict(self, d):
        return chunks(*self._unpack_args(d['kwargs']), **d['options'])

    def apply_async(self, args=(), kwargs={}, **opts):
        return self.group().apply_async(args, kwargs, **opts)

    def __call__(self, **options):
        return self.group()(**options)

    def group(self):
        # need to evaluate generators
        task, it, n = self._unpack_args(self.kwargs)
        return group(xstarmap(task, part) for part in _chunks(iter(it), n))

    @classmethod
    def apply_chunks(cls, task, it, n):
        return cls(task, it, n)()
Signature.register_type(chunks)


def _maybe_group(tasks):
    if isinstance(tasks, group):
        tasks = list(tasks.tasks)
    else:
        tasks = regen(tasks if is_list(tasks) else tasks)
    return tasks


class group(Signature):

    def __init__(self, *tasks, **options):
        if len(tasks) == 1:
            tasks = _maybe_group(tasks[0])
        Signature.__init__(
            self, 'celery.group', (), {'tasks': tasks}, **options
        )
        self.tasks, self.subtask_type = tasks, 'group'

    @classmethod
    def from_dict(self, d):
        tasks = d['kwargs']['tasks']
        if d['args'] and tasks:
            # partial args passed on to all tasks in the group (Issue #1057).
            for task in tasks:
                task['args'] = d['args'] + task['args']
        return group(tasks, **kwdict(d['options']))

    def __call__(self, *partial_args, **options):
        tasks, result, gid, args = self.type.prepare(
            options, map(Signature.clone, self.tasks), partial_args,
        )
        return self.type(tasks, result, gid, args)

    def _freeze(self, _id=None):
        opts = self.options
        try:
            gid = opts['group']
        except KeyError:
            gid = opts['group'] = uuid()
        new_tasks, results = [], []
        for task in self.tasks:
            task = maybe_subtask(task).clone()
            results.append(task._freeze())
            new_tasks.append(task)
        self.tasks = self.kwargs['tasks'] = new_tasks
        return GroupResult(gid, results)

    def skew(self, start=1.0, stop=None, step=1.0):
        _next_skew = fxrange(start, stop, step, repeatlast=True).next
        for task in self.tasks:
            task.set(countdown=_next_skew())
        return self

    def __iter__(self):
        return iter(self.tasks)

    def __repr__(self):
        return repr(self.tasks)
Signature.register_type(group)


class chord(Signature):
    Chord = Chord

    def __init__(self, header, body=None, task='celery.chord',
                 args=(), kwargs={}, **options):
        Signature.__init__(
            self, task, args, dict(kwargs, header=_maybe_group(header),
                                   body=maybe_subtask(body)), **options
        )
        self.subtask_type = 'chord'

    @classmethod
    def from_dict(self, d):
        args, d['kwargs'] = self._unpack_args(**kwdict(d['kwargs']))
        return self(*args, **kwdict(d))

    @staticmethod
    def _unpack_args(header=None, body=None, **kwargs):
        # Python signatures are better at extracting keys from dicts
        # than manually popping things off.
        return (header, body), kwargs

    def __call__(self, body=None, **kwargs):
        _chord = self.Chord
        body = (body or self.kwargs['body']).clone()
        kwargs = dict(self.kwargs, body=body, **kwargs)
        if _chord.app.conf.CELERY_ALWAYS_EAGER:
            return self.apply((), kwargs)
        callback_id = body.options.setdefault('task_id', uuid())
        _chord(**kwargs)
        return _chord.AsyncResult(callback_id)

    def clone(self, *args, **kwargs):
        s = Signature.clone(self, *args, **kwargs)
        # need to make copy of body
        try:
            s.kwargs['body'] = s.kwargs['body'].clone()
        except (AttributeError, KeyError):
            pass
        return s

    def link(self, callback):
        self.body.link(callback)
        return callback

    def link_error(self, errback):
        self.body.link_error(errback)
        return errback

    def __repr__(self):
        if self.body:
            return self.body.reprcall(self.tasks)
        return '<chord without body: %r>' % (self.tasks, )

    @property
    def tasks(self):
        return self.kwargs['header']

    @property
    def body(self):
        return self.kwargs.get('body')
Signature.register_type(chord)


def subtask(varies, *args, **kwargs):
    if not (args or kwargs) and isinstance(varies, dict):
        if isinstance(varies, Signature):
            return varies.clone()
        return Signature.from_dict(varies)
    return Signature(varies, *args, **kwargs)


def maybe_subtask(d):
    if d is not None and isinstance(d, dict) and not isinstance(d, Signature):
        return subtask(d)
    return d
repo_name: mozilla/firefox-flicks
path: vendor-local/lib/python/celery/canvas.py
language: Python
license: bsd-3-clause
size: 14,524
from __future__ import print_function, division, absolute_import
import numpy as np
from PLIdentify import PL_identify
import cPickle as pk

data = pk.load(open('./dump_robust.txt', 'r'))
I_rec = data['I_rec']
# import ipdb;ipdb.set_trace()
n = len(I_rec)  # window size
m = I_rec[0].shape[0]  # no. of PLs
D = np.zeros((m, n))
for j in xrange(n):
    for i in xrange(m):
        D[i, j] = I_rec[j][i, 0]

lamb = 1.5
x = PL_identify(D, lamb)
if x is None:
    print('No feasible selection')
    import sys
    sys.exit(0)

print('x, ', x)
select_D = D[np.nonzero(x)[0], :]
org_entro = np.min(D, axis=0)
select_entro = np.min(select_D, axis=0)

import matplotlib.pyplot as plt
plt.subplot(311)
plt.plot(D.T)
plt.subplot(312)
plt.plot(select_D.T)
plt.subplot(313)
plt.plot(org_entro, 'r-+')
plt.plot(select_entro, 'b-*')
print('select [%d] PLs out of [%d] PLs' % (sum(x), len(x)))
plt.legend(['original', 'selected'])
plt.show()
import ipdb;ipdb.set_trace()
repo_name: hbhzwj/SADIT
path: Example/Configs_anomaly_TCNS/PLIdentify/RunPLIdentify.py
language: Python
license: gpl-3.0
size: 949
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.

__metaclass__ = type

ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}

DOCUMENTATION = '''
---
module: fortios_system_autoupdate_push_update
short_description: Configure push updates in Fortinet's FortiOS and FortiGate.
description:
    - This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
      user to set and modify system_autoupdate feature and push_update category.
      Examples include all parameters and values need to be adjusted to datasources before usage.
      Tested with FOS v6.0.5
version_added: "2.9"
author:
    - Miguel Angel Munoz (@mamunozgonzalez)
    - Nicolas Thomas (@thomnico)
notes:
    - Requires fortiosapi library developed by Fortinet
    - Run as a local_action in your playbook
requirements:
    - fortiosapi>=0.9.8
options:
    host:
        description:
            - FortiOS or FortiGate IP address.
        type: str
        required: false
    username:
        description:
            - FortiOS or FortiGate username.
        type: str
        required: false
    password:
        description:
            - FortiOS or FortiGate password.
        type: str
        default: ""
    vdom:
        description:
            - Virtual domain, among those defined previously. A vdom is a
              virtual instance of the FortiGate that can be configured and
              used as a different unit.
        type: str
        default: root
    https:
        description:
            - Indicates if the requests towards FortiGate must use HTTPS protocol.
        type: bool
        default: true
    ssl_verify:
        description:
            - Ensures FortiGate certificate must be verified by a proper CA.
        type: bool
        default: true
    system_autoupdate_push_update:
        description:
            - Configure push updates.
        default: null
        type: dict
        suboptions:
            address:
                description:
                    - Push update override server.
                type: str
            override:
                description:
                    - Enable/disable push update override server.
                type: str
                choices:
                    - enable
                    - disable
            port:
                description:
                    - Push update override port. (Do not overlap with other service ports)
                type: int
            status:
                description:
                    - Enable/disable push updates.
                type: str
                choices:
                    - enable
                    - disable
'''

EXAMPLES = '''
- hosts: localhost
  vars:
   host: "192.168.122.40"
   username: "admin"
   password: ""
   vdom: "root"
   ssl_verify: "False"
  tasks:
  - name: Configure push updates.
    fortios_system_autoupdate_push_update:
      host:  "{{ host }}"
      username: "{{ username }}"
      password: "{{ password }}"
      vdom:  "{{ vdom }}"
      https: "False"
      system_autoupdate_push_update:
        address: "<your_own_value>"
        override: "enable"
        port: "5"
        status: "enable"
'''

RETURN = '''
build:
  description: Build number of the fortigate image
  returned: always
  type: str
  sample: '1547'
http_method:
  description: Last method used to provision the content into FortiGate
  returned: always
  type: str
  sample: 'PUT'
http_status:
  description: Last result given by FortiGate on last operation applied
  returned: always
  type: str
  sample: "200"
mkey:
  description: Master key (id) used in the last call to FortiGate
  returned: success
  type: str
  sample: "id"
name:
  description: Name of the table used to fulfill the request
  returned: always
  type: str
  sample: "urlfilter"
path:
  description: Path of the table used to fulfill the request
  returned: always
  type: str
  sample: "webfilter"
revision:
  description: Internal revision number
  returned: always
  type: str
  sample: "17.0.2.10658"
serial:
  description: Serial number of the unit
  returned: always
  type: str
  sample: "FGVMEVYYQT3AB5352"
status:
  description: Indication of the operation's result
  returned: always
  type: str
  sample: "success"
vdom:
  description: Virtual domain used
  returned: always
  type: str
  sample: "root"
version:
  description: Version of the FortiGate
  returned: always
  type: str
  sample: "v5.6.3"
'''

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG


def login(data, fos):
    host = data['host']
    username = data['username']
    password = data['password']
    ssl_verify = data['ssl_verify']

    fos.debug('on')
    if 'https' in data and not data['https']:
        fos.https('off')
    else:
        fos.https('on')

    fos.login(host, username, password, verify=ssl_verify)


def filter_system_autoupdate_push_update_data(json):
    option_list = ['address', 'override', 'port',
                   'status']
    dictionary = {}

    for attribute in option_list:
        if attribute in json and json[attribute] is not None:
            dictionary[attribute] = json[attribute]

    return dictionary


def underscore_to_hyphen(data):
    if isinstance(data, list):
        for elem in data:
            elem = underscore_to_hyphen(elem)
    elif isinstance(data, dict):
        new_data = {}
        for k, v in data.items():
            new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
        data = new_data

    return data


def system_autoupdate_push_update(data, fos):
    vdom = data['vdom']
    system_autoupdate_push_update_data = data['system_autoupdate_push_update']
    filtered_data = underscore_to_hyphen(
        filter_system_autoupdate_push_update_data(system_autoupdate_push_update_data))

    return fos.set('system.autoupdate',
                   'push-update',
                   data=filtered_data,
                   vdom=vdom)


def is_successful_status(status):
    return status['status'] == "success" or \
        status['http_method'] == "DELETE" and status['http_status'] == 404


def fortios_system_autoupdate(data, fos):

    if data['system_autoupdate_push_update']:
        resp = system_autoupdate_push_update(data, fos)

    return not is_successful_status(resp), \
        resp['status'] == "success", \
        resp


def main():
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "system_autoupdate_push_update": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "address": {"required": False, "type": "str"},
                "override": {"required": False, "type": "str",
                             "choices": ["enable", "disable"]},
                "port": {"required": False, "type": "int"},
                "status": {"required": False, "type": "str",
                           "choices": ["enable", "disable"]}
            }
        }
    }

    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)

    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
                  'username' in module.params and module.params['username'] is not None and \
                  'password' in module.params and module.params['password'] is not None

    if not legacy_mode:
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)

            is_error, has_changed, result = fortios_system_autoupdate(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")

        fos = FortiOSAPI()

        login(module.params, fos)
        is_error, has_changed, result = fortios_system_autoupdate(module.params, fos)
        fos.logout()

    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)


if __name__ == '__main__':
    main()
repo_name: tumbl3w33d/ansible
path: lib/ansible/modules/network/fortios/fortios_system_autoupdate_push_update.py
language: Python
license: gpl-3.0
size: 9,556
# -*- coding: utf-8 -*-
# Copyright (c) 2011 Rene Dohmen acidjunk@gmail.com
#
# filewalker.py
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 3 of the GNU General Public License
# as published by the Free Software Foundation. A copy of this license should
# be included in the file GPL-3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

####### IMPORTANT READ THIS ####################################################
# This is a script to collect info for the photoalbum                          #
################################################################################

import os, sys, glob

xml_header = """<?xml version="1.0" encoding="utf-8"?>
<album name="Museum in echt 2010" nofPicture="79">"""
xml_footer = """</album>
</xml>"""

xml = xml_header
dirname = "nl_Album_4"
os.chdir(dirname)
matches = glob.glob('*.jpg')
matches.sort()
for fileName in matches:
    #os.system("mv %s %s" % (fileName,fileName[:-4]))
    print "Found: %s" % fileName
    title = raw_input("\nEnter title:")
    text = raw_input("\nEnter text:")
    xmlPart = """\t<photo name="%s">
\t\t<title>%s</title>
\t\t<text>%s</text>
\t</photo>""" % (fileName, title, text)
    xml = "%s\n%s" % (xml, xmlPart)
print xml
repo_name: childsplay-mobi/cp-pygame
path: lib/CPData/PhotoalbumData/filewalker.py
language: Python
license: gpl-3.0
size: 1,697
# Copyright (C) Linaro Limited 2015,2016,2017,2019
# Author: Matt Hart <matthew.hart@linaro.org>
# Author: Milo Casagrande <milo.casagrande@linaro.org>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Create the build email report."""

import pymongo
import urllib
import urlparse

import models
import utils.db
import utils.report.common as rcommon

# Register normal Unicode gettext.
G_ = rcommon.L10N.ugettext
# Register plural forms Unicode gettext.
P_ = rcommon.L10N.ungettext

# pylint: disable=star-args

BUILD_SEARCH_FIELDS = [
    models.ARCHITECTURE_KEY,
    models.DEFCONFIG_FULL_KEY,
    models.BUILD_ENVIRONMENT_KEY,
    models.ERRORS_KEY,
    models.FILE_SERVER_RESOURCE_KEY,
    models.ID_KEY,
    models.STATUS_KEY,
    models.WARNINGS_KEY,
]

BUILD_SEARCH_SORT = [
    (models.BUILD_ENVIRONMENT_KEY, pymongo.ASCENDING),
    (models.DEFCONFIG_KEY, pymongo.ASCENDING),
    (models.DEFCONFIG_FULL_KEY, pymongo.ASCENDING),
    (models.ARCHITECTURE_KEY, pymongo.ASCENDING)
]

# Various build URLS.
DEFCONFIG_ID_URL = (u"{build_url:s}/id/{build_id:s}/")
LOG_URL = (
    u"{storage_url:s}/{file_server_resource:s}/" + utils.BUILD_LOGS_DIR)
ERR_LOG_URL = (
    u"{storage_url:s}/{file_server_resource:s}/" + utils.BUILD_ERRORS_FILE)
WARN_LOG_URL = (
    u"{storage_url:s}/{file_server_resource:s}/" + utils.BUILD_WARNINGS_FILE)
MISM_LOG_URL = (
    u"{storage_url:s}/{file_server_resource:s}/" + utils.BUILD_MISMATCHES_FILE)

# Other template strings.
DEFCONFIG_URL_HTML = \
    u"<a href=\"{defconfig_url:s}\">{defconfig:s} ({build_environment:s})</a>"
STATUS_HTML = (
    u"<a style=\"color: {red:s}\" href=\"{log_url:s}\">{status:s}</a>"
)
ERR_STR_HTML = (
    u"<a style=\"color: {red:s};\" href=\"{err_log_url:s}\">"
    u"{err_string:s}</a>"
)
WARN_STR_HTML = (
    u"<a style=\"color: {yellow:s};\" href=\"{warn_log_url:s}\">"
    u"{warn_string:s}</a>"
)


def _make_quoted_url(base, items):
    url = urlparse.urljoin(
        base,
        '/'.join(urllib.quote_plus(str(item)) for item in items)
    )
    if not url.endswith('/'):
        url += '/'
    return url


def _get_errors_count(results):
    """Parse the build data and get errors and warnings.

    :param results: The results to parse.
    :type results: pymongo.cursor.Cursor.
    :return The errors data structure, the errors and warnings count and
    the build id value.
    """
    parsed_data = {}
    total_errors = total_warnings = 0

    for result in results:
        data = {
            key: result[field] for key, field in [
                ("defconfig", models.DEFCONFIG_FULL_KEY),
                ("build_environment", models.BUILD_ENVIRONMENT_KEY),
                ("status", models.STATUS_KEY),
                ("build_id", models.BUILD_ID_KEY),
                ("file_server_resource", models.FILE_SERVER_RESOURCE_KEY),
            ]
        }

        res_errors = result.get(models.ERRORS_COUNT_KEY, 0)
        res_warnings = result.get(models.WARNINGS_COUNT_KEY, 0)
        if res_errors:
            total_errors += res_errors
        if res_warnings:
            total_warnings += res_warnings

        data.update({
            "errors": res_errors,
            "warnings": res_warnings,
        })

        arch_data = parsed_data.setdefault(
            result[models.ARCHITECTURE_KEY], list())
        arch_data.append(data)

    return parsed_data, total_errors, total_warnings


def _parse_build_data(results):
    """Parse the build data to provide a writable data structure.

    Loop through the build data found, and create a new dictionary whose
    keys are the architectures and their values a list of tuples of
    (defconfig, status, build_id).

    :param results: The results to parse.
    :type results: pymongo.cursor.Cursor.
    :return A dictionary.
    """
    parsed_data = {}

    for result in results:
        build_data = {
            key: result[field] for key, field in [
                ("defconfig", models.DEFCONFIG_FULL_KEY),
                ("build_environment", models.BUILD_ENVIRONMENT_KEY),
                ("status", models.STATUS_KEY),
                ("build_id", models.ID_KEY),
                ("file_server_resource", models.FILE_SERVER_RESOURCE_KEY),
            ]
        }

        arch_data = parsed_data.setdefault(
            result[models.ARCHITECTURE_KEY], list())
        arch_data.append(build_data)

    return parsed_data


# pylint: disable=too-many-locals
def _get_build_subject_string(**kwargs):
    """Create the build email subject line.

    This is used to created the custom email report line based on the
    number of values we have.

    :param total_count: The total number of build reports.
    :type total_count: integer
    :param fail_count: The number of failed build reports.
    :type fail_count: integer
    :param pass_count: The number of successful build reports.
    :type pass_count: integer
    :param job: The name of the job.
    :type job: string
    :param kernel: The name of the kernel.
    :type kernel: string
    :return The subject string.
    """
    k_get = kwargs.get
    total_count = k_get("total_count", 0)
    errors = k_get("errors_count", 0)
    warnings = k_get("warnings_count", 0)

    subject_str = u""
    base_subject = G_(u"{job:s}/{git_branch:s} build")
    kernel_name = G_(u"({kernel:s})")
    failed_builds = G_(u"{fail_count:d} failed")
    passed_builds = G_(u"{pass_count:d} passed")
    total_builds = P_(
        u"{total_count:d} build", u"{total_count:d} builds", total_count)
    errors_string = P_(
        u"{errors_count:d} error", u"{errors_count:d} errors", errors)
    warnings_string = P_(
        u"{warnings_count:d} warning", u"{warnings_count:d} warnings",
        warnings)

    # Base format string to create the subject line.
    # 1st, 2nd, 3rd, 4th: job name, total count, fail count, pass count.
    # The last one is always the kernel/git-describe name.
    # The others will contain errors and warnings count.
    # next build: 0 failed, 10 passed (next-20150401)
    base_0 = G_(u"{:s}: {:s}: {:s}, {:s} {:s}")
    # next build: 0 failed, 10 passed, 2 warnings (next-20150401)
    base_1 = G_(u"{:s}: {:s}: {:s}, {:s}, {:s} {:s}")
    # next build: 0 failed, 10 passed, 1 error, 2 warnings (next-20150401)
    base_2 = G_(u"{:s}: {:s}: {:s}, {:s}, {:s}, {:s} {:s}")

    if errors == 0 and warnings == 0:
        subject_str = base_0.format(
            base_subject, total_builds, failed_builds, passed_builds,
            kernel_name)
    elif errors == 0 and warnings != 0:
        subject_str = base_1.format(
            base_subject, total_builds, failed_builds, passed_builds,
            warnings_string, kernel_name)
    elif errors != 0 and warnings != 0:
        subject_str = base_2.format(
            base_subject, total_builds, failed_builds, passed_builds,
            errors_string, warnings_string, kernel_name)
    elif errors != 0 and warnings == 0:
        subject_str = base_1.format(
            base_subject, total_builds, failed_builds, passed_builds,
            errors_string, kernel_name)

    # Now fill in the values.
    subject_str = subject_str.format(**kwargs)

    return subject_str


# pylint: disable=too-many-branches
# pylint: disable=too-many-statements
def _parse_and_structure_results(**kwargs):
    """Parse the results and create a data structure for the templates.

    Create a special data structure to be consumed by the template engine.

    By default it will create the strings for TXT and HTML templates. The
    special template will then use the correct format.

    The template data structure is as follows:

    {
        "summary": {
            "txt": ["List of TXT summary strings"],
            "html": ["List of HTML summary strings"]
        },
        "data": {
            "arch": ["List of defconfigs"]
        }
    }

    :param failed_data: The failed data structure.
    :type failed_data: dictionary
    :param error_data: The error data structure.
    :type error_data: dictionary
    :param fail_count: The number of failures.
    :type fail_count: integer
    :param errors_count: The number of errors.
    :type errors_count: integer
    :param warnings_count: The number of warnings.
    :type warnings_count: integer
    :return The template data structure as a dictionary object.
    """
    platforms = {}

    k_get = kwargs.get
    error_data = k_get("error_data", None)
    errors_count = k_get("errors_count", 0)
    fail_count = k_get("fail_count", 0)
    failed_data = k_get("failed_data", None)
    warnings_count = k_get("warnings_count", 0)

    # Local substitutions dictionary, common to both data structures parsed.
    gen_subs = {
        "build_url": k_get("build_url"),
        "err_log_url": ERR_LOG_URL,
        "defconfig_url": DEFCONFIG_ID_URL,
        "job": k_get("job"),
        "kernel": k_get("kernel"),
        "git_branch": k_get("git_branch"),
        "log_url": LOG_URL,
        "mism_log_url": MISM_LOG_URL,
        "red": rcommon.HTML_RED,
        "storage_url": k_get("storage_url"),
        "warn_log_url": WARN_LOG_URL,
        "yellow": rcommon.HTML_YELLOW
    }

    if failed_data:
        platforms["failed_data"] = {}
        platforms["failed_data"]["summary"] = {}
        platforms["failed_data"]["summary"]["txt"] = []
        platforms["failed_data"]["summary"]["html"] = []

        failure_summary = P_(
            u"Build Failure Detected:",
            u"Build Failures Detected:", fail_count)

        platforms["failed_data"]["summary"]["txt"].append(failure_summary)
        platforms["failed_data"]["summary"]["html"].append(failure_summary)

        platforms["failed_data"]["data"] = {}
        failed_struct = platforms["failed_data"]["data"]

        subs = gen_subs.copy()
        for arch, arch_data in failed_data.iteritems():
            subs["arch"] = arch
            arch_string = G_(u"{arch:s}:").format(**subs)
            failed_struct[arch_string] = []
            failed_append = failed_struct[arch_string].append

            for struct in arch_data:
                subs.update({
                    key: struct[key] for key in [
                        "defconfig",
                        "build_environment",
                        "status",
                        "build_id",
                        "file_server_resource",
                    ]}
                )

                txt_str = G_(
                    u"{defconfig:s}: ({build_environment:s}) {status:s}"
                ).format(**subs)
                html_str = (
                    DEFCONFIG_URL_HTML.format(**subs).format(**subs),
                    STATUS_HTML.format(**subs).format(**subs))

                failed_append((txt_str, html_str))
    else:
        platforms["failed_data"] = None

    if errors_count or warnings_count:
        platforms["error_data"] = {}

        if errors_count > 0 and warnings_count > 0:
            summary_string = G_(u"Errors and Warnings Detected:")
        elif errors_count > 0 and warnings_count == 0:
            summary_string = G_(u"Errors Detected:")
        elif errors_count == 0 and warnings_count > 0:
            summary_string = G_(u"Warnings Detected:")

        platforms["error_data"]["summary"] = {}
        platforms["error_data"]["summary"]["txt"] = [summary_string]
        platforms["error_data"]["summary"]["html"] = [summary_string]

        platforms["error_data"]["data"] = {}
        error_struct = platforms["error_data"]["data"]

        subs = gen_subs.copy()
        for arch, arch_data in error_data.iteritems():
            subs["arch"] = arch
            arch_string = G_(u"{:s}:").format(arch)
            arch_errors = error_struct[arch_string] = []

            for struct in arch_data:
                subs.update({
                    key: struct[key] for key in [
                        "defconfig",
                        "build_environment",
                        "status",
                        "build_id",
                        "warnings",
                        "errors",
                        "file_server_resource",
                    ]
                })

                errors = subs["errors"]
                warnings = subs["warnings"]

                if errors == 0 and warnings == 0:
                    continue

                txt_desc_str = ""
                html_desc_str = ""

                err_string = P_(
                    u"{errors:d} error", u"{errors:d} errors", errors
                )
                warn_string = P_(
                    u"{warnings:d} warning", u"{warnings:d} warnings",
                    warnings
                )
                subs["err_string"] = err_string
                subs["warn_string"] = warn_string

                if errors > 0 and warnings > 0:
                    txt_desc_str = G_(
                        u"{err_string:s}, {warn_string:s}")
                    html_desc_str = (
                        ERR_STR_HTML.format(**subs).format(**subs),
                        WARN_STR_HTML.format(**subs).format(**subs)
                    )
                elif errors > 0 and warnings == 0:
                    txt_desc_str = u"{err_string:s}"
                    html_desc_str = (
                        ERR_STR_HTML.format(**subs).format(**subs), u"")
                elif errors == 0 and warnings > 0:
                    txt_desc_str = u"{warn_string:s}"
                    html_desc_str = (
                        u"", WARN_STR_HTML.format(**subs).format(**subs))

                txt_desc_str = txt_desc_str.format(**subs)
                subs["txt_desc_str"] = txt_desc_str

                txt_defconfig_str = (
                    G_(u"{defconfig:s} ({build_environment:s}): " +
                       u"{txt_desc_str:s}").format(**subs)
                ).format(**subs)
                html_defconfing_str = (
                    DEFCONFIG_URL_HTML.format(**subs).format(**subs),
                    html_desc_str)

                arch_errors.append((txt_defconfig_str, html_defconfing_str))
    else:
        platforms["error_data"] = None

    return platforms


def _create_build_email(**kwargs):
    """Parse the results and create the email text body to send.

    :param job: The name of the job.
    :type job: str
    :param kernel: The name of the kernel.
    :type kernel: str
    :param git_commit: The git commit.
    :type git_commit: str
    :param git_url: The git url.
    :type git_url: str
    :param git_branch: The git branch.
    :type git_branch: str
    :param failed_data: The parsed failed results.
    :type failed_data: dict
    :param fail_count: The total number of failed results.
    :type fail_count: int
    :param total_count: The total number of results.
    :type total_count: int
    :param total_unique_data: The unique values data structure.
    :type total_unique_data: dictionary
    :param pass_count: The total number of passed results.
    :type pass_count: int
    :param base_url: The base URL to build the dashboard links.
    :type base_url: string
    :param build_url: The base URL for the build section of the dashboard.
    :type build_url: string
    :param info_email: The email address for the footer note.
    :type info_email: string
    :return A tuple with the email body and subject as strings.
    """
    txt_body = None
    html_body = None
    subject_str = None

    k_get = kwargs.get
    email_format = k_get("email_format")
    total_unique_data = k_get("total_unique_data", None)
    failed_data = k_get("failed_data", None)
    error_data = k_get("error_data", None)

    subject_str = _get_build_subject_string(**kwargs)

    built_unique_one = G_(u"Built: {:s}")

    built_unique_string = None
    if total_unique_data:
        unique_archs = rcommon.count_unique(
            total_unique_data.get("arch", None))

        kwargs["unique_archs"] = unique_archs

        arch_str = P_(
            u"{unique_archs:d} unique architecture",
            u"{unique_archs:d} unique architectures",
            unique_archs
        )

        if unique_archs > 0:
            built_unique_string = built_unique_one.format(arch_str)

        if built_unique_string:
            built_unique_string = built_unique_string.format(**kwargs)

    build_summary_url = _make_quoted_url(
        kwargs['base_url'],
        [
            'build',
            kwargs['job'],
            'branch',
            kwargs['git_branch'],
            'kernel',
            kwargs['kernel'],
        ]
    )

    kwargs["built_unique_string"] = built_unique_string
    kwargs["tree_string"] = G_(u"Tree: {job:s}").format(**kwargs)
    kwargs["branch_string"] = G_(u"Branch: {git_branch:s}").format(**kwargs)
    kwargs["git_describe_string"] = G_(u"Git Describe: {kernel:s}").format(
        **kwargs)
    kwargs["subject_str"] = subject_str

    git_url = k_get("git_url")
    git_commit = k_get("git_commit")

    translated_git_url = \
        rcommon.translate_git_url(git_url, git_commit) or git_url

    git_txt_string = G_(u"Git URL: {:s}").format(git_url)
    git_html_string = G_(u"Git URL: <a href=\"{:s}\">{:s}</a>").format(
        translated_git_url, git_url)

    kwargs["git_commit_string"] = G_(u"Git Commit: {:s}").format(git_commit)
    kwargs["git_url_string"] = (git_txt_string, git_html_string)

    if failed_data or error_data:
        kwargs["platforms"] = _parse_and_structure_results(**kwargs)

    if models.EMAIL_TXT_FORMAT_KEY in email_format:
        kwargs["full_build_summary"] = (
            G_(u"Full Build Summary: {:s}").format(build_summary_url))

        txt_body = rcommon.create_txt_email("build.txt", **kwargs)

    if models.EMAIL_HTML_FORMAT_KEY in email_format:
        # Fix the summary URLs for the HTML email.
kwargs["full_build_summary"] = ( G_(u"Full Build Summary: <a href=\"{url:s}\">{url:s}</a>").format( **{"url": build_summary_url})) html_body = rcommon.create_html_email("build.html", **kwargs) return txt_body, html_body, subject_str def create_build_report( job, branch, kernel, email_format, db_options, mail_options=None): """Create the build report email to be sent. :param job: The name of the job. :type job: str :param kernel: The name of the kernel. :type kernel: str :param email_format: The email format to send. :type email_format: list :param db_options: The mongodb database connection parameters. :type db_options: dict :param mail_options: The options necessary to connect to the SMTP server. :type mail_options: dict :return A tuple with the email body and subject as strings or None. """ kwargs = {} txt_body = None html_body = None subject = None # This is used to provide a footer note in the email report. info_email = None fail_count = total_count = 0 errors_count = warnings_count = 0 fail_results = [] if mail_options: info_email = mail_options.get("info_email", None) spec = { models.JOB_KEY: job, models.GIT_BRANCH_KEY: branch, models.KERNEL_KEY: kernel } database = utils.db.get_db_connection(db_options) total_results, total_count = utils.db.find_and_count( database[models.BUILD_COLLECTION], 0, 0, spec=spec, fields=BUILD_SEARCH_FIELDS ) total_unique_data = rcommon.get_unique_data( total_results.clone(), unique_keys=[models.ARCHITECTURE_KEY]) spec[models.STATUS_KEY] = models.FAIL_STATUS fail_results, fail_count = utils.db.find_and_count( database[models.BUILD_COLLECTION], 0, 0, spec=spec, fields=BUILD_SEARCH_FIELDS, sort=BUILD_SEARCH_SORT) failed_data = _parse_build_data(fail_results.clone()) # Retrieve the parsed errors/warnings/mismatches summary and then # the details. errors_spec = { models.JOB_KEY: job, models.GIT_BRANCH_KEY: branch, models.KERNEL_KEY: kernel, } errors_summary = utils.db.find_one2( database[models.ERRORS_SUMMARY_COLLECTION], errors_spec, fields=[ models.ERRORS_KEY, models.WARNINGS_KEY, models.MISMATCHES_KEY ] ) error_details = utils.db.find( database[models.ERROR_LOGS_COLLECTION], 0, 0, spec=errors_spec, sort=[(models.DEFCONFIG_FULL_KEY, 1)] ) error_details = list(d for d in error_details) err_data, errors_count, warnings_count = _get_errors_count(error_details) kwargs = { "base_url": rcommon.DEFAULT_BASE_URL, "build_url": rcommon.DEFAULT_BUILD_URL, "email_format": email_format, "error_data": err_data, "error_details": error_details, "errors_count": errors_count, "errors_summary": errors_summary, "fail_count": fail_count, "failed_data": failed_data, "info_email": info_email, "pass_count": total_count - fail_count, "storage_url": rcommon.DEFAULT_STORAGE_URL, "total_count": total_count, "total_unique_data": total_unique_data, "warnings_count": warnings_count, "git_branch": branch, models.JOB_KEY: job, models.KERNEL_KEY: kernel, } kwargs["git_commit"], kwargs["git_url"] = \ rcommon.get_git_data(job, branch, kernel, db_options) custom_headers = { rcommon.X_REPORT: rcommon.BUILD_REPORT_TYPE, rcommon.X_BRANCH: branch, rcommon.X_TREE: job, rcommon.X_KERNEL: kernel, } if all([fail_count == 0, total_count == 0]): utils.LOG.warn( "Nothing found for '%s-%s-%s': no build email report sent", job, branch, kernel) else: txt_body, html_body, subject = _create_build_email(**kwargs) return txt_body, html_body, subject, custom_headers
kernelci/kernelci-backend
app/utils/report/build.py
Python
lgpl-2.1
22,682
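A minimal sketch, not from the source, of what _get_build_subject_string() yields for a run with no errors or warnings; the job, branch, and kernel values are illustrative. G_ and P_ are the gettext helpers defined earlier in build.py, before this excerpt.

subject = _get_build_subject_string(
    job="next", git_branch="master", kernel="next-20150401",
    total_count=10, fail_count=0, pass_count=10,
    errors_count=0, warnings_count=0)
# subject == u"next/master build: 10 builds: 0 failed, 10 passed (next-20150401)"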
def char_concat(word):
    # Floor division keeps the midpoint an int on Python 3 as well as Python 2.
    mid = len(word) // 2
    return ''.join('{}{}'.format(''.join(pair), i) for i, pair in enumerate(zip(word[:mid], word[-mid:][::-1]), start=1))
the-zebulan/CodeWars
katas/kyu_7/character_concatenation.py
Python
mit
236
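A quick usage sketch (values illustrative) showing how the pairing walks inward from both ends and drops the middle character of odd-length input:

assert char_concat("abcdef") == "af1be2cd3"
assert char_concat("abcde") == "ae1bd2"   # 'c' has no partner, so it is dropped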
# pylint: disable=line-too-long import django.contrib.auth.models as auth_models from adlt.core import factories as core_factories def populatedb(): user = auth_models.User.objects.create_superuser( 'superuser', 'superuser@example.com', 'secret', first_name='Super', last_name='User', ) agent = core_factories.AgentFactory(title='Agent', user=user) seimas = core_factories.AgentFactory(title='Lietuvos Respublikos Seimas', user=user) ta = core_factories.DatasetFactory(title='Teisės aktų duomenys', maturity_level=1, agent=seimas, user=user) bd = core_factories.DatasetFactory(title='Balsavimų duomenys', maturity_level=1, agent=seimas, user=user) sn = core_factories.DatasetFactory(title='Seimo narių duomenys', maturity_level=1, agent=seimas, user=user) sd = core_factories.DatasetFactory(title='Seimo darbotvarkės duomenys', maturity_level=0, agent=seimas, user=user) rc = core_factories.AgentFactory(title='Registrų Centras', user=user) ad = core_factories.DatasetFactory(title='Adresų duomenys', maturity_level=0, agent=rc, user=user) archyvas = core_factories.AgentFactory(title='Lietuvos Istorijos Archyvas', user=user) md = core_factories.DatasetFactory(title='Gimimo, Mirties ir Santuokos metrikai', maturity_level=1, agent=archyvas, user=user) ms = core_factories.ProjectFactory(title='manoSeimas.lt', description='Aprašymas.', agent=agent, user=user) ms.datasets.add(ta) ms.datasets.add(bd) ms.datasets.add(sn) ms.datasets.add(sd) ms.datasets.add(ad) teisynas = core_factories.ProjectFactory(title='teisynas.lt', description='Aprašymas.', agent=agent, user=user) teisynas.datasets.add(ta) teisynas.datasets.add(sn) osm = core_factories.ProjectFactory(title='Open Street Map', description='Atviras žemėlapis', agent=agent, user=user) osm.datasets.add(ad) mskaitm = core_factories.ProjectFactory(title='Metrikų skaitmeninimas', description='Metrikų skaitmeninimas.', agent=agent, user=user) mskaitm.datasets.add(md) mskaitm.datasets.add(ad)
sirex/atviriduomenys.lt
adlt/populatedb/services.py
Python
agpl-3.0
2,087
# -*- coding: utf-8 -*- """ Teams pages. """ from .course_page import CoursePage from .discussion import InlineDiscussionPage from ..common.paging import PaginatedUIMixin from .fields import FieldsMixin TOPIC_CARD_CSS = 'div.wrapper-card-core' MY_TEAMS_BUTTON_CSS = 'a.nav-item[data-index="0"]' BROWSE_BUTTON_CSS = 'a.nav-item[data-index="1"]' TEAMS_LINK_CSS = '.action-view' TEAMS_HEADER_CSS = '.teams-header' CREATE_TEAM_LINK_CSS = '.create-team' class TeamsPage(CoursePage): """ Teams page/tab. """ url_path = "teams" def is_browser_on_page(self): """ Checks if teams page is being viewed """ return self.q(css='body.view-teams').present def get_body_text(self): """ Returns the current dummy text. This will be changed once there is more content on the page. """ main_page_content_css = '.page-content-main' self.wait_for( lambda: len(self.q(css=main_page_content_css).text) == 1, description="Body text is present" ) return self.q(css=main_page_content_css).text[0] def active_tab(self): """ Get the active tab. """ return self.q(css='.is-active').attrs('data-url')[0] def browse_topics(self): """ View the Browse tab of the Teams page. """ self.q(css=BROWSE_BUTTON_CSS).click() class MyTeamsPage(CoursePage, PaginatedUIMixin): """ The 'My Teams' tab of the Teams page. """ url_path = "teams/#my-teams" def is_browser_on_page(self): """Check if the "My Teams" tab is being viewed.""" button_classes = self.q(css=MY_TEAMS_BUTTON_CSS).attrs('class') if len(button_classes) == 0: return False return 'is-active' in button_classes[0] @property def team_cards(self): """Get all the team cards on the page.""" return self.q(css='.team-card') class BrowseTopicsPage(CoursePage, PaginatedUIMixin): """ The 'Browse' tab of the Teams page. """ url_path = "teams/#browse" def is_browser_on_page(self): """Check if the Browse tab is being viewed.""" button_classes = self.q(css=BROWSE_BUTTON_CSS).attrs('class') if len(button_classes) == 0: return False return 'is-active' in button_classes[0] @property def topic_cards(self): """Return a list of the topic cards present on the page.""" return self.q(css=TOPIC_CARD_CSS).results def browse_teams_for_topic(self, topic_name): """ Show the teams list for `topic_name`. """ self.q(css=TEAMS_LINK_CSS).filter( text='View Teams in the {topic_name} Topic'.format(topic_name=topic_name) )[0].click() self.wait_for_ajax() class BrowseTeamsPage(CoursePage, PaginatedUIMixin): """ The paginated UI for browsing teams within a Topic on the Teams page. """ def __init__(self, browser, course_id, topic): """ Set up `self.url_path` on instantiation, since it dynamically reflects the current topic. Note that `topic` is a dict representation of a topic following the same convention as a course module's topic. 
""" super(BrowseTeamsPage, self).__init__(browser, course_id) self.topic = topic self.url_path = "teams/#topics/{topic_id}".format(topic_id=self.topic['id']) def is_browser_on_page(self): """Check if we're on the teams list page for a particular topic.""" self.wait_for_element_presence('.team-actions', 'Wait for the bottom links to be present') has_correct_url = self.url.endswith(self.url_path) teams_list_view_present = self.q(css='.teams-main').present return has_correct_url and teams_list_view_present @property def header_topic_name(self): """Get the topic name displayed by the page header""" return self.q(css=TEAMS_HEADER_CSS + ' .page-title')[0].text @property def header_topic_description(self): """Get the topic description displayed by the page header""" return self.q(css=TEAMS_HEADER_CSS + ' .page-description')[0].text @property def team_cards(self): """Get all the team cards on the page.""" return self.q(css='.team-card') def click_create_team_link(self): """ Click on create team link.""" query = self.q(css=CREATE_TEAM_LINK_CSS) if query.present: query.first.click() self.wait_for_ajax() def click_search_team_link(self): """ Click on create team link.""" query = self.q(css='.search-team-descriptions') if query.present: query.first.click() self.wait_for_ajax() def click_browse_all_teams_link(self): """ Click on browse team link.""" query = self.q(css='.browse-teams') if query.present: query.first.click() self.wait_for_ajax() class CreateTeamPage(CoursePage, FieldsMixin): """ Create team page. """ def __init__(self, browser, course_id, topic): """ Set up `self.url_path` on instantiation, since it dynamically reflects the current topic. Note that `topic` is a dict representation of a topic following the same convention as a course module's topic. """ super(CreateTeamPage, self).__init__(browser, course_id) self.topic = topic self.url_path = "teams/#topics/{topic_id}/create-team".format(topic_id=self.topic['id']) def is_browser_on_page(self): """Check if we're on the create team page for a particular topic.""" has_correct_url = self.url.endswith(self.url_path) teams_create_view_present = self.q(css='.team-edit-fields').present return has_correct_url and teams_create_view_present @property def header_page_name(self): """Get the page name displayed by the page header""" return self.q(css='.page-header .page-title')[0].text @property def header_page_description(self): """Get the page description displayed by the page header""" return self.q(css='.page-header .page-description')[0].text @property def header_page_breadcrumbs(self): """Get the page breadcrumb text displayed by the page header""" return self.q(css='.page-header .breadcrumbs')[0].text @property def validation_message_text(self): """Get the error message text""" return self.q(css='.create-team.wrapper-msg .copy')[0].text def submit_form(self): """Click on create team button""" self.q(css='.create-team .action-primary').first.click() self.wait_for_ajax() def cancel_team(self): """Click on cancel team button""" self.q(css='.create-team .action-cancel').first.click() self.wait_for_ajax() class TeamPage(CoursePage, PaginatedUIMixin): """ The page for a specific Team within the Teams tab """ def __init__(self, browser, course_id, team=None): """ Set up `self.url_path` on instantiation, since it dynamically reflects the current team. 
""" super(TeamPage, self).__init__(browser, course_id) self.team = team if self.team: self.url_path = "teams/#teams/{topic_id}/{team_id}".format( topic_id=self.team['topic_id'], team_id=self.team['id'] ) def is_browser_on_page(self): """Check if we're on the teams list page for a particular team.""" if self.team: if not self.url.endswith(self.url_path): return False return self.q(css='.team-profile').present @property def discussion_id(self): """Get the id of the discussion module on the page""" return self.q(css='div.discussion-module').attrs('data-discussion-id')[0] @property def discussion_page(self): """Get the discussion as a bok_choy page object""" if not hasattr(self, '_discussion_page'): # pylint: disable=attribute-defined-outside-init self._discussion_page = InlineDiscussionPage(self.browser, self.discussion_id) return self._discussion_page @property def team_name(self): """Get the team's name as displayed in the page header""" return self.q(css='.page-header .page-title')[0].text @property def team_description(self): """Get the team's description as displayed in the page header""" return self.q(css=TEAMS_HEADER_CSS + ' .page-description')[0].text @property def team_members_present(self): """Verifies that team members are present""" return self.q(css='.page-content-secondary .team-members .team-member').present @property def team_capacity_text(self): """Returns team capacity text""" return self.q(css='.page-content-secondary .team-capacity :last-child').text[0] @property def team_location(self): """ Returns team location/country. """ return self.q(css='.page-content-secondary .team-country :last-child').text[0] @property def team_language(self): """ Returns team location/country. """ return self.q(css='.page-content-secondary .team-language :last-child').text[0] @property def team_user_membership_text(self): """Returns the team membership text""" query = self.q(css='.page-content-secondary > .team-user-membership-status') return query.text[0] if query.present else '' @property def team_leave_link_present(self): """Verifies that team leave link is present""" return self.q(css='.leave-team-link').present def click_leave_team_link(self): """ Click on Leave Team link""" self.q(css='.leave-team-link').first.click() self.wait_for_ajax() @property def team_members(self): """Returns the number of team members in this team""" return len(self.q(css='.page-content-secondary .team-member')) def click_first_profile_image(self): """Clicks on first team member's profile image""" self.q(css='.page-content-secondary .members-info > .team-member').first.click() @property def first_member_username(self): """Returns the username of team member""" return self.q(css='.page-content-secondary .tooltip-custom').text[0] def click_join_team_button(self): """ Click on Join Team button""" self.q(css='.join-team .action-primary').first.click() self.wait_for_ajax() @property def join_team_message(self): """ Returns join team message """ self.wait_for_ajax() return self.q(css='.join-team .join-team-message').text[0] @property def join_team_button_present(self): """ Returns True if Join Team button is present else False """ self.wait_for_ajax() return self.q(css='.join-team .action-primary').present @property def join_team_message_present(self): """ Returns True if Join Team message is present else False """ return self.q(css='.join-team .join-team-message').present @property def new_post_button_present(self): """ Returns True if New Post button is present else False """ return self.q(css='.discussion-module 
.new-post-btn').present
jazztpt/edx-platform
common/test/acceptance/pages/lms/teams.py
Python
agpl-3.0
11,435
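A minimal sketch, assuming the standard bok-choy acceptance-test harness, of how these page objects are typically driven; browser and course_id come from the test fixture.

teams_page = TeamsPage(browser, course_id)
teams_page.visit()            # PageObject.visit() navigates to url_path
teams_page.browse_topics()    # switch to the Browse tab
topics_page = BrowseTopicsPage(browser, course_id)
topics_page.wait_for_page()   # blocks until is_browser_on_page() is true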
from django.template.loader import render_to_string def get_change_plan_form(plan_code, subscription_id): return render_to_string("django_recurly/change_plan_form.html", { "plan_code": plan_code, "subscription_id": subscription_id, })
pakal/django-recurly
django_recurly/helpers/api.py
Python
bsd-3-clause
259
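A usage sketch; the view, plan code, and subscription id below are assumptions for illustration, not part of the source.

from django.http import HttpResponse
from django_recurly.helpers.api import get_change_plan_form

def change_plan(request):
    # Render the hidden form for a hypothetical plan and subscription.
    form_html = get_change_plan_form("gold", "sub-1234")
    return HttpResponse(form_html)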
from routes import API_PATH from routes.base import BaseBlueprint from routes.workers.worker import WorkersRoute, WorkerCheckinRoute class Blueprint(BaseBlueprint): def __init__(self): super().__init__("workers", __name__, url_prefix=f"{API_PATH}/workers") self.register_route(WorkersRoute()) self.register_route(WorkerCheckinRoute())
openzim/zimfarm
dispatcher/backend/src/routes/workers/__init__.py
Python
gpl-3.0
366
# Licensed to the Software Freedom Conservancy (SFC) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The SFC licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. class BidiConnection: def __init__(self, session, cdp, devtools_import) -> None: self.session = session self.cdp = cdp self.devtools = devtools_import
SeleniumHQ/selenium
py/selenium/webdriver/remote/bidi_connection.py
Python
apache-2.0
968
# https://oj.leetcode.com/problems/binary-tree-level-order-traversal/ # Definition for a binary tree node class TreeNode: def __init__(self, x): self.val = x self.left = None self.right = None class Solution: # @param root, a tree node # @return a list of lists of integers def levelOrder(self, root): if root == None: return [] result, queue = [[root.val]], [[root]] while len(queue) > 0: curr = [] for node in queue[0]: if node.left != None: curr.append(node.left) if node.right != None: curr.append(node.right) queue.pop(0) if len(curr) > 0: queue.append(curr) result.append([x.val for x in curr]) return result s = Solution() print s.levelOrder(None) root = TreeNode(1) root.left = TreeNode(2) print s.levelOrder(root)
yaoxuanw007/forfun
leetcode/python/binaryTreeLevelOrderTraversal.py
Python
mit
847
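The solution above pops from the head of a list, which is O(n) per dequeue. A sketch of the same BFS with collections.deque follows; it is an alternative reusing the TreeNode class above, not the original author's code.

from collections import deque

def level_order(root):
    if root is None:
        return []
    result, queue = [], deque([root])
    while queue:
        level = []
        for _ in range(len(queue)):   # process exactly one level per pass
            node = queue.popleft()
            level.append(node.val)
            if node.left:
                queue.append(node.left)
            if node.right:
                queue.append(node.right)
        result.append(level)
    return result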
import os from setuptools import setup, find_packages with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme: README = readme.read() # allow setup.py to be run from any path os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir))) setup( name='pybbm-private-messages', version='0.3.1', packages=find_packages(), include_package_data=True, install_requires=[ 'pybbm', 'django_select2' ], test_suite='runtests.runtests', license='MIT License', description='A private messaging plugin for the pybbm forum.', long_description=README, url='https://github.com/skolsuper/pybbm_private_messages', author='James Keys', author_email='skolsuper@gmail.com', classifiers=[ 'Environment :: Web Environment', 'Framework :: Django', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Topic :: Internet :: WWW/HTTP', 'Topic :: Internet :: WWW/HTTP :: Dynamic Content', ], )
skolsuper/pybbm_private_messages
setup.py
Python
mit
1,386
#!/usr/bin/env python # Modified from # https://github.com/f5devcentral/f5-aws-migrate/blob/master/f5-aws-migrate.py # Assumptions: Script will be run on a BIGI-IP itself import os import sys import re import shutil import tarfile from tempfile import mkstemp from optparse import OptionParser def get_hostname(config_file): hostname = "" global_settings_token_found = False hostname_token_line = 0 with open(config_file, 'r') as f: for line in f: if global_settings_token_found: match = re.search('\s+hostname (.+)', line) if match: hostname = match.group(1) return hostname else: match = re.search('sys global-settings', line) if match: global_settings_token_found = True def get_ip(config_file): # 1nic Autoscale IPv4 Specific # Looking for entry in bigip_base.conf: # net self /Common/10.0.11.241/24 { # address 10.0.11.241/24 # allow-service { # default # } # traffic-group /Common/traffic-group-local-only # vlan /Common/external # } ip = "" line_number = 0 net_self_token_line = 0 with open(config_file, 'r') as f: for line in f: match = re.search('net self', line) if match: net_self_token_line = line_number # Search for address in that token match = re.search(' address (.+)/(.+)', line) if match and (line_number - net_self_token_line == 1): ip = match.group(1) mask = match.group(2) line_number += 1 # could do a little more validation here to make sure not another self-ip configured by accident # octets match management ip, hostname, etc. return ip def get_local_ip(): # 1nic Autoscale IPv4 Specific # "privateIp entry in /shared/vadc/aws/iid-document # is probably most reliable # otherwise, getting it from dhcp # searching for field # "fixed-address 10.0.1.219;" ip = "" with open('/var/lib/dhclient/dhclient.leases', 'r') as f: for line in f: match = re.search('fixed-address (.+);', line) if match: ip = match.group(1) return ip def get_gateway(config_file): # 1nic Autoscale IPv4 Specific # searching for entry # net route /LOCAL_ONLY/default { # gw 10.0.1.1 # network default # } gateway = "" with open(config_file, 'r') as f: for line in f: match = re.search(' gw (.+)', line) if match: gateway = match.group(1) return gateway def get_local_gateway(): # 1nic Autoscale IPv4 Specific # searching for field # "option routers 10.0.1.1;" gateway = "" with open('/var/lib/dhclient/dhclient.leases', 'r') as f: for line in f: match = re.search('option routers (.+);', line) if match: gateway = match.group(1) return gateway def replace(source_file_path, pattern, substring): fh, target_file_path = mkstemp() with open(target_file_path, 'w') as target_file: with open(source_file_path, 'r') as source_file: for line in source_file: if "-" in pattern: newline = re.sub("(%s)$" %(pattern),substring,line) newline = re.sub("(%s)\." 
%(pattern),substring+'.',newline) target_file.write(newline) else: newline = re.sub("(%s)$" %(pattern),substring,line) newline = re.sub("(%s)\/" %(pattern),substring+'/',newline) target_file.write(newline) os.remove(source_file_path) shutil.move(target_file_path, source_file_path) def removeFiles(dir, pattern): if os.path.exists(dir): for f in os.listdir(dir): if re.search(pattern, f): os.remove(os.path.join(dir, f)) def main(): parser = OptionParser() parser.add_option("--debug-level", action="store", type="int", dest="debug_level", default=0, help="debug level print debug (0-9)") parser.add_option("--cloud-provider", action="store", type="string", dest="cloud_provider", default="" , help="Cloud being utilized, azure or aws, etc...") parser.add_option("--original-ucs", action="store", type="string", dest="original_ucs", help="Original UCS file name") parser.add_option("--updated-ucs", action="store", type="string", dest="updated_ucs", default="updated.ucs", help="Modified UCS file name") parser.add_option("--extract-directory", action="store", type="string", dest="extract_dir", default="ucs_extract_dir", help="name of directory to extract to") parser.add_option("--original-ucs-ip", action="store", type="string", dest="original_ucs_ip", help="ip in original ucs") parser.add_option("--original-ucs-gateway", action="store", type="string", dest="original_ucs_gateway", help="gateway in original ucs") parser.add_option("--dest-ip", action="store", type="string", dest="dest_ip", help="ip of destination instance") parser.add_option("--dest-gateway", action="store", type="string", dest="dest_gateway", help="gateway of destination instance") (options, args) = parser.parse_args() # Set variables from options debug_level = options.debug_level cloud_provider = options.cloud_provider original_ucs = options.original_ucs updated_ucs = options.updated_ucs extract_ucs_dir = options.extract_dir original_ucs_ip = options.original_ucs_ip original_ucs_gateway = options.original_ucs_gateway dest_ip = options.dest_ip dest_gateway = options.dest_gateway if not original_ucs or not cloud_provider: print "Usage: " print (" ./%s --cloud-provider <aws | azure | gce> --original-ucs <ucs_filename>" % (sys.argv[0])) print (" ./%s --cloud-provider <aws | azure | gce> --original-ucs <ucs_filename> --updated-ucs <ucs_filename>" % (sys.argv[0])) print "ex. 
" print (" ./%s --cloud-provider aws --original-ucs original.ucs --updated-ucs updated.ucs" % (sys.argv[0])) sys.exit() # Open files tar_original = tarfile.open(original_ucs, "r:gz") tar_updated = tarfile.open(updated_ucs, "w:gz") try: tar_original.extractall(path=extract_ucs_dir) except IOError as e: sys.exit("I/O Error({0}): {1} - If the latest UCS file is corrupted and/or the current primary host is stuck at BECOMING_PRIMARY state, the corrupted UCS file needs to be deleted as well as primary host needs to be terminated to allow re-elect the new primary.'".format(e.errno, e.strerror)) bigip_base_file = "/config/bigip_base.conf" if cloud_provider == 'aws': gateway_file = "/config/partitions/LOCAL_ONLY/bigip.conf" else: gateway_file = "/config/bigip.conf" # Grab instance's hostname from UCS dest_hostname = get_hostname(bigip_base_file) original_hostname = get_hostname(extract_ucs_dir + bigip_base_file) # Grab instance's IP from UCS or local config file if not original_ucs_ip: original_ucs_ip = get_ip(extract_ucs_dir + bigip_base_file) if not original_ucs_gateway: original_ucs_gateway = get_gateway(extract_ucs_dir + gateway_file) if not dest_ip: if os.path.isfile(gateway_file): dest_ip = get_ip(bigip_base_file) else: dest_ip = get_local_ip() if not dest_gateway: if os.path.isfile(gateway_file): dest_gateway = get_gateway(gateway_file) else: dest_gateway = get_local_gateway() if debug_level > 0: print "original_hostname: " + original_hostname print "dest_hostname: " + dest_hostname print "original_ucs_ip: " + original_ucs_ip print "original_ucs_gateway: " + original_ucs_gateway print "dest_ip: " + dest_ip print "dest_gateway: " + dest_gateway # Fix string version of addresses with "-". ex. ip-10-0-11-151 original_ucs_ip_str = original_ucs_ip.replace(".", "-") dest_ip_str = dest_ip.replace(".", "-") if debug_level > 0: print "original_ucs_ip_str: " + original_ucs_ip_str print "dest_ip_str: " + dest_ip_str files_to_update = [ "/config/bigip_base.conf", "/config/bigip.conf", "/config/BigDB.dat", "/SPEC-Manifest" ] # Replace Gateway replace(extract_ucs_dir + gateway_file, original_ucs_gateway, dest_gateway) # Replace hostname, IP, String Versions in other files for f in files_to_update: filename = extract_ucs_dir + f if debug_level > 0: print "updating : " + filename replace(filename, original_ucs_ip, dest_ip) replace(filename, original_ucs_ip_str, dest_ip_str) if original_hostname and dest_hostname: replace(filename, original_hostname, dest_hostname) # Remove the cloud directory created by the template so we don't overwrite new code with old path_to_exclude = "/config/cloud/" + cloud_provider shutil.rmtree(extract_ucs_dir + path_to_exclude, ignore_errors=True) # Remove the cloud-libs private key info as you can't load a ucs w/ a passphrase shutil.rmtree(extract_ucs_dir + '/config/partitions/CloudLibsAutoscale', ignore_errors=True) shutil.rmtree(extract_ucs_dir + '/var/tmp/filestore_temp/files_d/CloudLibsAutoscale_d', ignore_errors=True) shutil.rmtree(extract_ucs_dir + '/config/partitions/CloudLibsLocal', ignore_errors=True) shutil.rmtree(extract_ucs_dir + '/var/tmp/filestore_temp/files_d/CloudLibsLocal_d', ignore_errors=True) # Remove the f5-cloud-libs local public keys as they won't match any private keys removeFiles(extract_ucs_dir + '/config/cloud/keys', 'cloudLocal*') # remove the dynad private key os.system("sed -i '/sys dynad key {/ { N ; /\\n[[:space:]]\+key[[:space:]]*\$M\$[^\\n]*/ { N; /\\n[[:space:]]*}/ { d } } }' " + extract_ucs_dir + "/config/bigip_base.conf") 
tar_updated.add(extract_ucs_dir, arcname='') tar_original.close() tar_updated.close() shutil.rmtree(extract_ucs_dir, ignore_errors=False, onerror=None) print "UCS Update Complete" print "Load UCS with command below:" print " tmsh load /sys ucs " + os.path.abspath(updated_ucs) + " no-license" # Leverage cfn-signal here # http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-signal.html if __name__ == "__main__": main()
F5Networks/f5-cloud-libs
scripts/update_autoscale_ucs.py
Python
apache-2.0
10,718
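An illustration-only sketch of the substitution rule replace() applies to addresses: a match counts only when the pattern ends the line or is followed by '/' (or '.' for the dashed hostname form), which prevents 10.0.1.21 from matching inside 10.0.1.219. Note the dots in the pattern are regex metacharacters; the re.escape() call here is an added precaution and is not in the original.

import re

line = "    address 10.0.1.219/24"
pattern, substring = re.escape("10.0.1.219"), "10.0.2.57"
line = re.sub("(%s)$" % pattern, substring, line)
line = re.sub("(%s)\/" % pattern, substring + "/", line)
# line == "    address 10.0.2.57/24"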
""" Copyright 2012 Ali Ok (aliokATapacheDOTorg) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ class IndexController(object): def __init__(self, index_view, dbmanager): """ @type index_view: IndexView @type dbmanager: DbManager """ self.index_view = index_view self.dbmanager = dbmanager def go_home(self): corpora_cursor = self.dbmanager.get_all_corpora() for corpus in corpora_cursor: corpus_id = corpus['_id'] id_of_first_word = self.dbmanager.find_id_of_first_word_in_corpus(corpus_id) number_of_words = self.dbmanager.count_all(corpus_id) number_of_nonparsed_words = self.dbmanager.count_all_nonparsed(corpus_id) parse_percent = 100.0 - (float(number_of_nonparsed_words) / float(number_of_words) * 100.0) self.index_view.add_corpus(corpus, id_of_first_word, number_of_words, parse_percent)
aliok/trnltk
trnltk/morphology/learner/controller/indexcontroller.py
Python
apache-2.0
1,425
# Authors: # Jr Aquino <jr.aquino@citrixonline.com> # # Copyright (C) 2010 Red Hat # see file 'COPYING' for use and warranty information # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from ipalib import api from ipalib import Str from ipalib.plugable import Registry from .baseldap import ( LDAPObject, LDAPCreate, LDAPDelete, LDAPUpdate, LDAPSearch, LDAPRetrieve, LDAPAddMember, LDAPRemoveMember) from ipalib import _, ngettext __doc__ = _(""" Groups of Sudo Commands Manage groups of Sudo Commands. EXAMPLES: Add a new Sudo Command Group: ipa sudocmdgroup-add --desc='administrators commands' admincmds Remove a Sudo Command Group: ipa sudocmdgroup-del admincmds Manage Sudo Command Group membership, commands: ipa sudocmdgroup-add-member --sudocmds=/usr/bin/less --sudocmds=/usr/bin/vim admincmds Manage Sudo Command Group membership, commands: ipa sudocmdgroup-remove-member --sudocmds=/usr/bin/less admincmds Show a Sudo Command Group: ipa sudocmdgroup-show admincmds """) register = Registry() topic = 'sudo' @register() class sudocmdgroup(LDAPObject): """ Sudo Command Group object. """ container_dn = api.env.container_sudocmdgroup object_name = _('sudo command group') object_name_plural = _('sudo command groups') object_class = ['ipaobject', 'ipasudocmdgrp'] permission_filter_objectclasses = ['ipasudocmdgrp'] default_attributes = [ 'cn', 'description', 'member', ] uuid_attribute = 'ipauniqueid' attribute_members = { 'member': ['sudocmd'], } managed_permissions = { 'System: Read Sudo Command Groups': { 'replaces_global_anonymous_aci': True, 'ipapermbindruletype': 'all', 'ipapermright': {'read', 'search', 'compare'}, 'ipapermdefaultattr': { 'businesscategory', 'cn', 'description', 'ipauniqueid', 'member', 'o', 'objectclass', 'ou', 'owner', 'seealso', 'memberuser', 'memberhost', }, }, 'System: Add Sudo Command Group': { 'ipapermright': {'add'}, 'replaces': [ '(target = "ldap:///cn=*,cn=sudocmdgroups,cn=sudo,$SUFFIX")(version 3.0;acl "permission:Add Sudo command group";allow (add) groupdn = "ldap:///cn=Add Sudo command group,cn=permissions,cn=pbac,$SUFFIX";)', ], 'default_privileges': {'Sudo Administrator'}, }, 'System: Delete Sudo Command Group': { 'ipapermright': {'delete'}, 'replaces': [ '(target = "ldap:///cn=*,cn=sudocmdgroups,cn=sudo,$SUFFIX")(version 3.0;acl "permission:Delete Sudo command group";allow (delete) groupdn = "ldap:///cn=Delete Sudo command group,cn=permissions,cn=pbac,$SUFFIX";)', ], 'default_privileges': {'Sudo Administrator'}, }, 'System: Modify Sudo Command Group': { 'ipapermright': {'write'}, 'ipapermdefaultattr': {'description'}, 'default_privileges': {'Sudo Administrator'}, }, 'System: Manage Sudo Command Group Membership': { 'ipapermright': {'write'}, 'ipapermdefaultattr': {'member'}, 'replaces': [ '(targetattr = "member")(target = "ldap:///cn=*,cn=sudocmdgroups,cn=sudo,$SUFFIX")(version 3.0;acl "permission:Manage Sudo command group membership";allow (write) groupdn = "ldap:///cn=Manage Sudo command 
group membership,cn=permissions,cn=pbac,$SUFFIX";)', ], 'default_privileges': {'Sudo Administrator'}, }, } label = _('Sudo Command Groups') label_singular = _('Sudo Command Group') takes_params = ( Str('cn', cli_name='sudocmdgroup_name', label=_('Sudo Command Group'), primary_key=True, normalizer=lambda value: value.lower(), ), Str('description?', cli_name='desc', label=_('Description'), doc=_('Group description'), ), Str('membercmd_sudocmd?', label=_('Commands'), flags=['no_create', 'no_update', 'no_search'], ), Str('membercmd_sudocmdgroup?', label=_('Sudo Command Groups'), flags=['no_create', 'no_update', 'no_search'], ), ) @register() class sudocmdgroup_add(LDAPCreate): __doc__ = _('Create new Sudo Command Group.') msg_summary = _('Added Sudo Command Group "%(value)s"') @register() class sudocmdgroup_del(LDAPDelete): __doc__ = _('Delete Sudo Command Group.') msg_summary = _('Deleted Sudo Command Group "%(value)s"') @register() class sudocmdgroup_mod(LDAPUpdate): __doc__ = _('Modify Sudo Command Group.') msg_summary = _('Modified Sudo Command Group "%(value)s"') @register() class sudocmdgroup_find(LDAPSearch): __doc__ = _('Search for Sudo Command Groups.') msg_summary = ngettext( '%(count)d Sudo Command Group matched', '%(count)d Sudo Command Groups matched', 0 ) @register() class sudocmdgroup_show(LDAPRetrieve): __doc__ = _('Display Sudo Command Group.') @register() class sudocmdgroup_add_member(LDAPAddMember): __doc__ = _('Add members to Sudo Command Group.') @register() class sudocmdgroup_remove_member(LDAPRemoveMember): __doc__ = _('Remove members from Sudo Command Group.')
apophys/freeipa
ipaserver/plugins/sudocmdgroup.py
Python
gpl-3.0
6,037
import xml.etree.ElementTree as ET
import csv
import sys
import os

#
# convert Treasury Direct Yield XML File to a CSV File
# wget -O ust.xml "http://data.treasury.gov/feed.svc/DailyTreasuryYieldCurveRateData?$filter=year(NEW_DATE)%20eq%202012"
#

def __clean(child):
    # the tag carries an XML namespace prefix that is unnecessary here
    return child.tag.split('}')[-1]

def __reset_data_types(d, fieldnames):
    for f in fieldnames:
        if f == 'date':
            d[f] = int(d[f].split("T")[0].replace("-", ""))
        else:
            d[f] = float(d[f])

def __is_valid(d, fieldnames):
    for f in fieldnames:
        if f not in d:
            return False
    return True

def __create_node_dict(node, fieldnames):
    d = {}
    for child in node.getchildren():
        tag = __clean(child)
        tag = tag.split('BC_')[-1]
        tag = tag.replace('YEAR', 'y')
        tag = tag.replace('MONTH', 'm')
        tag = tag.replace('NEW_DATE', 'date')
        tag = tag.replace('DISPLAY', 'd')
        if tag in fieldnames:
            d[tag] = child.text if child.text is not None else 0
    return d

def __parse(file_name, fieldnames):
    tree = ET.parse(file_name)
    root = tree.getroot()
    series = []
    for c0 in root.getchildren():
        if __clean(c0) == 'entry':
            for c1 in c0.getchildren():
                if __clean(c1) == 'content':
                    for c2 in c1.getchildren():
                        d = __create_node_dict(c2, fieldnames)
                        if not __is_valid(d, fieldnames):
                            continue
                        __reset_data_types(d, fieldnames)
                        series.append(d)
    return sorted(series, key=lambda x: x['date'])

def __write_csv(csvfile, series, fieldnames):
    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
    writer.writeheader()
    for s in series:
        writer.writerow(s)

def parse_xml_save_csv(input_xml_file, output_csv_file):
    csvfile = open(output_csv_file, "w")
    # include the file name in the message (the original format string had no placeholder)
    assert csvfile, "unable to create output csv file {}".format(output_csv_file)
    fieldnames = ['date', '3m', '6m', '1y', '2y', '3y', '5y', '7y', '10y', '30y']
    __write_csv(csvfile, __parse(input_xml_file, fieldnames), fieldnames)

def main():
    assert (len(sys.argv) == 3), "usage : <input_xml_file> <output_csv_file>"
    input_xml_file, output_csv_file = sys.argv[1], sys.argv[2]
    assert os.path.exists(input_xml_file), "input xml file {} does not exist".format(input_xml_file)
    if not os.path.exists(output_csv_file):
        parse_xml_save_csv(input_xml_file, output_csv_file)
    else:
        print("output csv file already exists {}, remove or rename before running this".format(output_csv_file))

if __name__ == '__main__':
    main()
jrrpanix/master
examples/python/apps/tsyXMLtoCSV.py
Python
gpl-3.0
2,716
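A short usage sketch; ust.xml is the file produced by the wget command quoted at the top of the script, and the output file name is arbitrary.

from tsyXMLtoCSV import parse_xml_save_csv

parse_xml_save_csv("ust.xml", "ust.csv")
# ust.csv starts with the header: date,3m,6m,1y,2y,3y,5y,7y,10y,30y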
#!/usr/bin/env python # # This file is protected by Copyright. Please refer to the COPYRIGHT file # distributed with this source distribution. # # This file is part of GNUHAWK. # # GNUHAWK is free software: you can redistribute it and/or modify is under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # # GNUHAWK is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along with # this program. If not, see http://www.gnu.org/licenses/. # import unittest import ossie.utils.testing import os from omniORB import any class ComponentTests(ossie.utils.testing.ScaComponentTestCase): """Test for all component implementations in max_ff_1i""" def testScaBasicBehavior(self): ####################################################################### # Launch the component with the default execparams execparams = self.getPropertySet(kinds=("execparam",), modes=("readwrite", "writeonly"), includeNil=False) execparams = dict([(x.id, any.from_any(x.value)) for x in execparams]) self.launch(execparams) ####################################################################### # Verify the basic state of the component self.assertNotEqual(self.comp, None) self.assertEqual(self.comp.ref._non_existent(), False) self.assertEqual(self.comp.ref._is_a("IDL:CF/Resource:1.0"), True) ####################################################################### # Validate that query returns all expected parameters # Query of '[]' should return the following set of properties expectedProps = [] expectedProps.extend(self.getPropertySet(kinds=("configure", "execparam"), modes=("readwrite", "readonly"), includeNil=True)) expectedProps.extend(self.getPropertySet(kinds=("allocate",), action="external", includeNil=True)) props = self.comp.query([]) props = dict((x.id, any.from_any(x.value)) for x in props) # Query may return more than expected, but not less for expectedProp in expectedProps: self.assertEquals(props.has_key(expectedProp.id), True) ####################################################################### # Verify that all expected ports are available for port in self.scd.get_componentfeatures().get_ports().get_uses(): port_obj = self.comp.getPort(str(port.get_usesname())) self.assertNotEqual(port_obj, None) self.assertEqual(port_obj._non_existent(), False) self.assertEqual(port_obj._is_a("IDL:CF/Port:1.0"), True) for port in self.scd.get_componentfeatures().get_ports().get_provides(): port_obj = self.comp.getPort(str(port.get_providesname())) self.assertNotEqual(port_obj, None) self.assertEqual(port_obj._non_existent(), False) self.assertEqual(port_obj._is_a(port.get_repid()), True) ####################################################################### # Make sure start and stop can be called without throwing exceptions self.comp.start() self.comp.stop() ####################################################################### # Simulate regular component shutdown self.comp.releaseObject() # TODO Add additional tests here # # See: # ossie.utils.bulkio.bulkio_helpers, # ossie.utils.bluefile.bluefile_helpers # for modules that will assist with testing components with BULKIO ports if __name__ == "__main__": ossie.utils.testing.main("../max_ff_1i.spd.xml") # By default tests all 
implementations
RedhawkSDR/integration-gnuhawk
components/max_ff_1i/tests/test_max_ff_1i.py
Python
gpl-3.0
4,061
#!/opt/zenoss/bin/python
DEBUG_flag = False
import os
import sys
import re
import urllib2
import json
import time
import datetime

def help():
    print "Usage:"
    print "hadoop.py -url=SOURCE_JSON_URL -t MAX_TIME_SINCE_LAST_UPDATE_IN_MINUTES (optional)"
    sys.exit(3)

if (len(sys.argv) >= 2):
    if (len(sys.argv) == 2):
        args_str = sys.argv[1]
    else:
        args_str = sys.argv[1] + " " + sys.argv[2]
    args_str = args_str.lower()
    if ("-url=" in args_str):
        url_str = sys.argv[1].replace("-url=", "")
        if ("http://" not in url_str.lower()):
            url_str = "http://" + url_str
        time_str = False
        if ("-t" in args_str):
            time_str = sys.argv[2].replace("-t", "")
        # Main code
        try:
            request = urllib2.urlopen(url_str)
            content = request.read()
            jsondict = json.loads(content)
            if (jsondict["status"].lower() == "ok"):
                # Check the top-level timestamp if a limit was given
                if (time_str != False):
                    time_from_json = time.strptime(jsondict["updated"], "%Y-%m-%d %H:%M:%S")
                    time_now = time.localtime()
                    time_from_json = datetime.datetime(*time_from_json[0:6])
                    time_now = datetime.datetime(*time_now[0:6])
                    time_delta = time_now - time_from_json
                    if (time_delta.days > 0 or ((time_delta.seconds / 60) > int(time_str))):
                        print "WARNING - Time since last update is greater than %s minutes" % str(time_str)
                        sys.exit(1)
                subcomponents_lst = jsondict["subcomponents"]
                for component in subcomponents_lst:
                    if (component["status"].lower() == "ok"):
                        # The subcomponent status is ok, so check its timestamp if needed
                        if (time_str != False):
                            time_from_json = time.strptime(component["updated"], "%Y-%m-%d %H:%M:%S")
                            time_now = time.localtime()
                            time_from_json = datetime.datetime(*time_from_json[0:6])
                            time_now = datetime.datetime(*time_now[0:6])
                            time_delta = time_now - time_from_json
                            if (time_delta.days > 0 or ((time_delta.seconds / 60) > int(time_str))):
                                print 'WARNING - Component \"%s\" has time since last update which is greater than %s minutes' % (component["name"], str(time_str))
                                sys.exit(1)
                    else:
                        print 'WARNING - component \"%s\" has status \"%s\" and message: %s' % (component["name"], component["status"], component["message"])
                        # exit 1 so the code matches the WARNING label (Nagios: 1 = WARNING)
                        sys.exit(1)
                # If we reach this point, everything is okay.
                # Collect the memory value reported by the last subcomponent.
                mem_component = subcomponents_lst[len(subcomponents_lst) - 1]
                if (mem_component["name"].lower() == "mem"):
                    # Output is okay
                    print "OK - mem: %s" % (mem_component["message"].replace("k", ""))
                else:
                    print "CRITICAL - Source URL has wrong content"
                    # exit 2 so the code matches the CRITICAL label (Nagios: 2 = CRITICAL)
                    sys.exit(2)
            else:
                print 'WARNING - Hadoop: %s' % jsondict["message"]
                sys.exit(1)
        except (urllib2.HTTPError, urllib2.URLError), e:
            print 'UNKNOWN - %s' % e
            sys.exit(3)
    ##########
    else:
        print "UNKNOWN - Wrong arguments"
        help()
        sys.exit(3)
else:
    print "UNKNOWN - Not enough arguments"
    sys.exit(3)
thomasvincent/utilities
NagiosPlugins/check_hadoop/hadoop.py
Python
apache-2.0
3,764
"""URL Request utilities.""" from __future__ import unicode_literals from django.utils.six.moves.urllib.request import Request as BaseURLRequest class URLRequest(BaseURLRequest): """A request that can use any HTTP method. By default, the :py:class:`urllib2.Request` class only supports HTTP GET and HTTP POST methods. This subclass allows for any HTTP method to be specified for the request. """ def __init__(self, url, body='', headers=None, method='GET'): """ Initialize the URLRequest. Args: url (unicode): The URL to make the request against. body (unicode or bytes): The content of the request. headers (dict, optional): Additional headers to attach to the request. method (unicode, optional): The request method. If not provided, it defaults to a ``GET`` request. """ BaseURLRequest.__init__(self, url, body, headers or {}) self.method = method def get_method(self): """Return the HTTP method of the request. Returns: unicode: The HTTP method of the request. """ return self.method
reviewboard/rbintegrations
rbintegrations/util/urlrequest.py
Python
mit
1,260
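A usage sketch showing the class's purpose, issuing a method a plain Request would not send; the URL and payload are hypothetical.

from django.utils.six.moves.urllib.request import urlopen
from rbintegrations.util.urlrequest import URLRequest

request = URLRequest(
    'https://example.com/api/items/1/',
    body='{"name": "value"}',
    headers={'Content-Type': 'application/json'},
    method='PUT')
response = urlopen(request)   # the opener calls get_method() and sends a PUT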
""" Django settings for my_blog project. Generated by 'django-admin startproject' using Django 1.10.1. For more information on this file, see https://docs.djangoproject.com/en/1.10/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.10/ref/settings/ """ import os import dj_database_url # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'c%rtl)-6sri+^%#zdm@!5r-=&732iztq*on6byc(5^grelw%fi' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'bootstrap_admin', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'article', ] DAB_FIELD_RENDERER = 'django_admin_bootstrapped.renderers.BootstrapFieldRenderer' #from django.conf import global_settings #TEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS + ( # 'django.core.context_processors.request', #) BOOTSTRAP_ADMIN_SIDEBAR_MENU = True MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'my_blog.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR, 'templates')], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'my_blog.wsgi.application' # Database # https://docs.djangoproject.com/en/1.10/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.10/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = False # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.10/howto/static-files/ STATIC_URL = '/static/' #STATIC_ROOT = os.path.join(BASE_DIR,'static/') STATICFILES_DIRS = [ os.path.join(BASE_DIR, "static"), ]
spark8103/my_blog_tutorial
my_blog/settings.py
Python
mit
3,594
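The settings module imports dj_database_url but never calls it; a common hookup is sketched below. The DATABASE_URL handling is an assumption for illustration, not in the source.

import dj_database_url

# Override the sqlite default when a DATABASE_URL environment variable is set;
# config() returns an empty dict when it is not.
db_from_env = dj_database_url.config(conn_max_age=500)
if db_from_env:
    DATABASES['default'] = db_from_env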
# -*- coding: utf-8 -*- # https://gist.github.com/114831 # recursive_dictionary.py # Created 2009-05-20 by Jannis Andrija Schnitzer. # # Copyright (c) 2009 Jannis Andrija Schnitzer # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. from __future__ import absolute_import from django.utils import six __author__ = 'jannis@itisme.org (Jannis Andrija Schnitzer)' class RecursiveDictionary(dict): """RecursiveDictionary provides the methods rec_update and iter_rec_update that can be used to update member dictionaries rather than overwriting them.""" def rec_update(self, other, **third): """Recursively update the dictionary with the contents of other and third like dict.update() does - but don't overwrite sub-dictionaries. Example: >>> d = RecursiveDictionary({'foo': {'bar': 42}}) >>> d.rec_update({'foo': {'baz': 36}}) >>> d {'foo': {'baz': 36, 'bar': 42}} """ try: iterator = six.iteritems(other) except AttributeError: iterator = other self.iter_rec_update(iterator) self.iter_rec_update(six.iteritems(third)) def iter_rec_update(self, iterator): for (key, value) in iterator: if key in self and\ isinstance(self[key], dict) and isinstance(value, dict): self[key] = RecursiveDictionary(self[key]) self[key].rec_update(value) else: self[key] = value # changed version class RecursiveDictionaryWithExcludes(RecursiveDictionary): """ Same as RecursiveDictionary, but respects a list of keys that should be excluded from recursion and handled like a normal dict.update() """ def __init__(self, *args, **kwargs): self.rec_excluded_keys = kwargs.pop('rec_excluded_keys', ()) super(RecursiveDictionaryWithExcludes, self).__init__(*args, **kwargs) def iter_rec_update(self, iterator): for (key, value) in iterator: if key in self and\ isinstance(self[key], dict) and isinstance(value, dict) and\ key not in self.rec_excluded_keys: self[key] = RecursiveDictionaryWithExcludes(self[key], rec_excluded_keys=self.rec_excluded_keys) self[key].rec_update(value) else: self[key] = value
skirsdeda/django-filer
filer/utils/recursive_dictionary.py
Python
bsd-3-clause
3,401
# Orca # # Copyright 2005-2009 Sun Microsystems Inc. # Copyright 2010-2013 The Orca Team. # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the # Free Software Foundation, Inc., Franklin Street, Fifth Floor, # Boston MA 02110-1301 USA. """Custom script for LibreOffice.""" __id__ = "$Id$" __version__ = "$Revision$" __date__ = "$Date$" __copyright__ = "Copyright (c) 2005-2009 Sun Microsystems Inc." \ "Copyright (c) 2010-2013 The Orca Team." __license__ = "LGPL" from gi.repository import Gtk import pyatspi import orca.cmdnames as cmdnames import orca.debug as debug import orca.scripts.default as default import orca.guilabels as guilabels import orca.keybindings as keybindings import orca.input_event as input_event import orca.messages as messages import orca.orca as orca import orca.orca_state as orca_state import orca.settings as settings import orca.settings_manager as settings_manager import orca.structural_navigation as structural_navigation from .braille_generator import BrailleGenerator from .formatting import Formatting from .script_utilities import Utilities from .spellcheck import SpellCheck from .speech_generator import SpeechGenerator _settingsManager = settings_manager.getManager() class Script(default.Script): def __init__(self, app): """Creates a new script for the given application. Arguments: - app: the application to create a script for. """ default.Script.__init__(self, app) self.speakSpreadsheetCoordinatesCheckButton = None self.alwaysSpeakSelectedSpreadsheetRangeCheckButton = None self.skipBlankCellsCheckButton = None self.speakCellCoordinatesCheckButton = None self.speakCellHeadersCheckButton = None self.speakCellSpanCheckButton = None # The spreadsheet input line. # self.inputLineForCell = None # Dictionaries for the calc and writer dynamic row and column headers. # self.dynamicColumnHeaders = {} self.dynamicRowHeaders = {} def getBrailleGenerator(self): """Returns the braille generator for this script. """ return BrailleGenerator(self) def getSpeechGenerator(self): """Returns the speech generator for this script. """ return SpeechGenerator(self) def getSpellCheck(self): """Returns the spellcheck for this script.""" return SpellCheck(self) def getFormatting(self): """Returns the formatting strings for this script.""" return Formatting(self) def getUtilities(self): """Returns the utilites for this script.""" return Utilities(self) def getStructuralNavigation(self): """Returns the 'structural navigation' class for this script. """ types = self.getEnabledStructuralNavigationTypes() return structural_navigation.StructuralNavigation(self, types, enabled=False) def getEnabledStructuralNavigationTypes(self): """Returns a list of the structural navigation object types enabled in this script. 
""" enabledTypes = [structural_navigation.StructuralNavigation.TABLE_CELL] return enabledTypes def setupInputEventHandlers(self): """Defines InputEventHandler fields for this script that can be called by the key and braille bindings. In this particular case, we just want to be able to add a handler to return the contents of the input line. """ default.Script.setupInputEventHandlers(self) self.inputEventHandlers.update( self.structuralNavigation.inputEventHandlers) self.inputEventHandlers["presentInputLineHandler"] = \ input_event.InputEventHandler( Script.presentInputLine, cmdnames.PRESENT_INPUT_LINE) self.inputEventHandlers["setDynamicColumnHeadersHandler"] = \ input_event.InputEventHandler( Script.setDynamicColumnHeaders, cmdnames.DYNAMIC_COLUMN_HEADER_SET) self.inputEventHandlers["clearDynamicColumnHeadersHandler"] = \ input_event.InputEventHandler( Script.clearDynamicColumnHeaders, cmdnames.DYNAMIC_COLUMN_HEADER_CLEAR) self.inputEventHandlers["setDynamicRowHeadersHandler"] = \ input_event.InputEventHandler( Script.setDynamicRowHeaders, cmdnames.DYNAMIC_ROW_HEADER_SET) self.inputEventHandlers["clearDynamicRowHeadersHandler"] = \ input_event.InputEventHandler( Script.clearDynamicRowHeaders, cmdnames.DYNAMIC_ROW_HEADER_CLEAR) self.inputEventHandlers["panBrailleLeftHandler"] = \ input_event.InputEventHandler( Script.panBrailleLeft, cmdnames.PAN_BRAILLE_LEFT, False) # Do not enable learn mode for this action self.inputEventHandlers["panBrailleRightHandler"] = \ input_event.InputEventHandler( Script.panBrailleRight, cmdnames.PAN_BRAILLE_RIGHT, False) # Do not enable learn mode for this action self.inputEventHandlers["whereAmISelectionHandler"] = \ input_event.InputEventHandler( Script.whereAmISelection, cmdnames.WHERE_AM_I_SELECTION) def getAppKeyBindings(self): """Returns the application-specific keybindings for this script.""" keyBindings = keybindings.KeyBindings() keyBindings.add( keybindings.KeyBinding( "a", keybindings.defaultModifierMask, keybindings.ORCA_MODIFIER_MASK, self.inputEventHandlers["presentInputLineHandler"])) keyBindings.add( keybindings.KeyBinding( "r", keybindings.defaultModifierMask, keybindings.ORCA_MODIFIER_MASK, self.inputEventHandlers["setDynamicColumnHeadersHandler"], 1)) keyBindings.add( keybindings.KeyBinding( "r", keybindings.defaultModifierMask, keybindings.ORCA_MODIFIER_MASK, self.inputEventHandlers["clearDynamicColumnHeadersHandler"], 2)) keyBindings.add( keybindings.KeyBinding( "c", keybindings.defaultModifierMask, keybindings.ORCA_MODIFIER_MASK, self.inputEventHandlers["setDynamicRowHeadersHandler"], 1)) keyBindings.add( keybindings.KeyBinding( "c", keybindings.defaultModifierMask, keybindings.ORCA_MODIFIER_MASK, self.inputEventHandlers["clearDynamicRowHeadersHandler"], 2)) bindings = self.structuralNavigation.keyBindings for keyBinding in bindings.keyBindings: keyBindings.add(keyBinding) return keyBindings def getAppPreferencesGUI(self): """Return a GtkGrid containing the application unique configuration GUI items for the current application.""" grid = Gtk.Grid() grid.set_border_width(12) label = guilabels.SPREADSHEET_SPEAK_CELL_COORDINATES value = _settingsManager.getSetting('speakSpreadsheetCoordinates') self.speakSpreadsheetCoordinatesCheckButton = \ Gtk.CheckButton.new_with_mnemonic(label) self.speakSpreadsheetCoordinatesCheckButton.set_active(value) grid.attach(self.speakSpreadsheetCoordinatesCheckButton, 0, 0, 1, 1) label = guilabels.SPREADSHEET_SPEAK_SELECTED_RANGE value = _settingsManager.getSetting('alwaysSpeakSelectedSpreadsheetRange') 
self.alwaysSpeakSelectedSpreadsheetRangeCheckButton = \ Gtk.CheckButton.new_with_mnemonic(label) self.alwaysSpeakSelectedSpreadsheetRangeCheckButton.set_active(value) grid.attach(self.alwaysSpeakSelectedSpreadsheetRangeCheckButton, 0, 1, 1, 1) tableFrame = Gtk.Frame() grid.attach(tableFrame, 0, 2, 1, 1) label = Gtk.Label(label="<b>%s</b>" % guilabels.TABLE_NAVIGATION) label.set_use_markup(True) tableFrame.set_label_widget(label) tableAlignment = Gtk.Alignment.new(0.5, 0.5, 1, 1) tableAlignment.set_padding(0, 0, 12, 0) tableFrame.add(tableAlignment) tableGrid = Gtk.Grid() tableAlignment.add(tableGrid) label = guilabels.TABLE_SPEAK_CELL_COORDINATES value = _settingsManager.getSetting('speakCellCoordinates') self.speakCellCoordinatesCheckButton = \ Gtk.CheckButton.new_with_mnemonic(label) self.speakCellCoordinatesCheckButton.set_active(value) tableGrid.attach(self.speakCellCoordinatesCheckButton, 0, 0, 1, 1) label = guilabels.TABLE_SPEAK_CELL_SPANS value = _settingsManager.getSetting('speakCellSpan') self.speakCellSpanCheckButton = \ Gtk.CheckButton.new_with_mnemonic(label) self.speakCellSpanCheckButton.set_active(value) tableGrid.attach(self.speakCellSpanCheckButton, 0, 1, 1, 1) label = guilabels.TABLE_ANNOUNCE_CELL_HEADER value = _settingsManager.getSetting('speakCellHeaders') self.speakCellHeadersCheckButton = \ Gtk.CheckButton.new_with_mnemonic(label) self.speakCellHeadersCheckButton.set_active(value) tableGrid.attach(self.speakCellHeadersCheckButton, 0, 2, 1, 1) label = guilabels.TABLE_SKIP_BLANK_CELLS value = _settingsManager.getSetting('skipBlankCells') self.skipBlankCellsCheckButton = \ Gtk.CheckButton.new_with_mnemonic(label) self.skipBlankCellsCheckButton.set_active(value) tableGrid.attach(self.skipBlankCellsCheckButton, 0, 3, 1, 1) spellcheck = self.spellcheck.getAppPreferencesGUI() grid.attach(spellcheck, 0, len(grid.get_children()), 1, 1) grid.show_all() return grid def getPreferencesFromGUI(self): """Returns a dictionary with the app-specific preferences.""" prefs = { 'speakCellSpan': self.speakCellSpanCheckButton.get_active(), 'speakCellHeaders': self.speakCellHeadersCheckButton.get_active(), 'skipBlankCells': self.skipBlankCellsCheckButton.get_active(), 'speakCellCoordinates': self.speakCellCoordinatesCheckButton.get_active(), 'speakSpreadsheetCoordinates': self.speakSpreadsheetCoordinatesCheckButton.get_active(), 'alwaysSpeakSelectedSpreadsheetRange': self.alwaysSpeakSelectedSpreadsheetRangeCheckButton.get_active(), } prefs.update(self.spellcheck.getPreferencesFromGUI()) return prefs def panBrailleLeft(self, inputEvent=None, panAmount=0): """In document content, we want to use the panning keys to browse the entire document. 
""" if self.flatReviewContext \ or not self.isBrailleBeginningShowing() \ or self.utilities.isSpreadSheetCell(orca_state.locusOfFocus) \ or not self.utilities.isTextArea(orca_state.locusOfFocus): return default.Script.panBrailleLeft(self, inputEvent, panAmount) text = orca_state.locusOfFocus.queryText() string, startOffset, endOffset = text.getTextAtOffset( text.caretOffset, pyatspi.TEXT_BOUNDARY_LINE_START) if 0 < startOffset: text.setCaretOffset(startOffset-1) return True obj = self.utilities.findPreviousObject(orca_state.locusOfFocus) try: text = obj.queryText() except: pass else: orca.setLocusOfFocus(None, obj, notifyScript=False) text.setCaretOffset(text.characterCount) return True return default.Script.panBrailleLeft(self, inputEvent, panAmount) def panBrailleRight(self, inputEvent=None, panAmount=0): """In document content, we want to use the panning keys to browse the entire document. """ if self.flatReviewContext \ or not self.isBrailleEndShowing() \ or self.utilities.isSpreadSheetCell(orca_state.locusOfFocus) \ or not self.utilities.isTextArea(orca_state.locusOfFocus): return default.Script.panBrailleRight(self, inputEvent, panAmount) text = orca_state.locusOfFocus.queryText() string, startOffset, endOffset = text.getTextAtOffset( text.caretOffset, pyatspi.TEXT_BOUNDARY_LINE_START) if endOffset < text.characterCount: text.setCaretOffset(endOffset) return True obj = self.utilities.findNextObject(orca_state.locusOfFocus) try: text = obj.queryText() except: pass else: orca.setLocusOfFocus(None, obj, notifyScript=False) text.setCaretOffset(0) return True return default.Script.panBrailleRight(self, inputEvent, panAmount) def presentInputLine(self, inputEvent): """Presents the contents of the spread sheet input line (assuming we have a handle to it - generated when we first focus on a spread sheet table cell. This will be either the contents of the table cell that has focus or the formula associated with it. Arguments: - inputEvent: if not None, the input event that caused this action. """ if not self.utilities.isSpreadSheetCell(orca_state.locusOfFocus): return inputLine = self.utilities.locateInputLine(orca_state.locusOfFocus) if not inputLine: return text = self.utilities.displayedText(inputLine) if not text: text = messages.EMPTY self.presentMessage(text) def setDynamicColumnHeaders(self, inputEvent): """Set the row for the dynamic header columns to use when speaking calc cell entries. In order to set the row, the user should first set focus to the row that they wish to define and then press Insert-r. Once the user has defined the row, it will be used to first speak this header when moving between columns. Arguments: - inputEvent: if not None, the input event that caused this action. """ cell = orca_state.locusOfFocus if cell and cell.parent.getRole() == pyatspi.ROLE_TABLE_CELL: cell = cell.parent row, column, table = self.utilities.getRowColumnAndTable(cell) if table: self.dynamicColumnHeaders[hash(table)] = row self.presentMessage(messages.DYNAMIC_COLUMN_HEADER_SET % (row+1)) return True def clearDynamicColumnHeaders(self, inputEvent): """Clear the dynamic header column. Arguments: - inputEvent: if not None, the input event that caused this action. 
""" cell = orca_state.locusOfFocus if cell and cell.parent.getRole() == pyatspi.ROLE_TABLE_CELL: cell = cell.parent row, column, table = self.utilities.getRowColumnAndTable(cell) try: del self.dynamicColumnHeaders[hash(table)] self.presentationInterrupt() self.presentMessage(messages.DYNAMIC_COLUMN_HEADER_CLEARED) except: pass return True def setDynamicRowHeaders(self, inputEvent): """Set the column for the dynamic header rows to use when speaking calc cell entries. In order to set the column, the user should first set focus to the column that they wish to define and then press Insert-c. Once the user has defined the column, it will be used to first speak this header when moving between rows. Arguments: - inputEvent: if not None, the input event that caused this action. """ cell = orca_state.locusOfFocus if cell and cell.parent.getRole() == pyatspi.ROLE_TABLE_CELL: cell = cell.parent row, column, table = self.utilities.getRowColumnAndTable(cell) if table: self.dynamicRowHeaders[hash(table)] = column self.presentMessage( messages.DYNAMIC_ROW_HEADER_SET % self.utilities.columnConvert(column+1)) return True def clearDynamicRowHeaders(self, inputEvent): """Clear the dynamic row headers. Arguments: - inputEvent: if not None, the input event that caused this action. """ cell = orca_state.locusOfFocus if cell and cell.parent.getRole() == pyatspi.ROLE_TABLE_CELL: cell = cell.parent row, column, table = self.utilities.getRowColumnAndTable(cell) try: del self.dynamicRowHeaders[hash(table)] self.presentationInterrupt() self.presentMessage(messages.DYNAMIC_ROW_HEADER_CLEARED) except: pass return True def locusOfFocusChanged(self, event, oldLocusOfFocus, newLocusOfFocus): """Called when the visual object with focus changes. Arguments: - event: if not None, the Event that caused the change - oldLocusOfFocus: Accessible that is the old locus of focus - newLocusOfFocus: Accessible that is the new locus of focus """ # Check to see if this is this is for the find command. See # comment #18 of bug #354463. # if self.findCommandRun and \ event.type.startswith("object:state-changed:focused"): self.findCommandRun = False self.find() return if self.flatReviewContext: self.toggleFlatReviewMode() if self.spellcheck.isSuggestionsItem(newLocusOfFocus) \ and not self.spellcheck.isSuggestionsItem(oldLocusOfFocus): orca.emitRegionChanged(newFocus) self.updateBraille(newLocusOfFocus) self.spellcheck.presentSuggestionListItem(includeLabel=True) return # TODO - JD: Sad hack that wouldn't be needed if LO were fixed. # If we are in the slide presentation scroll pane, also announce # the current page tab. See bug #538056 for more details. # rolesList = [pyatspi.ROLE_SCROLL_PANE, pyatspi.ROLE_PANEL, pyatspi.ROLE_PANEL, pyatspi.ROLE_ROOT_PANE, pyatspi.ROLE_FRAME, pyatspi.ROLE_APPLICATION] if self.utilities.hasMatchingHierarchy(newLocusOfFocus, rolesList): for child in newLocusOfFocus.parent: if child.getRole() == pyatspi.ROLE_PAGE_TAB_LIST: for tab in child: eventState = tab.getState() if eventState.contains(pyatspi.STATE_SELECTED): self.presentObject(tab) # TODO - JD: This is a hack that needs to be done better. For now it # fixes the broken echo previous word on Return. 
elif newLocusOfFocus and oldLocusOfFocus \ and newLocusOfFocus.getRole() == pyatspi.ROLE_PARAGRAPH \ and oldLocusOfFocus.getRole() == pyatspi.ROLE_PARAGRAPH \ and newLocusOfFocus != oldLocusOfFocus: lastKey, mods = self.utilities.lastKeyAndModifiers() if lastKey == "Return" and _settingsManager.getSetting('enableEchoByWord'): self.echoPreviousWord(oldLocusOfFocus) return # TODO - JD: And this hack is another one that needs to be done better. # But this will get us to speak the entire paragraph when navigation by # paragraph has occurred. event_string, mods = self.utilities.lastKeyAndModifiers() isControlKey = mods & keybindings.CTRL_MODIFIER_MASK isShiftKey = mods & keybindings.SHIFT_MODIFIER_MASK if event_string in ["Up", "Down"] and isControlKey and not isShiftKey: string = self.utilities.displayedText(newLocusOfFocus) if string: voice = self.speechGenerator.voice(obj=newLocusOfFocus, string=string) self.speakMessage(string, voice=voice) self.updateBraille(newLocusOfFocus) try: text = newLocusOfFocus.queryText() except: pass else: self._saveLastCursorPosition(newLocusOfFocus, text.caretOffset) return # Pass the event onto the parent class to be handled in the default way. default.Script.locusOfFocusChanged(self, event, oldLocusOfFocus, newLocusOfFocus) if not newLocusOfFocus: return cell = None if self.utilities.isTextDocumentCell(newLocusOfFocus): cell = newLocusOfFocus elif self.utilities.isTextDocumentCell(newLocusOfFocus.parent): cell = newLocusOfFocus.parent if cell: row, column = self.utilities.coordinatesForCell(cell) self.pointOfReference['lastRow'] = row self.pointOfReference['lastColumn'] = column def onNameChanged(self, event): """Called whenever a property on an object changes. Arguments: - event: the Event """ if self.spellcheck.isCheckWindow(event.source): return # Impress slide navigation. # if self.utilities.isInImpress(event.source) \ and self.utilities.isDrawingView(event.source): title, position, count = \ self.utilities.slideTitleAndPosition(event.source) if title: title += "." msg = messages.PRESENTATION_SLIDE_POSITION % \ {"position" : position, "count" : count} msg = self.utilities.appendString(title, msg) self.presentMessage(msg) default.Script.onNameChanged(self, event) def onActiveChanged(self, event): """Callback for object:state-changed:active accessibility events.""" if not event.source.parent: msg = "SOFFICE: Event source lacks parent" debug.println(debug.LEVEL_INFO, msg, True) return # Prevent this events from activating the find operation. # See comment #18 of bug #354463. if self.findCommandRun: return default.Script.onActiveChanged(self, event) def onActiveDescendantChanged(self, event): """Called when an object who manages its own descendants detects a change in one of its children. Arguments: - event: the Event """ if self.utilities.isSameObject(event.any_data, orca_state.locusOfFocus): return if event.source == self.spellcheck.getSuggestionsList(): if event.source.getState().contains(pyatspi.STATE_FOCUSED): orca.setLocusOfFocus(event, event.any_data, False) self.updateBraille(orca_state.locusOfFocus) self.spellcheck.presentSuggestionListItem() else: self.spellcheck.presentErrorDetails() return if self.utilities.isSpreadSheetCell(event.any_data) \ and not event.any_data.getState().contains(pyatspi.STATE_FOCUSED) \ and not event.source.getState().contains(pyatspi.STATE_FOCUSED) : msg = "SOFFICE: Neither source nor child have focused state. Clearing cache on table." 
debug.println(debug.LEVEL_INFO, msg, True) event.source.clearCache() default.Script.onActiveDescendantChanged(self, event) def onChildrenAdded(self, event): """Callback for object:children-changed:add accessibility events.""" if self.utilities.isSpreadSheetCell(event.any_data): orca.setLocusOfFocus(event, event.any_data) return if self.utilities.isLastCell(event.any_data): activeRow = self.pointOfReference.get('lastRow', -1) activeCol = self.pointOfReference.get('lastColumn', -1) if activeRow < 0 or activeCol < 0: return if self.utilities.isDead(orca_state.locusOfFocus): orca.setLocusOfFocus(event, event.source, False) self.utilities.handleUndoTextEvent(event) rowCount, colCount = self.utilities.rowAndColumnCount(event.source) if activeRow == rowCount: full = messages.TABLE_ROW_DELETED_FROM_END brief = messages.TABLE_ROW_DELETED else: full = messages.TABLE_ROW_INSERTED_AT_END brief = messages.TABLE_ROW_INSERTED self.presentMessage(full, brief) return default.Script.onChildrenAdded(self, event) def onFocus(self, event): """Callback for focus: accessibility events.""" # NOTE: This event type is deprecated and Orca should no longer use it. # This callback remains just to handle bugs in applications and toolkits # during the remainder of the unstable (3.11) development cycle. if self.utilities.isSameObject(orca_state.locusOfFocus, event.source): return if self.utilities.isFocusableLabel(event.source): orca.setLocusOfFocus(event, event.source) return role = event.source.getRole() if self.utilities.isZombie(event.source) \ or role in [pyatspi.ROLE_TEXT, pyatspi.ROLE_LIST]: comboBox = self.utilities.containingComboBox(event.source) if comboBox: orca.setLocusOfFocus(event, comboBox, True) return # This seems to be something we inherit from Gtk+ if role in [pyatspi.ROLE_TEXT, pyatspi.ROLE_PASSWORD_TEXT]: orca.setLocusOfFocus(event, event.source) return # Ditto. if role == pyatspi.ROLE_PUSH_BUTTON: orca.setLocusOfFocus(event, event.source) return # Ditto. if role == pyatspi.ROLE_TOGGLE_BUTTON: orca.setLocusOfFocus(event, event.source) return # Ditto. if role == pyatspi.ROLE_COMBO_BOX: orca.setLocusOfFocus(event, event.source) return # Ditto. if role == pyatspi.ROLE_PANEL and event.source.name: orca.setLocusOfFocus(event, event.source) return def onFocusedChanged(self, event): """Callback for object:state-changed:focused accessibility events.""" if self._inSayAll: return if self._lastCommandWasStructNav: return if not event.detail1: return if self.utilities.isAnInputLine(event.source): msg = "SOFFICE: Event ignored: spam from inputLine" debug.println(debug.LEVEL_INFO, msg, True) return if event.source.childCount and self.utilities.isAnInputLine(event.source[0]): msg = "SOFFICE: Event ignored: spam from inputLine parent" debug.println(debug.LEVEL_INFO, msg, True) return role = event.source.getRole() if role in [pyatspi.ROLE_TEXT, pyatspi.ROLE_LIST]: comboBox = self.utilities.containingComboBox(event.source) if comboBox: orca.setLocusOfFocus(event, comboBox, True) return parent = event.source.parent if parent and parent.getRole() == pyatspi.ROLE_TOOL_BAR: default.Script.onFocusedChanged(self, event) return # TODO - JD: Verify this is still needed ignoreRoles = [pyatspi.ROLE_FILLER, pyatspi.ROLE_PANEL] if role in ignoreRoles: return # We will present this when the selection changes. 
if role == pyatspi.ROLE_MENU: return if self.utilities._flowsFromOrToSelection(event.source): return if role == pyatspi.ROLE_PARAGRAPH: obj, offset = self.pointOfReference.get("lastCursorPosition", (None, -1)) start, end, string = self.utilities.getCachedTextSelection(obj) if start != end: return keyString, mods = self.utilities.lastKeyAndModifiers() if keyString in ["Left", "Right"]: orca.setLocusOfFocus(event, event.source, False) return if self.utilities.isSpreadSheetTable(event.source) and orca_state.locusOfFocus: if self.utilities.isDead(orca_state.locusOfFocus): msg = "SOFFICE: Event believed to be post-editing focus claim. Dead locusOfFocus." debug.println(debug.LEVEL_INFO, msg, True) orca.setLocusOfFocus(event, event.source, False) return if orca_state.locusOfFocus.getRole() in [pyatspi.ROLE_PARAGRAPH, pyatspi.ROLE_TABLE_CELL]: msg = "SOFFICE: Event believed to be post-editing focus claim based on role." debug.println(debug.LEVEL_INFO, msg, True) orca.setLocusOfFocus(event, event.source, False) return default.Script.onFocusedChanged(self, event) def onCaretMoved(self, event): """Callback for object:text-caret-moved accessibility events.""" if event.detail1 == -1: return if event.source.getRole() == pyatspi.ROLE_PARAGRAPH \ and not event.source.getState().contains(pyatspi.STATE_FOCUSED): event.source.clearCache() if event.source.getState().contains(pyatspi.STATE_FOCUSED): msg = "SOFFICE: Clearing cache was needed due to missing state-changed event." debug.println(debug.LEVEL_INFO, msg, True) if self.utilities._flowsFromOrToSelection(event.source): return if self._lastCommandWasStructNav: return if self.utilities.isSpreadSheetCell(orca_state.locusOfFocus): msg = "SOFFICE: locusOfFocus %s is spreadsheet cell" % orca_state.locusOfFocus debug.println(debug.LEVEL_INFO, msg, True) if not self.utilities.isCellBeingEdited(event.source): msg = "SOFFICE: Event ignored: Source is not cell being edited." debug.println(debug.LEVEL_INFO, msg, True) return super().onCaretMoved(event) def onCheckedChanged(self, event): """Callback for object:state-changed:checked accessibility events.""" obj = event.source role = obj.getRole() parentRole = obj.parent.getRole() if not role in [pyatspi.ROLE_TOGGLE_BUTTON, pyatspi.ROLE_PUSH_BUTTON] \ or not parentRole == pyatspi.ROLE_TOOL_BAR: default.Script.onCheckedChanged(self, event) return sourceWindow = self.utilities.topLevelObject(obj) focusWindow = self.utilities.topLevelObject(orca_state.locusOfFocus) if sourceWindow != focusWindow: return # Announce when the toolbar buttons are toggled if we just toggled # them; not if we navigated to some text. 
weToggledIt = False if isinstance(orca_state.lastInputEvent, input_event.MouseButtonEvent): x = orca_state.lastInputEvent.x y = orca_state.lastInputEvent.y weToggledIt = obj.queryComponent().contains(x, y, 0) elif obj.getState().contains(pyatspi.STATE_FOCUSED): weToggledIt = True else: keyString, mods = self.utilities.lastKeyAndModifiers() navKeys = ["Up", "Down", "Left", "Right", "Page_Up", "Page_Down", "Home", "End", "N"] wasCommand = mods & keybindings.COMMAND_MODIFIER_MASK weToggledIt = wasCommand and keyString not in navKeys if weToggledIt: self.presentObject(obj, alreadyFocused=True, interrupt=True) def onSelectedChanged(self, event): """Callback for object:state-changed:selected accessibility events.""" full, brief = "", "" if self.utilities.isSelectedTextDeletionEvent(event): msg = "SOFFICE: Change is believed to be due to deleting selected text" debug.println(debug.LEVEL_INFO, msg, True) full = messages.SELECTION_DELETED elif self.utilities.isSelectedTextRestoredEvent(event): msg = "SOFFICE: Selection is believed to be due to restoring selected text" debug.println(debug.LEVEL_INFO, msg, True) if self.utilities.handleUndoTextEvent(event): full = messages.SELECTION_RESTORED if full or brief: self.presentMessage(full, brief) self.utilities.updateCachedTextSelection(event.source) return super().onSelectedChanged(event) def onSelectionChanged(self, event): """Callback for object:selection-changed accessibility events.""" if self.utilities.isSpreadSheetTable(event.source): if _settingsManager.getSetting('onlySpeakDisplayedText'): return if _settingsManager.getSetting('alwaysSpeakSelectedSpreadsheetRange'): self.utilities.speakSelectedCellRange(event.source) return if self.utilities.handleRowAndColumnSelectionChange(event.source): return self.utilities.handleCellSelectionChange(event.source) return if not self.utilities.isComboBoxSelectionChange(event): super().onSelectionChanged(event) return selectedChildren = self.utilities.selectedChildren(event.source) if len(selectedChildren) == 1 \ and self.utilities.containingComboBox(event.source) == \ self.utilities.containingComboBox(orca_state.locusOfFocus): orca.setLocusOfFocus(event, selectedChildren[0], True) def onTextSelectionChanged(self, event): """Callback for object:text-selection-changed accessibility events.""" if self.utilities.isComboBoxNoise(event): msg = "SOFFICE: Event is believed to be combo box noise" debug.println(debug.LEVEL_INFO, msg, True) return if self.utilities.isDead(event.source): msg = "SOFFICE: Ignoring event from dead source." debug.println(debug.LEVEL_INFO, msg, True) return if event.source != orca_state.locusOfFocus \ and event.source.getState().contains(pyatspi.STATE_FOCUSED): orca.setLocusOfFocus(event, event.source, False) super().onTextSelectionChanged(event) def getTextLineAtCaret(self, obj, offset=None, startOffset=None, endOffset=None): """To-be-removed. 
Returns the string, caretOffset, startOffset.""" if obj.parent.getRole() == pyatspi.ROLE_COMBO_BOX: try: text = obj.queryText() except NotImplementedError: return ["", 0, 0] if text.caretOffset < 0: [lineString, startOffset, endOffset] = text.getTextAtOffset( 0, pyatspi.TEXT_BOUNDARY_LINE_START) # Sometimes we get the trailing line-feed -- remove it # if lineString[-1:] == "\n": lineString = lineString[:-1] return [lineString, 0, startOffset] textLine = super().getTextLineAtCaret(obj, offset, startOffset, endOffset) if not obj.getState().contains(pyatspi.STATE_FOCUSED): textLine[0] = self.utilities.displayedText(obj) return textLine def onWindowActivated(self, event): """Callback for window:activate accessibility events.""" super().onWindowActivated(event) if not self.spellcheck.isCheckWindow(event.source): return if event.source[0].getRole() == pyatspi.ROLE_DIALOG: orca.setLocusOfFocus(event, event.source[0], False) self.spellcheck.presentErrorDetails() def onWindowDeactivated(self, event): """Callback for window:deactivate accessibility events.""" self._lastCommandWasStructNav = False super().onWindowDeactivated(event) self.spellcheck.deactivate() def whereAmISelection(self, inputEvent=None, obj=None): obj = obj or orca_state.locusOfFocus if not self.utilities.isSpreadSheetCell(obj): if self.utilities.inDocumentContent(obj): # Because for some reason, the document implements the selection # interface as if it were a spreadsheet or listbox. *sigh* return super()._whereAmISelectedText(inputEvent, obj) return super().whereAmISelection(inputEvent, obj) return self.utilities.speakSelectedCellRange(self.utilities.getTable(obj))
GNOME/orca
src/orca/scripts/apps/soffice/script.py
Python
lgpl-2.1
37,876
# coding: utf-8

# # python-enzymegraph
# A Python package for generating models of enzymes under the quasi-steady-state assumption (QSSA).
#
# ## Requirements
# Currently tested only with Python 3.4.
#
# Requires [sympy](https://github.com/sympy/sympy).
#
# ## Installation
# pip install git+git://github.com/robjstan/python-enzymegraph.git
#
# ## References
# Gunawardena, J.
# A linear framework for time-scale separation in nonlinear biochemical systems.
# PLoS ONE (2012)
#
# Gabow, H. N. & Myers, E. W.
# Finding all spanning trees of directed and undirected graphs.
# SIAM Journal on Computing (1978)
#
# ## Example
# The biochemical example is the Michaelis-Menten mechanism with product inhibition.
# $$E + S \underset{k_2}{\overset{k_1}{\rightleftharpoons}}
# ES \overset{k_\text{cat}}{\longrightarrow}
# EP \underset{k_3}{\overset{k_4}{\rightleftharpoons}}
# E + P$$

# ### Python setup

# In[1]:

from enzymegraph import *


# In[2]:

from sympy import *
from numpy import linspace
from scipy.integrate import odeint
import matplotlib.pyplot as plt
get_ipython().magic('matplotlib inline')


# ### Symbolic setup
#
# First we need to initialise the variables that describe the biochemical species and complexes.
#
# e = free enzyme
# s = substrate
# p = product
# es = enzyme bound to substrate
# ep = enzyme bound to product

# In[3]:

e, s, p, es, ep = symbols('e s p es ep', positive = True)


# Then we need to describe the edges between these species/complexes, labelled with their rates, including the uptake of substrate and product.

# In[4]:

k1, k2, k3, k4, kcat = symbols('k_1 k_2 k_3 k_4 k_cat', positive = True)

edges = {(e, es): k1*s, (es, e): k2,
         (e, ep): k3*p, (ep, e): k4,
         (es, ep): kcat, }


# #### Creating the graph object
#
# #### *enzymegraph.enzymegraph(...)*
#
# The class constructor *enzymegraph.enzymegraph(...)* will take as its first argument either:
# * a dictionary with keys=edges and values=edge labels
# * or, a list of edges (edge labels assumed to be equal to 1)

# In[5]:

graph = enzymegraph(edges)
graph


# In[6]:

edges = list(edges.keys())
enzymegraph(edges)


# #### *enzymegraph.from_matrix(...)*
# The class constructor *enzymegraph.from_matrix(...)* will take as its first argument a matrix of edges, with an optional second argument giving vertex names.

# In[7]:

m = [[ 0, k1*s, k3*p],
     [k2,    0, kcat],
     [k4,    0,    0]]
v = [e, es, ep]

enzymegraph.from_matrix(m, v).edges


# #### ODE outputs
#
# #### *graph.ode_model()*
# The class method *enzymegraph.ode_model()* returns the equivalent mass-action (ODE) model of the system.

# In[8]:

for var, ode in graph.ode_model().items():
    print("d%s/dt = %s" % (var, ode))


# #### *graph.ode_function()*
# The method *enzymegraph.ode_function()* returns the equivalent mass-action (ODE) model of the system as a python function (suitable for simulation using *scipy.integrate.odeint(...)*).
#
# This requires three extra parameters to be given:
# * param_dict, a dictionary giving specific values for the system parameters
# * variables, a list of the model variables in a desired order
# * extra_odes, a dictionary of additional ODEs that complete the system (e.g.
# equations that describe the production and consumption of free substrate and product)

# In[9]:

param_dict = {k1: 1, k2: 0.1, k3: 2, k4: 0.5, kcat: 1}
variables = [e, s, p, es, ep]
extra_odes = {s: k2*es - k1*e*s,
              p: k4*ep - k3*e*p}

ode_func = graph.ode_function(param_dict, variables, extra_odes)
ode_func


# In[10]:

concs = zip(*odeint(ode_func, [1,1,0,0,0], linspace(0,10,100)))
for conc, var in zip(concs, variables):
    plt.plot(conc, label=var)
plt.xlabel("time")
plt.ylabel("concentration")
plt.legend()
plt.show()


# #### Spanning trees
#
# #### *graph.spanning_trees()*
# The method *enzymegraph.spanning_trees()* enumerates all the possible spanning trees of the graph, returning these as *enzymegraph* objects.

# In[11]:

for span_tree in graph.spanning_trees():
    print(span_tree.edges)


# #### TikZ output
#
# #### *graph.graph_as_tikz(...)*
#
# The method *enzymegraph.graph_as_tikz(...)* outputs the graph as TikZ commands, suitable for inclusion in a LaTeX document.
#
# This requires extra parameters to be given:
# * vertex_pos, a dictionary giving TikZ position arguments for each of the complexes
# * edge_styles (optional), a dictionary giving a TikZ line argument for any of the edges
# * relabel (optional), whether to relabel the output TikZ nodes (for instance if their names are otherwise complicated and so may cause TikZ errors)
# * vertex_text_formatter (optional), a lambda that takes the symbolic label of the vertex and returns a formatted string.
# * edge_text_formatter (optional), a lambda that takes the symbolic label of the edge and returns a formatted string.

# In[12]:

vertex_pos = {e: "(0,0)", es: "(240:2)", ep: "(300:2)",}
edge_styles = {(e, es): "to[out=210,in=90]",
               (e, ep): "to[out=330,in=90]",}

print(graph.graph_as_tikz(vertex_pos, edge_styles, relabel=False))


# #### *graph.spanning_trees_as_tikz(...)* (beta)
# The method *enzymegraph.spanning_trees_as_tikz(...)* outputs all the spanning trees of the system as TikZ commands, suitable for inclusion in a LaTeX document.
#
# This requires extra parameters, as described under *enzymegraph.graph_as_tikz(...)*.

# In[13]:

print(graph.spanning_trees_as_tikz(vertex_pos, edge_styles, relabel=False))


# #### Basis element
#
# #### *graph.basis_element()*
# The method *enzymegraph.basis_element()* outputs a dictionary of the basis element of the graph.

# In[14]:

for var, basis_elem in graph.basis_element().items():
    print("%s = %s" % (var, basis_elem))


# #### *graph.qssa_replacements(...)*
# The method *enzymegraph.qssa_replacements(...)* outputs the quasi-steady-state solution for each intermediate, given in terms of the total enzyme (the symbol for which should be passed as the first argument).

# In[15]:

et = symbols('e_t', positive = True)

qssa_reps = graph.qssa_replacements(et)
for var, rep in qssa_reps.items():
    print("%s = %s" % (var, rep))


# These replacements can be used to substitute into the original ODE system.
#
# e.g. $ \frac{dp}{dt} = k_4 ep - k_3 p e $

# In[16]:

dpdt = k4*ep - k3*p*e
print("dp/dt = %s" % dpdt.subs(qssa_reps))
robjstan/python-enzymegraph
example/enzymegraph-example.py
Python
mit
6,370
#!/usr/bin/env python # # Copyright 2008,2010 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # from gnuradio import gr, gr_unittest import sys import random primes = (2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53, 59,61,67,71,73,79,83,89,97,101,103,107,109,113,127,131, 137,139,149,151,157,163,167,173,179,181,191,193,197,199,211,223, 227,229,233,239,241,251,257,263,269,271,277,281,283,293,307,311) class test_fft(gr_unittest.TestCase): def setUp(self): pass def tearDown(self): self.tb = None def assert_fft_ok2(self, expected_result, result_data): expected_result = expected_result[:len(result_data)] self.assertComplexTuplesAlmostEqual2 (expected_result, result_data, abs_eps=1e-9, rel_eps=4e-4) def assert_fft_float_ok2(self, expected_result, result_data, abs_eps=1e-9, rel_eps=4e-4): expected_result = expected_result[:len(result_data)] self.assertFloatTuplesAlmostEqual2 (expected_result, result_data, abs_eps, rel_eps) def test_001(self): tb = gr.top_block() fft_size = 32 src_data = tuple([complex(primes[2*i], primes[2*i+1]) for i in range(fft_size)]) expected_result = ((4377+4516j), (-1706.1268310546875+1638.4256591796875j), (-915.2083740234375+660.69427490234375j), (-660.370361328125+381.59600830078125j), (-499.96044921875+238.41630554199219j), (-462.26748657226562+152.88948059082031j), (-377.98440551757812+77.5928955078125j), (-346.85821533203125+47.152004241943359j), (-295+20j), (-286.33609008789062-22.257017135620117j), (-271.52999877929688-33.081821441650391j), (-224.6358642578125-67.019538879394531j), (-244.24473571777344-91.524826049804688j), (-203.09068298339844-108.54627227783203j), (-198.45195007324219-115.90768432617188j), (-182.97744750976562-128.12318420410156j), (-167-180j), (-130.33688354492188-173.83778381347656j), (-141.19784545898438-190.28807067871094j), (-111.09677124023438-214.48896789550781j), (-70.039543151855469-242.41630554199219j), (-68.960540771484375-228.30015563964844j), (-53.049201965332031-291.47097778320312j), (-28.695289611816406-317.64553833007812j), (57-300j), (45.301143646240234-335.69509887695312j), (91.936195373535156-373.32437133789062j), (172.09465026855469-439.275146484375j), (242.24473571777344-504.47515869140625j), (387.81732177734375-666.6788330078125j), (689.48553466796875-918.2142333984375j), (1646.539306640625-1694.1956787109375j)) src = gr.vector_source_c(src_data) s2v = gr.stream_to_vector(gr.sizeof_gr_complex, fft_size) fft = gr.fft_vcc(fft_size, True, [], False) v2s = gr.vector_to_stream(gr.sizeof_gr_complex, fft_size) dst = gr.vector_sink_c() tb.connect(src, s2v, fft, v2s, dst) tb.run() result_data = dst.data() #print 'expected:', expected_result #print 'results: ', result_data #self.assertComplexTuplesAlmostEqual (expected_result, result_data, 5) self.assert_fft_ok2(expected_result, result_data) def test_002(self): tb = gr.top_block() fft_size = 32 
tmp_data = ((4377+4516j), (-1706.1268310546875+1638.4256591796875j), (-915.2083740234375+660.69427490234375j), (-660.370361328125+381.59600830078125j), (-499.96044921875+238.41630554199219j), (-462.26748657226562+152.88948059082031j), (-377.98440551757812+77.5928955078125j), (-346.85821533203125+47.152004241943359j), (-295+20j), (-286.33609008789062-22.257017135620117j), (-271.52999877929688-33.081821441650391j), (-224.6358642578125-67.019538879394531j), (-244.24473571777344-91.524826049804688j), (-203.09068298339844-108.54627227783203j), (-198.45195007324219-115.90768432617188j), (-182.97744750976562-128.12318420410156j), (-167-180j), (-130.33688354492188-173.83778381347656j), (-141.19784545898438-190.28807067871094j), (-111.09677124023438-214.48896789550781j), (-70.039543151855469-242.41630554199219j), (-68.960540771484375-228.30015563964844j), (-53.049201965332031-291.47097778320312j), (-28.695289611816406-317.64553833007812j), (57-300j), (45.301143646240234-335.69509887695312j), (91.936195373535156-373.32437133789062j), (172.09465026855469-439.275146484375j), (242.24473571777344-504.47515869140625j), (387.81732177734375-666.6788330078125j), (689.48553466796875-918.2142333984375j), (1646.539306640625-1694.1956787109375j)) src_data = tuple([x/fft_size for x in tmp_data]) expected_result = tuple([complex(primes[2*i], primes[2*i+1]) for i in range(fft_size)]) src = gr.vector_source_c(src_data) s2v = gr.stream_to_vector(gr.sizeof_gr_complex, fft_size) fft = gr.fft_vcc(fft_size, False, [], False) v2s = gr.vector_to_stream(gr.sizeof_gr_complex, fft_size) dst = gr.vector_sink_c() tb.connect(src, s2v, fft, v2s, dst) tb.run() result_data = dst.data() #print 'expected:', expected_result #print 'results: ', result_data #self.assertComplexTuplesAlmostEqual (expected_result, result_data, 5) self.assert_fft_ok2(expected_result, result_data) def test_003(self): # Same test as above, only use 2 threads tb = gr.top_block() fft_size = 32 tmp_data = ((4377+4516j), (-1706.1268310546875+1638.4256591796875j), (-915.2083740234375+660.69427490234375j), (-660.370361328125+381.59600830078125j), (-499.96044921875+238.41630554199219j), (-462.26748657226562+152.88948059082031j), (-377.98440551757812+77.5928955078125j), (-346.85821533203125+47.152004241943359j), (-295+20j), (-286.33609008789062-22.257017135620117j), (-271.52999877929688-33.081821441650391j), (-224.6358642578125-67.019538879394531j), (-244.24473571777344-91.524826049804688j), (-203.09068298339844-108.54627227783203j), (-198.45195007324219-115.90768432617188j), (-182.97744750976562-128.12318420410156j), (-167-180j), (-130.33688354492188-173.83778381347656j), (-141.19784545898438-190.28807067871094j), (-111.09677124023438-214.48896789550781j), (-70.039543151855469-242.41630554199219j), (-68.960540771484375-228.30015563964844j), (-53.049201965332031-291.47097778320312j), (-28.695289611816406-317.64553833007812j), (57-300j), (45.301143646240234-335.69509887695312j), (91.936195373535156-373.32437133789062j), (172.09465026855469-439.275146484375j), (242.24473571777344-504.47515869140625j), (387.81732177734375-666.6788330078125j), (689.48553466796875-918.2142333984375j), (1646.539306640625-1694.1956787109375j)) src_data = tuple([x/fft_size for x in tmp_data]) expected_result = tuple([complex(primes[2*i], primes[2*i+1]) for i in range(fft_size)]) nthreads = 2 src = gr.vector_source_c(src_data) s2v = gr.stream_to_vector(gr.sizeof_gr_complex, fft_size) fft = gr.fft_vcc(fft_size, False, [], False, nthreads) v2s = gr.vector_to_stream(gr.sizeof_gr_complex, 
fft_size) dst = gr.vector_sink_c() tb.connect(src, s2v, fft, v2s, dst) tb.run() result_data = dst.data() self.assert_fft_ok2(expected_result, result_data) if __name__ == '__main__': gr_unittest.run(test_fft, "test_fft.xml")
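# A minimal numpy sketch (numpy is assumed available here; it is not used by
# the tests above) of why test_002 and test_003 divide their source data by
# fft_size: gr.fft_vcc computes unnormalized transforms, so a forward
# transform followed by an unnormalized inverse gains a factor of N.
def _fft_scaling_sketch(N=32):
    import numpy as np
    x = np.random.randn(N) + 1j * np.random.randn(N)
    X = np.fft.fft(x)             # unnormalized forward, like fft_vcc(N, True, ...)
    x_back = np.fft.ifft(X) * N   # unnormalized inverse, like fft_vcc(N, False, ...)
    assert np.allclose(x_back, N * x)              # round trip gains a factor of N
    assert np.allclose(np.fft.ifft(X / N) * N, x)  # pre-dividing by N restores x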
RedhawkSDR/integration-gnuhawk
qa/tests/qa_fft.py
Python
gpl-3.0
10,015
# $Id$

import os

from itcc.Molecule import atom, molecule

__revision__ = '$Rev$'

# 1 hartree = 627.51 kcal/mol

def hatree2kcalpermol(hatree):
    """Convert an energy from hartree to kcal/mol."""
    return 627.51 * float(hatree)

def kcalpermol2hatree(kcalpermol):
    """Convert an energy from kcal/mol to hartree."""
    return float(kcalpermol) / 627.51

def out2arch(ifname):
    """Extract the archive section from the tail of a Gaussian output
    file and return it as a list of backslash-separated fields."""
    TAILLINENUMBER = 100

    ifile = os.popen('tail -n %i %s' % (TAILLINENUMBER, ifname))
    state = False
    lines = ''
    for line in ifile:
        if not state:
            # The archive section starts with a line like ' 1\1\GINC-...'
            if line.startswith(' 1\\1\\'):
                state = True
                lines += line[1:-1]
                if lines.endswith('\\\\@'):
                    break
        else:
            lines += line[1:-1]
            # ...and is terminated by '\\@'.
            if lines.endswith('\\\\@'):
                break
    ifile.close()
    lines = lines.split('\\')
    return lines

def out2ene(ifname):
    """Return the list of HF= energies from the archive section of a
    Gaussian output file, or None if no such field is present."""
    lines = out2arch(ifname)
    for x in lines:
        if x.startswith('HF='):
            x = x[3:]
            return [float(y) for y in x.split(',')]
    return None

def findstrline(outfname, keystr):
    """Return the (1-based) numbers of the lines in outfname matching
    keystr, using grep -n."""
    result = []
    cmdline = 'grep -n -- "%s" %s' % (keystr, outfname)
    ifile = os.popen(cmdline)
    for line in ifile:
        result.append(int(line[:line.index(':')]))
    ifile.close()
    return result

def out2mol(outfname):
    """Parse the optimized geometries from a Gaussian output file and
    return them as a list of Molecule objects."""
    result = []
    line1 = findstrline(outfname, "Optimized Parameters")
    line2 = findstrline(outfname, "Enter /home/user/g../l202\.exe")
    line3 = findstrline(outfname,
                        "---------------------------------------------------------------------")
    i2 = 0
    i3 = 0
    for i1, x in enumerate(line1):
        mol = molecule.Molecule()
        # Find the last coordinate printout (l202) before this
        # "Optimized Parameters" line...
        while line2[i2] < x:
            i2 += 1
        y = line2[i2-1]
        # ...then the dashed separator lines bracketing its coordinate table.
        while line3[i3] < y:
            i3 += 1
        z1 = line3[i3+1]
        z2 = line3[i3+2]
        cmdline = 'head -n %i %s | tail -n %i' % \
                  (z2-1, outfname, z2-z1-1)
        ifile = os.popen(cmdline)
        for line in ifile:
            words = line.split()
            atm = atom.Atom(int(words[1]))
            atm.x = float(words[3])
            atm.y = float(words[4])
            atm.z = float(words[5])
            mol.atoms.append(atm)
        ifile.close()
        result.append(mol)
    return result
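# Quick sanity check of the conversion pair above (an illustrative addition,
# not part of the original module):
if __name__ == '__main__':
    assert abs(hatree2kcalpermol(1.0) - 627.51) < 1e-9
    assert abs(kcalpermol2hatree(hatree2kcalpermol(0.5)) - 0.5) < 1e-12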
lidaobing/itcc
itcc/torsionfit/gaussian.py
Python
gpl-3.0
2,223
#!/usr/bin/python # -*- coding: utf-8 -*- from __future__ import unicode_literals #=============================================================================== # # Dependencies # #------------------------------------------------------------------------------- from layman.utils import path from layman.overlays.source import OverlaySource #=============================================================================== # # Class StubOverlay # #------------------------------------------------------------------------------- class StubOverlay(OverlaySource): ''' Handles overlays with missing modules. ''' type = 'N/A' type_key = 'n/a' def __init__(self, parent, config, _location, ignore = 0): super(StubOverlay, self).__init__(parent, config, _location, ignore) self.branch = self.parent.branch self.info = {'name': self.parent.name, 'type': self.parent.ovl_type} self.missing_msg = 'Overlay "%(name)s" is missing "%(type)s" module!'\ % self.info self.hint = 'Did you install layman with "%(type)s" support?'\ % self.info def add(self, base): '''Add overlay.''' self.output.error(self.missing_msg) self.output.warn(self.hint) return True def update(self, base, src): ''' Updates overlay src-url. ''' self.output.error(self.missing_msg) self.output.warn(self.hint) return True def sync(self, base): '''Sync overlay.''' self.output.error(self.missing_msg) self.output.warn(self.hint) return True def supported(self): '''Overlay type supported?''' return False
gentoo/layman
layman/overlays/modules/stub/stub.py
Python
gpl-2.0
1,731
from __future__ import unicode_literals

from django.db import models

# Create your models here.
class book1(models.Model):
    value1 = models.IntegerField()
    value2 = models.IntegerField()
    value3 = models.IntegerField()
    value4 = models.IntegerField()
    value5 = models.IntegerField()
    value6 = models.IntegerField()
    value7 = models.IntegerField()
    value8 = models.IntegerField()
    value9 = models.IntegerField()
    value10 = models.IntegerField()
    table_type = models.IntegerField()

class book2(models.Model):
    value1 = models.DecimalField(max_digits=7, decimal_places=7)
    value2 = models.DecimalField(max_digits=7, decimal_places=7)
    value3 = models.DecimalField(max_digits=7, decimal_places=7)
    value4 = models.DecimalField(max_digits=7, decimal_places=7)
    value5 = models.DecimalField(max_digits=7, decimal_places=7)
    value6 = models.DecimalField(max_digits=7, decimal_places=7)
    value7 = models.DecimalField(max_digits=7, decimal_places=7)
    value8 = models.DecimalField(max_digits=7, decimal_places=7)
    value9 = models.DecimalField(max_digits=7, decimal_places=7)
    value10 = models.DecimalField(max_digits=7, decimal_places=7)
    table_type = models.IntegerField()

class book3(models.Model):
    value1 = models.DecimalField(max_digits=7, decimal_places=7)
    value2 = models.DecimalField(max_digits=7, decimal_places=7)
    value3 = models.DecimalField(max_digits=7, decimal_places=7)
    value4 = models.DecimalField(max_digits=7, decimal_places=7)
    value5 = models.DecimalField(max_digits=7, decimal_places=7)
    value6 = models.DecimalField(max_digits=7, decimal_places=7)
    value7 = models.DecimalField(max_digits=7, decimal_places=7)
    value8 = models.DecimalField(max_digits=7, decimal_places=7)
    value9 = models.DecimalField(max_digits=7, decimal_places=7)
    value10 = models.DecimalField(max_digits=7, decimal_places=7)
    table_type = models.IntegerField()

class book4(models.Model):
    value1 = models.DecimalField(max_digits=7, decimal_places=7)
    value2 = models.DecimalField(max_digits=7, decimal_places=7)
    value3 = models.DecimalField(max_digits=7, decimal_places=7)
    value4 = models.DecimalField(max_digits=7, decimal_places=7)
    value5 = models.DecimalField(max_digits=7, decimal_places=7)
    value6 = models.DecimalField(max_digits=7, decimal_places=7)
    value7 = models.DecimalField(max_digits=7, decimal_places=7)
    value8 = models.DecimalField(max_digits=7, decimal_places=7)
    value9 = models.DecimalField(max_digits=7, decimal_places=7)
    value10 = models.DecimalField(max_digits=7, decimal_places=7)
    table_type = models.IntegerField()

class book5(models.Model):
    value1 = models.DecimalField(max_digits=7, decimal_places=7)
    value2 = models.DecimalField(max_digits=7, decimal_places=7)
    value3 = models.DecimalField(max_digits=7, decimal_places=7)
    value4 = models.DecimalField(max_digits=7, decimal_places=7)
    value5 = models.DecimalField(max_digits=7, decimal_places=7)
    value6 = models.DecimalField(max_digits=7, decimal_places=7)
    value7 = models.DecimalField(max_digits=7, decimal_places=7)
    value8 = models.DecimalField(max_digits=7, decimal_places=7)
    value9 = models.DecimalField(max_digits=7, decimal_places=7)
    value10 = models.DecimalField(max_digits=7, decimal_places=7)
    table_type = models.IntegerField()

class book6(models.Model):
    value1 = models.DecimalField(max_digits=7, decimal_places=7)
    value2 = models.DecimalField(max_digits=7, decimal_places=7)
    value3 = models.DecimalField(max_digits=7, decimal_places=7)
    value4 = models.DecimalField(max_digits=7, decimal_places=7)
    value5 = models.DecimalField(max_digits=7, decimal_places=7)
    value6 = models.DecimalField(max_digits=7, decimal_places=7)
    value7 = models.DecimalField(max_digits=7, decimal_places=7)
    value8 = models.DecimalField(max_digits=7, decimal_places=7)
    value9 = models.DecimalField(max_digits=7, decimal_places=7)
    value10 = models.DecimalField(max_digits=7, decimal_places=7)
    table_type = models.IntegerField()
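# Hypothetical deduplication sketch (not used by the models above): book2
# through book6 are field-for-field identical, so their shape could be
# declared once on an abstract base.  Abstract models create no table and no
# migration, so this definition is inert; each concrete model would then be
# e.g. `class book2(DecimalBookBase): pass`, a change that would itself
# require migrations.
class DecimalBookBase(models.Model):
    value1 = models.DecimalField(max_digits=7, decimal_places=7)
    value2 = models.DecimalField(max_digits=7, decimal_places=7)
    value3 = models.DecimalField(max_digits=7, decimal_places=7)
    value4 = models.DecimalField(max_digits=7, decimal_places=7)
    value5 = models.DecimalField(max_digits=7, decimal_places=7)
    value6 = models.DecimalField(max_digits=7, decimal_places=7)
    value7 = models.DecimalField(max_digits=7, decimal_places=7)
    value8 = models.DecimalField(max_digits=7, decimal_places=7)
    value9 = models.DecimalField(max_digits=7, decimal_places=7)
    value10 = models.DecimalField(max_digits=7, decimal_places=7)
    table_type = models.IntegerField()

    class Meta:
        abstract = True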
dilwaria/financialgraph
graphs/models.py
Python
mit
4,109
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
import os
import socket
import math
import threading
import heapq

from botocore.compat import six
from botocore.exceptions import IncompleteReadError
from botocore.vendored.requests.packages.urllib3.exceptions import \
    ReadTimeoutError

from s3transfer.compat import SOCKET_ERROR
from s3transfer.compat import seekable
from s3transfer.exceptions import RetriesExceededError
from s3transfer.futures import IN_MEMORY_DOWNLOAD_TAG
from s3transfer.utils import random_file_extension
from s3transfer.utils import get_callbacks
from s3transfer.utils import invoke_progress_callbacks
from s3transfer.utils import calculate_range_parameter
from s3transfer.utils import FunctionContainer
from s3transfer.utils import CountCallbackInvoker
from s3transfer.utils import StreamReaderProgress
from s3transfer.utils import DeferredOpenFile
from s3transfer.tasks import Task
from s3transfer.tasks import SubmissionTask


logger = logging.getLogger(__name__)

S3_RETRYABLE_ERRORS = (
    socket.timeout, SOCKET_ERROR, ReadTimeoutError, IncompleteReadError
)


class DownloadOutputManager(object):
    """Base manager class for handling various types of files for downloads

    This class is typically used for the DownloadSubmissionTask class to help
    determine the following:

        * Provides the fileobj to write downloaded data to
        * Provides a task to complete once all downloaded data has been
          written out

    The answers/implementations differ for the various types of file outputs
    that may be accepted. All implementations must subclass and override
    public methods from this class.
    """
    def __init__(self, osutil, transfer_coordinator, io_executor):
        self._osutil = osutil
        self._transfer_coordinator = transfer_coordinator
        self._io_executor = io_executor

    @classmethod
    def is_compatible(cls, download_target, osutil):
        """Determines if the target for the download is compatible with manager

        :param download_target: The target to which the download will write
            data.

        :param osutil: The os utility to be used for the transfer

        :returns: True if the manager can handle the type of target specified
            otherwise returns False.
        """
        raise NotImplementedError('must implement is_compatible()')

    def get_download_task_tag(self):
        """Get the tag (if any) to associate with all GetObjectTasks

        :rtype: s3transfer.futures.TaskTag
        :returns: The tag to associate all GetObjectTasks with
        """
        return None

    def get_fileobj_for_io_writes(self, transfer_future):
        """Get file-like object to use for io writes in the io executor

        :type transfer_future: s3transfer.futures.TransferFuture
        :param transfer_future: The future associated with the download
            request

        :returns: A file-like object to write to
        """
        raise NotImplementedError('must implement get_fileobj_for_io_writes()')

    def queue_file_io_task(self, fileobj, data, offset):
        """Queue IO write for submission to the IO executor.

        This method accepts information about the downloaded data and
        handles submitting it to the IO executor.
        This method may defer submission to the IO executor if necessary.

        """
        self._transfer_coordinator.submit(
            self._io_executor,
            self.get_io_write_task(fileobj, data, offset)
        )

    def get_io_write_task(self, fileobj, data, offset):
        """Get an IO write task for the requested set of data

        This task can be run immediately or be submitted to the IO executor
        for it to run.

        :type fileobj: file-like object
        :param fileobj: The file-like object to write to

        :type data: bytes
        :param data: The data to write out

        :type offset: integer
        :param offset: The offset to write the data to in the file-like object

        :returns: An IO task to be used to write data to a file-like object
        """
        return IOWriteTask(
            self._transfer_coordinator,
            main_kwargs={
                'fileobj': fileobj,
                'data': data,
                'offset': offset,
            }
        )

    def get_final_io_task(self):
        """Get the final io task to complete the download

        This is needed because, based on the architecture of the
        TransferManager, the final tasks will be sent to the IO executor, but
        the executor needs a final task for it to signal that the transfer is
        done and all done callbacks can be run.

        :rtype: s3transfer.tasks.Task
        :returns: A final task to be completed in the io executor
        """
        raise NotImplementedError(
            'must implement get_final_io_task()')

    def _get_fileobj_from_filename(self, filename):
        f = DeferredOpenFile(
            filename, mode='wb', open_function=self._osutil.open)
        # Make sure the file gets closed and we remove the temporary file
        # if anything goes wrong during the process.
        self._transfer_coordinator.add_failure_cleanup(f.close)
        return f


class DownloadFilenameOutputManager(DownloadOutputManager):
    def __init__(self, osutil, transfer_coordinator, io_executor):
        super(DownloadFilenameOutputManager, self).__init__(
            osutil, transfer_coordinator, io_executor)
        self._final_filename = None
        self._temp_filename = None
        self._temp_fileobj = None

    @classmethod
    def is_compatible(cls, download_target, osutil):
        return isinstance(download_target, six.string_types)

    def get_fileobj_for_io_writes(self, transfer_future):
        fileobj = transfer_future.meta.call_args.fileobj
        self._final_filename = fileobj
        self._temp_filename = fileobj + os.extsep + random_file_extension()
        self._temp_fileobj = self._get_temp_fileobj()
        return self._temp_fileobj

    def get_final_io_task(self):
        # A task to rename the file from the temporary file to its final
        # location is needed. This should be the last task needed to complete
        # the download.
        return IORenameFileTask(
            transfer_coordinator=self._transfer_coordinator,
            main_kwargs={
                'fileobj': self._temp_fileobj,
                'final_filename': self._final_filename,
                'osutil': self._osutil
            },
            is_final=True
        )

    def _get_temp_fileobj(self):
        f = self._get_fileobj_from_filename(self._temp_filename)
        self._transfer_coordinator.add_failure_cleanup(
            self._osutil.remove_file, self._temp_filename)
        return f


class DownloadSeekableOutputManager(DownloadOutputManager):
    @classmethod
    def is_compatible(cls, download_target, osutil):
        return seekable(download_target)

    def get_fileobj_for_io_writes(self, transfer_future):
        # Return the fileobj provided to the future.
        return transfer_future.meta.call_args.fileobj

    def get_final_io_task(self):
        # This task will serve the purpose of signaling when all of the io
        # writes have finished so done callbacks can be called.
return CompleteDownloadNOOPTask( transfer_coordinator=self._transfer_coordinator) class DownloadNonSeekableOutputManager(DownloadOutputManager): def __init__(self, osutil, transfer_coordinator, io_executor, defer_queue=None): super(DownloadNonSeekableOutputManager, self).__init__( osutil, transfer_coordinator, io_executor) if defer_queue is None: defer_queue = DeferQueue() self._defer_queue = defer_queue self._io_submit_lock = threading.Lock() @classmethod def is_compatible(cls, download_target, osutil): return hasattr(download_target, 'write') def get_download_task_tag(self): return IN_MEMORY_DOWNLOAD_TAG def get_fileobj_for_io_writes(self, transfer_future): return transfer_future.meta.call_args.fileobj def get_final_io_task(self): return CompleteDownloadNOOPTask( transfer_coordinator=self._transfer_coordinator) def queue_file_io_task(self, fileobj, data, offset): with self._io_submit_lock: writes = self._defer_queue.request_writes(offset, data) for write in writes: data = write['data'] logger.debug("Queueing IO offset %s for fileobj: %s", write['offset'], fileobj) super( DownloadNonSeekableOutputManager, self).queue_file_io_task( fileobj, data, offset) def get_io_write_task(self, fileobj, data, offset): return IOStreamingWriteTask( self._transfer_coordinator, main_kwargs={ 'fileobj': fileobj, 'data': data, } ) class DownloadSpecialFilenameOutputManager(DownloadNonSeekableOutputManager): def __init__(self, osutil, transfer_coordinator, io_executor, defer_queue=None): super(DownloadSpecialFilenameOutputManager, self).__init__( osutil, transfer_coordinator, io_executor, defer_queue) self._fileobj = None @classmethod def is_compatible(cls, download_target, osutil): return isinstance(download_target, six.string_types) and \ osutil.is_special_file(download_target) def get_fileobj_for_io_writes(self, transfer_future): filename = transfer_future.meta.call_args.fileobj self._fileobj = self._get_fileobj_from_filename(filename) return self._fileobj def get_final_io_task(self): # Make sure the file gets closed once the transfer is done. return IOCloseTask( transfer_coordinator=self._transfer_coordinator, is_final=True, main_kwargs={'fileobj': self._fileobj}) class DownloadSubmissionTask(SubmissionTask): """Task for submitting tasks to execute a download""" def _get_download_output_manager_cls(self, transfer_future, osutil): """Retrieves a class for managing output for a download :type transfer_future: s3transfer.futures.TransferFuture :param transfer_future: The transfer future for the request :type osutil: s3transfer.utils.OSUtils :param osutil: The os utility associated to the transfer :rtype: class of DownloadOutputManager :returns: The appropriate class to use for managing a specific type of input for downloads. """ download_manager_resolver_chain = [ DownloadSpecialFilenameOutputManager, DownloadFilenameOutputManager, DownloadSeekableOutputManager, DownloadNonSeekableOutputManager, ] fileobj = transfer_future.meta.call_args.fileobj for download_manager_cls in download_manager_resolver_chain: if download_manager_cls.is_compatible(fileobj, osutil): return download_manager_cls raise RuntimeError( 'Output %s of type: %s is not supported.' 
% ( fileobj, type(fileobj))) def _submit(self, client, config, osutil, request_executor, io_executor, transfer_future, bandwidth_limiter=None): """ :param client: The client associated with the transfer manager :type config: s3transfer.manager.TransferConfig :param config: The transfer config associated with the transfer manager :type osutil: s3transfer.utils.OSUtil :param osutil: The os utility associated to the transfer manager :type request_executor: s3transfer.futures.BoundedExecutor :param request_executor: The request executor associated with the transfer manager :type io_executor: s3transfer.futures.BoundedExecutor :param io_executor: The io executor associated with the transfer manager :type transfer_future: s3transfer.futures.TransferFuture :param transfer_future: The transfer future associated with the transfer request that tasks are being submitted for :type bandwidth_limiter: s3transfer.bandwidth.BandwidthLimiter :param bandwidth_limiter: The bandwidth limiter to use when downloading streams """ if transfer_future.meta.size is None: # If a size was not provided figure out the size for the # user. response = client.head_object( Bucket=transfer_future.meta.call_args.bucket, Key=transfer_future.meta.call_args.key, **transfer_future.meta.call_args.extra_args ) transfer_future.meta.provide_transfer_size( response['ContentLength']) download_output_manager = self._get_download_output_manager_cls( transfer_future, osutil)(osutil, self._transfer_coordinator, io_executor) # If it is greater than threshold do a ranged download, otherwise # do a regular GetObject download. if transfer_future.meta.size < config.multipart_threshold: self._submit_download_request( client, config, osutil, request_executor, io_executor, download_output_manager, transfer_future, bandwidth_limiter) else: self._submit_ranged_download_request( client, config, osutil, request_executor, io_executor, download_output_manager, transfer_future, bandwidth_limiter) def _submit_download_request(self, client, config, osutil, request_executor, io_executor, download_output_manager, transfer_future, bandwidth_limiter): call_args = transfer_future.meta.call_args # Get a handle to the file that will be used for writing downloaded # contents fileobj = download_output_manager.get_fileobj_for_io_writes( transfer_future) # Get the needed callbacks for the task progress_callbacks = get_callbacks(transfer_future, 'progress') # Get any associated tags for the get object task. get_object_tag = download_output_manager.get_download_task_tag() # Get the final io task to run once the download is complete. final_task = download_output_manager.get_final_io_task() # Submit the task to download the object. 
        self._transfer_coordinator.submit(
            request_executor,
            ImmediatelyWriteIOGetObjectTask(
                transfer_coordinator=self._transfer_coordinator,
                main_kwargs={
                    'client': client,
                    'bucket': call_args.bucket,
                    'key': call_args.key,
                    'fileobj': fileobj,
                    'extra_args': call_args.extra_args,
                    'callbacks': progress_callbacks,
                    'max_attempts': config.num_download_attempts,
                    'download_output_manager': download_output_manager,
                    'io_chunksize': config.io_chunksize,
                    'bandwidth_limiter': bandwidth_limiter
                },
                done_callbacks=[final_task]
            ),
            tag=get_object_tag
        )

    def _submit_ranged_download_request(self, client, config, osutil,
                                        request_executor, io_executor,
                                        download_output_manager,
                                        transfer_future,
                                        bandwidth_limiter):
        call_args = transfer_future.meta.call_args

        # Get the needed progress callbacks for the task
        progress_callbacks = get_callbacks(transfer_future, 'progress')

        # Get a handle to the file that will be used for writing downloaded
        # contents
        fileobj = download_output_manager.get_fileobj_for_io_writes(
            transfer_future)

        # Determine the number of parts
        part_size = config.multipart_chunksize
        num_parts = int(
            math.ceil(transfer_future.meta.size / float(part_size)))

        # Get any associated tags for the get object task.
        get_object_tag = download_output_manager.get_download_task_tag()

        # Callback invoker to submit the final io task once all downloads
        # are complete.
        finalize_download_invoker = CountCallbackInvoker(
            self._get_final_io_task_submission_callback(
                download_output_manager, io_executor
            )
        )
        for i in range(num_parts):
            # Calculate the range parameter
            range_parameter = calculate_range_parameter(
                part_size, i, num_parts)

            # Inject the Range parameter to the parameters to be passed in
            # as extra args
            extra_args = {'Range': range_parameter}
            extra_args.update(call_args.extra_args)
            finalize_download_invoker.increment()
            # Submit the ranged downloads
            self._transfer_coordinator.submit(
                request_executor,
                GetObjectTask(
                    transfer_coordinator=self._transfer_coordinator,
                    main_kwargs={
                        'client': client,
                        'bucket': call_args.bucket,
                        'key': call_args.key,
                        'fileobj': fileobj,
                        'extra_args': extra_args,
                        'callbacks': progress_callbacks,
                        'max_attempts': config.num_download_attempts,
                        'start_index': i * part_size,
                        'download_output_manager': download_output_manager,
                        'io_chunksize': config.io_chunksize,
                        'bandwidth_limiter': bandwidth_limiter
                    },
                    done_callbacks=[finalize_download_invoker.decrement]
                ),
                tag=get_object_tag
            )
        finalize_download_invoker.finalize()

    def _get_final_io_task_submission_callback(self, download_manager,
                                               io_executor):
        final_task = download_manager.get_final_io_task()
        return FunctionContainer(
            self._transfer_coordinator.submit, io_executor, final_task)

    def _calculate_range_param(self, part_size, part_index, num_parts):
        # Used to calculate the Range parameter
        start_range = part_index * part_size
        if part_index == num_parts - 1:
            end_range = ''
        else:
            end_range = start_range + part_size - 1
        range_param = 'bytes=%s-%s' % (start_range, end_range)
        return range_param


class GetObjectTask(Task):
    def _main(self, client, bucket, key, fileobj, extra_args, callbacks,
              max_attempts, download_output_manager, io_chunksize,
              start_index=0, bandwidth_limiter=None):
        """Downloads an object and places content into io queue

        :param client: The client to use when calling GetObject
        :param bucket: The bucket to download from
        :param key: The key to download from
        :param fileobj: The file handle to write content to
        :param extra_args: Any extra arguments to include in GetObject request
        :param callbacks: List of progress callbacks to invoke on download
        :param max_attempts: The number of retries to do when downloading
        :param download_output_manager: The download output manager associated
            with the current download.
        :param io_chunksize: The size of each io chunk to read from the
            download stream and queue in the io queue.
        :param start_index: The location in the file to start writing the
            content of the key to.
        :param bandwidth_limiter: The bandwidth limiter to use when throttling
            the downloading of data in streams.
        """
        last_exception = None
        # Track how far into the object writes have been queued so a retry
        # can roll back progress; initializing it here keeps the except
        # clause below safe even if get_object itself raises on the first
        # attempt.
        current_index = start_index
        for i in range(max_attempts):
            try:
                response = client.get_object(
                    Bucket=bucket, Key=key, **extra_args)
                streaming_body = StreamReaderProgress(
                    response['Body'], callbacks)
                if bandwidth_limiter:
                    # NB: "bandwith" is the method's actual spelling in
                    # s3transfer.bandwidth.
                    streaming_body = \
                        bandwidth_limiter.get_bandwith_limited_stream(
                            streaming_body, self._transfer_coordinator)

                current_index = start_index
                chunks = DownloadChunkIterator(streaming_body, io_chunksize)
                for chunk in chunks:
                    # If the transfer is done because of a cancellation
                    # or error somewhere else, stop trying to submit more
                    # data to be written and break out of the download.
                    if not self._transfer_coordinator.done():
                        self._handle_io(
                            download_output_manager, fileobj, chunk,
                            current_index
                        )
                        current_index += len(chunk)
                    else:
                        return
                return
            except S3_RETRYABLE_ERRORS as e:
                logger.debug("Retrying exception caught (%s), "
                             "retrying request, (attempt %s / %s)", e, i + 1,
                             max_attempts, exc_info=True)
                last_exception = e
                # Also invoke the progress callbacks to indicate that we
                # are trying to download the stream again and all progress
                # for this GetObject has been lost.
                invoke_progress_callbacks(
                    callbacks, start_index - current_index)
                continue
        raise RetriesExceededError(last_exception)

    def _handle_io(self, download_output_manager, fileobj, chunk, index):
        download_output_manager.queue_file_io_task(fileobj, chunk, index)


class ImmediatelyWriteIOGetObjectTask(GetObjectTask):
    """GetObjectTask that immediately writes to the provided file object

    This is useful for downloads where it is known only one thread is
    downloading the object so there is no reason to go through the
    overhead of using an IO queue and executor.
    """
    def _handle_io(self, download_output_manager, fileobj, chunk, index):
        task = download_output_manager.get_io_write_task(
            fileobj, chunk, index)
        task()


class IOWriteTask(Task):
    def _main(self, fileobj, data, offset):
        """Pulls off an io queue to write contents to a file

        :param fileobj: The file handle to write content to
        :param data: The data to write
        :param offset: The offset to write the data to.
        """
        fileobj.seek(offset)
        fileobj.write(data)


class IOStreamingWriteTask(Task):
    """Task for writing data to a non-seekable stream."""

    def _main(self, fileobj, data):
        """Write data to a fileobj.

        Data will be written directly to the fileobj without any prior
        seeking.

        :param fileobj: The fileobj to write content to
        :param data: The data to write
        """
        fileobj.write(data)


class IORenameFileTask(Task):
    """A task to rename a temporary file to its final filename

    :param fileobj: The file handle that content was written to.
    :param final_filename: The final name of the file to rename to
        upon completion of writing the contents.
    :param osutil: OS utility
    """
    def _main(self, fileobj, final_filename, osutil):
        fileobj.close()
        osutil.rename_file(fileobj.name, final_filename)


class IOCloseTask(Task):
    """A task to close out a file once the download is complete.

    :param fileobj: The fileobj to close.
""" def _main(self, fileobj): fileobj.close() class CompleteDownloadNOOPTask(Task): """A NOOP task to serve as an indicator that the download is complete Note that the default for is_final is set to True because this should always be the last task. """ def __init__(self, transfer_coordinator, main_kwargs=None, pending_main_kwargs=None, done_callbacks=None, is_final=True): super(CompleteDownloadNOOPTask, self).__init__( transfer_coordinator=transfer_coordinator, main_kwargs=main_kwargs, pending_main_kwargs=pending_main_kwargs, done_callbacks=done_callbacks, is_final=is_final ) def _main(self): pass class DownloadChunkIterator(object): def __init__(self, body, chunksize): """Iterator to chunk out a downloaded S3 stream :param body: A readable file-like object :param chunksize: The amount to read each time """ self._body = body self._chunksize = chunksize self._num_reads = 0 def __iter__(self): return self def __next__(self): chunk = self._body.read(self._chunksize) self._num_reads += 1 if chunk: return chunk elif self._num_reads == 1: # Even though the response may have not had any # content, we still want to account for an empty object's # existance so return the empty chunk for that initial # read. return chunk raise StopIteration() next = __next__ class DeferQueue(object): """IO queue that defers write requests until they are queued sequentially. This class is used to track IO data for a *single* fileobj. You can send data to this queue, and it will defer any IO write requests until it has the next contiguous block available (starting at 0). """ def __init__(self): self._writes = [] self._pending_offsets = set() self._next_offset = 0 def request_writes(self, offset, data): """Request any available writes given new incoming data. You call this method by providing new data along with the offset associated with the data. If that new data unlocks any contiguous writes that can now be submitted, this method will return all applicable writes. This is done with 1 method call so you don't have to make two method calls (put(), get()) which acquires a lock each method call. """ if offset < self._next_offset: # This is a request for a write that we've already # seen. This can happen in the event of a retry # where if we retry at at offset N/2, we'll requeue # offsets 0-N/2 again. return [] writes = [] if offset in self._pending_offsets: # We've already queued this offset so this request is # a duplicate. In this case we should ignore # this request and prefer what's already queued. return [] heapq.heappush(self._writes, (offset, data)) self._pending_offsets.add(offset) while self._writes and self._writes[0][0] == self._next_offset: next_write = heapq.heappop(self._writes) writes.append({'offset': next_write[0], 'data': next_write[1]}) self._pending_offsets.remove(next_write[0]) self._next_offset += len(next_write[1]) return writes
VirtueSecurity/aws-extender
BappModules/s3transfer/download.py
Python
mit
28,306
def extractRandomnessShelfBlogspotCom(item): ''' Parser for 'randomness-shelf.blogspot.com' ''' vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title']) if not (chp or vol) or "preview" in item['title'].lower(): return None tagmap = [ ('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel'), ] for tagname, name, tl_type in tagmap: if tagname in item['tags']: return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type) return False
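

# --- Editor's illustration (not part of the original parser): the tagmap
# dispatch above, reduced to a self-contained sketch. The real helpers
# (extractVolChapterFragmentPostfix, buildReleaseMessageWithType) live
# elsewhere in the ReadableWebProxy codebase; the stand-in below only
# mirrors the control flow: the first matching tag wins and unknown tags
# fall through to False, while previews and items with neither a volume nor
# a chapter would already have returned None earlier.
def _demo_tag_dispatch(tags):
    tagmap = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in tagmap:
        if tagname in tags:
            return (name, tl_type)
    return False

assert _demo_tag_dispatch(['PRC']) == ('PRC', 'translated')
assert _demo_tag_dispatch(['SomethingElse']) is False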
fake-name/ReadableWebProxy
WebMirror/management/rss_parser_funcs/feed_parse_extractRandomnessShelfBlogspotCom.py
Python
bsd-3-clause
569
#!/usr/bin/python3

import sys

# Make the package importable when this file is executed from the cgi/
# directory rather than the project root; this has to happen before the
# ifsApprover import below.
sys.path.append("../")

from ifsApprover.web import app

# run standalone for debugging
if __name__ == "__main__":
    app.run(debug=False)
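

# --- Editor's illustration (not part of the original file): the Flask
# `app` above is a plain WSGI callable, so it can also be hosted by the
# standard library's reference server when debugging WSGI behaviour without
# Apache or mod_wsgi in the loop. Host and port below are arbitrary
# examples.
def _serve_for_debugging(host="127.0.0.1", port=8000):
    from wsgiref.simple_server import make_server

    httpd = make_server(host, port, app)
    try:
        httpd.serve_forever()
    finally:
        httpd.server_close()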
ktt-ol/ifs-approver
backend/cgi/ifs-approver.wsgi.py
Python
mit
170
""" homeassistant.components.switch.scsgate ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Support for SCSGate switches. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/switch.scsgate/ """ import logging import homeassistant.components.scsgate as scsgate from homeassistant.components.switch import SwitchDevice from homeassistant.const import ATTR_ENTITY_ID DEPENDENCIES = ['scsgate'] def setup_platform(hass, config, add_devices_callback, discovery_info=None): """ Add the SCSGate swiches defined inside of the configuration file. """ logger = logging.getLogger(__name__) _setup_traditional_switches( logger=logger, config=config, add_devices_callback=add_devices_callback) _setup_scenario_switches( logger=logger, config=config, hass=hass) def _setup_traditional_switches(logger, config, add_devices_callback): """ Add traditional SCSGate switches """ traditional = config.get('traditional') switches = [] if traditional: for _, entity_info in traditional.items(): if entity_info['scs_id'] in scsgate.SCSGATE.devices: continue logger.info( "Adding %s scsgate.traditional_switch", entity_info['name']) name = entity_info['name'] scs_id = entity_info['scs_id'] switch = SCSGateSwitch( name=name, scs_id=scs_id, logger=logger) switches.append(switch) add_devices_callback(switches) scsgate.SCSGATE.add_devices_to_register(switches) def _setup_scenario_switches(logger, config, hass): """ Add only SCSGate scenario switches """ scenario = config.get("scenario") if scenario: for _, entity_info in scenario.items(): if entity_info['scs_id'] in scsgate.SCSGATE.devices: continue logger.info( "Adding %s scsgate.scenario_switch", entity_info['name']) name = entity_info['name'] scs_id = entity_info['scs_id'] switch = SCSGateScenarioSwitch( name=name, scs_id=scs_id, logger=logger, hass=hass) scsgate.SCSGATE.add_device(switch) class SCSGateSwitch(SwitchDevice): """ Provides a SCSGate switch. """ def __init__(self, scs_id, name, logger): self._name = name self._scs_id = scs_id self._toggled = False self._logger = logger @property def scs_id(self): """ SCS ID """ return self._scs_id @property def should_poll(self): """ No polling needed for a SCSGate switch. """ return False @property def name(self): """ Returns the name of the device if any. """ return self._name @property def is_on(self): """ True if switch is on. """ return self._toggled def turn_on(self, **kwargs): """ Turn the device on. """ from scsgate.tasks import ToggleStatusTask scsgate.SCSGATE.append_task( ToggleStatusTask( target=self._scs_id, toggled=True)) self._toggled = True self.update_ha_state() def turn_off(self, **kwargs): """ Turn the device off. """ from scsgate.tasks import ToggleStatusTask scsgate.SCSGATE.append_task( ToggleStatusTask( target=self._scs_id, toggled=False)) self._toggled = False self.update_ha_state() def process_event(self, message): """ Handle a SCSGate message related with this switch""" if self._toggled == message.toggled: self._logger.info( "Switch %s, ignoring message %s because state already active", self._scs_id, message) # Nothing changed, ignoring return self._toggled = message.toggled self.update_ha_state() command = "off" if self._toggled: command = "on" self.hass.bus.fire( 'button_pressed', { ATTR_ENTITY_ID: self._scs_id, 'state': command } ) class SCSGateScenarioSwitch: """ Provides a SCSGate scenario switch. 
    This switch is always in an 'off' state; when toggled it's used to
    trigger events.
    """
    def __init__(self, scs_id, name, logger, hass):
        self._name = name
        self._scs_id = scs_id
        self._logger = logger
        self._hass = hass

    @property
    def scs_id(self):
        """ SCS ID """
        return self._scs_id

    @property
    def name(self):
        """ Returns the name of the device if any. """
        return self._name

    def process_event(self, message):
        """ Handle a SCSGate message related to this switch. """
        from scsgate.messages import StateMessage, ScenarioTriggeredMessage

        if isinstance(message, StateMessage):
            scenario_id = message.bytes[4]
        elif isinstance(message, ScenarioTriggeredMessage):
            scenario_id = message.scenario
        else:
            self._logger.warning(
                "Scenario switch: received unknown message %s",
                message)
            return

        self._hass.bus.fire(
            'scenario_switch_triggered', {
                ATTR_ENTITY_ID: int(self._scs_id),
                'scenario_id': int(scenario_id, 16)
            }
        )
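

# --- Editor's illustration (not part of the original platform): the event
# payload SCSGateScenarioSwitch fires, reduced to the two conversions that
# are easy to get wrong. The scenario id arrives as a hex string from the
# SCS bus (hence int(x, 16)), while the entity id is a plain decimal string.
def _scenario_event_payload(scs_id, scenario_id):
    return {
        'entity_id': int(scs_id),             # e.g. '31' -> 31
        'scenario_id': int(scenario_id, 16),  # e.g. '0a' -> 10
    }

assert _scenario_event_payload('31', '0a') == {
    'entity_id': 31, 'scenario_id': 10}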
toddeye/home-assistant
homeassistant/components/switch/scsgate.py
Python
mit
5,589
from __future__ import annotations import copy from datetime import timedelta from textwrap import dedent from typing import ( Callable, Hashable, Literal, final, no_type_check, ) import numpy as np from pandas._libs import lib from pandas._libs.tslibs import ( BaseOffset, IncompatibleFrequency, NaT, Period, Timedelta, Timestamp, to_offset, ) from pandas._typing import ( FrameOrSeries, IndexLabel, T, TimedeltaConvertibleTypes, TimestampConvertibleTypes, npt, ) from pandas.compat.numpy import function as nv from pandas.errors import AbstractMethodError from pandas.util._decorators import ( Appender, Substitution, deprecate_nonkeyword_arguments, doc, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) import pandas.core.algorithms as algos from pandas.core.apply import ResamplerWindowApply from pandas.core.base import ( DataError, PandasObject, ) import pandas.core.common as com from pandas.core.generic import ( NDFrame, _shared_docs, ) from pandas.core.groupby.generic import SeriesGroupBy from pandas.core.groupby.groupby import ( BaseGroupBy, GroupBy, _pipe_template, get_groupby, ) from pandas.core.groupby.grouper import Grouper from pandas.core.groupby.ops import BinGrouper from pandas.core.indexes.api import Index from pandas.core.indexes.datetimes import ( DatetimeIndex, date_range, ) from pandas.core.indexes.period import ( PeriodIndex, period_range, ) from pandas.core.indexes.timedeltas import ( TimedeltaIndex, timedelta_range, ) from pandas.tseries.frequencies import ( is_subperiod, is_superperiod, ) from pandas.tseries.offsets import ( DateOffset, Day, Nano, Tick, ) _shared_docs_kwargs: dict[str, str] = {} class Resampler(BaseGroupBy, PandasObject): """ Class for resampling datetimelike data, a groupby-like operation. See aggregate, transform, and apply functions on this object. It's easiest to use obj.resample(...) to use Resampler. Parameters ---------- obj : Series or DataFrame groupby : TimeGrouper axis : int, default 0 kind : str or None 'period', 'timestamp' to override default index treatment Returns ------- a Resampler of the appropriate type Notes ----- After resampling, see aggregate, apply, and transform functions. """ grouper: BinGrouper exclusions: frozenset[Hashable] = frozenset() # for SelectionMixin compat # to the groupby descriptor _attributes = [ "freq", "axis", "closed", "label", "convention", "loffset", "kind", "origin", "offset", ] def __init__( self, obj: FrameOrSeries, groupby: TimeGrouper, axis: int = 0, kind=None, *, selection=None, **kwargs, ): self.groupby = groupby self.keys = None self.sort = True self.axis = axis self.kind = kind self.squeeze = False self.group_keys = True self.as_index = True self.groupby._set_grouper(self._convert_obj(obj), sort=True) self.binner, self.grouper = self._get_binner() self._selection = selection @final def _shallow_copy(self, obj, **kwargs): """ return a new object with the replacement attributes """ if isinstance(obj, self._constructor): obj = obj.obj for attr in self._attributes: if attr not in kwargs: kwargs[attr] = getattr(self, attr) return self._constructor(obj, **kwargs) def __str__(self) -> str: """ Provide a nice str repr of our rolling object. 
""" attrs = ( f"{k}={getattr(self.groupby, k)}" for k in self._attributes if getattr(self.groupby, k, None) is not None ) return f"{type(self).__name__} [{', '.join(attrs)}]" def __getattr__(self, attr: str): if attr in self._internal_names_set: return object.__getattribute__(self, attr) if attr in self._attributes: return getattr(self.groupby, attr) if attr in self.obj: return self[attr] return object.__getattribute__(self, attr) # error: Signature of "obj" incompatible with supertype "BaseGroupBy" @property def obj(self) -> FrameOrSeries: # type: ignore[override] # error: Incompatible return value type (got "Optional[Any]", # expected "FrameOrSeries") return self.groupby.obj # type: ignore[return-value] @property def ax(self): # we can infer that this is a PeriodIndex/DatetimeIndex/TimedeltaIndex, # but skipping annotating bc the overrides overwhelming return self.groupby.ax @property def _from_selection(self) -> bool: """ Is the resampling from a DataFrame column or MultiIndex level. """ # upsampling and PeriodIndex resampling do not work # with selection, this state used to catch and raise an error return self.groupby is not None and ( self.groupby.key is not None or self.groupby.level is not None ) def _convert_obj(self, obj: FrameOrSeries) -> FrameOrSeries: """ Provide any conversions for the object in order to correctly handle. Parameters ---------- obj : Series or DataFrame Returns ------- Series or DataFrame """ return obj._consolidate() def _get_binner_for_time(self): raise AbstractMethodError(self) @final def _get_binner(self): """ Create the BinGrouper, assume that self.set_grouper(obj) has already been called. """ binner, bins, binlabels = self._get_binner_for_time() assert len(bins) == len(binlabels) bin_grouper = BinGrouper(bins, binlabels, indexer=self.groupby.indexer) return binner, bin_grouper @Substitution( klass="Resampler", examples=""" >>> df = pd.DataFrame({'A': [1, 2, 3, 4]}, ... index=pd.date_range('2012-08-02', periods=4)) >>> df A 2012-08-02 1 2012-08-03 2 2012-08-04 3 2012-08-05 4 To get the difference between each 2-day period's maximum and minimum value in one pass, you can do >>> df.resample('2D').pipe(lambda x: x.max() - x.min()) A 2012-08-02 1 2012-08-04 1""", ) @Appender(_pipe_template) def pipe( self, func: Callable[..., T] | tuple[Callable[..., T], str], *args, **kwargs, ) -> T: return super().pipe(func, *args, **kwargs) _agg_see_also_doc = dedent( """ See Also -------- DataFrame.groupby.aggregate : Aggregate using callable, string, dict, or list of string/callables. DataFrame.resample.transform : Transforms the Series on each group based on the given function. DataFrame.aggregate: Aggregate using one or more operations over the specified axis. """ ) _agg_examples_doc = dedent( """ Examples -------- >>> s = pd.Series([1, 2, 3, 4, 5], ... index=pd.date_range('20130101', periods=5, freq='s')) >>> s 2013-01-01 00:00:00 1 2013-01-01 00:00:01 2 2013-01-01 00:00:02 3 2013-01-01 00:00:03 4 2013-01-01 00:00:04 5 Freq: S, dtype: int64 >>> r = s.resample('2s') >>> r.agg(np.sum) 2013-01-01 00:00:00 3 2013-01-01 00:00:02 7 2013-01-01 00:00:04 5 Freq: 2S, dtype: int64 >>> r.agg(['sum', 'mean', 'max']) sum mean max 2013-01-01 00:00:00 3 1.5 2 2013-01-01 00:00:02 7 3.5 4 2013-01-01 00:00:04 5 5.0 5 >>> r.agg({'result': lambda x: x.mean() / x.std(), ... 
'total': np.sum}) result total 2013-01-01 00:00:00 2.121320 3 2013-01-01 00:00:02 4.949747 7 2013-01-01 00:00:04 NaN 5 >>> r.agg(average="mean", total="sum") average total 2013-01-01 00:00:00 1.5 3 2013-01-01 00:00:02 3.5 7 2013-01-01 00:00:04 5.0 5 """ ) @doc( _shared_docs["aggregate"], see_also=_agg_see_also_doc, examples=_agg_examples_doc, klass="DataFrame", axis="", ) def aggregate(self, func=None, *args, **kwargs): result = ResamplerWindowApply(self, func, args=args, kwargs=kwargs).agg() if result is None: how = func result = self._groupby_and_aggregate(how, *args, **kwargs) result = self._apply_loffset(result) return result agg = aggregate apply = aggregate def transform(self, arg, *args, **kwargs): """ Call function producing a like-indexed Series on each group and return a Series with the transformed values. Parameters ---------- arg : function To apply to each group. Should return a Series with the same index. Returns ------- transformed : Series Examples -------- >>> s = pd.Series([1, 2], ... index=pd.date_range('20180101', ... periods=2, ... freq='1h')) >>> s 2018-01-01 00:00:00 1 2018-01-01 01:00:00 2 Freq: H, dtype: int64 >>> resampled = s.resample('15min') >>> resampled.transform(lambda x: (x - x.mean()) / x.std()) 2018-01-01 00:00:00 NaN 2018-01-01 01:00:00 NaN Freq: H, dtype: float64 """ return self._selected_obj.groupby(self.groupby).transform(arg, *args, **kwargs) def _downsample(self, f): raise AbstractMethodError(self) def _upsample(self, f, limit=None, fill_value=None): raise AbstractMethodError(self) def _gotitem(self, key, ndim: int, subset=None): """ Sub-classes to define. Return a sliced object. Parameters ---------- key : string / list of selections ndim : {1, 2} requested ndim of result subset : object, default None subset to act on """ grouper = self.grouper if subset is None: subset = self.obj grouped = get_groupby(subset, by=None, grouper=grouper, axis=self.axis) # try the key selection try: return grouped[key] except KeyError: return grouped def _groupby_and_aggregate(self, how, *args, **kwargs): """ Re-evaluate the obj with a groupby aggregation. """ grouper = self.grouper obj = self._selected_obj grouped = get_groupby(obj, by=None, grouper=grouper, axis=self.axis) try: if isinstance(obj, ABCDataFrame) and callable(how): # Check if the function is reducing or not. result = grouped._aggregate_item_by_item(how, *args, **kwargs) else: result = grouped.aggregate(how, *args, **kwargs) except DataError: # got TypeErrors on aggregation result = grouped.apply(how, *args, **kwargs) except (AttributeError, KeyError): # we have a non-reducing function; try to evaluate # alternatively we want to evaluate only a column of the input # test_apply_to_one_column_of_df the function being applied references # a DataFrame column, but aggregate_item_by_item operates column-wise # on Series, raising AttributeError or KeyError # (depending on whether the column lookup uses getattr/__getitem__) result = grouped.apply(how, *args, **kwargs) except ValueError as err: if "Must produce aggregated value" in str(err): # raised in _aggregate_named # see test_apply_without_aggregation, test_apply_with_mutated_index pass else: raise # we have a non-reducing function # try to evaluate result = grouped.apply(how, *args, **kwargs) result = self._apply_loffset(result) return self._wrap_result(result) def _apply_loffset(self, result): """ If loffset is set, offset the result index. This is NOT an idempotent routine, it will be applied exactly once to the result. 
Parameters ---------- result : Series or DataFrame the result of resample """ # error: Cannot determine type of 'loffset' needs_offset = ( isinstance( self.loffset, # type: ignore[has-type] (DateOffset, timedelta, np.timedelta64), ) and isinstance(result.index, DatetimeIndex) and len(result.index) > 0 ) if needs_offset: # error: Cannot determine type of 'loffset' result.index = result.index + self.loffset # type: ignore[has-type] self.loffset = None return result def _get_resampler_for_grouping(self, groupby): """ Return the correct class for resampling with groupby. """ return self._resampler_for_grouping(self, groupby=groupby) def _wrap_result(self, result): """ Potentially wrap any results. """ if isinstance(result, ABCSeries) and self._selection is not None: result.name = self._selection if isinstance(result, ABCSeries) and result.empty: obj = self.obj # When index is all NaT, result is empty but index is not result.index = _asfreq_compat(obj.index[:0], freq=self.freq) result.name = getattr(obj, "name", None) return result def pad(self, limit=None): """ Forward fill the values. Parameters ---------- limit : int, optional Limit of how many values to fill. Returns ------- An upsampled Series. See Also -------- Series.fillna: Fill NA/NaN values using the specified method. DataFrame.fillna: Fill NA/NaN values using the specified method. """ return self._upsample("pad", limit=limit) ffill = pad def nearest(self, limit=None): """ Resample by using the nearest value. When resampling data, missing values may appear (e.g., when the resampling frequency is higher than the original frequency). The `nearest` method will replace ``NaN`` values that appeared in the resampled data with the value from the nearest member of the sequence, based on the index value. Missing values that existed in the original data will not be modified. If `limit` is given, fill only this many values in each direction for each of the original values. Parameters ---------- limit : int, optional Limit of how many values to fill. Returns ------- Series or DataFrame An upsampled Series or DataFrame with ``NaN`` values filled with their nearest value. See Also -------- backfill : Backward fill the new missing values in the resampled data. pad : Forward fill ``NaN`` values. Examples -------- >>> s = pd.Series([1, 2], ... index=pd.date_range('20180101', ... periods=2, ... freq='1h')) >>> s 2018-01-01 00:00:00 1 2018-01-01 01:00:00 2 Freq: H, dtype: int64 >>> s.resample('15min').nearest() 2018-01-01 00:00:00 1 2018-01-01 00:15:00 1 2018-01-01 00:30:00 2 2018-01-01 00:45:00 2 2018-01-01 01:00:00 2 Freq: 15T, dtype: int64 Limit the number of upsampled values imputed by the nearest: >>> s.resample('15min').nearest(limit=1) 2018-01-01 00:00:00 1.0 2018-01-01 00:15:00 1.0 2018-01-01 00:30:00 NaN 2018-01-01 00:45:00 2.0 2018-01-01 01:00:00 2.0 Freq: 15T, dtype: float64 """ return self._upsample("nearest", limit=limit) def backfill(self, limit=None): """ Backward fill the new missing values in the resampled data. In statistics, imputation is the process of replacing missing data with substituted values [1]_. When resampling data, missing values may appear (e.g., when the resampling frequency is higher than the original frequency). The backward fill will replace NaN values that appeared in the resampled data with the next value in the original sequence. Missing values that existed in the original data will not be modified. Parameters ---------- limit : int, optional Limit of how many values to fill. 
Returns ------- Series, DataFrame An upsampled Series or DataFrame with backward filled NaN values. See Also -------- bfill : Alias of backfill. fillna : Fill NaN values using the specified method, which can be 'backfill'. nearest : Fill NaN values with nearest neighbor starting from center. pad : Forward fill NaN values. Series.fillna : Fill NaN values in the Series using the specified method, which can be 'backfill'. DataFrame.fillna : Fill NaN values in the DataFrame using the specified method, which can be 'backfill'. References ---------- .. [1] https://en.wikipedia.org/wiki/Imputation_(statistics) Examples -------- Resampling a Series: >>> s = pd.Series([1, 2, 3], ... index=pd.date_range('20180101', periods=3, freq='h')) >>> s 2018-01-01 00:00:00 1 2018-01-01 01:00:00 2 2018-01-01 02:00:00 3 Freq: H, dtype: int64 >>> s.resample('30min').backfill() 2018-01-01 00:00:00 1 2018-01-01 00:30:00 2 2018-01-01 01:00:00 2 2018-01-01 01:30:00 3 2018-01-01 02:00:00 3 Freq: 30T, dtype: int64 >>> s.resample('15min').backfill(limit=2) 2018-01-01 00:00:00 1.0 2018-01-01 00:15:00 NaN 2018-01-01 00:30:00 2.0 2018-01-01 00:45:00 2.0 2018-01-01 01:00:00 2.0 2018-01-01 01:15:00 NaN 2018-01-01 01:30:00 3.0 2018-01-01 01:45:00 3.0 2018-01-01 02:00:00 3.0 Freq: 15T, dtype: float64 Resampling a DataFrame that has missing values: >>> df = pd.DataFrame({'a': [2, np.nan, 6], 'b': [1, 3, 5]}, ... index=pd.date_range('20180101', periods=3, ... freq='h')) >>> df a b 2018-01-01 00:00:00 2.0 1 2018-01-01 01:00:00 NaN 3 2018-01-01 02:00:00 6.0 5 >>> df.resample('30min').backfill() a b 2018-01-01 00:00:00 2.0 1 2018-01-01 00:30:00 NaN 3 2018-01-01 01:00:00 NaN 3 2018-01-01 01:30:00 6.0 5 2018-01-01 02:00:00 6.0 5 >>> df.resample('15min').backfill(limit=2) a b 2018-01-01 00:00:00 2.0 1.0 2018-01-01 00:15:00 NaN NaN 2018-01-01 00:30:00 NaN 3.0 2018-01-01 00:45:00 NaN 3.0 2018-01-01 01:00:00 NaN 3.0 2018-01-01 01:15:00 NaN NaN 2018-01-01 01:30:00 6.0 5.0 2018-01-01 01:45:00 6.0 5.0 2018-01-01 02:00:00 6.0 5.0 """ return self._upsample("backfill", limit=limit) bfill = backfill def fillna(self, method, limit=None): """ Fill missing values introduced by upsampling. In statistics, imputation is the process of replacing missing data with substituted values [1]_. When resampling data, missing values may appear (e.g., when the resampling frequency is higher than the original frequency). Missing values that existed in the original data will not be modified. Parameters ---------- method : {'pad', 'backfill', 'ffill', 'bfill', 'nearest'} Method to use for filling holes in resampled data * 'pad' or 'ffill': use previous valid observation to fill gap (forward fill). * 'backfill' or 'bfill': use next valid observation to fill gap. * 'nearest': use nearest valid observation to fill gap. limit : int, optional Limit of how many consecutive missing values to fill. Returns ------- Series or DataFrame An upsampled Series or DataFrame with missing values filled. See Also -------- backfill : Backward fill NaN values in the resampled data. pad : Forward fill NaN values in the resampled data. nearest : Fill NaN values in the resampled data with nearest neighbor starting from center. interpolate : Fill NaN values using interpolation. Series.fillna : Fill NaN values in the Series using the specified method, which can be 'bfill' and 'ffill'. DataFrame.fillna : Fill NaN values in the DataFrame using the specified method, which can be 'bfill' and 'ffill'. References ---------- .. 
[1] https://en.wikipedia.org/wiki/Imputation_(statistics) Examples -------- Resampling a Series: >>> s = pd.Series([1, 2, 3], ... index=pd.date_range('20180101', periods=3, freq='h')) >>> s 2018-01-01 00:00:00 1 2018-01-01 01:00:00 2 2018-01-01 02:00:00 3 Freq: H, dtype: int64 Without filling the missing values you get: >>> s.resample("30min").asfreq() 2018-01-01 00:00:00 1.0 2018-01-01 00:30:00 NaN 2018-01-01 01:00:00 2.0 2018-01-01 01:30:00 NaN 2018-01-01 02:00:00 3.0 Freq: 30T, dtype: float64 >>> s.resample('30min').fillna("backfill") 2018-01-01 00:00:00 1 2018-01-01 00:30:00 2 2018-01-01 01:00:00 2 2018-01-01 01:30:00 3 2018-01-01 02:00:00 3 Freq: 30T, dtype: int64 >>> s.resample('15min').fillna("backfill", limit=2) 2018-01-01 00:00:00 1.0 2018-01-01 00:15:00 NaN 2018-01-01 00:30:00 2.0 2018-01-01 00:45:00 2.0 2018-01-01 01:00:00 2.0 2018-01-01 01:15:00 NaN 2018-01-01 01:30:00 3.0 2018-01-01 01:45:00 3.0 2018-01-01 02:00:00 3.0 Freq: 15T, dtype: float64 >>> s.resample('30min').fillna("pad") 2018-01-01 00:00:00 1 2018-01-01 00:30:00 1 2018-01-01 01:00:00 2 2018-01-01 01:30:00 2 2018-01-01 02:00:00 3 Freq: 30T, dtype: int64 >>> s.resample('30min').fillna("nearest") 2018-01-01 00:00:00 1 2018-01-01 00:30:00 2 2018-01-01 01:00:00 2 2018-01-01 01:30:00 3 2018-01-01 02:00:00 3 Freq: 30T, dtype: int64 Missing values present before the upsampling are not affected. >>> sm = pd.Series([1, None, 3], ... index=pd.date_range('20180101', periods=3, freq='h')) >>> sm 2018-01-01 00:00:00 1.0 2018-01-01 01:00:00 NaN 2018-01-01 02:00:00 3.0 Freq: H, dtype: float64 >>> sm.resample('30min').fillna('backfill') 2018-01-01 00:00:00 1.0 2018-01-01 00:30:00 NaN 2018-01-01 01:00:00 NaN 2018-01-01 01:30:00 3.0 2018-01-01 02:00:00 3.0 Freq: 30T, dtype: float64 >>> sm.resample('30min').fillna('pad') 2018-01-01 00:00:00 1.0 2018-01-01 00:30:00 1.0 2018-01-01 01:00:00 NaN 2018-01-01 01:30:00 NaN 2018-01-01 02:00:00 3.0 Freq: 30T, dtype: float64 >>> sm.resample('30min').fillna('nearest') 2018-01-01 00:00:00 1.0 2018-01-01 00:30:00 NaN 2018-01-01 01:00:00 NaN 2018-01-01 01:30:00 3.0 2018-01-01 02:00:00 3.0 Freq: 30T, dtype: float64 DataFrame resampling is done column-wise. All the same options are available. >>> df = pd.DataFrame({'a': [2, np.nan, 6], 'b': [1, 3, 5]}, ... index=pd.date_range('20180101', periods=3, ... freq='h')) >>> df a b 2018-01-01 00:00:00 2.0 1 2018-01-01 01:00:00 NaN 3 2018-01-01 02:00:00 6.0 5 >>> df.resample('30min').fillna("bfill") a b 2018-01-01 00:00:00 2.0 1 2018-01-01 00:30:00 NaN 3 2018-01-01 01:00:00 NaN 3 2018-01-01 01:30:00 6.0 5 2018-01-01 02:00:00 6.0 5 """ return self._upsample(method, limit=limit) @deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "method"]) @doc(NDFrame.interpolate, **_shared_docs_kwargs) def interpolate( self, method="linear", axis=0, limit=None, inplace=False, limit_direction="forward", limit_area=None, downcast=None, **kwargs, ): """ Interpolate values according to different methods. """ result = self._upsample("asfreq") return result.interpolate( method=method, axis=axis, limit=limit, inplace=inplace, limit_direction=limit_direction, limit_area=limit_area, downcast=downcast, **kwargs, ) def asfreq(self, fill_value=None): """ Return the values at the new freq, essentially a reindex. Parameters ---------- fill_value : scalar, optional Value to use for missing values, applied during upsampling (note this does not fill NaNs that already were present). Returns ------- DataFrame or Series Values at the specified freq. 
        See Also
        --------
        Series.asfreq: Convert TimeSeries to specified frequency.
        DataFrame.asfreq: Convert TimeSeries to specified frequency.
        """
        return self._upsample("asfreq", fill_value=fill_value)

    def std(self, ddof=1, *args, **kwargs):
        """
        Compute standard deviation of groups, excluding missing values.

        Parameters
        ----------
        ddof : int, default 1
            Degrees of freedom.

        Returns
        -------
        DataFrame or Series
            Standard deviation of values within each group.
        """
        nv.validate_resampler_func("std", args, kwargs)
        # error: Unexpected keyword argument "ddof" for "_downsample"
        return self._downsample("std", ddof=ddof)  # type: ignore[call-arg]

    def var(self, ddof=1, *args, **kwargs):
        """
        Compute variance of groups, excluding missing values.

        Parameters
        ----------
        ddof : int, default 1
            Degrees of freedom.

        Returns
        -------
        DataFrame or Series
            Variance of values within each group.
        """
        nv.validate_resampler_func("var", args, kwargs)
        # error: Unexpected keyword argument "ddof" for "_downsample"
        return self._downsample("var", ddof=ddof)  # type: ignore[call-arg]

    @doc(GroupBy.size)
    def size(self):
        result = self._downsample("size")
        if not len(self.ax):
            from pandas import Series

            if self._selected_obj.ndim == 1:
                name = self._selected_obj.name
            else:
                name = None
            result = Series([], index=result.index, dtype="int64", name=name)
        return result

    @doc(GroupBy.count)
    def count(self):
        result = self._downsample("count")
        if not len(self.ax):
            if self._selected_obj.ndim == 1:
                result = type(self._selected_obj)(
                    [], index=result.index, dtype="int64",
                    name=self._selected_obj.name
                )
            else:
                from pandas import DataFrame

                result = DataFrame(
                    [], index=result.index, columns=result.columns,
                    dtype="int64"
                )

        return result

    def quantile(self, q=0.5, **kwargs):
        """
        Return value at the given quantile.

        Parameters
        ----------
        q : float or array-like, default 0.5 (50% quantile)

        Returns
        -------
        DataFrame or Series
            Quantile of values within each group.

        See Also
        --------
        Series.quantile
            Return a series, where the index is q and the values are the
            quantiles.
        DataFrame.quantile
            Return a DataFrame, where the columns are the columns of self,
            and the values are the quantiles.
        DataFrameGroupBy.quantile
            Return a DataFrame, where the columns are groupby columns,
            and the values are its quantiles.
        """
        # error: Unexpected keyword argument "q" for "_downsample"
        # error: Too many arguments for "_downsample"
        return self._downsample("quantile", q=q, **kwargs)  # type: ignore[call-arg]


# downsample methods
for method in ["sum", "prod", "min", "max", "first", "last"]:

    def f(self, _method=method, min_count=0, *args, **kwargs):
        nv.validate_resampler_func(_method, args, kwargs)
        return self._downsample(_method, min_count=min_count)

    f.__doc__ = getattr(GroupBy, method).__doc__
    setattr(Resampler, method, f)


# downsample methods
for method in ["mean", "sem", "median", "ohlc"]:

    def g(self, _method=method, *args, **kwargs):
        nv.validate_resampler_func(_method, args, kwargs)
        return self._downsample(_method)

    g.__doc__ = getattr(GroupBy, method).__doc__
    setattr(Resampler, method, g)


# series only methods
for method in ["nunique"]:

    def h(self, _method=method):
        return self._downsample(_method)

    h.__doc__ = getattr(SeriesGroupBy, method).__doc__
    setattr(Resampler, method, h)


class _GroupByMixin(PandasObject):
    """
    Provide the groupby facilities.
""" _attributes: list[str] # in practice the same as Resampler._attributes _selection: IndexLabel | None = None def __init__(self, obj, parent=None, groupby=None, **kwargs): # reached via ._gotitem and _get_resampler_for_grouping if parent is None: parent = obj # initialize our GroupByMixin object with # the resampler attributes for attr in self._attributes: setattr(self, attr, kwargs.get(attr, getattr(parent, attr))) self._selection = kwargs.get("selection") self.binner = parent.binner self._groupby = groupby self._groupby.mutated = True self._groupby.grouper.mutated = True self.groupby = copy.copy(parent.groupby) @no_type_check def _apply(self, f, *args, **kwargs): """ Dispatch to _upsample; we are stripping all of the _upsample kwargs and performing the original function call on the grouped object. """ def func(x): x = self._shallow_copy(x, groupby=self.groupby) if isinstance(f, str): return getattr(x, f)(**kwargs) return x.apply(f, *args, **kwargs) result = self._groupby.apply(func) return self._wrap_result(result) _upsample = _apply _downsample = _apply _groupby_and_aggregate = _apply @final def _gotitem(self, key, ndim, subset=None): """ Sub-classes to define. Return a sliced object. Parameters ---------- key : string / list of selections ndim : {1, 2} requested ndim of result subset : object, default None subset to act on """ # create a new object to prevent aliasing if subset is None: # error: "GotItemMixin" has no attribute "obj" subset = self.obj # type: ignore[attr-defined] # we need to make a shallow copy of ourselves # with the same groupby kwargs = {attr: getattr(self, attr) for attr in self._attributes} # Try to select from a DataFrame, falling back to a Series try: groupby = self._groupby[key] except IndexError: groupby = self._groupby selection = None if subset.ndim == 2 and ( (lib.is_scalar(key) and key in subset) or lib.is_list_like(key) ): selection = key new_rs = type(self)( subset, groupby=groupby, parent=self, selection=selection, **kwargs ) return new_rs class DatetimeIndexResampler(Resampler): @property def _resampler_for_grouping(self): return DatetimeIndexResamplerGroupby def _get_binner_for_time(self): # this is how we are actually creating the bins if self.kind == "period": return self.groupby._get_time_period_bins(self.ax) return self.groupby._get_time_bins(self.ax) def _downsample(self, how, **kwargs): """ Downsample the cython defined function. Parameters ---------- how : string / cython mapped function **kwargs : kw args passed to how function """ how = com.get_cython_func(how) or how ax = self.ax obj = self._selected_obj if not len(ax): # reset to the new freq obj = obj.copy() obj.index = obj.index._with_freq(self.freq) assert obj.index.freq == self.freq, (obj.index.freq, self.freq) return obj # do we have a regular frequency # error: Item "None" of "Optional[Any]" has no attribute "binlabels" if ( (ax.freq is not None or ax.inferred_freq is not None) and len(self.grouper.binlabels) > len(ax) and how is None ): # let's do an asfreq return self.asfreq() # we are downsampling # we want to call the actual grouper method here result = obj.groupby(self.grouper, axis=self.axis).aggregate(how, **kwargs) result = self._apply_loffset(result) return self._wrap_result(result) def _adjust_binner_for_upsample(self, binner): """ Adjust our binner when upsampling. 
The range of a new index should not be outside specified range """ if self.closed == "right": binner = binner[1:] else: binner = binner[:-1] return binner def _upsample(self, method, limit=None, fill_value=None): """ Parameters ---------- method : string {'backfill', 'bfill', 'pad', 'ffill', 'asfreq'} method for upsampling limit : int, default None Maximum size gap to fill when reindexing fill_value : scalar, default None Value to use for missing values See Also -------- .fillna: Fill NA/NaN values using the specified method. """ if self.axis: raise AssertionError("axis must be 0") if self._from_selection: raise ValueError( "Upsampling from level= or on= selection " "is not supported, use .set_index(...) " "to explicitly set index to datetime-like" ) ax = self.ax obj = self._selected_obj binner = self.binner res_index = self._adjust_binner_for_upsample(binner) # if we have the same frequency as our axis, then we are equal sampling if ( limit is None and to_offset(ax.inferred_freq) == self.freq and len(obj) == len(res_index) ): result = obj.copy() result.index = res_index else: result = obj.reindex( res_index, method=method, limit=limit, fill_value=fill_value ) result = self._apply_loffset(result) return self._wrap_result(result) def _wrap_result(self, result): result = super()._wrap_result(result) # we may have a different kind that we were asked originally # convert if needed if self.kind == "period" and not isinstance(result.index, PeriodIndex): result.index = result.index.to_period(self.freq) return result class DatetimeIndexResamplerGroupby(_GroupByMixin, DatetimeIndexResampler): """ Provides a resample of a groupby implementation """ @property def _constructor(self): return DatetimeIndexResampler class PeriodIndexResampler(DatetimeIndexResampler): @property def _resampler_for_grouping(self): return PeriodIndexResamplerGroupby def _get_binner_for_time(self): if self.kind == "timestamp": return super()._get_binner_for_time() return self.groupby._get_period_bins(self.ax) def _convert_obj(self, obj: FrameOrSeries) -> FrameOrSeries: obj = super()._convert_obj(obj) if self._from_selection: # see GH 14008, GH 12871 msg = ( "Resampling from level= or on= selection " "with a PeriodIndex is not currently supported, " "use .set_index(...) to explicitly set index" ) raise NotImplementedError(msg) if self.loffset is not None: # Cannot apply loffset/timedelta to PeriodIndex -> convert to # timestamps self.kind = "timestamp" # convert to timestamp if self.kind == "timestamp": obj = obj.to_timestamp(how=self.convention) return obj def _downsample(self, how, **kwargs): """ Downsample the cython defined function. 
Parameters ---------- how : string / cython mapped function **kwargs : kw args passed to how function """ # we may need to actually resample as if we are timestamps if self.kind == "timestamp": return super()._downsample(how, **kwargs) how = com.get_cython_func(how) or how ax = self.ax if is_subperiod(ax.freq, self.freq): # Downsampling return self._groupby_and_aggregate(how, **kwargs) elif is_superperiod(ax.freq, self.freq): if how == "ohlc": # GH #13083 # upsampling to subperiods is handled as an asfreq, which works # for pure aggregating/reducing methods # OHLC reduces along the time dimension, but creates multiple # values for each period -> handle by _groupby_and_aggregate() return self._groupby_and_aggregate(how) return self.asfreq() elif ax.freq == self.freq: return self.asfreq() raise IncompatibleFrequency( f"Frequency {ax.freq} cannot be resampled to {self.freq}, " "as they are not sub or super periods" ) def _upsample(self, method, limit=None, fill_value=None): """ Parameters ---------- method : {'backfill', 'bfill', 'pad', 'ffill'} Method for upsampling. limit : int, default None Maximum size gap to fill when reindexing. fill_value : scalar, default None Value to use for missing values. See Also -------- .fillna: Fill NA/NaN values using the specified method. """ # we may need to actually resample as if we are timestamps if self.kind == "timestamp": return super()._upsample(method, limit=limit, fill_value=fill_value) ax = self.ax obj = self.obj new_index = self.binner # Start vs. end of period memb = ax.asfreq(self.freq, how=self.convention) # Get the fill indexer indexer = memb.get_indexer(new_index, method=method, limit=limit) new_obj = _take_new_index( obj, indexer, new_index, axis=self.axis, ) return self._wrap_result(new_obj) class PeriodIndexResamplerGroupby(_GroupByMixin, PeriodIndexResampler): """ Provides a resample of a groupby implementation. """ @property def _constructor(self): return PeriodIndexResampler class TimedeltaIndexResampler(DatetimeIndexResampler): @property def _resampler_for_grouping(self): return TimedeltaIndexResamplerGroupby def _get_binner_for_time(self): return self.groupby._get_time_delta_bins(self.ax) def _adjust_binner_for_upsample(self, binner): """ Adjust our binner when upsampling. The range of a new index is allowed to be greater than original range so we don't need to change the length of a binner, GH 13022 """ return binner class TimedeltaIndexResamplerGroupby(_GroupByMixin, TimedeltaIndexResampler): """ Provides a resample of a groupby implementation. """ @property def _constructor(self): return TimedeltaIndexResampler def get_resampler(obj, kind=None, **kwds): """ Create a TimeGrouper and return our resampler. """ tg = TimeGrouper(**kwds) return tg._get_resampler(obj, kind=kind) get_resampler.__doc__ = Resampler.__doc__ def get_resampler_for_grouping( groupby, rule, how=None, fill_method=None, limit=None, kind=None, on=None, **kwargs ): """ Return our appropriate resampler when grouping as well. """ # .resample uses 'on' similar to how .groupby uses 'key' tg = TimeGrouper(freq=rule, key=on, **kwargs) resampler = tg._get_resampler(groupby.obj, kind=kind) return resampler._get_resampler_for_grouping(groupby=groupby) class TimeGrouper(Grouper): """ Custom groupby class for time-interval grouping. 
Parameters ---------- freq : pandas date offset or offset alias for identifying bin edges closed : closed end of interval; 'left' or 'right' label : interval boundary to use for labeling; 'left' or 'right' convention : {'start', 'end', 'e', 's'} If axis is PeriodIndex """ _attributes = Grouper._attributes + ( "closed", "label", "how", "loffset", "kind", "convention", "origin", "offset", ) def __init__( self, freq="Min", closed: Literal["left", "right"] | None = None, label: str | None = None, how="mean", axis=0, fill_method=None, limit=None, loffset=None, kind: str | None = None, convention: str | None = None, base: int | None = None, origin: str | TimestampConvertibleTypes = "start_day", offset: TimedeltaConvertibleTypes | None = None, **kwargs, ): # Check for correctness of the keyword arguments which would # otherwise silently use the default if misspelled if label not in {None, "left", "right"}: raise ValueError(f"Unsupported value {label} for `label`") if closed not in {None, "left", "right"}: raise ValueError(f"Unsupported value {closed} for `closed`") if convention not in {None, "start", "end", "e", "s"}: raise ValueError(f"Unsupported value {convention} for `convention`") freq = to_offset(freq) end_types = {"M", "A", "Q", "BM", "BA", "BQ", "W"} rule = freq.rule_code if rule in end_types or ("-" in rule and rule[: rule.find("-")] in end_types): if closed is None: closed = "right" if label is None: label = "right" else: # The backward resample sets ``closed`` to ``'right'`` by default # since the last value should be considered as the edge point for # the last bin. When origin in "end" or "end_day", the value for a # specific ``Timestamp`` index stands for the resample result from # the current ``Timestamp`` minus ``freq`` to the current # ``Timestamp`` with a right close. if origin in ["end", "end_day"]: if closed is None: closed = "right" if label is None: label = "right" else: if closed is None: closed = "left" if label is None: label = "left" self.closed = closed self.label = label self.kind = kind self.convention = convention or "E" self.convention = self.convention.lower() self.how = how self.fill_method = fill_method self.limit = limit if origin in ("epoch", "start", "start_day", "end", "end_day"): self.origin = origin else: try: self.origin = Timestamp(origin) except (ValueError, TypeError) as err: raise ValueError( "'origin' should be equal to 'epoch', 'start', 'start_day', " "'end', 'end_day' or " f"should be a Timestamp convertible type. Got '{origin}' instead." ) from err try: self.offset = Timedelta(offset) if offset is not None else None except (ValueError, TypeError) as err: raise ValueError( "'offset' should be a Timedelta convertible type. " f"Got '{offset}' instead." ) from err # always sort time groupers kwargs["sort"] = True # Handle deprecated arguments since v1.1.0 of `base` and `loffset` (GH #31809) if base is not None and offset is not None: raise ValueError("'offset' and 'base' cannot be present at the same time") if base and isinstance(freq, Tick): # this conversion handle the default behavior of base and the # special case of GH #10530. Indeed in case when dealing with # a TimedeltaIndex base was treated as a 'pure' offset even though # the default behavior of base was equivalent of a modulo on # freq_nanos. 
self.offset = Timedelta(base * freq.nanos // freq.n) if isinstance(loffset, str): loffset = to_offset(loffset) self.loffset = loffset super().__init__(freq=freq, axis=axis, **kwargs) def _get_resampler(self, obj, kind=None): """ Return my resampler or raise if we have an invalid axis. Parameters ---------- obj : input object kind : string, optional 'period','timestamp','timedelta' are valid Returns ------- a Resampler Raises ------ TypeError if incompatible axis """ self._set_grouper(obj) ax = self.ax if isinstance(ax, DatetimeIndex): return DatetimeIndexResampler(obj, groupby=self, kind=kind, axis=self.axis) elif isinstance(ax, PeriodIndex) or kind == "period": return PeriodIndexResampler(obj, groupby=self, kind=kind, axis=self.axis) elif isinstance(ax, TimedeltaIndex): return TimedeltaIndexResampler(obj, groupby=self, axis=self.axis) raise TypeError( "Only valid with DatetimeIndex, " "TimedeltaIndex or PeriodIndex, " f"but got an instance of '{type(ax).__name__}'" ) def _get_grouper(self, obj, validate: bool = True): # create the resampler and return our binner r = self._get_resampler(obj) return r.binner, r.grouper, r.obj def _get_time_bins(self, ax: DatetimeIndex): if not isinstance(ax, DatetimeIndex): raise TypeError( "axis must be a DatetimeIndex, but got " f"an instance of {type(ax).__name__}" ) if len(ax) == 0: binner = labels = DatetimeIndex(data=[], freq=self.freq, name=ax.name) return binner, [], labels first, last = _get_timestamp_range_edges( ax.min(), ax.max(), self.freq, closed=self.closed, origin=self.origin, offset=self.offset, ) # GH #12037 # use first/last directly instead of call replace() on them # because replace() will swallow the nanosecond part # thus last bin maybe slightly before the end if the end contains # nanosecond part and lead to `Values falls after last bin` error # GH 25758: If DST lands at midnight (e.g. 
'America/Havana'), user feedback # has noted that ambiguous=True provides the most sensible result binner = labels = date_range( freq=self.freq, start=first, end=last, tz=ax.tz, name=ax.name, ambiguous=True, nonexistent="shift_forward", ) ax_values = ax.asi8 binner, bin_edges = self._adjust_bin_edges(binner, ax_values) # general version, knowing nothing about relative frequencies bins = lib.generate_bins_dt64( ax_values, bin_edges, self.closed, hasnans=ax.hasnans ) if self.closed == "right": labels = binner if self.label == "right": labels = labels[1:] elif self.label == "right": labels = labels[1:] if ax.hasnans: binner = binner.insert(0, NaT) labels = labels.insert(0, NaT) # if we end up with more labels than bins # adjust the labels # GH4076 if len(bins) < len(labels): labels = labels[: len(bins)] return binner, bins, labels def _adjust_bin_edges(self, binner, ax_values): # Some hacks for > daily data, see #1471, #1458, #1483 if self.freq != "D" and is_superperiod(self.freq, "D"): if self.closed == "right": # GH 21459, GH 9119: Adjust the bins relative to the wall time bin_edges = binner.tz_localize(None) bin_edges = bin_edges + timedelta(1) - Nano(1) bin_edges = bin_edges.tz_localize(binner.tz).asi8 else: bin_edges = binner.asi8 # intraday values on last day if bin_edges[-2] > ax_values.max(): bin_edges = bin_edges[:-1] binner = binner[:-1] else: bin_edges = binner.asi8 return binner, bin_edges def _get_time_delta_bins(self, ax: TimedeltaIndex): if not isinstance(ax, TimedeltaIndex): raise TypeError( "axis must be a TimedeltaIndex, but got " f"an instance of {type(ax).__name__}" ) if not len(ax): binner = labels = TimedeltaIndex(data=[], freq=self.freq, name=ax.name) return binner, [], labels start, end = ax.min(), ax.max() labels = binner = timedelta_range( start=start, end=end, freq=self.freq, name=ax.name ) end_stamps = labels + self.freq bins = ax.searchsorted(end_stamps, side="left") if self.offset: # GH 10530 & 31809 labels += self.offset if self.loffset: # GH 33498 labels += self.loffset return binner, bins, labels def _get_time_period_bins(self, ax: DatetimeIndex): if not isinstance(ax, DatetimeIndex): raise TypeError( "axis must be a DatetimeIndex, but got " f"an instance of {type(ax).__name__}" ) freq = self.freq if not len(ax): binner = labels = PeriodIndex(data=[], freq=freq, name=ax.name) return binner, [], labels labels = binner = period_range(start=ax[0], end=ax[-1], freq=freq, name=ax.name) end_stamps = (labels + freq).asfreq(freq, "s").to_timestamp() if ax.tz: end_stamps = end_stamps.tz_localize(ax.tz) bins = ax.searchsorted(end_stamps, side="left") return binner, bins, labels def _get_period_bins(self, ax: PeriodIndex): if not isinstance(ax, PeriodIndex): raise TypeError( "axis must be a PeriodIndex, but got " f"an instance of {type(ax).__name__}" ) memb = ax.asfreq(self.freq, how=self.convention) # NaT handling as in pandas._lib.lib.generate_bins_dt64() nat_count = 0 if memb.hasnans: nat_count = np.sum(memb._isnan) memb = memb[~memb._isnan] if not len(memb): # index contains no valid (non-NaT) values bins = np.array([], dtype=np.int64) binner = labels = PeriodIndex(data=[], freq=self.freq, name=ax.name) if len(ax) > 0: # index is all NaT binner, bins, labels = _insert_nat_bin(binner, bins, labels, len(ax)) return binner, bins, labels freq_mult = self.freq.n start = ax.min().asfreq(self.freq, how=self.convention) end = ax.max().asfreq(self.freq, how="end") bin_shift = 0 if isinstance(self.freq, Tick): # GH 23882 & 31809: get adjusted bin edge labels with 'origin' # 
and 'origin' support. This call only makes sense if the freq is a # Tick since offset and origin are only used in those cases. # Not doing this check could create an extra empty bin. p_start, end = _get_period_range_edges( start, end, self.freq, closed=self.closed, origin=self.origin, offset=self.offset, ) # Get offset for bin edge (not label edge) adjustment start_offset = Period(start, self.freq) - Period(p_start, self.freq) # error: Item "Period" of "Union[Period, Any]" has no attribute "n" bin_shift = start_offset.n % freq_mult # type: ignore[union-attr] start = p_start labels = binner = period_range( start=start, end=end, freq=self.freq, name=ax.name ) i8 = memb.asi8 # when upsampling to subperiods, we need to generate enough bins expected_bins_count = len(binner) * freq_mult i8_extend = expected_bins_count - (i8[-1] - i8[0]) rng = np.arange(i8[0], i8[-1] + i8_extend, freq_mult) rng += freq_mult # adjust bin edge indexes to account for base rng -= bin_shift # Wrap in PeriodArray for PeriodArray.searchsorted prng = type(memb._data)(rng, dtype=memb.dtype) bins = memb.searchsorted(prng, side="left") if nat_count > 0: binner, bins, labels = _insert_nat_bin(binner, bins, labels, nat_count) return binner, bins, labels def _take_new_index( obj: FrameOrSeries, indexer: npt.NDArray[np.intp], new_index: Index, axis: int = 0 ) -> FrameOrSeries: if isinstance(obj, ABCSeries): new_values = algos.take_nd(obj._values, indexer) # error: Incompatible return value type (got "Series", expected "FrameOrSeries") return obj._constructor( # type: ignore[return-value] new_values, index=new_index, name=obj.name ) elif isinstance(obj, ABCDataFrame): if axis == 1: raise NotImplementedError("axis 1 is not supported") new_mgr = obj._mgr.reindex_indexer(new_axis=new_index, indexer=indexer, axis=1) # error: Incompatible return value type # (got "DataFrame", expected "FrameOrSeries") return obj._constructor(new_mgr) # type: ignore[return-value] else: raise ValueError("'obj' should be either a Series or a DataFrame") def _get_timestamp_range_edges( first: Timestamp, last: Timestamp, freq: BaseOffset, closed: Literal["right", "left"] = "left", origin="start_day", offset: Timedelta | None = None, ) -> tuple[Timestamp, Timestamp]: """ Adjust the `first` Timestamp to the preceding Timestamp that resides on the provided offset. Adjust the `last` Timestamp to the following Timestamp that resides on the provided offset. Input Timestamps that already reside on the offset will be adjusted depending on the type of offset and the `closed` parameter. Parameters ---------- first : pd.Timestamp The beginning Timestamp of the range to be adjusted. last : pd.Timestamp The ending Timestamp of the range to be adjusted. freq : pd.DateOffset The dateoffset to which the Timestamps will be adjusted. closed : {'right', 'left'}, default "left" Which side of bin interval is closed. origin : {'epoch', 'start', 'start_day'} or Timestamp, default 'start_day' The timestamp on which to adjust the grouping. The timezone of origin must match the timezone of the index. If a timestamp is not used, these values are also supported: - 'epoch': `origin` is 1970-01-01 - 'start': `origin` is the first value of the timeseries - 'start_day': `origin` is the first day at midnight of the timeseries offset : pd.Timedelta, default is None An offset timedelta added to the origin. Returns ------- A tuple of length 2, containing the adjusted pd.Timestamp objects. 
""" if isinstance(freq, Tick): index_tz = first.tz if isinstance(origin, Timestamp) and (origin.tz is None) != (index_tz is None): raise ValueError("The origin must have the same timezone as the index.") elif origin == "epoch": # set the epoch based on the timezone to have similar bins results when # resampling on the same kind of indexes on different timezones origin = Timestamp("1970-01-01", tz=index_tz) if isinstance(freq, Day): # _adjust_dates_anchored assumes 'D' means 24H, but first/last # might contain a DST transition (23H, 24H, or 25H). # So "pretend" the dates are naive when adjusting the endpoints first = first.tz_localize(None) last = last.tz_localize(None) if isinstance(origin, Timestamp): origin = origin.tz_localize(None) first, last = _adjust_dates_anchored( first, last, freq, closed=closed, origin=origin, offset=offset ) if isinstance(freq, Day): first = first.tz_localize(index_tz) last = last.tz_localize(index_tz) else: first = first.normalize() last = last.normalize() if closed == "left": first = Timestamp(freq.rollback(first)) else: first = Timestamp(first - freq) last = Timestamp(last + freq) return first, last def _get_period_range_edges( first: Period, last: Period, freq: BaseOffset, closed: Literal["right", "left"] = "left", origin="start_day", offset: Timedelta | None = None, ) -> tuple[Period, Period]: """ Adjust the provided `first` and `last` Periods to the respective Period of the given offset that encompasses them. Parameters ---------- first : pd.Period The beginning Period of the range to be adjusted. last : pd.Period The ending Period of the range to be adjusted. freq : pd.DateOffset The freq to which the Periods will be adjusted. closed : {'right', 'left'}, default "left" Which side of bin interval is closed. origin : {'epoch', 'start', 'start_day'}, Timestamp, default 'start_day' The timestamp on which to adjust the grouping. The timezone of origin must match the timezone of the index. If a timestamp is not used, these values are also supported: - 'epoch': `origin` is 1970-01-01 - 'start': `origin` is the first value of the timeseries - 'start_day': `origin` is the first day at midnight of the timeseries offset : pd.Timedelta, default is None An offset timedelta added to the origin. Returns ------- A tuple of length 2, containing the adjusted pd.Period objects. 
""" if not all(isinstance(obj, Period) for obj in [first, last]): raise TypeError("'first' and 'last' must be instances of type Period") # GH 23882 first_ts = first.to_timestamp() last_ts = last.to_timestamp() adjust_first = not freq.is_on_offset(first_ts) adjust_last = freq.is_on_offset(last_ts) first_ts, last_ts = _get_timestamp_range_edges( first_ts, last_ts, freq, closed=closed, origin=origin, offset=offset ) first = (first_ts + int(adjust_first) * freq).to_period(freq) last = (last_ts - int(adjust_last) * freq).to_period(freq) return first, last def _insert_nat_bin( binner: PeriodIndex, bins: np.ndarray, labels: PeriodIndex, nat_count: int ) -> tuple[PeriodIndex, np.ndarray, PeriodIndex]: # NaT handling as in pandas._lib.lib.generate_bins_dt64() # shift bins by the number of NaT assert nat_count > 0 bins += nat_count bins = np.insert(bins, 0, nat_count) binner = binner.insert(0, NaT) labels = labels.insert(0, NaT) return binner, bins, labels def _adjust_dates_anchored( first: Timestamp, last: Timestamp, freq: Tick, closed: Literal["right", "left"] = "right", origin="start_day", offset: Timedelta | None = None, ) -> tuple[Timestamp, Timestamp]: # First and last offsets should be calculated from the start day to fix an # error cause by resampling across multiple days when a one day period is # not a multiple of the frequency. See GH 8683 # To handle frequencies that are not multiple or divisible by a day we let # the possibility to define a fixed origin timestamp. See GH 31809 origin_nanos = 0 # origin == "epoch" if origin == "start_day": origin_nanos = first.normalize().value elif origin == "start": origin_nanos = first.value elif isinstance(origin, Timestamp): origin_nanos = origin.value elif origin in ["end", "end_day"]: origin = last if origin == "end" else last.ceil("D") sub_freq_times = (origin.value - first.value) // freq.nanos if closed == "left": sub_freq_times += 1 first = origin - sub_freq_times * freq origin_nanos = first.value origin_nanos += offset.value if offset else 0 # GH 10117 & GH 19375. If first and last contain timezone information, # Perform the calculation in UTC in order to avoid localizing on an # Ambiguous or Nonexistent time. first_tzinfo = first.tzinfo last_tzinfo = last.tzinfo if first_tzinfo is not None: first = first.tz_convert("UTC") if last_tzinfo is not None: last = last.tz_convert("UTC") foffset = (first.value - origin_nanos) % freq.nanos loffset = (last.value - origin_nanos) % freq.nanos if closed == "right": if foffset > 0: # roll back fresult = first.value - foffset else: fresult = first.value - freq.nanos if loffset > 0: # roll forward lresult = last.value + (freq.nanos - loffset) else: # already the end of the road lresult = last.value else: # closed == 'left' if foffset > 0: fresult = first.value - foffset else: # start of the road fresult = first.value if loffset > 0: # roll forward lresult = last.value + (freq.nanos - loffset) else: lresult = last.value + freq.nanos fresult = Timestamp(fresult) lresult = Timestamp(lresult) if first_tzinfo is not None: fresult = fresult.tz_localize("UTC").tz_convert(first_tzinfo) if last_tzinfo is not None: lresult = lresult.tz_localize("UTC").tz_convert(last_tzinfo) return fresult, lresult def asfreq( obj: FrameOrSeries, freq, method=None, how=None, normalize: bool = False, fill_value=None, ) -> FrameOrSeries: """ Utility frequency conversion method for Series/DataFrame. See :meth:`pandas.NDFrame.asfreq` for full documentation. 
""" if isinstance(obj.index, PeriodIndex): if method is not None: raise NotImplementedError("'method' argument is not supported") if how is None: how = "E" new_obj = obj.copy() new_obj.index = obj.index.asfreq(freq, how=how) elif len(obj.index) == 0: new_obj = obj.copy() new_obj.index = _asfreq_compat(obj.index, freq) else: dti = date_range(obj.index.min(), obj.index.max(), freq=freq) dti.name = obj.index.name new_obj = obj.reindex(dti, method=method, fill_value=fill_value) if normalize: new_obj.index = new_obj.index.normalize() return new_obj def _asfreq_compat(index: DatetimeIndex | PeriodIndex | TimedeltaIndex, freq): """ Helper to mimic asfreq on (empty) DatetimeIndex and TimedeltaIndex. Parameters ---------- index : PeriodIndex, DatetimeIndex, or TimedeltaIndex freq : DateOffset Returns ------- same type as index """ if len(index) != 0: # This should never be reached, always checked by the caller raise ValueError( "Can only set arbitrary freq for empty DatetimeIndex or TimedeltaIndex" ) new_index: Index if isinstance(index, PeriodIndex): new_index = index.asfreq(freq=freq) elif isinstance(index, DatetimeIndex): new_index = DatetimeIndex([], dtype=index.dtype, freq=freq, name=index.name) elif isinstance(index, TimedeltaIndex): new_index = TimedeltaIndex([], dtype=index.dtype, freq=freq, name=index.name) else: # pragma: no cover raise TypeError(type(index)) return new_index
dsm054/pandas
pandas/core/resample.py
Python
bsd-3-clause
67,558
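As a quick, hedged illustration of the code paths in the record above — subperiod downsampling, fill-based upsampling, and the `origin`/`offset` anchoring handled by `_get_timestamp_range_edges()` and `_adjust_dates_anchored()` — the following sketch uses made-up data and frequencies; it is not taken from the source file:

import pandas as pd

# PeriodIndex paths: "M" -> "Q" is a subperiod relation (grouped aggregation),
# while "M" -> "D" is a superperiod relation (fill-based upsampling).
s = pd.Series(range(6), index=pd.period_range("2000-01", periods=6, freq="M"))
quarterly = s.resample("Q").mean()   # _downsample via _groupby_and_aggregate
daily = s.resample("D").ffill()      # _upsample with method='ffill'

# Timestamp anchoring: 'origin' and 'offset' shift the bin edges computed by
# _get_timestamp_range_edges()/_adjust_dates_anchored().
ts = pd.Series(
    range(10), index=pd.date_range("2000-10-01 23:30", periods=10, freq="17min")
)
anchored_epoch = ts.resample("17min", origin="epoch").sum()
anchored_offset = ts.resample("17min", offset="2min").sum()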
# powerline-shell builds the final prompt script by concatenating segment
# files, so `powerline` and `Color` are already present in the module
# namespace when this code runs (they are deliberately not imported here).
def add_root_indicator_segment():
    root_indicators = {
        'bash': ' \\$ ',
        'zsh': ' \\$ ',
        'bare': ' $ ',
    }
    # Default to the "command passed" colors; switch to the "failed" pair
    # when the previous command exited non-zero.
    bg = Color.CMD_PASSED_BG
    fg = Color.CMD_PASSED_FG
    if powerline.args.prev_error != 0:
        fg = Color.CMD_FAILED_FG
        bg = Color.CMD_FAILED_BG
    powerline.append(root_indicators[powerline.args.shell], fg, bg)

add_root_indicator_segment()
fellipecastro/powerline-shell
segments/root.py
Python
mit
397
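A hypothetical stand-in harness (all names below are invented for illustration) shows the contract the segment above relies on — an injected `powerline` object exposing `args` and `append()`, and an injected `Color` theme:

from types import SimpleNamespace

class FakePowerline(object):
    """Stand-in for the injected `powerline` object (illustrative only)."""
    def __init__(self, shell='bash', prev_error=0):
        self.args = SimpleNamespace(shell=shell, prev_error=prev_error)
        self.segments = []

    def append(self, text, fg, bg):
        self.segments.append((text, fg, bg))

# Stand-in for the injected Color theme; the numbers are arbitrary codes.
Color = SimpleNamespace(CMD_PASSED_BG=236, CMD_PASSED_FG=15,
                        CMD_FAILED_BG=161, CMD_FAILED_FG=15)

powerline = FakePowerline(shell='bash', prev_error=1)
# Running the segment body against this harness would pick the CMD_FAILED_*
# colors, since prev_error is non-zero.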
""" HTML Widget classes """ try: set except NameError: from sets import Set as set # Python 2.3 fallback import copy from itertools import chain from django.conf import settings from django.utils.datastructures import MultiValueDict, MergeDict from django.utils.html import escape, conditional_escape from django.utils.translation import ugettext from django.utils.encoding import StrAndUnicode, force_unicode from django.utils.safestring import mark_safe from django.utils import datetime_safe from datetime import time from util import flatatt from urlparse import urljoin __all__ = ( 'Media', 'MediaDefiningClass', 'Widget', 'TextInput', 'PasswordInput', 'HiddenInput', 'MultipleHiddenInput', 'FileInput', 'DateTimeInput', 'TimeInput', 'Textarea', 'CheckboxInput', 'Select', 'NullBooleanSelect', 'SelectMultiple', 'RadioSelect', 'CheckboxSelectMultiple', 'MultiWidget', 'SplitDateTimeWidget', ) MEDIA_TYPES = ('css','js') class Media(StrAndUnicode): def __init__(self, media=None, **kwargs): if media: media_attrs = media.__dict__ else: media_attrs = kwargs self._css = {} self._js = [] for name in MEDIA_TYPES: getattr(self, 'add_' + name)(media_attrs.get(name, None)) # Any leftover attributes must be invalid. # if media_attrs != {}: # raise TypeError, "'class Media' has invalid attribute(s): %s" % ','.join(media_attrs.keys()) def __unicode__(self): return self.render() def render(self): return mark_safe(u'\n'.join(chain(*[getattr(self, 'render_' + name)() for name in MEDIA_TYPES]))) def render_js(self): return [u'<script type="text/javascript" src="%s"></script>' % self.absolute_path(path) for path in self._js] def render_css(self): # To keep rendering order consistent, we can't just iterate over items(). # We need to sort the keys, and iterate over the sorted list. 
media = self._css.keys() media.sort() return chain(*[ [u'<link href="%s" type="text/css" media="%s" rel="stylesheet" />' % (self.absolute_path(path), medium) for path in self._css[medium]] for medium in media]) def absolute_path(self, path): if path.startswith(u'http://') or path.startswith(u'https://') or path.startswith(u'/'): return path return urljoin(settings.MEDIA_URL,path) def __getitem__(self, name): "Returns a Media object that only contains media of the given type" if name in MEDIA_TYPES: return Media(**{name: getattr(self, '_' + name)}) raise KeyError('Unknown media type "%s"' % name) def add_js(self, data): if data: self._js.extend([path for path in data if path not in self._js]) def add_css(self, data): if data: for medium, paths in data.items(): self._css.setdefault(medium, []).extend([path for path in paths if path not in self._css[medium]]) def __add__(self, other): combined = Media() for name in MEDIA_TYPES: getattr(combined, 'add_' + name)(getattr(self, '_' + name, None)) getattr(combined, 'add_' + name)(getattr(other, '_' + name, None)) return combined def media_property(cls): def _media(self): # Get the media property of the superclass, if it exists if hasattr(super(cls, self), 'media'): base = super(cls, self).media else: base = Media() # Get the media definition for this class definition = getattr(cls, 'Media', None) if definition: extend = getattr(definition, 'extend', True) if extend: if extend == True: m = base else: m = Media() for medium in extend: m = m + base[medium] return m + Media(definition) else: return Media(definition) else: return base return property(_media) class MediaDefiningClass(type): "Metaclass for classes that can have media definitions" def __new__(cls, name, bases, attrs): new_class = super(MediaDefiningClass, cls).__new__(cls, name, bases, attrs) if 'media' not in attrs: new_class.media = media_property(new_class) return new_class class Widget(object): __metaclass__ = MediaDefiningClass is_hidden = False # Determines whether this corresponds to an <input type="hidden">. needs_multipart_form = False # Determines does this widget need multipart-encrypted form def __init__(self, attrs=None): if attrs is not None: self.attrs = attrs.copy() else: self.attrs = {} def __deepcopy__(self, memo): obj = copy.copy(self) obj.attrs = self.attrs.copy() memo[id(self)] = obj return obj def render(self, name, value, attrs=None): """ Returns this Widget rendered as HTML, as a Unicode string. The 'value' given is not guaranteed to be valid input, so subclass implementations should program defensively. """ raise NotImplementedError def build_attrs(self, extra_attrs=None, **kwargs): "Helper function for building an attribute dictionary." attrs = dict(self.attrs, **kwargs) if extra_attrs: attrs.update(extra_attrs) return attrs def value_from_datadict(self, data, files, name): """ Given a dictionary of data and this widget's name, returns the value of this widget. Returns None if it's not provided. """ return data.get(name, None) def _has_changed(self, initial, data): """ Return True if data differs from initial. """ # For purposes of seeing whether something has changed, None is # the same as an empty string, if the data or inital value we get # is None, replace it w/ u''. 
if data is None: data_value = u'' else: data_value = data if initial is None: initial_value = u'' else: initial_value = initial if force_unicode(initial_value) != force_unicode(data_value): return True return False def id_for_label(self, id_): """ Returns the HTML ID attribute of this Widget for use by a <label>, given the ID of the field. Returns None if no ID is available. This hook is necessary because some widgets have multiple HTML elements and, thus, multiple IDs. In that case, this method should return an ID value that corresponds to the first ID in the widget's tags. """ return id_ id_for_label = classmethod(id_for_label) class Input(Widget): """ Base class for all <input> widgets (except type='checkbox' and type='radio', which are special). """ input_type = None # Subclasses must define this. def render(self, name, value, attrs=None): if value is None: value = '' final_attrs = self.build_attrs(attrs, type=self.input_type, name=name) if value != '': # Only add the 'value' attribute if a value is non-empty. final_attrs['value'] = force_unicode(value) return mark_safe(u'<input%s />' % flatatt(final_attrs)) class TextInput(Input): input_type = 'text' class PasswordInput(Input): input_type = 'password' def __init__(self, attrs=None, render_value=True): super(PasswordInput, self).__init__(attrs) self.render_value = render_value def render(self, name, value, attrs=None): if not self.render_value: value=None return super(PasswordInput, self).render(name, value, attrs) class HiddenInput(Input): input_type = 'hidden' is_hidden = True class MultipleHiddenInput(HiddenInput): """ A widget that handles <input type="hidden"> for fields that have a list of values. """ def __init__(self, attrs=None, choices=()): super(MultipleHiddenInput, self).__init__(attrs) # choices can be any iterable self.choices = choices def render(self, name, value, attrs=None, choices=()): if value is None: value = [] final_attrs = self.build_attrs(attrs, type=self.input_type, name=name) return mark_safe(u'\n'.join([(u'<input%s />' % flatatt(dict(value=force_unicode(v), **final_attrs))) for v in value])) def value_from_datadict(self, data, files, name): if isinstance(data, (MultiValueDict, MergeDict)): return data.getlist(name) return data.get(name, None) class FileInput(Input): input_type = 'file' needs_multipart_form = True def render(self, name, value, attrs=None): return super(FileInput, self).render(name, None, attrs=attrs) def value_from_datadict(self, data, files, name): "File widgets take data from FILES, not POST" return files.get(name, None) def _has_changed(self, initial, data): if data is None: return False return True class Textarea(Widget): def __init__(self, attrs=None): # The 'rows' and 'cols' attributes are required for HTML correctness. 
self.attrs = {'cols': '40', 'rows': '10'} if attrs: self.attrs.update(attrs) def render(self, name, value, attrs=None): if value is None: value = '' value = force_unicode(value) final_attrs = self.build_attrs(attrs, name=name) return mark_safe(u'<textarea%s>%s</textarea>' % (flatatt(final_attrs), conditional_escape(force_unicode(value)))) class DateTimeInput(Input): input_type = 'text' format = '%Y-%m-%d %H:%M:%S' # '2006-10-25 14:30:59' def __init__(self, attrs=None, format=None): super(DateTimeInput, self).__init__(attrs) if format: self.format = format def render(self, name, value, attrs=None): if value is None: value = '' elif hasattr(value, 'strftime'): value = datetime_safe.new_datetime(value) value = value.strftime(self.format) return super(DateTimeInput, self).render(name, value, attrs) class TimeInput(Input): input_type = 'text' def render(self, name, value, attrs=None): if value is None: value = '' elif isinstance(value, time): value = value.replace(microsecond=0) return super(TimeInput, self).render(name, value, attrs) class CheckboxInput(Widget): def __init__(self, attrs=None, check_test=bool): super(CheckboxInput, self).__init__(attrs) # check_test is a callable that takes a value and returns True # if the checkbox should be checked for that value. self.check_test = check_test def render(self, name, value, attrs=None): final_attrs = self.build_attrs(attrs, type='checkbox', name=name) try: result = self.check_test(value) except: # Silently catch exceptions result = False if result: final_attrs['checked'] = 'checked' if value not in ('', True, False, None): # Only add the 'value' attribute if a value is non-empty. final_attrs['value'] = force_unicode(value) return mark_safe(u'<input%s />' % flatatt(final_attrs)) def value_from_datadict(self, data, files, name): if name not in data: # A missing value means False because HTML form submission does not # send results for unselected checkboxes. return False return super(CheckboxInput, self).value_from_datadict(data, files, name) def _has_changed(self, initial, data): # Sometimes data or initial could be None or u'' which should be the # same thing as False. return bool(initial) != bool(data) class Select(Widget): def __init__(self, attrs=None, choices=()): super(Select, self).__init__(attrs) # choices can be any iterable, but we may need to render this widget # multiple times. Thus, collapse it into a list so it can be consumed # more than once. self.choices = list(choices) def render(self, name, value, attrs=None, choices=()): if value is None: value = '' final_attrs = self.build_attrs(attrs, name=name) output = [u'<select%s>' % flatatt(final_attrs)] options = self.render_options(choices, [value]) if options: output.append(options) output.append('</select>') return mark_safe(u'\n'.join(output)) def render_options(self, choices, selected_choices): def render_option(option_value, option_label): option_value = force_unicode(option_value) selected_html = (option_value in selected_choices) and u' selected="selected"' or '' return u'<option value="%s"%s>%s</option>' % ( escape(option_value), selected_html, conditional_escape(force_unicode(option_label))) # Normalize to strings. 
selected_choices = set([force_unicode(v) for v in selected_choices]) output = [] for option_value, option_label in chain(self.choices, choices): if isinstance(option_label, (list, tuple)): output.append(u'<optgroup label="%s">' % escape(force_unicode(option_value))) for option in option_label: output.append(render_option(*option)) output.append(u'</optgroup>') else: output.append(render_option(option_value, option_label)) return u'\n'.join(output) class NullBooleanSelect(Select): """ A Select Widget intended to be used with NullBooleanField. """ def __init__(self, attrs=None): choices = ((u'1', ugettext('Unknown')), (u'2', ugettext('Yes')), (u'3', ugettext('No'))) super(NullBooleanSelect, self).__init__(attrs, choices) def render(self, name, value, attrs=None, choices=()): try: value = {True: u'2', False: u'3', u'2': u'2', u'3': u'3'}[value] except KeyError: value = u'1' return super(NullBooleanSelect, self).render(name, value, attrs, choices) def value_from_datadict(self, data, files, name): value = data.get(name, None) return {u'2': True, u'3': False, True: True, False: False}.get(value, None) def _has_changed(self, initial, data): # Sometimes data or initial could be None or u'' which should be the # same thing as False. return bool(initial) != bool(data) class SelectMultiple(Select): def render(self, name, value, attrs=None, choices=()): if value is None: value = [] final_attrs = self.build_attrs(attrs, name=name) output = [u'<select multiple="multiple"%s>' % flatatt(final_attrs)] options = self.render_options(choices, value) if options: output.append(options) output.append('</select>') return mark_safe(u'\n'.join(output)) def value_from_datadict(self, data, files, name): if isinstance(data, (MultiValueDict, MergeDict)): return data.getlist(name) return data.get(name, None) def _has_changed(self, initial, data): if initial is None: initial = [] if data is None: data = [] if len(initial) != len(data): return True for value1, value2 in zip(initial, data): if force_unicode(value1) != force_unicode(value2): return True return False class RadioInput(StrAndUnicode): """ An object used by RadioFieldRenderer that represents a single <input type='radio'>. """ def __init__(self, name, value, attrs, choice, index): self.name, self.value = name, value self.attrs = attrs self.choice_value = force_unicode(choice[0]) self.choice_label = force_unicode(choice[1]) self.index = index def __unicode__(self): if 'id' in self.attrs: label_for = ' for="%s_%s"' % (self.attrs['id'], self.index) else: label_for = '' choice_label = conditional_escape(force_unicode(self.choice_label)) return mark_safe(u'<label%s>%s %s</label>' % (label_for, self.tag(), choice_label)) def is_checked(self): return self.value == self.choice_value def tag(self): if 'id' in self.attrs: self.attrs['id'] = '%s_%s' % (self.attrs['id'], self.index) final_attrs = dict(self.attrs, type='radio', name=self.name, value=self.choice_value) if self.is_checked(): final_attrs['checked'] = 'checked' return mark_safe(u'<input%s />' % flatatt(final_attrs)) class RadioFieldRenderer(StrAndUnicode): """ An object used by RadioSelect to enable customization of radio widgets. 
""" def __init__(self, name, value, attrs, choices): self.name, self.value, self.attrs = name, value, attrs self.choices = choices def __iter__(self): for i, choice in enumerate(self.choices): yield RadioInput(self.name, self.value, self.attrs.copy(), choice, i) def __getitem__(self, idx): choice = self.choices[idx] # Let the IndexError propogate return RadioInput(self.name, self.value, self.attrs.copy(), choice, idx) def __unicode__(self): return self.render() def render(self): """Outputs a <ul> for this set of radio fields.""" return mark_safe(u'<ul>\n%s\n</ul>' % u'\n'.join([u'<li>%s</li>' % force_unicode(w) for w in self])) class RadioSelect(Select): renderer = RadioFieldRenderer def __init__(self, *args, **kwargs): # Override the default renderer if we were passed one. renderer = kwargs.pop('renderer', None) if renderer: self.renderer = renderer super(RadioSelect, self).__init__(*args, **kwargs) def get_renderer(self, name, value, attrs=None, choices=()): """Returns an instance of the renderer.""" if value is None: value = '' str_value = force_unicode(value) # Normalize to string. final_attrs = self.build_attrs(attrs) choices = list(chain(self.choices, choices)) return self.renderer(name, str_value, final_attrs, choices) def render(self, name, value, attrs=None, choices=()): return self.get_renderer(name, value, attrs, choices).render() def id_for_label(self, id_): # RadioSelect is represented by multiple <input type="radio"> fields, # each of which has a distinct ID. The IDs are made distinct by a "_X" # suffix, where X is the zero-based index of the radio field. Thus, # the label for a RadioSelect should reference the first one ('_0'). if id_: id_ += '_0' return id_ id_for_label = classmethod(id_for_label) class CheckboxSelectMultiple(SelectMultiple): def render(self, name, value, attrs=None, choices=()): if value is None: value = [] has_id = attrs and 'id' in attrs final_attrs = self.build_attrs(attrs, name=name) output = [u'<ul>'] # Normalize to strings str_values = set([force_unicode(v) for v in value]) for i, (option_value, option_label) in enumerate(chain(self.choices, choices)): # If an ID attribute was given, add a numeric index as a suffix, # so that the checkboxes don't all have the same ID attribute. if has_id: final_attrs = dict(final_attrs, id='%s_%s' % (attrs['id'], i)) label_for = u' for="%s"' % final_attrs['id'] else: label_for = '' cb = CheckboxInput(final_attrs, check_test=lambda value: value in str_values) option_value = force_unicode(option_value) rendered_cb = cb.render(name, option_value) option_label = conditional_escape(force_unicode(option_label)) output.append(u'<li><label%s>%s %s</label></li>' % (label_for, rendered_cb, option_label)) output.append(u'</ul>') return mark_safe(u'\n'.join(output)) def id_for_label(self, id_): # See the comment for RadioSelect.id_for_label() if id_: id_ += '_0' return id_ id_for_label = classmethod(id_for_label) class MultiWidget(Widget): """ A widget that is composed of multiple widgets. Its render() method is different than other widgets', because it has to figure out how to split a single value for display in multiple widgets. The ``value`` argument can be one of two things: * A list. * A normal value (e.g., a string) that has been "compressed" from a list of values. In the second case -- i.e., if the value is NOT a list -- render() will first "decompress" the value into a list before rendering it. It does so by calling the decompress() method, which MultiWidget subclasses must implement. 
This method takes a single "compressed" value and returns a list. When render() does its HTML rendering, each value in the list is rendered with the corresponding widget -- the first value is rendered in the first widget, the second value is rendered in the second widget, etc. Subclasses may implement format_output(), which takes the list of rendered widgets and returns a string of HTML that formats them any way you'd like. You'll probably want to use this class with MultiValueField. """ def __init__(self, widgets, attrs=None): self.widgets = [isinstance(w, type) and w() or w for w in widgets] super(MultiWidget, self).__init__(attrs) def render(self, name, value, attrs=None): # value is a list of values, each corresponding to a widget # in self.widgets. if not isinstance(value, list): value = self.decompress(value) output = [] final_attrs = self.build_attrs(attrs) id_ = final_attrs.get('id', None) for i, widget in enumerate(self.widgets): try: widget_value = value[i] except IndexError: widget_value = None if id_: final_attrs = dict(final_attrs, id='%s_%s' % (id_, i)) output.append(widget.render(name + '_%s' % i, widget_value, final_attrs)) return mark_safe(self.format_output(output)) def id_for_label(self, id_): # See the comment for RadioSelect.id_for_label() if id_: id_ += '_0' return id_ id_for_label = classmethod(id_for_label) def value_from_datadict(self, data, files, name): return [widget.value_from_datadict(data, files, name + '_%s' % i) for i, widget in enumerate(self.widgets)] def _has_changed(self, initial, data): if initial is None: initial = [u'' for x in range(0, len(data))] else: if not isinstance(initial, list): initial = self.decompress(initial) for widget, initial, data in zip(self.widgets, initial, data): if widget._has_changed(initial, data): return True return False def format_output(self, rendered_widgets): """ Given a list of rendered widgets (as strings), returns a Unicode string representing the HTML for the whole lot. This hook allows you to format the HTML design of the widgets, if needed. """ return u''.join(rendered_widgets) def decompress(self, value): """ Returns a list of decompressed values for the given compressed value. The given value can be assumed to be valid, but not necessarily non-empty. """ raise NotImplementedError('Subclasses must implement this method.') def _get_media(self): "Media for a multiwidget is the combination of all media of the subwidgets" media = Media() for w in self.widgets: media = media + w.media return media media = property(_get_media) class SplitDateTimeWidget(MultiWidget): """ A Widget that splits datetime input into two <input type="text"> boxes. """ def __init__(self, attrs=None): widgets = (TextInput(attrs=attrs), TextInput(attrs=attrs)) super(SplitDateTimeWidget, self).__init__(widgets, attrs) def decompress(self, value): if value: return [value.date(), value.time().replace(microsecond=0)] return [None, None] class SplitHiddenDateTimeWidget(SplitDateTimeWidget): """ A Widget that splits datetime input into two <input type="hidden"> inputs. """ def __init__(self, attrs=None): widgets = (HiddenInput(attrs=attrs), HiddenInput(attrs=attrs)) super(SplitDateTimeWidget, self).__init__(widgets, attrs)
AloneRoad/Inforlearn
vendor/django/forms/widgets.py
Python
apache-2.0
25,335
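A brief sketch of how the `Media` combination rules in the record above behave (Python 2 era Django; absolute paths are used so `absolute_path()` never consults `settings.MEDIA_URL`):

m1 = Media(css={'all': ('/static/base.css',)}, js=('/static/base.js',))
m2 = Media(js=('/static/calendar.js', '/static/base.js',))

combined = m1 + m2             # __add__ merges per media type, de-duplicating paths
print combined.render()        # one <link .../> plus one <script> per unique js path
print combined['js'].render()  # __getitem__ narrows to a single media type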
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def predict(project_id, model_id, content):
    """Predict."""
    # [START automl_language_sentiment_analysis_predict]
    from google.cloud import automl

    # TODO(developer): Uncomment and set the following variables
    # project_id = "YOUR_PROJECT_ID"
    # model_id = "YOUR_MODEL_ID"
    # content = "text to predict"

    prediction_client = automl.PredictionServiceClient()

    # Get the full path of the model.
    model_full_id = automl.AutoMlClient.model_path(project_id, "us-central1", model_id)

    # Supported mime_types: 'text/plain', 'text/html'
    # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#textsnippet
    text_snippet = automl.TextSnippet(content=content, mime_type="text/plain")
    payload = automl.ExamplePayload(text_snippet=text_snippet)

    response = prediction_client.predict(name=model_full_id, payload=payload)

    for annotation_payload in response.payload:
        print("Predicted class name: {}".format(annotation_payload.display_name))
        print(
            "Predicted sentiment score: {}".format(
                annotation_payload.text_sentiment.sentiment
            )
        )
    # [END automl_language_sentiment_analysis_predict]
googleapis/python-automl
samples/snippets/language_sentiment_analysis_predict.py
Python
apache-2.0
1,791
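A hypothetical invocation of the sample above; the project and model IDs are placeholders, and a deployed AutoML sentiment model plus application-default GCP credentials are assumed:

if __name__ == "__main__":
    predict(
        project_id="my-gcp-project",        # placeholder
        model_id="TST0000000000000000000",  # placeholder
        content="The bouquet is lovely but the finish is harsh.",
    )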
# BaseSciDocXMLReader and BaseSciDocXMLWriter
#
# Copyright: (c) Daniel Duma 2015
# Author: Daniel Duma <danielduma@gmail.com>

# For license information, see LICENSE.TXT

from __future__ import absolute_import
from proc.general_utils import loadFileText


class BaseSciDocXMLReader(object):
    """
    Base class for all SciDoc readers.
    """
    def __init__(self):
        pass

    def readFile(self, filename):
        """
        Load an XML file into a SciDoc.

        Args:
            filename: full path to file to read
        Returns:
            SciDoc instance
        """
        text = loadFileText(filename)
        return self.read(text, filename)

    def read(self, xml, identifier):
        """
        Abstract method that implements the reading. Override in descendant
        classes.

        :param xml: full xml string
        :param identifier: an identifier for this document, e.g. file name
            If an actual full path, the path will be removed from it when
            stored
        :returns: SciDoc instance
        :rtype: SciDoc
        """
        raise NotImplementedError


class BaseSciDocXMLWriter(object):
    """
    Base class for all SciDoc writers.
    """
    def __init__(self):
        pass

    def write(self, doc, filename=None):
        """
        Abstract method that implements the writing. Override in descendant
        classes.
        """
        raise NotImplementedError


def main():
    pass

if __name__ == '__main__':
    main()
danieldmm/minerva
scidoc/xmlformats/base_classes.py
Python
gpl-3.0
1,485
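A toy subclass sketch showing how the `read()` contract above is meant to be filled in. The `SciDoc` import path and its attribute layout are assumptions about the wider repo, not facts from this file:

import xml.etree.ElementTree as ET

from scidoc.scidoc import SciDoc  # assumed import path; adjust to the repo

class TitleOnlyReader(BaseSciDocXMLReader):
    """Illustrative reader: extracts only a <title> element."""

    def read(self, xml, identifier):
        root = ET.fromstring(xml)
        doc = SciDoc()
        # Treating `metadata` as a dict is an assumption about SciDoc.
        doc.metadata["title"] = (root.findtext("title") or "").strip()
        doc.metadata["filename"] = identifier
        return doc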
# This file is part of Indico. # Copyright (C) 2002 - 2019 CERN # # Indico is free software; you can redistribute it and/or # modify it under the terms of the MIT License; see the # LICENSE file for more details. from __future__ import unicode_literals from flask import g from sqlalchemy import DDL from sqlalchemy.dialects.postgresql import ARRAY from sqlalchemy.event import listens_for from sqlalchemy.ext.declarative import declared_attr from sqlalchemy.orm import mapper from sqlalchemy.orm.base import NEVER_SET, NO_VALUE from indico.core.db import db from indico.core.db.sqlalchemy.attachments import AttachedItemsMixin from indico.core.db.sqlalchemy.descriptions import DescriptionMixin, RenderMode from indico.core.db.sqlalchemy.locations import LocationMixin from indico.core.db.sqlalchemy.notes import AttachedNotesMixin from indico.core.db.sqlalchemy.protection import ProtectionManagersMixin from indico.core.db.sqlalchemy.util.models import auto_table_args from indico.core.db.sqlalchemy.util.queries import increment_and_get from indico.core.db.sqlalchemy.util.session import no_autoflush from indico.modules.events.management.util import get_non_inheriting_objects from indico.modules.events.models.persons import AuthorsSpeakersMixin, PersonLinkDataMixin from indico.modules.events.papers.models.papers import Paper from indico.modules.events.papers.models.revisions import PaperRevision, PaperRevisionState from indico.modules.events.sessions.util import session_coordinator_priv_enabled from indico.util.locators import locator_property from indico.util.string import format_repr, return_ascii from indico.web.flask.util import url_for def _get_next_friendly_id(context): """Get the next friendly id for a contribution.""" from indico.modules.events import Event event_id = context.current_parameters['event_id'] # Check first if there is a pre-allocated friendly id # (and use it in that case) friendly_ids = g.get('friendly_ids', {}).get(Contribution, {}).get(event_id, []) if friendly_ids: return friendly_ids.pop(0) assert event_id is not None return increment_and_get(Event._last_friendly_contribution_id, Event.id == event_id) class CustomFieldsMixin(object): """Methods to process custom field data.""" def get_field_value(self, field_id, raw=False): fv = next((v for v in self.field_values if v.contribution_field_id == field_id), None) if raw: return fv else: return fv.friendly_data if fv else '' def set_custom_field(self, field_id, field_value): fv = self.get_field_value(field_id, raw=True) if not fv: field_value_cls = type(self).field_values.prop.mapper.class_ fv = field_value_cls(contribution_field=self.event.get_contribution_field(field_id)) self.field_values.append(fv) old_value = fv.data fv.data = field_value return old_value class Contribution(DescriptionMixin, ProtectionManagersMixin, LocationMixin, AttachedItemsMixin, AttachedNotesMixin, PersonLinkDataMixin, AuthorsSpeakersMixin, CustomFieldsMixin, db.Model): __tablename__ = 'contributions' __auto_table_args = (db.Index(None, 'friendly_id', 'event_id', unique=True, postgresql_where=db.text('NOT is_deleted')), db.Index(None, 'event_id', 'track_id'), db.Index(None, 'event_id', 'abstract_id'), db.Index(None, 'abstract_id', unique=True, postgresql_where=db.text('NOT is_deleted')), db.CheckConstraint("session_block_id IS NULL OR session_id IS NOT NULL", 'session_block_if_session'), db.ForeignKeyConstraint(['session_block_id', 'session_id'], ['events.session_blocks.id', 'events.session_blocks.session_id']), {'schema': 'events'}) 
location_backref_name = 'contributions' disallowed_protection_modes = frozenset() inheriting_have_acl = True possible_render_modes = {RenderMode.html, RenderMode.markdown} default_render_mode = RenderMode.markdown allow_relationship_preloading = True PRELOAD_EVENT_ATTACHED_ITEMS = True PRELOAD_EVENT_NOTES = True ATTACHMENT_FOLDER_ID_COLUMN = 'contribution_id' @classmethod def allocate_friendly_ids(cls, event, n): """Allocate n Contribution friendly_ids. This is needed so that we can allocate all IDs in one go. Not doing so could result in DB deadlocks. All operations that create more than one contribution should use this method. :param event: the :class:`Event` in question :param n: the number of ids to pre-allocate """ from indico.modules.events import Event fid = increment_and_get(Event._last_friendly_contribution_id, Event.id == event.id, n) friendly_ids = g.setdefault('friendly_ids', {}) friendly_ids.setdefault(cls, {})[event.id] = range(fid - n + 1, fid + 1) @declared_attr def __table_args__(cls): return auto_table_args(cls) id = db.Column( db.Integer, primary_key=True ) #: The human-friendly ID for the contribution friendly_id = db.Column( db.Integer, nullable=False, default=_get_next_friendly_id ) event_id = db.Column( db.Integer, db.ForeignKey('events.events.id'), index=True, nullable=False ) session_id = db.Column( db.Integer, db.ForeignKey('events.sessions.id'), index=True, nullable=True ) session_block_id = db.Column( db.Integer, db.ForeignKey('events.session_blocks.id'), index=True, nullable=True ) track_id = db.Column( db.Integer, db.ForeignKey('events.tracks.id', ondelete='SET NULL'), index=True, nullable=True ) abstract_id = db.Column( db.Integer, db.ForeignKey('event_abstracts.abstracts.id'), index=True, nullable=True ) type_id = db.Column( db.Integer, db.ForeignKey('events.contribution_types.id'), index=True, nullable=True ) title = db.Column( db.String, nullable=False ) code = db.Column( db.String, nullable=False, default='' ) duration = db.Column( db.Interval, nullable=False ) board_number = db.Column( db.String, nullable=False, default='' ) keywords = db.Column( ARRAY(db.String), nullable=False, default=[] ) is_deleted = db.Column( db.Boolean, nullable=False, default=False ) #: The last user-friendly sub-contribution ID _last_friendly_subcontribution_id = db.deferred(db.Column( 'last_friendly_subcontribution_id', db.Integer, nullable=False, default=0 )) event = db.relationship( 'Event', lazy=True, backref=db.backref( 'contributions', primaryjoin='(Contribution.event_id == Event.id) & ~Contribution.is_deleted', cascade='all, delete-orphan', lazy=True ) ) session = db.relationship( 'Session', lazy=True, backref=db.backref( 'contributions', primaryjoin='(Contribution.session_id == Session.id) & ~Contribution.is_deleted', lazy=True ) ) session_block = db.relationship( 'SessionBlock', lazy=True, foreign_keys=[session_block_id], backref=db.backref( 'contributions', primaryjoin='(Contribution.session_block_id == SessionBlock.id) & ~Contribution.is_deleted', lazy=True ) ) type = db.relationship( 'ContributionType', lazy=True, backref=db.backref( 'contributions', lazy=True ) ) acl_entries = db.relationship( 'ContributionPrincipal', lazy=True, cascade='all, delete-orphan', collection_class=set, backref='contribution' ) subcontributions = db.relationship( 'SubContribution', lazy=True, primaryjoin='(SubContribution.contribution_id == Contribution.id) & ~SubContribution.is_deleted', order_by='SubContribution.position', cascade='all, delete-orphan', backref=db.backref( 
'contribution', primaryjoin='SubContribution.contribution_id == Contribution.id', lazy=True ) ) abstract = db.relationship( 'Abstract', lazy=True, backref=db.backref( 'contribution', primaryjoin='(Contribution.abstract_id == Abstract.id) & ~Contribution.is_deleted', lazy=True, uselist=False ) ) track = db.relationship( 'Track', lazy=True, backref=db.backref( 'contributions', primaryjoin='(Contribution.track_id == Track.id) & ~Contribution.is_deleted', lazy=True, passive_deletes=True ) ) #: External references associated with this contribution references = db.relationship( 'ContributionReference', lazy=True, cascade='all, delete-orphan', backref=db.backref( 'contribution', lazy=True ) ) #: Persons associated with this contribution person_links = db.relationship( 'ContributionPersonLink', lazy=True, cascade='all, delete-orphan', backref=db.backref( 'contribution', lazy=True ) ) #: Data stored in abstract/contribution fields field_values = db.relationship( 'ContributionFieldValue', lazy=True, cascade='all, delete-orphan', backref=db.backref( 'contribution', lazy=True ) ) #: The accepted paper revision _accepted_paper_revision = db.relationship( 'PaperRevision', lazy=True, viewonly=True, uselist=False, primaryjoin=('(PaperRevision._contribution_id == Contribution.id) & (PaperRevision.state == {})' .format(PaperRevisionState.accepted)), ) #: Paper files not submitted for reviewing pending_paper_files = db.relationship( 'PaperFile', lazy=True, viewonly=True, primaryjoin='(PaperFile._contribution_id == Contribution.id) & (PaperFile.revision_id.is_(None))', ) #: Paper reviewing judges paper_judges = db.relationship( 'User', secondary='event_paper_reviewing.judges', collection_class=set, lazy=True, backref=db.backref( 'judge_for_contributions', collection_class=set, lazy=True ) ) #: Paper content reviewers paper_content_reviewers = db.relationship( 'User', secondary='event_paper_reviewing.content_reviewers', collection_class=set, lazy=True, backref=db.backref( 'content_reviewer_for_contributions', collection_class=set, lazy=True ) ) #: Paper layout reviewers paper_layout_reviewers = db.relationship( 'User', secondary='event_paper_reviewing.layout_reviewers', collection_class=set, lazy=True, backref=db.backref( 'layout_reviewer_for_contributions', collection_class=set, lazy=True ) ) @declared_attr def _paper_last_revision(cls): # Incompatible with joinedload subquery = (db.select([db.func.max(PaperRevision.submitted_dt)]) .where(PaperRevision._contribution_id == cls.id) .correlate_except(PaperRevision) .as_scalar()) return db.relationship( 'PaperRevision', uselist=False, lazy=True, viewonly=True, primaryjoin=db.and_(PaperRevision._contribution_id == cls.id, PaperRevision.submitted_dt == subquery) ) # relationship backrefs: # - _paper_files (PaperFile._contribution) # - _paper_revisions (PaperRevision._contribution) # - attachment_folders (AttachmentFolder.contribution) # - legacy_mapping (LegacyContributionMapping.contribution) # - note (EventNote.contribution) # - room_reservation_links (ReservationLink.contribution) # - timetable_entry (TimetableEntry.contribution) # - vc_room_associations (VCRoomEventAssociation.linked_contrib) @declared_attr def is_scheduled(cls): from indico.modules.events.timetable.models.entries import TimetableEntry query = (db.exists([1]) .where(TimetableEntry.contribution_id == cls.id) .correlate_except(TimetableEntry)) return db.column_property(query, deferred=True) @declared_attr def subcontribution_count(cls): from 
indico.modules.events.contributions.models.subcontributions import SubContribution query = (db.select([db.func.count(SubContribution.id)]) .where((SubContribution.contribution_id == cls.id) & ~SubContribution.is_deleted) .correlate_except(SubContribution)) return db.column_property(query, deferred=True) @declared_attr def _paper_revision_count(cls): query = (db.select([db.func.count(PaperRevision.id)]) .where(PaperRevision._contribution_id == cls.id) .correlate_except(PaperRevision)) return db.column_property(query, deferred=True) def __init__(self, **kwargs): # explicitly initialize those relationships with None to avoid # an extra query to check whether there is an object associated # when assigning a new one (e.g. during cloning) kwargs.setdefault('note', None) kwargs.setdefault('timetable_entry', None) super(Contribution, self).__init__(**kwargs) @classmethod def preload_acl_entries(cls, event): cls.preload_relationships(cls.query.with_parent(event), 'acl_entries') @property def location_parent(self): if self.session_block_id is not None: return self.session_block elif self.session_id is not None: return self.session else: return self.event @property def protection_parent(self): return self.session if self.session_id is not None else self.event @property def start_dt(self): return self.timetable_entry.start_dt if self.timetable_entry else None @property def end_dt(self): return self.timetable_entry.start_dt + self.duration if self.timetable_entry else None @property def start_dt_poster(self): if self.session and self.session.is_poster and self.timetable_entry and self.timetable_entry.parent: return self.timetable_entry.parent.start_dt @property def end_dt_poster(self): if self.session and self.session.is_poster and self.timetable_entry and self.timetable_entry.parent: return self.timetable_entry.parent.end_dt @property def duration_poster(self): if self.session and self.session.is_poster and self.timetable_entry and self.timetable_entry.parent: return self.timetable_entry.parent.duration @property def start_dt_display(self): """The displayed start time of the contribution. This is the start time of the poster session if applicable, otherwise the start time of the contribution itself. """ return self.start_dt_poster or self.start_dt @property def end_dt_display(self): """The displayed end time of the contribution. This is the end time of the poster session if applicable, otherwise the end time of the contribution itself. """ return self.end_dt_poster or self.end_dt @property def duration_display(self): """The displayed duration of the contribution. This is the duration of the poster session if applicable, otherwise the duration of the contribution itself. 
""" return self.duration_poster or self.duration @property def submitters(self): return {person_link for person_link in self.person_links if person_link.is_submitter} @locator_property def locator(self): return dict(self.event.locator, contrib_id=self.id) @property def verbose_title(self): return '#{} ({})'.format(self.friendly_id, self.title) @property def paper(self): return Paper(self) if self._paper_last_revision else None def is_paper_reviewer(self, user): return user in self.paper_content_reviewers or user in self.paper_layout_reviewers @return_ascii def __repr__(self): return format_repr(self, 'id', is_deleted=False, _text=self.title) def can_manage(self, user, permission=None, allow_admin=True, check_parent=True, explicit_permission=False): if super(Contribution, self).can_manage(user, permission, allow_admin=allow_admin, check_parent=check_parent, explicit_permission=explicit_permission): return True if (check_parent and self.session_id is not None and self.session.can_manage(user, 'coordinate', allow_admin=allow_admin, explicit_permission=explicit_permission) and session_coordinator_priv_enabled(self.event, 'manage-contributions')): return True return False def get_non_inheriting_objects(self): """Get a set of child objects that do not inherit protection.""" return get_non_inheriting_objects(self) def is_user_associated(self, user, check_abstract=False): if user is None: return False if check_abstract and self.abstract and self.abstract.submitter == user: return True return any(pl.person.user == user for pl in self.person_links if pl.person.user) Contribution.register_protection_events() @listens_for(mapper, 'after_configured', once=True) def _mapper_configured(): Contribution.register_location_events() @listens_for(Contribution.session, 'set') def _set_session_block(target, value, *unused): if value is None: target.session_block = None @listens_for(Contribution.timetable_entry, 'set') @no_autoflush def _set_timetable_entry(target, value, *unused): if value is None: target.session_block = None else: if target.session is not None: target.session_block = value.parent.session_block @listens_for(Contribution.duration, 'set') def _set_duration(target, value, oldvalue, *unused): from indico.modules.events.util import register_time_change if oldvalue in (NEVER_SET, NO_VALUE): return if value != oldvalue and target.timetable_entry is not None: register_time_change(target.timetable_entry) @listens_for(Contribution.__table__, 'after_create') def _add_timetable_consistency_trigger(target, conn, **kw): sql = """ CREATE CONSTRAINT TRIGGER consistent_timetable AFTER INSERT OR UPDATE OF event_id, session_id, session_block_id, duration ON {} DEFERRABLE INITIALLY DEFERRED FOR EACH ROW EXECUTE PROCEDURE events.check_timetable_consistency('contribution'); """.format(target.fullname) DDL(sql).execute(conn)
mvidalgarcia/indico
indico/modules/events/contributions/models/contributions.py
Python
mit
19,957
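A hedged usage sketch for the model above; it assumes a configured Indico application context and an already-loaded `event` instance, and uses only attributes defined in this file:

# `event` is a hypothetical Event instance fetched elsewhere.
contribs = (Contribution.query
            .with_parent(event)
            .filter(~Contribution.is_deleted)
            .order_by(Contribution.friendly_id)
            .all())
summaries = [(c.verbose_title, c.start_dt_display, c.duration_display)
             for c in contribs]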
# Copyright 2019 Objectif Libre # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import flask import voluptuous from werkzeug import exceptions as http_exceptions from cloudkitty.api.v2 import base from cloudkitty.api.v2 import utils as api_utils from cloudkitty.common import policy from cloudkitty import messaging from cloudkitty import storage_state from cloudkitty.utils import tz as tzutils from cloudkitty.utils import validation as vutils from oslo_log import log LOG = log.getLogger(__name__) class ScopeState(base.BaseResource): @classmethod def reload(cls): super(ScopeState, cls).reload() cls._client = messaging.get_client() cls._storage_state = storage_state.StateManager() @api_utils.paginated @api_utils.add_input_schema('query', { voluptuous.Optional('scope_id', default=[]): api_utils.MultiQueryParam(str), voluptuous.Optional('scope_key', default=[]): api_utils.MultiQueryParam(str), voluptuous.Optional('fetcher', default=[]): api_utils.MultiQueryParam(str), voluptuous.Optional('collector', default=[]): api_utils.MultiQueryParam(str), voluptuous.Optional('active', default=[]): api_utils.MultiQueryParam(int), }) @api_utils.add_output_schema({'results': [{ voluptuous.Required('scope_id'): vutils.get_string_type(), voluptuous.Required('scope_key'): vutils.get_string_type(), voluptuous.Required('fetcher'): vutils.get_string_type(), voluptuous.Required('collector'): vutils.get_string_type(), voluptuous.Optional( 'last_processed_timestamp'): vutils.get_string_type(), # This "state" property should be removed in the next release. 
        voluptuous.Optional('state'): vutils.get_string_type(),
        voluptuous.Required('active'): bool,
        voluptuous.Optional('scope_activation_toggle_date'):
            vutils.get_string_type(),
    }]})
    def get(self, offset=0, limit=100,
            scope_id=None, scope_key=None,
            fetcher=None, collector=None, active=None):
        policy.authorize(
            flask.request.context,
            'scope:get_state',
            {'project_id': scope_id or flask.request.context.project_id}
        )
        results = self._storage_state.get_all(
            identifier=scope_id, scope_key=scope_key, fetcher=fetcher,
            collector=collector, offset=offset, limit=limit, active=active)
        if len(results) < 1:
            raise http_exceptions.NotFound(
                "No resource found for provided filters.")
        return {
            'results': [{
                'scope_id': r.identifier,
                'scope_key': r.scope_key,
                'fetcher': r.fetcher,
                'collector': r.collector,
                'state': r.last_processed_timestamp.isoformat(),
                'last_processed_timestamp':
                    r.last_processed_timestamp.isoformat(),
                'active': r.active,
                'scope_activation_toggle_date':
                    r.scope_activation_toggle_date.isoformat()
                    if r.scope_activation_toggle_date else None
            } for r in results]
        }

    @api_utils.add_input_schema('body', {
        voluptuous.Exclusive('all_scopes', 'scope_selector'):
            voluptuous.Boolean(),
        voluptuous.Exclusive('scope_id', 'scope_selector'):
            api_utils.MultiQueryParam(str),
        voluptuous.Optional('scope_key', default=[]):
            api_utils.MultiQueryParam(str),
        voluptuous.Optional('fetcher', default=[]):
            api_utils.MultiQueryParam(str),
        voluptuous.Optional('collector', default=[]):
            api_utils.MultiQueryParam(str),
        voluptuous.Optional('last_processed_timestamp'):
            voluptuous.Coerce(tzutils.dt_from_iso),
        # This "state" property should be removed in the next release.
        voluptuous.Optional('state'): voluptuous.Coerce(tzutils.dt_from_iso),
    })
    def put(self, all_scopes=False, scope_id=None, scope_key=None,
            fetcher=None, collector=None, last_processed_timestamp=None,
            state=None):
        policy.authorize(
            flask.request.context,
            'scope:reset_state',
            {'project_id': scope_id or flask.request.context.project_id}
        )

        if not all_scopes and scope_id is None:
            raise http_exceptions.BadRequest(
                "Either all_scopes or a scope_id should be specified.")

        if not state and not last_processed_timestamp:
            raise http_exceptions.BadRequest(
                "Variables 'state' and 'last_processed_timestamp' cannot be "
                "empty/None. We expect at least one of them.")

        if state:
            LOG.warning("The use of 'state' variable is deprecated, and will "
                        "be removed in the next upcoming release. 
You should " "consider using 'last_processed_timestamp' variable.") results = self._storage_state.get_all( identifier=scope_id, scope_key=scope_key, fetcher=fetcher, collector=collector, ) if len(results) < 1: raise http_exceptions.NotFound( "No resource found for provided filters.") serialized_results = [{ 'scope_id': r.identifier, 'scope_key': r.scope_key, 'fetcher': r.fetcher, 'collector': r.collector, } for r in results] if not last_processed_timestamp: last_processed_timestamp = state self._client.cast({}, 'reset_state', res_data={ 'scopes': serialized_results, 'last_processed_timestamp': last_processed_timestamp.isoformat() }) return {}, 202 @api_utils.add_input_schema('body', { voluptuous.Required('scope_id'): api_utils.SingleQueryParam(str), voluptuous.Optional('scope_key'): api_utils.SingleQueryParam(str), voluptuous.Optional('fetcher'): api_utils.SingleQueryParam(str), voluptuous.Optional('collector'): api_utils.SingleQueryParam(str), voluptuous.Optional('active'): api_utils.SingleQueryParam(bool), }) @api_utils.add_output_schema({ voluptuous.Required('scope_id'): vutils.get_string_type(), voluptuous.Required('scope_key'): vutils.get_string_type(), voluptuous.Required('fetcher'): vutils.get_string_type(), voluptuous.Required('collector'): vutils.get_string_type(), voluptuous.Required('state'): vutils.get_string_type(), voluptuous.Required('active'): bool, voluptuous.Required('scope_activation_toggle_date'): vutils.get_string_type() }) def patch(self, scope_id, scope_key=None, fetcher=None, collector=None, active=None): policy.authorize( flask.request.context, 'scope:patch_state', {'tenant_id': scope_id or flask.request.context.project_id} ) results = self._storage_state.get_all(identifier=scope_id, active=None) if len(results) < 1: raise http_exceptions.NotFound( "No resource found for provided filters.") if len(results) > 1: LOG.debug("Too many resources found with the same scope_id [%s], " "scopes found: [%s].", scope_id, results) raise http_exceptions.NotFound("Too many resources found with " "the same scope_id: %s." % scope_id) scope_to_update = results[0] LOG.debug("Executing update of storage scope: [%s].", scope_to_update) self._storage_state.update_storage_scope(scope_to_update, scope_key=scope_key, fetcher=fetcher, collector=collector, active=active) storage_scopes = self._storage_state.get_all( identifier=scope_id, active=active) update_storage_scope = storage_scopes[0] return { 'scope_id': update_storage_scope.identifier, 'scope_key': update_storage_scope.scope_key, 'fetcher': update_storage_scope.fetcher, 'collector': update_storage_scope.collector, 'state': update_storage_scope.state.isoformat(), 'active': update_storage_scope.active, 'scope_activation_toggle_date': update_storage_scope.scope_activation_toggle_date.isoformat() }
openstack/cloudkitty
cloudkitty/api/v2/scope/state.py
Python
apache-2.0
9,390
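A hedged HTTP usage sketch for the PUT handler above. The endpoint path, host, port, and token are assumptions (CloudKitty's v2 scope-state resource is conventionally served under /v2/scope on port 8889), so treat this as illustrative only:

# Hedged usage sketch: reset a scope's last processed timestamp through
# the handler above. URL, port, and the auth token are placeholders.
import requests

resp = requests.put(
    'http://localhost:8889/v2/scope',
    headers={'X-Auth-Token': 'TOKEN'},  # placeholder credential
    json={
        'scope_id': 'some-project-id',
        # preferred over the deprecated 'state' field:
        'last_processed_timestamp': '2021-01-01T00:00:00+00:00',
    },
)
assert resp.status_code == 202  # the handler returns ({}, 202) on success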
from random import randint from ..core.bunk_user import BunkUser from .duel_result import DuelResult """ Class that represents a duel between two bunk_users """ class Duel: def __init__(self, challenger: BunkUser, opponent: BunkUser): self.challenger: BunkUser = challenger self.opponent: BunkUser = opponent self.is_bot_challenge: bool = False # Execute the duel and return the winner def execute(self) -> DuelResult: chal_val: int = randint(0, 100) opnt_val: int = randint(0, 100) winner: BunkUser = None loser: BunkUser = None if chal_val > opnt_val: winner = self.challenger loser = self.opponent else: winner = self.opponent loser = self.challenger result: DuelResult = DuelResult(self.challenger, self.opponent, winner, loser) result.challenger_roll = chal_val result.opponent_roll = opnt_val return result
fugwenna/bunkbot
src/rpg/duel.py
Python
mit
993
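A quick usage sketch for Duel.execute(). The real BunkUser constructor lives elsewhere in this repo, so a duck-typed stand-in is used here; note also that equal rolls favour the opponent, since only a strictly greater challenger roll wins:

# Usage sketch for Duel; FakeUser is a stand-in for BunkUser, and the
# attributes read off DuelResult are the ones execute() sets explicitly.
class FakeUser:
    def __init__(self, name):
        self.name = name

challenger, opponent = FakeUser('alice'), FakeUser('bob')
result = Duel(challenger, opponent).execute()
print(result.challenger_roll, result.opponent_roll)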
# # Copyright 2011 Twitter, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Simple word count example.""" from pycascading.helpers import * @udf_map(produces=['word']) def split_words(tuple): """The function to split the line and return several new tuples. The tuple to operate on is passed in as the first parameter. We are yielding the results in a for loop back. Each word becomes the only field in a new tuple stream, and the string to be split is the 2nd field of the input tuple. """ for word in tuple.get(1).split(): yield [word] def main(): flow = Flow() # The TextLine() scheme produces tuples where the first field is the # offset of the line in the file, and the second is the line as a string. input = flow.source(Hfs(TextLine(), 'pycascading_data/town.txt')) output = flow.tsv_sink('pycascading_data/out') input | split_words | group_by('word', native.count()) | output flow.run(num_reducers=2)
twitter/pycascading
examples/word_count.py
Python
apache-2.0
1,485
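For reference, the same computation in plain Python. The PyCascading flow above performs this group-by-count as a Hadoop job; this sketch assumes the same input file is readable locally:

# Plain-Python equivalent of the word-count flow above.
from collections import Counter

with open('pycascading_data/town.txt') as f:
    counts = Counter(word for line in f for word in line.split())
for word, n in counts.items():
    print(word, n, sep='\t')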
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. import os import posixpath from StringIO import StringIO from dmunit import DeviceManagerTestCase class ExecEnvTestCase(DeviceManagerTestCase): def runTest(self): """Exec test with env vars.""" # Push the file localfile = os.path.join('test-files', 'test_script.sh') remotefile = posixpath.join(self.dm.deviceRoot, 'test_script.sh') self.dm.pushFile(localfile, remotefile) # Run the cmd out = StringIO() self.dm.shell(['sh', remotefile], out, env={'THE_ANSWER': 42}) # Rewind the output file out.seek(0) # Make sure first line is 42 line = out.readline() self.assertTrue(int(line) == 42) # Clean up self.dm.removeFile(remotefile)
Yukarumya/Yukarum-Redfoxes
testing/mozbase/mozdevice/sut_tests/test_exec_env.py
Python
mpl-2.0
961
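The pushed test_script.sh is not included in this record. A minimal script that would satisfy the assertion simply echoes the variable; its exact content here is a guess:

# Guessed content for test-files/test_script.sh: echo the env var so the
# first line of output is 42, as the assertion above expects.
with open('test-files/test_script.sh', 'w') as f:
    f.write('echo $THE_ANSWER\n')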
# -*- coding: utf-8 -*- import handlers from django.conf.urls import url, patterns from anaf.core.api.auth import auth_engine from anaf.core.api.doc import documentation_view from anaf.core.api.resource import CsrfExemptResource ad = {'authentication': auth_engine} # events resources eventResource = CsrfExemptResource(handler=handlers.EventHandler, **ad) urlpatterns = patterns('', # Events url(r'^doc$', documentation_view, kwargs={'module': handlers}, name="api_events_doc"), url(r'^events$', eventResource, name="api_events"), url(r'^event/(?P<object_ptr>\d+)', eventResource, name="api_events"), )
tovmeod/anaf
anaf/events/api/urls.py
Python
bsd-3-clause
723
import logging import os import shutil from typing import Any, Dict, List, Optional from zerver.data_import.import_util import build_attachment, create_converted_data_files class AttachmentHandler: def __init__(self) -> None: self.info_dict: Dict[str, Dict[str, Any]] = dict() def handle_message_data(self, realm_id: int, message_id: int, sender_id: int, attachment: Dict[str, Any], files_dir: str) -> Optional[str]: if not attachment: return None name = attachment['name'] if 'path' not in attachment: logging.info('Skipping HipChat attachment with missing path data: ' + name) return None size = attachment['size'] path = attachment['path'] local_fn = os.path.join(files_dir, path) if not os.path.exists(local_fn): # HipChat has an option to not include these in its # exports, since file uploads can be very large. logging.info('Skipping attachment with no file data: ' + local_fn) return None target_path = os.path.join( str(realm_id), 'HipChatImportAttachment', path, ) if target_path in self.info_dict: logging.info("file used multiple times: " + path) info = self.info_dict[target_path] info['message_ids'].add(message_id) return info['content'] # HipChat provides size info, but it's not # completely trustworthy, so we we just # ask the OS for file details. size = os.path.getsize(local_fn) mtime = os.path.getmtime(local_fn) content = f'[{name}](/user_uploads/{target_path})' info = dict( message_ids={message_id}, sender_id=sender_id, local_fn=local_fn, target_path=target_path, name=name, size=size, mtime=mtime, content=content, ) self.info_dict[target_path] = info return content def write_info(self, output_dir: str, realm_id: int) -> None: attachments: List[Dict[str, Any]] = [] uploads_records: List[Dict[str, Any]] = [] def add_attachment(info: Dict[str, Any]) -> None: build_attachment( realm_id=realm_id, message_ids=info['message_ids'], user_id=info['sender_id'], fileinfo=dict( created=info['mtime'], # minor lie size=info['size'], name=info['name'], ), s3_path=info['target_path'], zerver_attachment=attachments, ) def add_upload(info: Dict[str, Any]) -> None: target_path = info['target_path'] upload_rec = dict( size=info['size'], user_profile_id=info['sender_id'], realm_id=realm_id, s3_path=target_path, path=target_path, content_type=None, ) uploads_records.append(upload_rec) def make_full_target_path(info: Dict[str, Any]) -> str: target_path = info['target_path'] full_target_path = os.path.join( output_dir, 'uploads', target_path, ) full_target_path = os.path.abspath(full_target_path) os.makedirs(os.path.dirname(full_target_path), exist_ok=True) return full_target_path def copy_file(info: Dict[str, Any]) -> None: source_path = info['local_fn'] target_path = make_full_target_path(info) shutil.copyfile(source_path, target_path) logging.info('Start processing attachment files') for info in self.info_dict.values(): add_attachment(info) add_upload(info) copy_file(info) uploads_folder = os.path.join(output_dir, 'uploads') os.makedirs(os.path.join(uploads_folder, str(realm_id)), exist_ok=True) attachment = dict( zerver_attachment=attachments, ) create_converted_data_files(uploads_records, output_dir, '/uploads/records.json') create_converted_data_files(attachment, output_dir, '/attachment.json') logging.info('Done processing attachment files')
timabbott/zulip
zerver/data_import/hipchat_attachment.py
Python
apache-2.0
4,561
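A hedged usage sketch for AttachmentHandler. The paths and IDs are made up; the attachment dict mirrors the keys handle_message_data actually reads ('name', 'path', 'size'):

# Usage sketch: register one HipChat attachment and inspect the Markdown
# link it produces. files_dir is a placeholder export location.
handler = AttachmentHandler()
content = handler.handle_message_data(
    realm_id=1,
    message_id=42,
    sender_id=7,
    attachment={'name': 'photo.png', 'path': 'abc/photo.png', 'size': 2048},
    files_dir='/tmp/hipchat_export/files',
)
# content is a link like '[photo.png](/user_uploads/...)' when the file
# exists on disk, or None otherwise; write_info() later emits the
# records.json / attachment.json files consumed by the import pipeline.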
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) import os from spack import * class Casacore(CMakePackage): """A suite of c++ libraries for radio astronomy data processing.""" homepage = "https://github.com/casacore/casacore" url = "https://github.com/casacore/casacore/archive/v2.4.1.tar.gz" maintainers = ['mpokorny'] version('3.4.0', sha256='31f02ad2e26f29bab4a47a2a69e049d7bc511084a0b8263360e6157356f92ae1') version('3.3.0', sha256='3a714644b908ef6e81489b792cc9b80f6d8267a275e15d38a42a6a5137d39d3d') version('3.2.0', sha256='ae5d3786cb6dfdd7ebc5eecc0c724ff02bbf6929720bc23be43a027978e79a5f') version('3.1.2', sha256='ac94f4246412eb45d503f1019cabe2bb04e3861e1f3254b832d9b1164ea5f281') version('3.1.1', sha256='85d2b17d856592fb206b17e0a344a29330650a4269c80b87f8abb3eaf3dadad4') version('3.1.0', sha256='a6adf2d77ad0d6f32995b1e297fd88d31ded9c3e0bb8f28966d7b35a969f7897') version('3.0.0', sha256='6f0e68fd77b5c96299f7583a03a53a90980ec347bff9dfb4c0abb0e2933e6bcb') version('2.4.1', sha256='58eccc875053b2c6fe44fe53b6463030ef169597ec29926936f18d27b5087d63') depends_on('cmake@3.7.1:', type='build') variant('openmp', default=False, description='Build OpenMP support') variant('shared', default=True, description='Build shared libraries') variant('readline', default=True, description='Build readline support') # see note below about the reason for disabling the "sofa" variant # variant('sofa', default=False, description='Build SOFA support') variant('adios2', default=False, description='Build ADIOS2 support') variant('fftpack', default=False, description='Build FFTPack') variant('hdf5', default=False, description='Build HDF5 support') variant('python', default=False, description='Build python support') # Force dependency on readline in v3.2 and earlier. Although the # presence of readline is tested in CMakeLists.txt, and casacore # can be built without it, there's no way to control that # dependency at build time; since many systems come with readline, # it's better to explicitly depend on it here always. depends_on('readline', when='@:3.2.0') depends_on('readline', when='+readline') depends_on('flex', type='build') depends_on('bison', type='build') depends_on('blas') depends_on('lapack') depends_on('cfitsio') depends_on('wcslib@4.20:+cfitsio') depends_on('fftw@3.0.0: precision=float,double', when='@3.4.0:') depends_on('fftw@3.0.0: precision=float,double', when='~fftpack') # SOFA dependency suffers the same problem in CMakeLists.txt as readline; # force a dependency when building unit tests depends_on('sofa-c', type='test') depends_on('hdf5', when='+hdf5') depends_on('adios2+mpi', when='+adios2') depends_on('mpi', when='+adios2') depends_on('python@2.6:', when='+python') depends_on('boost+python', when='+python') depends_on('py-numpy', when='+python') def cmake_args(self): args = [] spec = self.spec args.append(self.define_from_variant('ENABLE_SHARED', 'shared')) args.append(self.define_from_variant('USE_OPENMP', 'openmp')) args.append(self.define_from_variant('USE_READLINE', 'readline')) args.append(self.define_from_variant('USE_HDF5', 'hdf5')) args.append(self.define_from_variant('USE_ADIOS2', 'adios2')) args.append(self.define_from_variant('USE_MPI', 'adios2')) if spec.satisfies('+adios2'): args.append(self.define('ENABLE_TABLELOCKING', False)) # fftw3 is required by casacore starting with v3.4.0, but the # old fftpack is still available. 
For v3.4.0 and later, we # always require FFTW3 dependency with the optional addition # of FFTPack. In older casacore versions, only one of FFTW3 or # FFTPack can be selected. if spec.satisfies('@3.4.0:'): if spec.satisfies('+fftpack'): args.append('-DBUILD_FFTPACK_DEPRECATED=YES') args.append(self.define('USE_FFTW3', True)) else: args.append(self.define('USE_FFTW3', spec.satisfies('~fftpack'))) # Python2 and Python3 binding if spec.satisfies('~python'): args.extend(['-DBUILD_PYTHON=NO', '-DBUILD_PYTHON3=NO']) elif spec.satisfies('^python@3.0.0:'): args.extend(['-DBUILD_PYTHON=NO', '-DBUILD_PYTHON3=YES']) else: args.extend(['-DBUILD_PYTHON=YES', '-DBUILD_PYTHON3=NO']) args.append('-DBUILD_TESTING=OFF') return args def patch(self): # Rely on CMake ability to find hdf5, available since CMake 3.7.X os.remove('cmake/FindHDF5.cmake')
LLNL/spack
var/spack/repos/builtin/packages/casacore/package.py
Python
lgpl-2.1
4,875
from __future__ import absolute_import, unicode_literals import os import os.path from freight.constants import PROJECT_ROOT from freight.exceptions import CommandError class UnknownRevision(CommandError): pass class Vcs(object): ssh_connect_path = os.path.join(PROJECT_ROOT, 'bin', 'ssh-connect') def __init__(self, workspace, url, username=None): self.url = url self.username = username self.workspace = workspace self._path_exists = None @property def path(self): return self.workspace.path def get_default_env(self): return {} def run(self, command, capture=False, workspace=None, *args, **kwargs): if workspace is None: workspace = self.workspace if not self.exists(workspace=workspace): kwargs.setdefault('cwd', None) env = kwargs.pop('env', {}) for key, value in self.get_default_env().iteritems(): env.setdefault(key, value) env.setdefault('FREIGHT_SSH_REPO', self.url) kwargs['env'] = env if capture: handler = workspace.capture else: handler = workspace.run rv = handler(command, *args, **kwargs) if isinstance(rv, basestring): return rv.strip() return rv def exists(self, workspace=None): if workspace is None: workspace = self.workspace return os.path.exists(workspace.path) def clone_or_update(self): if self.exists(): self.update() else: self.clone() def clone(self): raise NotImplementedError def update(self): raise NotImplementedError def checkout(self, ref): raise NotImplementedError def describe(self, ref): """ Given a `ref` return the fully qualified version. """ raise NotImplementedError def get_default_revision(self): raise NotImplementedError
jkimbo/freight
freight/vcs/base.py
Python
apache-2.0
1,988
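Vcs is abstract, so subclasses supply the actual VCS commands. A minimal sketch of one plausible concrete backend, assuming git; freight ships its own git implementation, so this only illustrates the required overrides, not the project's real code:

# Sketch of a concrete Vcs subclass. The GIT_SSH wiring through the
# bundled ssh-connect helper is an assumption about intended usage.
class GitVcs(Vcs):
    def get_default_env(self):
        return {'GIT_SSH': self.ssh_connect_path}

    def clone(self):
        self.run(['git', 'clone', self.url, self.path])

    def update(self):
        self.run(['git', 'fetch', '--all'], cwd=self.path)

    def checkout(self, ref):
        self.run(['git', 'checkout', ref], cwd=self.path)

    def describe(self, ref):
        # capture=True routes through workspace.capture and strips output
        return self.run(['git', 'rev-parse', ref], capture=True, cwd=self.path)

    def get_default_revision(self):
        return 'master'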
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# TODO: add install requirements
config = {
    'description': 'Pattern Detection and Recognition using Deep Learning',
    'author': ['jalFaizy', 'manasikhapke', 'shraddhabgunjal'],
    'url': '',
    'author_email': 'faizankshaikh@gmail.com',
    'version': '0.1',
    'license': 'MIT',
    'install_requires': [],
    'packages': [''],
    'scripts': [],
    'name': 'Text Spotting'
}

setup(**config)
faizankshaikh/Project
project/setup.py
Python
mit
525
from django.contrib.auth.models import User from django.test import TestCase import json from mixer.backend.django import mixer from catalogue.models import Record, Application class RecordAPITestCase(TestCase): def setUp(self): # Generate a test user for endpoint responses. self.testuser = User.objects.create_user( username='testuser', email='user@dpaw.wa.gov.au.com', password='pass') # Log in testuser by default. self.client.login(username='testuser', password='pass') # Generate some Record objects. mixer.cycle(8).blend(Record, title=mixer.RANDOM) def test_list(self): url = '/catalogue/api/records/' params = {'format': 'json'} resp = self.client.get(url, data=params) self.assertEqual(resp.status_code, 200) def test_list_filter(self): url = '/catalogue/api/records/' params = {'format': 'json'} resp = self.client.get(url, data=params) unfiltered = json.loads(resp.content.decode('utf-8')) records = Record.objects.all() rec1, rec2 = records[0], records[1] # Generate an Application app = mixer.blend(Application, name='test') app.records.add(rec1) params = {'format': 'json', 'application__name': 'test'} resp = self.client.get(url, data=params) self.assertEqual(resp.status_code, 200) self.assertContains(resp, rec1.title) self.assertNotContains(resp, rec2.title) # The filtered response will be shorter than the unfiltered one. filtered = json.loads(resp.content.decode('utf-8')) self.assertTrue(len(unfiltered) > len(filtered))
rockychen-dpaw/oim-cms
catalogue/test_api.py
Python
apache-2.0
1,689
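The query-string filter exercised above ('application__name=test') corresponds to this ORM lookup; it assumes the Application.records many-to-many exposes the default reverse name 'application' on Record:

# ORM equivalent of the filtered API call in test_list_filter.
from catalogue.models import Record

matching = Record.objects.filter(application__name='test')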
#=============================================================================== # Submodules #=============================================================================== from empty import Empty from plane import Plane from sphere import Sphere from box import Box from tube import Tube from cylinder import Cylinder from mesh import Mesh from ray import Ray
kralf/morsel
python/lib/morsel/nodes/panda/solids/__init__.py
Python
gpl-2.0
364
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'BackupPolicy' db.create_table(u'backup_backuppolicy', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)), ('source_ip', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)), ('source_path', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)), ('dest_share', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)), ('notify_email', self.gf('django.db.models.fields.CharField')(unique=True, max_length=4096)), ('start', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, db_index=True, blank=True)), ('frequency', self.gf('django.db.models.fields.IntegerField')()), ('num_retain', self.gf('django.db.models.fields.IntegerField')()), )) db.send_create_signal('backup', ['BackupPolicy']) def backwards(self, orm): # Deleting model 'BackupPolicy' db.delete_table(u'backup_backuppolicy') models = { 'backup.backuppolicy': { 'Meta': {'object_name': 'BackupPolicy'}, 'dest_share': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'frequency': ('django.db.models.fields.IntegerField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'notify_email': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '4096'}), 'num_retain': ('django.db.models.fields.IntegerField', [], {}), 'source_ip': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'source_path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'start': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}) } } complete_apps = ['backup']
kamal-gade/rockstor-core
src/rockstor/backup/migrations/0001_initial.py
Python
gpl-3.0
2,438
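For readability, here is the model this South migration creates, sketched as a Django model class. The field options are taken directly from the migration above, so this is a faithful restatement rather than rockstor's actual models.py:

# BackupPolicy as a Django model, mirroring the forwards() table exactly.
from django.db import models

class BackupPolicy(models.Model):
    name = models.CharField(max_length=255, unique=True)
    source_ip = models.CharField(max_length=255, unique=True)
    source_path = models.CharField(max_length=255, unique=True)
    dest_share = models.CharField(max_length=255, unique=True)
    notify_email = models.CharField(max_length=4096, unique=True)
    start = models.DateTimeField(auto_now=True, db_index=True, blank=True)
    frequency = models.IntegerField()
    num_retain = models.IntegerField()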
#!/usr/bin/env python # File: transform.py import sys import os import pathlib import magic import multiprocessing as mp import time from urllib.parse import urlparse import bs4 import tqdm import pygments import pygments.lexers import pygments.formatters from selectolax.parser import HTMLParser LEXER = pygments.lexers.get_lexer_by_name('python', stripall=True) FORMATTER = pygments.formatters.HtmlFormatter() def _read(fname): if 'gzip compressed' in magic.from_file(fname): import gzip f = gzip.open(fname) else: f = open(fname, 'rb') html = f.read().decode('utf-8') f.close() return html def _get_level(fname): dirname = os.path.dirname(fname) cnt = 0 while not os.path.isfile(os.path.join(dirname, 'main.css')): dirname = os.path.join(dirname, '..') cnt += 1 return cnt def process(fname): if not fname.endswith('.html'): return html = _read(fname) level = _get_level(fname) IGNORE = [ 'header', 'footer', 'devsite-book-nav', 'nav', 'devsite-header', 'devsite-toc', 'devsite-content-footer', 'devsite-page-rating', 'script' ] tree = HTMLParser(html) tree.strip_tags(IGNORE) for node in tree.css("div.devsite-article-meta"): node.decompose() # remove the TF2 button buttons = tree.css_first("table.tfo-notebook-buttons") if buttons: for node in buttons.css("td"): if "TensorFlow 2" in node.text(): node.decompose() break # point to the new css allcss = tree.css("link[rel='stylesheet']") if allcss: css = allcss[0] css.attrs['href'] = ''.join(['../'] * level) + 'main.css' for k in allcss[1:]: k.decompose() # add method/class declarations title_node = tree.css_first("h1.devsite-page-title") if title_node: # mark method method_node = tree.css_first('h2#methods') if method_node: # print("Find class:", title) title_node.attrs['class'] = 'dash-class' title = title_node.text().strip() children = list(method_node.parent.iter()) for method_idx, node in enumerate(children): if node.attrs.get('id') == 'methods': break for k in range(method_idx, len(children) - 2): if children[k].tag == 'h3' and children[k + 2].tag == 'pre': # is a method: children[k].attrs['class'] = 'dash-method' # print("Find method ", children[k].text()) name_node = children[k].child.child name_node.replace_with(title + "." 
+ name_node.text()) else: title_node.attrs['class'] = 'dash-function' # Change all self-referential links to relative ROOT = './www.tensorflow.org/versions/r1.15/' # change it when version is changed ANCHOR = '/api_docs/python' for link in tree.css('a'): href = link.attrs.get('href', '') href = urlparse(href).path if ANCHOR in href: prefix_url = href.find(ANCHOR) link_fname = os.path.join(ROOT, href[prefix_url + 1:]) if not os.path.isfile(link_fname): link_fname += ".html" if os.path.isfile(link_fname): relpath = os.path.relpath(link_fname, start=os.path.dirname(fname)) link.attrs['href'] = relpath soup = bs4.BeautifulSoup(tree.html, 'lxml') for pycode in soup.findAll('pre', attrs={"class": "lang-python"}): code = pycode.code.text code = pygments.highlight(code, LEXER, FORMATTER) # https://github.com/rushter/selectolax/issues/26 pycode.replaceWith(bs4.BeautifulSoup(code, 'lxml')) MATHJAX = """ <script src="https://polyfill.io/v3/polyfill.min.js?features=es6"></script> <script id="MathJax-script" async src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script> """ # mathjax only works with internet head = soup.findAll('head')[0] mathjax = bs4.BeautifulSoup(MATHJAX, 'lxml').findAll('script') head.extend(mathjax) with open(fname, 'w') as f: f.write(str(soup)) if __name__ == '__main__': path = os.path.abspath(sys.argv[1]) if os.path.isfile(path): process(path) elif os.path.isdir(path): files = pathlib.Path(path).glob("**/*.html") files = [os.fspath(x) for x in files] pool = mp.Pool(int(os.cpu_count() * 1.5)) for _ in tqdm.tqdm( pool.imap_unordered(process, files, chunksize=20), total=len(files)): pass pool.close()
ppwwyyxx/dash-docset-tensorflow
transform.py
Python
apache-2.0
4,728
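A small standalone demo of the selectolax calls transform.py relies on (strip_tags, css_first, attribute mutation), runnable without the TensorFlow docs tree:

# Demo of the selectolax operations used in process(): strip boilerplate
# tags wholesale, then retag the page title the way the docset expects.
from selectolax.parser import HTMLParser

tree = HTMLParser('<html><body><nav>menu</nav>'
                  '<h1 class="devsite-page-title">tf.foo</h1></body></html>')
tree.strip_tags(['nav'])                      # drop boilerplate subtrees
title = tree.css_first('h1.devsite-page-title')
title.attrs['class'] = 'dash-class'           # same mutation as process()
print(tree.html)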
import sys sys.path.insert(0, "../..") from Cube import * if sys.version_info[0] >= 3: raw_input = input #Define global variables globalVarCount = {} globalVarCount['bool'] = 0 globalVarCount['int'] = 0 globalVarCount['float'] = 0 globalVarCount['char'] = 0 localVarCount = {} localVarCount['bool'] = 0 localVarCount['int'] = 0 localVarCount['float'] = 0 localVarCount['char'] = 0 tempVarCount = {} tempVarCount['bool'] = 0 tempVarCount['int'] = 0 tempVarCount['float'] = 0 tempVarCount['char'] = 0 constVarCount = {} constVarCount['bool'] = 0 constVarCount['int'] = 0 constVarCount['float'] = 0 constVarCount['char'] = 0 quadruples = [] operandStack = [] operationStack = [] typeStack = [] constants = {'true':{'value':True, 'type':BOOL}, 'false':{'value':False, 'type':BOOL}} varGlobal = {} varLocal = {} funcGlobal = {} funcArguments = [] variableType = None funcType = None lastVarName = None lastFuncName = None funcTypeNext = False scope = 'global' # Tokens reserved = { 'module' : 'MODULE', 'main' : 'MAIN', 'func' : 'FUNC', 'print' : 'PRINT', 'read' : 'READ', 'if' : 'IF', 'else' : 'ELSE', 'elseif' : 'ELSEIF', 'true' : 'TRUE', 'false' : 'FALSE', 'void' : 'VOID', 'while' : 'WHILE', 'bool' : 'TBOOL', 'int' : 'TINT', 'float' : 'TFLOAT', 'char' : 'TCHAR', 'return' : 'RETURN' } tokens = [ 'ASSIGN', 'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'LESSTHAN', 'GREATERTHAN', 'LESSTHANEQ', 'GREATERTHANEQ', 'EQUAL', 'DIFFERENT', 'OR', 'AND', 'LEFTBKT', 'RIGHTBKT', 'LEFTSQBKT', 'RIGHTSQBKT', 'LEFTPAREN', 'RIGHTPAREN', 'COMMA', 'SEMICOLON', 'ID', 'NUMBERINT', 'NUMBERFLT', 'STRING' ] + list(reserved.values()) t_ASSIGN = r'=' t_PLUS = r'\+' t_MINUS = r'\-' t_TIMES = r'\*' t_DIVIDE = r'\/' t_LESSTHAN = r'\<' t_GREATERTHAN = r'\>' t_LESSTHANEQ = r'\<=' t_GREATERTHANEQ = r'\>=' t_EQUAL = r'==' t_DIFFERENT = r'!=' t_OR = r'\|\|' t_AND = r'&&' t_LEFTBKT = r'\{' t_RIGHTBKT = r'\}' t_LEFTSQBKT = r'\[' t_RIGHTSQBKT = r'\]' t_LEFTPAREN = r'\(' t_RIGHTPAREN = r'\)' t_COMMA = r'\,' t_SEMICOLON = r'\;' t_NUMBERINT = r'[0-9]+' t_NUMBERFLT = r'[0-9]+\.[0-9]+' t_ignore = " \t" def t_ID(t): r'[a-z_][a-zA-Z0-9_]*' t.type = reserved.get(t.value, 'ID') return t def t_STRING(t): r'\".*\"' return t def t_newline(t): r'\n+' t.lexer.lineno += t.value.count("\n") def t_error(t): print("Illegal character '%s'" % t.value[0]) t.lexer.skip(1) # Build the lexer import ply.lex as lex lex.lex() start = 'moduleg' # For using empty def p_empty(p): '''empty :''' pass def p_moduleg(p): '''moduleg : MODULE ID LEFTBKT module1 maing RIGHTBKT''' def p_module1(p): '''module1 : empty | vars module1 | funcg module1''' def p_vars4(p): '''vars4 : constant | PLUS constant | MINUS constant''' def p_vars3(p): '''vars3 : empty | LEFTSQBKT cteN RIGHTSQBKT convertVariableToArray''' def p_vars2(p): '''vars2 : empty | COMMA vars1''' def p_vars1(p): '''vars1 : ID addVariable vars3 ASSIGN vars4''' def p_vars(p): '''vars : type vars1 vars2 SEMICOLON''' def p_func2(p): '''func2 : empty | RETURN expression SEMICOLON | statute func2''' def p_func1(p): '''func1 : VOID saveFuncTypeVoid ID saveFuncName LEFTPAREN arguments RIGHTPAREN LEFTBKT func2 RIGHTBKT | funcTypeNext type ID saveFuncName LEFTPAREN arguments RIGHTPAREN LEFTBKT func2 RIGHTBKT''' global varLocal global funcArguments addFunction(lastFuncName, funcType, funcArguments) #print('local vars: %s' % varLocal) varLocal = {} funcArguments = [] def p_funcg(p): '''funcg : FUNC changeToLocalScope func1 changeToGlobalScope''' def p_maing(p): '''maing : MAIN changeToLocalScope block''' print(quadruples) # print('local vars: %s' % 
varLocal) # print('global vars: %s' % varGlobal) # print('functions: %s' % funcGlobal) def p_block1(p): '''block1 : empty | statute block1''' def p_block(p): '''block : LEFTBKT block1 RIGHTBKT''' def p_write(p): '''write : PRINT LEFTPAREN cte RIGHTPAREN SEMICOLON''' def p_readg(p): '''readg : READ LEFTPAREN ID RIGHTPAREN SEMICOLON''' def p_expression1(p): '''expression1 : empty | GREATERTHANEQ saveOperation exp expressionEnded | LESSTHANEQ saveOperation exp expressionEnded | GREATERTHAN saveOperation exp expressionEnded | LESSTHAN saveOperation exp expressionEnded | EQUAL saveOperation exp expressionEnded | DIFFERENT saveOperation exp expressionEnded | OR saveOperation exp expressionEnded | AND saveOperation exp expressionEnded''' def p_expression(p): '''expression : exp expression1''' def p_exp1(p): '''exp1 : empty | PLUS saveOperation exp exp1 | MINUS saveOperation exp exp1''' def p_exp(p): '''exp : term exp1''' def p_term1(p): '''term1 : empty | TIMES saveOperation term term1 | DIVIDE saveOperation term term1''' def p_term(p): '''term : factor term1 termEnded''' def p_factor1(p): '''factor1 : constant | PLUS constant | MINUS constant''' global operandStack print(operandStack) operand = {} if len(p) == 3: operand = getOperand(p[2]) # p[0] = p[1] + str(p[2]) # # Verify PLUS & MINUS are used only on INT & FLOATS # if ((isinstance(p[2], int)) or (isinstance(p[2], float))): # if (p[1] == '-'): # cuadruplos.pOperandos.append(p[2]*-1) # else: # cuadruplos.pOperandos.append(p[2]) # # Insert Type of varcte to pTipos # if isinstance(p[2], int): # cuadruplos.pTipos.append(INT) # elif isinstance(p[2], float): # cuadruplos.pTipos.append(FLOAT) # else: # print("Operator mismatch you have a %s before a type: %s at line: %s" %(p[1], type(p[2]), lexer.lineno)) # exit(1) else: operand = getOperand(p[1]) # p[0] = p[1] # print("VARCTE operando: %s" %(str(p[1]))) # cuadruplos.pOperandos.append(p[1]) # print("operadores VARCTE encontrada: %s" %(str(cuadruplos.pOperandos))) # # Insert Type of varcte to pTipos # if isinstance(p[1], int): # cuadruplos.pTipos.append(INT) # elif isinstance(p[1], float): # cuadruplos.pTipos.append(FLOAT) # elif isinstance(p[1], bool): # cuadruplos.pTipos.append(BOOL) # else: # if globalVars.has_key(p[1]): # cuadruplos.pTipos.append(globalVars[p[1]][0]) # elif function_ptr != "GLOBAL" and functionsDir[function_ptr][1].has_key(p[1]): # cuadruplos.pTipos.append(functionsDir[function_ptr][1][p[1]][0]) # else: # cuadruplos.pTipos.append(STRING) operandStack.append(operand) typeStack.append(variableType) def p_factor(p): '''factor : LEFTPAREN addFakeBottom expression RIGHTPAREN removeFakeBottom factorEnded | factor1 factorEnded''' def p_statute(p): '''statute : call | assignement | vars | condition | readg | write | cycle''' def p_cycle(p): '''cycle : WHILE LEFTPAREN expression RIGHTPAREN block''' def p_call2(p): '''call2 : empty | COMMA exp call2''' def p_call1(p): '''call1 : empty | exp call2''' def p_call(p): '''call : ID LEFTPAREN call1 RIGHTPAREN SEMICOLON''' def p_arguments1(p): '''arguments1 : empty | COMMA type ID addArgument arguments1''' def p_arguments(p): '''arguments : empty | type ID addArgument arguments1''' def p_constant1(p): '''constant1 : empty | COMMA cte constant1''' def p_constant(p): '''constant : cte | LEFTSQBKT cte constant1 RIGHTSQBKT''' if len(p) == 2: p[0] = p[1] def p_cte(p): '''cte : ID | varArr | TRUE | FALSE | cteN | cteS''' p[0] = p[1] def p_cteN(p): '''cteN : NUMBERINT addConstant | NUMBERFLT addConstant''' p[0] = p[1] def p_cteS(p): '''cteS : STRING''' 
def p_condition2(p): '''condition2 : empty | ELSE block''' def p_condition1(p): '''condition1 : empty | ELSEIF LEFTPAREN expression RIGHTPAREN block condition1''' def p_condition(p): '''condition : IF LEFTPAREN expression RIGHTPAREN block condition1 condition2''' def p_assignement2(p): '''assignement2 : call | expression''' def p_assignement1(p): '''assignement1 : ID | varArr''' def p_assignement(p): '''assignement : assignement1 ASSIGN assignement2 SEMICOLON''' def p_varArr(p): '''varArr : ID LEFTSQBKT exp RIGHTSQBKT''' def p_type(p): '''type : TBOOL addType | TINT addType | TFLOAT addType | TCHAR addType''' # extra grammar def p_convertVariableToArray(p): '''convertVariableToArray : empty''' convertVariableToArray() def p_addVariable(p): '''addVariable : empty''' global lastVarName lastVarName = p[-1] variableName = lastVarName addVariable(variableName, variableType) def p_addConstant(p): '''addConstant : empty''' constType = -1 cte = num(p[-1]) if type(cte) is int: constType = INT else: constType = FLOAT global constants if not str(cte) in constants.keys(): constants[str(cte)] = {'value':cte, 'type':constType} def p_saveFuncName(p): '''saveFuncName : empty''' global lastFuncName lastFuncName = p[-1] def p_funcTypeNext(p): '''funcTypeNext : empty''' global funcTypeNext funcTypeNext = True def p_saveFuncTypeVoid(p): '''saveFuncTypeVoid : empty''' global funcType funcType = VOID def p_addArgument(p): '''addArgument : empty''' global lastVarName global funcArguments lastVarName = p[-1] variableName = lastVarName addVariable(variableName, variableType) funcArguments.append(varLocal[variableName]) def p_addType(p): '''addType : empty''' global variableType global funcTypeNext global funcType if funcTypeNext: funcType = getTypeValue(p[-1]) else: variableType = getTypeValue(p[-1]) funcTypeNext = False def p_saveOperation(p): '''saveOperation : empty''' global operationStack operationStack.append(p[-1]) def p_termEnded(p): '''termEnded : empty''' global operationStack global operandStack if len(operationStack) > 0: if operationStack[-1] == '+' or operationStack[-1] == '-' or operationStack[-1] == '||': operand2 = operandStack.pop() operation = operationStack.pop() operand1 = operandStack.pop() print(operand1) print(operation) print(operand2) resultType = getResultType(operand1['type']%10, operation, operand2['type']%10) if resultType > 0: addQuadruple(operation, operand1, operand2, 0) typeStack.append(resultType) operandStack.append({'value':0, 'type':resultType}) else: print('Error: Type mismatch') exit(1) def p_factorEnded(p): '''factorEnded : empty''' global operationStack global operandStack if len(operationStack) > 0: if operationStack[-1] == '*' or operationStack[-1] == '/' or operationStack[-1] == '&&': operand1 = operandStack.pop() operation = operationStack.pop() operand2 = operandStack.pop() print(operand1) print(operation) print(operand2) resultType = getResultType(operand1['type']%10, operation, operand2['type']%10) if resultType > 0: addQuadruple(operation, operand1, operand2, 0) typeStack.append(resultType) operandStack.append({'value':0, 'type':resultType}) else: print('Error: Type mismatch') exit(1) def p_expressionEnded(p): '''expressionEnded : empty''' global operationStack global operandStack if len(operationStack) > 0: if operationStack[-1] == '<' or operationStack[-1] == '>' or operationStack[-1] == '<=' or operationStack[-1] == '>=' or operationStack[-1] == '==' or operationStack[-1] == '!=': operand1 = operandStack.pop() operation = operationStack.pop() operand2 = 
operandStack.pop() print(operand1) print(operation) print(operand2) resultType = getResultType(operand1['type']%10, operation, operand2['type']%10) if resultType > 0: addQuadruple(operation, operand1, operand2, 0) typeStack.append(resultType) operandStack.append({'value':0, 'type':resultType}) else: print('Type mismatch') def p_addFakeBottom(p): '''addFakeBottom : empty''' global operationStack operationStack.append('(') def p_removeFakeBottom(p): '''removeFakeBottom : empty''' global operationStack print(operationStack) operationStack.pop() def p_changeToLocalScope(p): '''changeToLocalScope : empty''' global scope scope = 'local' def p_changeToGlobalScope(p): '''changeToGlobalScope : empty''' global scope scope = 'global' def p_error(p): if p: print("Syntax error at '%s'" % p)#p.value) else: print("Syntax error at EOF") exit(1) import ply.yacc as yacc yacc.yacc() #Functions def addVariable(variable, varType): global varGlobal global varLocal if scope == 'global': if not variable in varGlobal.keys(): varGlobal[variable] = {'name':variable, 'type':varType} else: print("Variable error : Variable is already declared globally") exit(1) else: if not variable in varLocal.keys(): varLocal[variable] = {'name':variable, 'type':varType} else: print("Variable error : Variable is already declared locally") exit(1) def convertVariableToArray(): global varGlobal global varLocal if scope == 'global': varGlobal[lastVarName]['type'] *= 11 else: varLocal[lastVarName]['type'] *= 11 def addFunction(name, funType, parameters): global funcGlobal if not name in funcGlobal.keys(): funcGlobal[name] = {'name':name, 'type':funType, 'parameters':parameters} else: print("Function error : Function is already declared") exit(1) def addQuadruple(operation, var1, var2, result): global quadruples quadruples.append({'op':operation, 'var1':var1, 'var2':var2, 'result':result}) def num(s): try: return int(s) except ValueError: return float(s) def getOperand(key): if key in constants.keys(): return constants[key] elif key in varLocal.keys(): return varLocal[key] elif key in varGlobal.keys(): return varGlobal[key] # Main if __name__ == '__main__': # Check for file if (len(sys.argv) > 1): file = sys.argv[1] # Open file try: f = open(file, 'r') data = f.read() f.close() # Parse the data if (yacc.parse(data, tracking = True) == 'OK'): print(dirProc); except EOFError: print(EOFError) else: print('File missing') while 1: try: s = raw_input('') except EOFError: break if not s: continue yacc.parse(s)
sanchezz93/Giga-Compiler
Entrega 4/Giga.py
Python
mit
13,943
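A small source program in the grammar above, for reference. The program shape is inferred from the productions (moduleg, vars, maing, assignement, write), so treat it as illustrative input rather than a tested example:

# A Giga-language program matching the grammar defined above, kept as a
# Python string so it could be fed to the parser in this module.
source = '''
module demo {
    int x = 1;
    main {
        x = x + 2;
        print("done");
    }
}
'''
# yacc.parse(source)  # would run the parser built in this file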
# -*- coding: utf-8 -*- # # PySPED - Python libraries to deal with Brazil's SPED Project # # Copyright (C) 2010-2012 # Copyright (C) Aristides Caldeira <aristides.caldeira at tauga.com.br> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # PySPED - Bibliotecas Python para o # SPED - Sistema Público de Escrituração Digital # # Copyright (C) 2010-2012 # Copyright (C) Aristides Caldeira <aristides.caldeira arroba tauga.com.br> # # Este programa é um software livre: você pode redistribuir e/ou modificar # este programa sob os termos da licença GNU Affero General Public License, # publicada pela Free Software Foundation, em sua versão 3 ou, de acordo # com sua opção, qualquer versão posterior. # # Este programa é distribuido na esperança de que venha a ser útil, # porém SEM QUAISQUER GARANTIAS, nem mesmo a garantia implícita de # COMERCIABILIDADE ou ADEQUAÇÃO A UMA FINALIDADE ESPECÍFICA. Veja a # GNU Affero General Public License para mais detalhes. # # Você deve ter recebido uma cópia da GNU Affero General Public License # juntamente com este programa. Caso esse não seja o caso, acesse: # <http://www.gnu.org/licenses/> # from __future__ import (division, print_function, unicode_literals, absolute_import) from builtins import str from pysped.xml_sped import * from pysped.cte.leiaute import ESQUEMA_ATUAL_VERSAO_300 as ESQUEMA_ATUAL from .modais_300 import Multimodal, Duto, Ferrov, Aquav, Aereo, Rodo import os DIRNAME = os.path.dirname(__file__) class AutXML(XMLNFe): def __init__(self): super(AutXML, self).__init__() self.CNPJ = TagCaracter(nome='CNPJ' , tamanho=[ 0, 14], raiz='//autXML', obrigatorio=False, namespace=NAMESPACE_CTE, namespace_obrigatorio=False) self.CPF = TagCaracter(nome='CPF' , tamanho=[11, 11], raiz='//autXML', obrigatorio=False, namespace=NAMESPACE_CTE, namespace_obrigatorio=False) def get_xml(self): xml = XMLNFe.get_xml(self) xml += '<autXML>' if self.CPF.valor: xml += self.CPF.xml else: xml += self.CNPJ.xml xml += '</autXML>' return xml def set_xml(self, arquivo): if self._le_xml(arquivo): self.CNPJ.xml = arquivo self.CPF.xml = arquivo xml = property(get_xml, set_xml) class InfCTeAnu(XMLNFe): def __init__(self): super(InfCTeAnu, self).__init__() self.chCte = TagCaracter(nome='chCte', tamanho=[ 44, 44], raiz='//infCTeAn', namespace=NAMESPACE_CTE, namespace_obrigatorio=False) self.dEmi = TagData(nome='dEmi', raiz='//infCTeAn', namespace=NAMESPACE_CTE, namespace_obrigatorio=False) def get_xml(self): if not (self.chCte.valor or self.dEmi.valor): return '' xml = XMLNFe.get_xml(self) xml += '<infCteAnu>' xml += self.chCte.xml xml += self.dEmi.xml xml += '</infCteAnu>' return xml def set_xml(self, arquivo): if self._le_xml(arquivo): self.chCte.xml = arquivo self.dEmi.xml = arquivo xml = property(get_xml, set_xml) class InfCTeComp(XMLNFe): def __init__(self): super(InfCTeComp, self).__init__() self.chCTe = TagCaracter(nome='chCTe', tamanho=[ 44, 44], raiz='//infCteComp', 
namespace=NAMESPACE_CTE, namespace_obrigatorio=False) def get_xml(self): if not (self.chCTe.valor): return '' xml = XMLNFe.get_xml(self) xml += '<infCteComp>' xml += self.chCTe.xml xml += '</infCteComp>' return xml def set_xml(self, arquivo): if self._le_xml(arquivo): self.chCTe.xml = arquivo xml = property(get_xml, set_xml) class InfCTeMultimodal(XMLNFe): def __init__(self): super(InfCTeMultimodal, self).__init__() self.chCTeMultimodal = TagCaracter(nome='chCTeMultimodal', tamanho=[ 44, 44], raiz='//infCTeMultimodal', namespace=NAMESPACE_CTE, namespace_obrigatorio=False) def get_xml(self): xml = XMLNFe.get_xml(self) xml += '<infCTeMultimodal>' xml += self.chCTeMultimodal.xml xml += '</infCTeMultimodal>' return xml def set_xml(self, arquivo): if self._le_xml(arquivo): self.chCTeMultimodal.xml = arquivo xml = property(get_xml, set_xml) class InfServVinc(XMLNFe): def __init__(self): super(InfServVinc, self).__init__() self.infCTeMultimodal = [] def get_xml(self): if not (len(self.infCTeMultimodal)): return '' xml = XMLNFe.get_xml(self) xml += '<infServVinc>' for i in self.infCTeMultimodal: xml += i.xml xml += '</infServVinc>' return xml def set_xml(self, arquivo): if self._le_xml(arquivo): self.infCTeMultimodal = self.le_grupo('//CTe/infCte/infCTeNorm/infServVinc/infCTeMultimodal', InfCTeMultimodal, sigla_ns='cte') xml = property(get_xml, set_xml) class InfGlobalizado(XMLNFe): def __init__(self): super(InfGlobalizado, self).__init__() self.xObs = TagCaracter(nome='xObs' , tamanho=[ 15, 256], raiz='//infGlobalizado', namespace=NAMESPACE_CTE, namespace_obrigatorio=False) def get_xml(self): if not (self.xObs.valor): return '' xml = XMLNFe.get_xml(self) xml += '<infGlobalizado>' xml += self.xObs.xml xml += '</infGlobalizado>' return xml def set_xml(self, arquivo): if self._le_xml(arquivo): self.xObs.xml = arquivo xml = property(get_xml, set_xml) class RefNF(XMLNFe): def __init__(self): super(RefNF, self).__init__() self.CNPJ = TagCaracter(nome='CNPJ' , tamanho=[ 0, 14], raiz='//refNF', obrigatorio=False, namespace=NAMESPACE_CTE, namespace_obrigatorio=False) self.CPF = TagCaracter(nome='CPF' , tamanho=[11, 11], raiz='//refNF', obrigatorio=False, namespace=NAMESPACE_CTE, namespace_obrigatorio=False) self.mod = TagCaracter(nome='mod' , tamanho=[ 2, 2, 2], raiz='//refNF', namespace=NAMESPACE_CTE, namespace_obrigatorio=False) self.serie = TagCaracter(nome='serie' , tamanho=[ 1, 3, 1], raiz='//refNF', namespace=NAMESPACE_CTE, namespace_obrigatorio=False) self.subserie = TagCaracter(nome='subserie', tamanho=[1, 3], raiz='//refNF', obrigatorio=False, namespace=NAMESPACE_CTE, namespace_obrigatorio=False) self.nro = TagCaracter(nome='nro', tamanho=[ 1, 6] , raiz='//refNF', namespace=NAMESPACE_CTE, namespace_obrigatorio=False) self.valor = TagDecimal(nome='valor', tamanho=[1, 13, 1], decimais=[0, 2, 2], raiz='//refNF', namespace=NAMESPACE_CTE, namespace_obrigatorio=False) self.dEmi = TagData(nome='dEmi', raiz='//refNF', namespace=NAMESPACE_CTE, namespace_obrigatorio=False) def get_xml(self): if self.CNPJ.valor == '' and self.CPF.valor == '': return '' xml = XMLNFe.get_xml(self) xml += '<refNF>' if self.CPF.valor: xml += self.CPF.xml else: xml += self.CNPJ.xml xml += self.mod.xml xml += self.serie.xml xml += self.subserie.xml xml += self.nro.xml xml += self.valor.xml xml += self.dEmi.xml xml += '</refNF>' return xml def set_xml(self, arquivo): if self._le_xml(arquivo): self.CNPJ.xml = arquivo self.CPF.xml = arquivo self.mod.xml = arquivo self.serie.xml = arquivo self.subserie.xml = arquivo self.nro.xml = 
arquivo self.valor.xml = arquivo self.dEmi.xml = arquivo xml = property(get_xml, set_xml) class TomaICMS(XMLNFe): def __init__(self): super(TomaICMS, self).__init__() self.refNFe = TagCaracter(nome='refNFe', tamanho=[44, 44], raiz='//tomaICMS', namespace=NAMESPACE_CTE, namespace_obrigatorio=False, obrigatorio=False) self.refNF = RefNF() self.refCte = TagCaracter(nome='refCte', tamanho=[44, 44], raiz='//tomaICMS', namespace=NAMESPACE_CTE, namespace_obrigatorio=False, obrigatorio=False) def get_xml(self): if not (self.refNFe.valor and self.refNF.xml and self.refCte.valor): return '' xml = XMLNFe.get_xml(self) xml += '<tomaICMS>' xml += self.refNFe.xml xml += self.refNF.xml xml += self.refCte.xml xml += '</tomaICMS>' return xml def set_xml(self, arquivo): if self._le_xml(arquivo): self.refNFe.xml = arquivo self.refNF.xml = arquivo self.refCte.xml = arquivo xml = property(get_xml, set_xml) class InfCTeSub(XMLNFe): def __init__(self): super(InfCTeSub, self).__init__() self.chCte = TagCaracter(nome='chCte', tamanho=[44, 44], raiz='//infCteSub', namespace=NAMESPACE_CTE, namespace_obrigatorio=False) self.refCteAnu = TagCaracter(nome='refCteAn', tamanho=[44, 44], raiz='//infCteSub', namespace=NAMESPACE_CTE, namespace_obrigatorio=False, obrigatorio=False) self.tomaICMS = TomaICMS() #self.indAlteraToma = TagInteiro(nome='indAlteraToma', tamanho=[0, 1], raiz='//infCteSub', namespace=NAMESPACE_CTE, namespace_obrigatorio=False, obrigatorio=False) def get_xml(self): if not (self.chCte.valor) or not (self.refCteAnu.valor and self.tomaICMS.xml): return '' xml = XMLNFe.get_xml(self) xml += '<infCteSub>' xml += self.chCte.xml xml += self.refCteAnu.xml xml += self.tomaICMS.xml #xml += self.indAlteraToma.xml xml += '</infCteSub>' return xml def set_xml(self, arquivo): if self._le_xml(arquivo): self.chCte.xml = arquivo self.refCteAnu.xml = arquivo self.tomaICMS.xml = arquivo #self.indAlteraToma.xml = arquivo xml = property(get_xml, set_xml) class Dup(XMLNFe): def __init__(self): super(Dup, self).__init__() self.nDup = TagCaracter(nome='nDup', tamanho=[1, 60], raiz='//dup', obrigatorio=False, namespace=NAMESPACE_CTE, namespace_obrigatorio=False) self.dVenc = TagData(nome='dVenc', raiz='//dup', obrigatorio=False, namespace=NAMESPACE_CTE, namespace_obrigatorio=False) self.vDup = TagDecimal(nome='vDup', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//dup', obrigatorio=False, namespace=NAMESPACE_CTE, namespace_obrigatorio=False) def get_xml(self): if not (self.nDup.valor or self.dVenc.valor or self.vDup.valor): return '' xml = XMLNFe.get_xml(self) xml += '<dup>' xml += self.nDup.xml xml += self.dVenc.xml xml += self.vDup.xml xml += '</dup>' return xml def set_xml(self, arquivo): if self._le_xml(arquivo): self.nDup.xml = arquivo self.dVenc.xml = arquivo self.vDup.xml = arquivo xml = property(get_xml, set_xml) class Fat(XMLNFe): def __init__(self): super(Fat, self).__init__() self.nFat = TagCaracter(nome='nFat', tamanho=[1, 60], raiz='//CTe/infCte/infCTeNorm/cobr/fat', obrigatorio=False) self.vOrig = TagDecimal(nome='vOrig', tamanho=[1, 13, 1], decimais=[0, 2, 2], raiz='//CTe/infCte/infCTeNorm/cobr/fat', obrigatorio=False) self.vDesc = TagDecimal(nome='vDesc', tamanho=[1, 13, 1], decimais=[0, 2, 2], raiz='//CTe/infCte/infCTeNorm/cobr/fat', obrigatorio=False) self.vLiq = TagDecimal(nome='vLiq' , tamanho=[1, 13, 1], decimais=[0, 2, 2], raiz='//CTe/infCte/infCTeNorm/cobr/fat', obrigatorio=False) def get_xml(self): if not (self.nFat.valor or self.vOrig.valor or self.vDesc.valor or self.vLiq.valor): return '' xml = 
XMLNFe.get_xml(self) xml += '<fat>' xml += self.nFat.xml xml += self.vOrig.xml xml += self.vDesc.xml xml += self.vLiq.xml xml += '</fat>' return xml def set_xml(self, arquivo): if self._le_xml(arquivo): self.nFat.xml = arquivo self.vOrig.xml = arquivo self.vDesc.xml = arquivo self.vLiq.xml = arquivo xml = property(get_xml, set_xml) class Cobr(XMLNFe): def __init__(self): super(Cobr, self).__init__() self.fat = Fat() self.dup = [] def get_xml(self): if not (self.fat.xml or len(self.dup)): return '' xml = XMLNFe.get_xml(self) xml += '<cobr>' xml += self.fat.xml for d in self.dup: xml += d.xml xml += '</cobr>' return xml def set_xml(self, arquivo): if self._le_xml(arquivo): self.fat.xml = arquivo self.dup = self.le_grupo('//CTe/infCte/infCTeNorm/cobr/dup', Dup, sigla_ns='cte') xml = property(get_xml, set_xml) class VeicNovos(XMLNFe): def __init__(self): super(VeicNovos, self).__init__() self.chassi = TagCaracter(nome='chassi', tamanho=[17, 17, 17], raiz='//veicNovos', namespace=NAMESPACE_CTE, namespace_obrigatorio=False) self.cCor = TagCaracter(nome='cCor', tamanho=[1, 4] , raiz='//veicNovos', namespace=NAMESPACE_CTE, namespace_obrigatorio=False) self.xCor = TagCaracter(nome='xCor', tamanho=[1, 40] , raiz='//veicNovos', namespace=NAMESPACE_CTE, namespace_obrigatorio=False) self.cMod = TagCaracter(nome='cMod', tamanho=[1, 6] , raiz='//veicNovos', namespace=NAMESPACE_CTE, namespace_obrigatorio=False) self.vUnit = TagDecimal(nome='vUnit', tamanho=[1 , 13, 1], decimais=[0, 2, 2], raiz='//veicNovos', namespace=NAMESPACE_CTE, namespace_obrigatorio=False) self.vFrete = TagDecimal(nome='vFrete', tamanho=[1 , 13, 1], decimais=[0, 2, 2], raiz='//veicNovos', namespace=NAMESPACE_CTE, namespace_obrigatorio=False) def get_xml(self): if not (self.chassi.valor or self.cCor.valor or self.xCor.valor or self.cMod.valor or self.vUnit.valor or self.vFrete.valor): return '' xml = XMLNFe.get_xml(self) xml += '<veicNovos>' xml += self.chassi.xml xml += self.cCor.xml xml += self.xCor.xml xml += self.cMod.xml xml += self.vUnit.xml xml += self.vFrete.xml xml += '</veicNovos>' return xml def set_xml(self, arquivo): if self._le_xml(arquivo): self.chassi.xml = arquivo self.cCor.xml = arquivo self.xCor.xml = arquivo self.cMod.xml = arquivo self.vUnit.xml = arquivo self.vFrete.xml = arquivo xml = property(get_xml, set_xml) class InfModal(XMLNFe): def __init__(self): super(InfModal, self).__init__() self.versaoModal = TagDecimal(nome='infModal', propriedade='versaoModal', raiz='/', namespace=NAMESPACE_CTE, namespace_obrigatorio=False, valor='3.00') self.modal = None def get_xml(self): if not self.modal: return '' xml = XMLNFe.get_xml(self) xml += '<infModal versaoModal="' + str(self.versaoModal.valor) + '">' xml += self.modal.xml xml += '</infModal>' return xml def set_xml(self, arquivo): if self._le_xml(arquivo): self.versaoModal.xml = arquivo if ('<rodo>' in arquivo and '</rodo>' in arquivo): self.modal = Rodo() self.modal.xml = arquivo elif ('<aereo>' in arquivo and '</aereo>' in arquivo): self.modal = Aereo() self.modal.xml = arquivo elif ('<aquav>' in arquivo and '</aquav>' in arquivo): self.modal = Aquav() self.modal.xml = arquivo elif ('<ferrov>' in arquivo and '</ferrov>' in arquivo): self.modal = Ferrov() self.modal.xml = arquivo elif ('<duto>' in arquivo and '</duto>' in arquivo): self.modal = Duto() self.modal.xml = arquivo xml = property(get_xml, set_xml) class IdDocAntEle(XMLNFe): def __init__(self): super(IdDocAntEle, self).__init__() self.chCTe = TagCaracter(nome='chCTe', tamanho=[44, 44], 
raiz='//idDocAntEle', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)

    def get_xml(self):
        if not self.chCTe.valor:
            return ''

        xml = XMLNFe.get_xml(self)
        xml += '<idDocAntEle>'
        xml += self.chCTe.xml
        xml += '</idDocAntEle>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.chCTe.xml = arquivo

    xml = property(get_xml, set_xml)


class IdDocAntPap(XMLNFe):
    def __init__(self):
        super(IdDocAntPap, self).__init__()
        self.tpDoc = TagInteiro(nome='tpDoc', tamanho=[2, 2, 2], raiz='//idDocAntPap', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
        self.serie = TagCaracter(nome='serie', tamanho=[1, 3], raiz='//idDocAntPap', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
        self.subser = TagCaracter(nome='subser', tamanho=[1, 2], raiz='//idDocAntPap', namespace=NAMESPACE_CTE, namespace_obrigatorio=False, obrigatorio=False)
        self.nDoc = TagCaracter(nome='nDoc', tamanho=[1, 30], raiz='//idDocAntPap', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
        self.dEmi = TagData(nome='dEmi', raiz='//idDocAntPap', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)

    def get_xml(self):
        if not (self.tpDoc.valor or self.serie.valor):
            return ''

        xml = XMLNFe.get_xml(self)
        xml += '<idDocAntPap>'
        xml += self.tpDoc.xml
        xml += self.serie.xml
        xml += self.subser.xml
        xml += self.nDoc.xml
        xml += self.dEmi.xml
        xml += '</idDocAntPap>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.tpDoc.xml = arquivo
            self.serie.xml = arquivo
            self.subser.xml = arquivo
            self.nDoc.xml = arquivo
            self.dEmi.xml = arquivo

    xml = property(get_xml, set_xml)


class IdDocAnt(XMLNFe):
    def __init__(self):
        super(IdDocAnt, self).__init__()
        self.idDocAntPap = []
        self.idDocAntEle = []

    def get_xml(self):
        xml = XMLNFe.get_xml(self)
        xml += '<idDocAnt>'

        for ipap in self.idDocAntPap:
            xml += ipap.xml

        for iele in self.idDocAntEle:
            xml += iele.xml

        xml += '</idDocAnt>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.idDocAntPap = self.le_grupo('//CTe/infCte/infCTeNorm/docAnt/emitDocAnt/idDocAnt/idDocAntPap', IdDocAntPap, sigla_ns='cte')
            self.idDocAntEle = self.le_grupo('//CTe/infCte/infCTeNorm/docAnt/emitDocAnt/idDocAnt/idDocAntEle', IdDocAntEle, sigla_ns='cte')

    xml = property(get_xml, set_xml)


class EmitDocAnt(XMLNFe):
    def __init__(self):
        super(EmitDocAnt, self).__init__()
        self.CNPJ = TagCaracter(nome='CNPJ', tamanho=[0, 14], raiz='//emitDocAnt', namespace=NAMESPACE_CTE, namespace_obrigatorio=False, obrigatorio=False)
        self.CPF = TagCaracter(nome='CPF', tamanho=[11, 11], raiz='//emitDocAnt', namespace=NAMESPACE_CTE, namespace_obrigatorio=False, obrigatorio=False)
        self.IE = TagCaracter(nome='IE', tamanho=[2, 14], raiz='//emitDocAnt', namespace=NAMESPACE_CTE, namespace_obrigatorio=False, obrigatorio=False)
        self.UF = TagCaracter(nome='UF', tamanho=[2, 2], raiz='//emitDocAnt')
        self.xNome = TagCaracter(nome='xNome', tamanho=[2, 60], raiz='//emitDocAnt')
        self.idDocAnt = []

    def get_xml(self):
        if self.CNPJ.valor == '' and self.CPF.valor == '':
            return ''

        xml = XMLNFe.get_xml(self)
        xml += '<emitDocAnt>'

        if self.CPF.valor:
            xml += self.CPF.xml
        else:
            xml += self.CNPJ.xml

        xml += self.IE.xml
        xml += self.UF.xml
        xml += self.xNome.xml

        for iant in self.idDocAnt:
            xml += iant.xml

        xml += '</emitDocAnt>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.CPF.xml = arquivo
            self.CNPJ.xml = arquivo
            self.IE.xml = arquivo
            self.UF.xml = arquivo
            self.xNome.xml = arquivo
            self.idDocAnt = self.le_grupo('//CTe/infCte/infCTeNorm/docAnt/emitDocAnt/idDocAnt', IdDocAnt, sigla_ns='cte')

    xml = 


class DocAnt(XMLNFe):
    def __init__(self):
        super(DocAnt, self).__init__()
        self.emitDocAnt = []

    def get_xml(self):
        if not (len(self.emitDocAnt)):
            return ''

        xml = XMLNFe.get_xml(self)
        xml += '<docAnt>'

        for e in self.emitDocAnt:
            xml += e.xml

        xml += '</docAnt>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.emitDocAnt = self.le_grupo('//CTe/infCte/infCTeNorm/docAnt/emitDocAnt', EmitDocAnt, sigla_ns='cte')

    xml = property(get_xml, set_xml)


class InfOutros(XMLNFe):
    def __init__(self):
        super(InfOutros, self).__init__()
        self.tpDoc = TagInteiro(nome='tpDoc', tamanho=[2, 2, 2], raiz='//infOutros', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
        self.descOutros = TagCaracter(nome='descOutros', tamanho=[1, 100], raiz='//infOutros', namespace=NAMESPACE_CTE, namespace_obrigatorio=False, obrigatorio=False)
        self.nDoc = TagCaracter(nome='nDoc', tamanho=[2, 2, 2], raiz='//infOutros', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
        self.dEmi = TagData(nome='dEmi', raiz='//infOutros', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
        self.vDocFisc = TagDecimal(nome='vDocFisc', tamanho=[1, 13, 1], decimais=[0, 2, 2], raiz='//infOutros', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
        self.dPrev = TagData(nome='dPrev', raiz='//infOutros', namespace=NAMESPACE_CTE, namespace_obrigatorio=False, obrigatorio=False)
        self.infUnidCarga = []
        self.infUnidTransp = []

    def get_xml(self):
        if not (self.tpDoc.valor or self.descOutros.valor or self.nDoc.valor or self.dEmi.valor or self.vDocFisc.valor):
            return ''

        xml = XMLNFe.get_xml(self)
        xml += '<infOutros>'
        xml += self.tpDoc.xml
        xml += self.descOutros.xml
        xml += self.nDoc.xml
        xml += self.dEmi.xml
        xml += self.vDocFisc.xml
        xml += self.dPrev.xml

        for c in self.infUnidCarga:
            xml += c.xml

        for t in self.infUnidTransp:
            xml += t.xml

        xml += '</infOutros>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.tpDoc.xml = arquivo
            self.descOutros.xml = arquivo
            self.nDoc.xml = arquivo
            self.dEmi.xml = arquivo
            self.vDocFisc.xml = arquivo
            self.dPrev.xml = arquivo
            self.infUnidCarga = self.le_grupo('//CTe/infCte/infCTeNorm/infDoc/infOutros/infUnidCarga', InfUnidCarga, sigla_ns='cte')
            self.infUnidTransp = self.le_grupo('//CTe/infCte/infCTeNorm/infDoc/infOutros/infUnidTransp', InfUnidTransp, sigla_ns='cte')

    xml = property(get_xml, set_xml)


class InfNFe(XMLNFe):
    def __init__(self):
        super(InfNFe, self).__init__()
        self.chave = TagCaracter(nome='chave', codigo='B16', tamanho=[44, 44], raiz='//infNFe', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
        self.PIN = TagInteiro(nome='PIN', codigo='B20', tamanho=[2, 9], raiz='//infNFe', namespace=NAMESPACE_CTE, namespace_obrigatorio=False, obrigatorio=False)
        self.dPrev = TagData(nome='dPrev', raiz='//infNFe', namespace=NAMESPACE_CTE, namespace_obrigatorio=False, obrigatorio=False)
        self.infUnidCarga = []
        self.infUnidTransp = []

    def get_xml(self):
        if not self.chave.valor:
            return ''

        xml = XMLNFe.get_xml(self)
        xml += '<infNFe>'
        xml += self.chave.xml
        xml += self.PIN.xml
        xml += self.dPrev.xml

        for c in self.infUnidCarga:
            xml += c.xml

        for t in self.infUnidTransp:
            xml += t.xml

        xml += '</infNFe>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.chave.xml = arquivo
            self.PIN.xml = arquivo
            self.dPrev.xml = arquivo
            self.infUnidCarga = self.le_grupo('//CTe/infCte/infCTeNorm/infDoc/infNFe/infUnidCarga', InfUnidCarga, sigla_ns='cte')
            self.infUnidTransp = self.le_grupo('//CTe/infCte/infCTeNorm/infDoc/infNFe/infUnidTransp', InfUnidTransp, sigla_ns='cte')

    xml = property(get_xml, set_xml)
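

# Documents carried by a normal CT-e are grouped inside infDoc (defined
# further below); referencing an NF-e only needs its 44-digit access key.
# A sketch with a placeholder key:
#
#     nfe = InfNFe()
#     nfe.chave.valor = '5' * 44          # placeholder, not a valid key
#     doc = InfDoc()
#     doc.infNFe.append(nfe)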


class LacUnidTransp(XMLNFe):
    def __init__(self):
        super(LacUnidTransp, self).__init__()
        self.nLacre = TagInteiro(nome='nLacre', tamanho=[1, 20], raiz='//lacUnidTransp', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)

    def get_xml(self):
        if not (self.nLacre.valor):
            return ''

        xml = XMLNFe.get_xml(self)
        xml += '<lacUnidTransp>'
        xml += self.nLacre.xml
        xml += '</lacUnidTransp>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.nLacre.xml = arquivo

    xml = property(get_xml, set_xml)


class InfUnidTransp(XMLNFe):
    def __init__(self):
        super(InfUnidTransp, self).__init__()
        self.tpUnidTransp = TagInteiro(nome='tpUnidTransp', tamanho=[1, 1, 1], raiz='//infUnidTransp', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
        self.idUnidTransp = TagCaracter(nome='idUnidTransp', tamanho=[1, 20], raiz='//infUnidTransp', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
        self.lacUnidTransp = []
        self.infUnidCarga = []
        self.qtdRat = TagDecimal(nome='qtdRat', tamanho=[1, 3, 1], decimais=[0, 2, 2], raiz='//infUnidTransp', namespace=NAMESPACE_CTE, namespace_obrigatorio=False, obrigatorio=False)

    def get_xml(self):
        if not (self.tpUnidTransp.valor or self.idUnidTransp.valor):
            return ''

        xml = XMLNFe.get_xml(self)
        xml += '<infUnidTransp>'
        xml += self.tpUnidTransp.xml
        xml += self.idUnidTransp.xml

        for l in self.lacUnidTransp:
            xml += l.xml

        for i in self.infUnidCarga:
            xml += i.xml

        xml += self.qtdRat.xml
        xml += '</infUnidTransp>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.tpUnidTransp.xml = arquivo
            self.idUnidTransp.xml = arquivo
            self.qtdRat.xml = arquivo
            self.infUnidCarga = self.le_grupo('//CTe/infCte/infCTeNorm/infDoc/infNF/infUnidTransp/infUnidCarga', InfUnidCarga, sigla_ns='cte')
            self.lacUnidTransp = self.le_grupo('//CTe/infCte/infCTeNorm/infDoc/infNF/infUnidTransp/lacUnidTransp', LacUnidTransp, sigla_ns='cte')

    xml = property(get_xml, set_xml)


class LacUnidCarga(XMLNFe):
    def __init__(self):
        super(LacUnidCarga, self).__init__()
        self.nLacre = TagInteiro(nome='nLacre', tamanho=[1, 20], raiz='//lacUnidCarga', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)

    def get_xml(self):
        if not (self.nLacre.valor):
            return ''

        xml = XMLNFe.get_xml(self)
        xml += '<lacUnidCarga>'
        xml += self.nLacre.xml
        xml += '</lacUnidCarga>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.nLacre.xml = arquivo

    xml = property(get_xml, set_xml)


class InfUnidCarga(XMLNFe):
    def __init__(self):
        super(InfUnidCarga, self).__init__()
        self.tpUnidCarga = TagInteiro(nome='tpUnidCarga', tamanho=[1, 1, 1], raiz='//infUnidCarga', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
        self.idUnidCarga = TagCaracter(nome='idUnidCarga', tamanho=[1, 20], raiz='//infUnidCarga', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
        self.lacUnidCarga = []
        self.qtdRat = TagDecimal(nome='qtdRat', tamanho=[1, 3, 1], decimais=[0, 2, 2], raiz='//infUnidCarga', namespace=NAMESPACE_CTE, namespace_obrigatorio=False, obrigatorio=False)

    def get_xml(self):
        if not (self.tpUnidCarga.valor or self.idUnidCarga.valor):
            return ''

        xml = XMLNFe.get_xml(self)
        xml += '<infUnidCarga>'
        xml += self.tpUnidCarga.xml
        xml += self.idUnidCarga.xml

        for l in self.lacUnidCarga:
            xml += l.xml

        xml += self.qtdRat.xml
        xml += '</infUnidCarga>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.tpUnidCarga.xml = arquivo
            self.idUnidCarga.xml = arquivo
            self.qtdRat.xml = arquivo
            self.lacUnidCarga = self.le_grupo('//CTe/infCte/infCTeNorm/infDoc/infNF/infUnidCarga/lacUnidCarga', LacUnidCarga, sigla_ns='cte')

    xml = property(get_xml, set_xml)
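

# A transport unit (InfUnidTransp) can itself contain cargo units
# (InfUnidCarga), so e.g. a container on a truck is a cargo unit nested in
# a transport unit. A sketch with hypothetical identifiers:
#
#     carga = InfUnidCarga()
#     carga.tpUnidCarga.valor = 1
#     carga.idUnidCarga.valor = 'CONT-001'    # hypothetical container id
#     transp = InfUnidTransp()
#     transp.tpUnidTransp.valor = 1
#     transp.idUnidTransp.valor = 'ABC1234'   # hypothetical plate/id
#     transp.infUnidCarga.append(carga)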


class InfNF(XMLNFe):
    def __init__(self):
        super(InfNF, self).__init__()
        self.nRoma = TagCaracter(nome='nRoma', codigo='B16', tamanho=[1, 20], raiz='//infNF', namespace=NAMESPACE_CTE, namespace_obrigatorio=False, obrigatorio=False)
        self.nPed = TagCaracter(nome='nPed', codigo='B16', tamanho=[1, 20], raiz='//infNF', namespace=NAMESPACE_CTE, namespace_obrigatorio=False, obrigatorio=False)
        self.mod = TagCaracter(nome='mod', codigo='B18', tamanho=[2, 2, 2], raiz='//infNF', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
        self.serie = TagCaracter(nome='serie', codigo='B19', tamanho=[1, 3, 1], raiz='//infNF', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
        self.nDoc = TagInteiro(nome='nDoc', codigo='B20', tamanho=[1, 20], raiz='//infNF', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
        self.dEmi = TagData(nome='dEmi', codigo='B09', raiz='//infNF', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
        self.vBC = TagDecimal(nome='vBC', codigo='W03', tamanho=[1, 13, 1], decimais=[0, 2, 2], raiz='//infNF', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
        self.vICMS = TagDecimal(nome='vICMS', codigo='W04', tamanho=[1, 13, 1], decimais=[0, 2, 2], raiz='//infNF', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
        self.vBCST = TagDecimal(nome='vBCST', codigo='W05', tamanho=[1, 13, 1], decimais=[0, 2, 2], raiz='//infNF', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
        self.vST = TagDecimal(nome='vST', codigo='W06', tamanho=[1, 13, 1], decimais=[0, 2, 2], raiz='//infNF', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
        self.vProd = TagDecimal(nome='vProd', codigo='W07', tamanho=[1, 13, 1], decimais=[0, 2, 2], raiz='//infNF', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
        self.vNF = TagDecimal(nome='vNF', codigo='W16', tamanho=[1, 13, 1], decimais=[0, 2, 2], raiz='//infNF', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
        self.nCFOP = TagInteiro(nome='nCFOP', codigo='I08', tamanho=[4, 4, 4], raiz='//infNF', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
        self.nPeso = TagDecimal(nome='nPeso', codigo='W16', tamanho=[1, 12, 1], decimais=[0, 3, 3], raiz='//infNF', namespace=NAMESPACE_CTE, namespace_obrigatorio=False, obrigatorio=False)
        self.PIN = TagInteiro(nome='PIN', codigo='B20', tamanho=[2, 9], raiz='//infNF', namespace=NAMESPACE_CTE, namespace_obrigatorio=False, obrigatorio=False)
        #self.locRet = LocRet()
        self.dPrev = TagData(nome='dPrev', raiz='//infNF', namespace=NAMESPACE_CTE, namespace_obrigatorio=False, obrigatorio=False)
        self.infUnidCarga = []
        self.infUnidTransp = []

    def get_xml(self):
        if not self.mod.valor:
            return ''

        xml = XMLNFe.get_xml(self)
        xml += '<infNF>'
        xml += self.nRoma.xml
        xml += self.nPed.xml
        xml += self.mod.xml
        xml += self.serie.xml
        xml += self.nDoc.xml
        xml += self.dEmi.xml
        xml += self.vBC.xml
        xml += self.vICMS.xml
        xml += self.vBCST.xml
        xml += self.vST.xml
        xml += self.vProd.xml
        xml += self.vNF.xml
        xml += self.nCFOP.xml
        xml += self.nPeso.xml
        xml += self.PIN.xml
        #xml += self.locRet.xml
        xml += self.dPrev.xml

        for c in self.infUnidCarga:
            xml += c.xml

        for t in self.infUnidTransp:
            xml += t.xml

        xml += '</infNF>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.nRoma.xml = arquivo
            self.nPed.xml = arquivo
            self.mod.xml = arquivo
            self.serie.xml = arquivo
            self.nDoc.xml = arquivo
            self.dEmi.xml = arquivo
            self.vBC.xml = arquivo
            self.vICMS.xml = arquivo
            self.vBCST.xml = arquivo
            self.vST.xml = arquivo
            self.vProd.xml = arquivo
            self.vNF.xml = arquivo
            self.nCFOP.xml = arquivo
            self.nPeso.xml = arquivo
            self.PIN.xml = arquivo
            #self.locRet.xml = arquivo
            self.dPrev.xml = arquivo
            self.infUnidCarga = self.le_grupo('//CTe/infCte/infCTeNorm/infDoc/infNF/infUnidCarga', InfUnidCarga, sigla_ns='cte')
            self.infUnidTransp = self.le_grupo('//CTe/infCte/infCTeNorm/infDoc/infNF/infUnidTransp', InfUnidTransp, sigla_ns='cte')

    xml = property(get_xml, set_xml)


class InfDoc(XMLNFe):
    def __init__(self):
        super(InfDoc, self).__init__()
        self.infNF = []
        self.infNFe = []
        self.infOutros = []

    def get_xml(self):
        if not (len(self.infNF) or len(self.infNFe) or len(self.infOutros)):
            return ''

        xml = XMLNFe.get_xml(self)
        xml += '<infDoc>'

        for inf in self.infNF:
            xml += inf.xml

        for infe in self.infNFe:
            xml += infe.xml

        for o in self.infOutros:
            xml += o.xml

        xml += '</infDoc>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.infNF = self.le_grupo('//CTe/infCte/infCTeNorm/infDoc/infNF', InfNF, sigla_ns='cte')
            self.infNFe = self.le_grupo('//CTe/infCte/infCTeNorm/infDoc/infNFe', InfNFe, sigla_ns='cte')
            self.infOutros = self.le_grupo('//CTe/infCte/infCTeNorm/infDoc/infOutros', InfOutros, sigla_ns='cte')

    xml = property(get_xml, set_xml)


class InfQ(XMLNFe):
    def __init__(self):
        super(InfQ, self).__init__()
        self.cUnid = TagCaracter(nome='cUnid', tamanho=[2, 2, 2], raiz='//infQ', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
        self.tpMed = TagCaracter(nome='tpMed', tamanho=[1, 20], raiz='//infQ', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
        self.qCarga = TagDecimal(nome='qCarga', tamanho=[1, 11, 1], decimais=[0, 4, 4], raiz='//infQ', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)

    def get_xml(self):
        xml = XMLNFe.get_xml(self)
        xml += '<infQ>'
        xml += self.cUnid.xml
        xml += self.tpMed.xml
        xml += self.qCarga.xml
        xml += '</infQ>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.cUnid.xml = arquivo
            self.tpMed.xml = arquivo
            self.qCarga.xml = arquivo

    xml = property(get_xml, set_xml)


class InfCarga(XMLNFe):
    def __init__(self):
        super(InfCarga, self).__init__()
        self.vCarga = TagDecimal(nome='vCarga', tamanho=[1, 13, 1], decimais=[0, 2, 2], raiz='//CTe/infCte/infCTeNorm/infCarga', obrigatorio=False)
        self.proPred = TagCaracter(nome='proPred', tamanho=[1, 60], raiz='//CTe/infCte/infCTeNorm/infCarga')
        self.xOutCat = TagCaracter(nome='xOutCat', tamanho=[1, 30], raiz='//CTe/infCte/infCTeNorm/infCarga', obrigatorio=False)
        self.infQ = []
        self.vCargaAverb = TagDecimal(nome='vCargaAverb', tamanho=[1, 13, 1], decimais=[0, 2, 2], raiz='//CTe/infCte/infCTeNorm/infCarga', obrigatorio=False)

    def get_xml(self):
        xml = XMLNFe.get_xml(self)
        xml += '<infCarga>'
        xml += self.vCarga.xml
        xml += self.proPred.xml
        xml += self.xOutCat.xml

        for i in self.infQ:
            xml += i.xml

        xml += self.vCargaAverb.xml
        xml += '</infCarga>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.vCarga.xml = arquivo
            self.proPred.xml = arquivo
            self.xOutCat.xml = arquivo
            self.vCargaAverb.xml = arquivo
            self.infQ = self.le_grupo('//CTe/infCte/infCTeNorm/infCarga/infQ', InfQ, sigla_ns='cte')

    xml = property(get_xml, set_xml)
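

# Cargo quantities are repeated infQ entries, one per unit of measure
# (cUnid comes from the CT-e code table, e.g. '01' for KG). A sketch of a
# 1500 kg load, assuming TagDecimal accepts string values:
#
#     q = InfQ()
#     q.cUnid.valor = '01'
#     q.tpMed.valor = 'PESO BRUTO'
#     q.qCarga.valor = '1500.0000'
#
#     carga = InfCarga()
#     carga.vCarga.valor = '10000.00'
#     carga.proPred.valor = 'ELETRONICOS'
#     carga.infQ.append(q)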


class InfCTeNorm(XMLNFe):
    def __init__(self):
        super(InfCTeNorm, self).__init__()
        self.infCarga = InfCarga()
        self.infDoc = InfDoc()
        self.docAnt = DocAnt()
        self.infModal = InfModal()
        self.veicNovos = []
        self.cobr = Cobr()
        self.infCteSub = InfCTeSub()
        self.infGlobalizado = InfGlobalizado()
        self.infServVinc = InfServVinc()

    def get_xml(self):
        xml = XMLNFe.get_xml(self)
        xml += '<infCTeNorm>'
        xml += self.infCarga.xml
        xml += self.infDoc.xml
        xml += self.docAnt.xml
        xml += self.infModal.xml

        for v in self.veicNovos:
            xml += v.xml

        xml += self.cobr.xml
        xml += self.infCteSub.xml
        xml += self.infGlobalizado.xml
        xml += self.infServVinc.xml
        xml += '</infCTeNorm>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.infCarga.xml = arquivo
            self.infDoc.xml = arquivo
            self.docAnt.xml = arquivo
            self.infModal.xml = arquivo
            self.cobr.xml = arquivo
            self.infCteSub.xml = arquivo
            self.infGlobalizado.xml = arquivo
            self.infServVinc.xml = arquivo
            self.veicNovos = self.le_grupo('//CTe/infCte/infCTeNorm/veicNovos', VeicNovos, sigla_ns='cte')

    xml = property(get_xml, set_xml)


class ICMSUFFim(XMLNFe):
    def __init__(self):
        super(ICMSUFFim, self).__init__()
        self.vBCUFFim = TagDecimal(nome='vBCUFFim', tamanho=[1, 13, 1], decimais=[0, 2, 2], raiz='//CTe/infCte/imp/ICMSUFFim')
        self.pFCPUFFim = TagDecimal(nome='pFCPUFFim', tamanho=[1, 5, 1], decimais=[0, 2, 2], raiz='//CTe/infCte/imp/ICMSUFFim')
        self.pICMSUFFim = TagDecimal(nome='pICMSUFFim', tamanho=[1, 5, 1], decimais=[0, 2, 2], raiz='//CTe/infCte/imp/ICMSUFFim')
        self.pICMSInter = TagDecimal(nome='pICMSInter', tamanho=[1, 5, 1], decimais=[0, 2, 2], raiz='//CTe/infCte/imp/ICMSUFFim')
        self.pICMSInterPart = TagDecimal(nome='pICMSInterPart', tamanho=[1, 5, 1], decimais=[0, 2, 2], raiz='//CTe/infCte/imp/ICMSUFFim')
        self.vFCPUFFim = TagDecimal(nome='vFCPUFFim', tamanho=[1, 13, 1], decimais=[0, 2, 2], raiz='//CTe/infCte/imp/ICMSUFFim')
        self.vICMSUFFim = TagDecimal(nome='vICMSUFFim', tamanho=[1, 13, 1], decimais=[0, 2, 2], raiz='//CTe/infCte/imp/ICMSUFFim')
        self.vICMSUFIni = TagDecimal(nome='vICMSUFIni', tamanho=[1, 13, 1], decimais=[0, 2, 2], raiz='//CTe/infCte/imp/ICMSUFFim')

    def get_xml(self):
        if not (self.vBCUFFim.valor or self.pFCPUFFim.valor or self.pICMSUFFim.valor or self.pICMSInter.valor or self.pICMSInterPart.valor or self.vFCPUFFim.valor or self.vICMSUFFim.valor or self.vICMSUFIni.valor):
            return ''

        xml = XMLNFe.get_xml(self)
        xml += '<ICMSUFFim>'
        xml += self.vBCUFFim.xml
        xml += self.pFCPUFFim.xml
        xml += self.pICMSUFFim.xml
        xml += self.pICMSInter.xml
        xml += self.pICMSInterPart.xml
        xml += self.vFCPUFFim.xml
        xml += self.vICMSUFFim.xml
        xml += self.vICMSUFIni.xml
        xml += '</ICMSUFFim>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.vBCUFFim.xml = arquivo
            self.pFCPUFFim.xml = arquivo
            self.pICMSUFFim.xml = arquivo
            self.pICMSInter.xml = arquivo
            self.pICMSInterPart.xml = arquivo
            self.vFCPUFFim.xml = arquivo
            self.vICMSUFFim.xml = arquivo
            self.vICMSUFIni.xml = arquivo

    xml = property(get_xml, set_xml)
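

# The ICMS group below is serialized under a different wrapper tag (ICMS00,
# ICMS20, ICMS45, ICMS60, ICMS90, ICMSOutraUF or ICMSSN) depending on the
# CST code. TagCSTICMS.set_valor re-points the root and the mandatory flag
# of every child tag whenever the CST changes, so callers only assign the
# CST and the values that apply. A sketch for a fully taxed service
# (CST 00), with placeholder amounts:
#
#     icms = ICMS()
#     icms.CST.valor = '00'
#     icms.vBC.valor = '1000.00'
#     icms.pICMS.valor = '12.00'
#     icms.vICMS.valor = '120.00'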


class TagCSTICMS(TagCaracter):
    def __init__(self, *args, **kwargs):
        super(TagCSTICMS, self).__init__(*args, **kwargs)
        self.nome = 'CST'
        self.codigo = 'N12'
        self.tamanho = [2, 2]
        self.raiz = ''
        self.grupo_icms = None

    def set_valor(self, novo_valor):
        super(TagCSTICMS, self).set_valor(novo_valor)

        if not self.grupo_icms:
            return None

        #
        # Mark every tag of the ICMS group as optional by default
        #
        #self.grupo_icms.modBC.obrigatorio = False
        self.grupo_icms.pRedBC.obrigatorio = False
        self.grupo_icms.vBC.obrigatorio = False
        self.grupo_icms.pICMS.obrigatorio = False
        self.grupo_icms.vICMS.obrigatorio = False
        #self.grupo_icms.modBCST.obrigatorio = False
        #self.grupo_icms.pMVAST.obrigatorio = False
        #self.grupo_icms.pRedBCST.obrigatorio = False
        self.grupo_icms.vBCSTRet.obrigatorio = False
        self.grupo_icms.pICMSSTRet.obrigatorio = False
        self.grupo_icms.vICMSSTRet.obrigatorio = False
        self.grupo_icms.vCred.obrigatorio = False
        self.grupo_icms.vBCOutraUF.obrigatorio = False
        self.grupo_icms.pICMSOutraUF.obrigatorio = False
        self.grupo_icms.vICMSOutraUF.obrigatorio = False
        self.grupo_icms.indSN.obrigatorio = False

        #
        # For safety, zero out the values of the ICMS group tags whenever
        # the tax situation code (CST) is redefined
        #
        #self.grupo_icms.modBC.valor = 3
        self.grupo_icms.pRedBC.valor = '0.00'
        self.grupo_icms.vBC.valor = '0.00'
        self.grupo_icms.pICMS.valor = '0.00'
        self.grupo_icms.vICMS.valor = '0.00'
        #self.grupo_icms.modBCST.valor = 4
        #self.grupo_icms.pMVAST.valor = '0.00'
        #self.grupo_icms.pRedBCST.valor = '0.00'
        self.grupo_icms.vBCSTRet.valor = '0.00'
        self.grupo_icms.pICMSSTRet.valor = '0.00'
        self.grupo_icms.vICMSSTRet.valor = '0.00'
        self.grupo_icms.vBCOutraUF.valor = '0.00'
        self.grupo_icms.pICMSOutraUF.valor = '0.00'
        self.grupo_icms.vICMSOutraUF.valor = '0.00'

        #
        # For each tax situation code, redefine the root and the mandatory
        # flags of the ICMS group tags
        #
        if self.valor == '00':
            self.grupo_icms.nome_tag = 'ICMS00'
            self.grupo_icms.nome_tag_txt = 'N02'
            self.grupo_icms.raiz_tag = '//CTe/infCte/imp/ICMS/ICMS00'
            #self.grupo_icms.modBC.obrigatorio = True
            self.grupo_icms.vBC.obrigatorio = True
            self.grupo_icms.pICMS.obrigatorio = True
            self.grupo_icms.vICMS.obrigatorio = True

        elif self.valor == '20':
            self.grupo_icms.nome_tag = 'ICMS20'
            self.grupo_icms.nome_tag_txt = 'N04'
            self.grupo_icms.raiz_tag = '//CTe/infCte/imp/ICMS/ICMS20'
            #self.grupo_icms.modBC.obrigatorio = True
            self.grupo_icms.pRedBC.obrigatorio = True
            self.grupo_icms.vBC.obrigatorio = True
            self.grupo_icms.pICMS.obrigatorio = True
            self.grupo_icms.vICMS.obrigatorio = True

        elif self.valor in ('40', '41', '51'):
            self.grupo_icms.nome_tag = 'ICMS45'
            self.grupo_icms.nome_tag_txt = 'N06'
            self.grupo_icms.raiz_tag = '//CTe/infCte/imp/ICMS/ICMS45'

        elif self.valor == '60':
            self.grupo_icms.nome_tag = 'ICMS60'
            self.grupo_icms.nome_tag_txt = 'N08'
            self.grupo_icms.raiz_tag = '//CTe/infCte/imp/ICMS/ICMS60'
            self.grupo_icms.vBCSTRet.obrigatorio = True
            self.grupo_icms.pICMSSTRet.obrigatorio = True
            self.grupo_icms.vICMSSTRet.obrigatorio = True

        elif self.valor == '90':
            if self.grupo_icms.icms_outra_uf:
                self.grupo_icms.nome_tag = 'ICMSOutraUF'
                self.grupo_icms.nome_tag_txt = 'N10'
                self.grupo_icms.raiz_tag = '//CTe/infCte/imp/ICMS/ICMSOutraUF'
                self.grupo_icms.vBCOutraUF.obrigatorio = True
                self.grupo_icms.pICMSOutraUF.obrigatorio = True
                self.grupo_icms.vICMSOutraUF.obrigatorio = True
            elif self.grupo_icms.icms_sn:
                self.grupo_icms.nome_tag = 'ICMSSN'
                self.grupo_icms.nome_tag_txt = 'N10'
                self.grupo_icms.raiz_tag = '//CTe/infCte/imp/ICMS/ICMSSN'
                self.grupo_icms.indSN.obrigatorio = True
            else:
                self.grupo_icms.nome_tag = 'ICMS90'
                self.grupo_icms.nome_tag_txt = 'N10'
                self.grupo_icms.raiz_tag = '//CTe/infCte/imp/ICMS/ICMS90'
                #self.grupo_icms.pRedBC.obrigatorio = True
                self.grupo_icms.vBC.obrigatorio = True
                self.grupo_icms.pICMS.obrigatorio = True
                self.grupo_icms.vICMS.obrigatorio = True

        #
        # Re-point the root for every tag in the ICMS group
        #
        #self.grupo_icms.orig.raiz = self.grupo_icms.raiz_tag
        self.grupo_icms.CST.raiz = self.grupo_icms.raiz_tag
        #self.grupo_icms.modBC.raiz = self.grupo_icms.raiz_tag
        self.grupo_icms.pRedBC.raiz = self.grupo_icms.raiz_tag
        self.grupo_icms.vBC.raiz = self.grupo_icms.raiz_tag
        self.grupo_icms.pICMS.raiz = self.grupo_icms.raiz_tag
        self.grupo_icms.vICMS.raiz = self.grupo_icms.raiz_tag
        #self.grupo_icms.modBCST.raiz = self.grupo_icms.raiz_tag
        #self.grupo_icms.pMVAST.raiz = self.grupo_icms.raiz_tag
        #self.grupo_icms.pRedBCST.raiz = self.grupo_icms.raiz_tag
        self.grupo_icms.vBCSTRet.raiz = self.grupo_icms.raiz_tag
        self.grupo_icms.pICMSSTRet.raiz = self.grupo_icms.raiz_tag
        self.grupo_icms.vICMSSTRet.raiz = self.grupo_icms.raiz_tag
        self.grupo_icms.vBCOutraUF.raiz = self.grupo_icms.raiz_tag
        self.grupo_icms.pICMSOutraUF.raiz = self.grupo_icms.raiz_tag
        self.grupo_icms.vICMSOutraUF.raiz = self.grupo_icms.raiz_tag
        self.grupo_icms.indSN.raiz = self.grupo_icms.raiz_tag

    def get_valor(self):
        return self._valor_string

    valor = property(get_valor, set_valor)


class ICMS(XMLNFe):
    def __init__(self):
        super(ICMS, self).__init__()
        #self.orig = TagInteiro(nome='orig', tamanho=[1, 1, 1], raiz='')
        #self.modBC = TagInteiro(nome='modBC', tamanho=[1, 1, 1], raiz='')
        self.pRedBC = TagDecimal(nome='pRedBC', tamanho=[1, 5, 1], decimais=[0, 2, 2], raiz='')
        self.vBC = TagDecimal(nome='vBC', tamanho=[1, 13, 1], decimais=[0, 2, 2], raiz='')
        self.pICMS = TagDecimal(nome='pICMS', tamanho=[1, 5, 1], decimais=[0, 2, 2], raiz='')
        self.vICMS = TagDecimal(nome='vICMS', tamanho=[1, 13, 1], decimais=[0, 2, 2], raiz='')
        #self.modBCST = TagInteiro(nome='modBCST', tamanho=[1, 1, 1], raiz='')
        #self.pMVAST = TagDecimal(nome='pMVAST', tamanho=[1, 5, 1], decimais=[0, 2, 2], raiz='')
        #self.pRedBCST = TagDecimal(nome='pRedBCST', tamanho=[1, 5, 1], decimais=[0, 2, 2], raiz='')
        self.vBCSTRet = TagDecimal(nome='vBCSTRet', tamanho=[1, 13, 1], decimais=[0, 2, 2], raiz='')
        self.pICMSSTRet = TagDecimal(nome='pICMSSTRet', tamanho=[1, 5, 1], decimais=[0, 2, 2], raiz='')
        self.vICMSSTRet = TagDecimal(nome='vICMSSTRet', tamanho=[1, 13, 1], decimais=[0, 2, 2], raiz='')
        self.vCred = TagDecimal(nome='vCred', tamanho=[1, 13, 1], decimais=[0, 2, 2], raiz='')

        # ICMSOutraUF fields
        self.pRedBCOutraUF = TagDecimal(nome='pRedBCOutraUF', tamanho=[1, 5, 1], decimais=[0, 2, 2], raiz='', obrigatorio=False)
        self.vBCOutraUF = TagDecimal(nome='vBCOutraUF', tamanho=[1, 13, 1], decimais=[0, 2, 2], raiz='')
        self.pICMSOutraUF = TagDecimal(nome='pICMSOutraUF', tamanho=[1, 5, 1], decimais=[0, 2, 2], raiz='')
        self.vICMSOutraUF = TagDecimal(nome='vICMSOutraUF', tamanho=[1, 13, 1], decimais=[0, 2, 2], raiz='')

        # ICMSSN field:
        self.indSN = TagInteiro(nome='indSN', tamanho=[1, 1, 1], raiz='')

        self.CST = TagCSTICMS()
        self.CST.grupo_icms = self
        #self.CST.valor = '40'
        self.nome_tag = 'ICMS45'
        self.raiz_tag = '//CTe/infCte/imp/ICMS/ICMS45'
        self.nome_tag_txt = 'N06'
        self.icms_outra_uf = False  # for the ICMSOutraUF group
        self.icms_sn = False        # for the ICMSSN group (Simples Nacional)

    def get_xml(self):
        #
        # Choose the tags based on the tax situation code (CST)
        #
        xml = XMLNFe.get_xml(self)
        xml += '<ICMS><' + self.nome_tag + '>'
        #xml += self.orig.xml
        xml += self.CST.xml

        if self.CST.valor == '00':
            #xml += self.modBC.xml
            xml += self.vBC.xml
            xml += self.pICMS.xml
            xml += self.vICMS.xml

        elif self.CST.valor == '20':
            #xml += self.modBC.xml
            xml += self.pRedBC.xml
            xml += self.vBC.xml
            xml += self.pICMS.xml
            xml += self.vICMS.xml

        elif self.CST.valor in ('40', '41', '51'):
            pass

        elif self.CST.valor == '60':
            xml += self.vBCSTRet.xml
            xml += self.pICMSSTRet.xml
            xml += self.vICMSSTRet.xml

        elif self.CST.valor == '90':
            if self.icms_outra_uf:
                xml += self.pRedBCOutraUF.xml
                xml += self.vBCOutraUF.xml
                xml += self.pICMSOutraUF.xml
                xml += self.vICMSOutraUF.xml
            elif self.icms_sn:
                xml += self.indSN.xml
            else:
                #xml += self.modBC.xml
                xml += self.pRedBC.xml
                xml += self.vBC.xml
                xml += self.pICMS.xml
                xml += self.vICMS.xml
                xml += self.vCred.xml

        xml += '</' + self.nome_tag + '></ICMS>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            #
            # To read the ICMS correctly we first have to find out which
            # tax situation (CST) group it is in
            #
            if self._le_noh('//CTe/infCte/imp/ICMS/ICMS00') is not None:
                self.CST.valor = '00'
            elif self._le_noh('//CTe/infCte/imp/ICMS/ICMS20') is not None:
                self.CST.valor = '20'
            elif self._le_noh('//CTe/infCte/imp/ICMS/ICMS45') is not None:
                self.CST.valor = '40'
            elif self._le_noh('//CTe/infCte/imp/ICMS/ICMS60') is not None:
                self.CST.valor = '60'
            elif self._le_noh('//CTe/infCte/imp/ICMS/ICMS90') is not None:
                self.CST.valor = '90'
            elif self._le_noh('//CTe/infCte/imp/ICMS/ICMSOutraUF') is not None:
                self.CST.valor = '90'
            elif self._le_noh('//CTe/infCte/imp/ICMS/ICMSSN') is not None:
                self.CST.valor = '90'

            #
            # Now the values can be read safely...
            #
            #self.orig.xml = arquivo
            self.CST.xml = arquivo
            #self.modBC.xml = arquivo
            self.pRedBC.xml = arquivo
            self.vBC.xml = arquivo
            self.pICMS.xml = arquivo
            self.vICMS.xml = arquivo
            #self.modBCST.xml = arquivo
            #self.pMVAST.xml = arquivo
            #self.pRedBCST.xml = arquivo
            self.vBCSTRet.xml = arquivo
            self.pICMSSTRet.xml = arquivo
            self.vICMSSTRet.xml = arquivo
            self.vCred.xml = arquivo
            self.pRedBCOutraUF.xml = arquivo
            self.vBCOutraUF.xml = arquivo
            self.pICMSOutraUF.xml = arquivo
            self.vICMSOutraUF.xml = arquivo
            self.indSN.xml = arquivo

    xml = property(get_xml, set_xml)


class Imp(XMLNFe):
    def __init__(self):
        super(Imp, self).__init__()
        self.ICMS = ICMS()
        self.vTotTrib = TagDecimal(nome='vTotTrib', tamanho=[1, 13, 1], decimais=[0, 2, 2], raiz='//CTe/infCte/imp', obrigatorio=False)
        self.infAdFisco = TagCaracter(nome='infAdFisco', tamanho=[1, 2000], raiz='//CTe/infCte/imp', obrigatorio=False)
        self.ICMSUFFim = ICMSUFFim()

    def get_xml(self):
        xml = XMLNFe.get_xml(self)
        xml += '<imp>'
        xml += self.ICMS.xml
        xml += self.vTotTrib.xml
        xml += self.infAdFisco.xml
        xml += self.ICMSUFFim.xml
        xml += '</imp>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.ICMS.xml = arquivo
            self.infAdFisco.xml = arquivo
            self.vTotTrib.xml = arquivo
            self.ICMSUFFim.xml = arquivo

    xml = property(get_xml, set_xml)


class Comp(XMLNFe):
    def __init__(self):
        super(Comp, self).__init__()
        self.xNome = TagCaracter(nome='xNome', tamanho=[1, 15], raiz='//CTe/infCte/vPrest/Comp')
        self.vComp = TagDecimal(nome='vComp', tamanho=[1, 13, 1], decimais=[0, 2, 2], raiz='//CTe/infCte/vPrest/Comp')

    def get_xml(self):
        if not (self.xNome.valor or self.vComp.valor):
            return ''

        xml = XMLNFe.get_xml(self)
        xml += '<Comp>'
        xml += self.xNome.xml
        xml += self.vComp.xml
        xml += '</Comp>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.xNome.xml = arquivo
            self.vComp.xml = arquivo

    xml = property(get_xml, set_xml)


class VPrest(XMLNFe):
    def __init__(self):
        super(VPrest, self).__init__()
        self.vTPrest = TagDecimal(nome='vTPrest', tamanho=[1, 13, 1], decimais=[0, 2, 2], raiz='//CTe/infCte/vPrest')
        self.vRec = TagDecimal(nome='vRec', tamanho=[1, 13, 1], decimais=[0, 2, 2], raiz='//CTe/infCte/vPrest')
        self.Comp = []

    def get_xml(self):
        xml = XMLNFe.get_xml(self)
        xml += '<vPrest>'
        xml += self.vTPrest.xml
        xml += self.vRec.xml

        for c in self.Comp:
            xml += c.xml

        xml += '</vPrest>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.vTPrest.xml = arquivo
            self.vRec.xml = arquivo
            self.Comp = self.le_grupo('//CTe/infCte/vPrest/Comp', Comp, sigla_ns='cte')

    xml = property(get_xml, set_xml)
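

# The total service value (vPrest) can be broken down into named
# components. A sketch with placeholder amounts:
#
#     comp = Comp()
#     comp.xNome.valor = 'FRETE PESO'
#     comp.vComp.valor = '900.00'
#
#     vprest = VPrest()
#     vprest.vTPrest.valor = '900.00'
#     vprest.vRec.valor = '900.00'
#     vprest.Comp.append(comp)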


class EnderDest(XMLNFe):
    def __init__(self):
        super(EnderDest, self).__init__()
        self.xLgr = TagCaracter(nome='xLgr', codigo='E06', tamanho=[2, 255], raiz='//CTe/infCte/dest/enderDest')
        self.nro = TagCaracter(nome='nro', codigo='E07', tamanho=[1, 60], raiz='//CTe/infCte/dest/enderDest')
        self.xCpl = TagCaracter(nome='xCpl', codigo='E08', tamanho=[1, 60], raiz='//CTe/infCte/dest/enderDest', obrigatorio=False)
        self.xBairro = TagCaracter(nome='xBairro', codigo='E09', tamanho=[2, 60], raiz='//CTe/infCte/dest/enderDest')
        self.cMun = TagInteiro(nome='cMun', codigo='E10', tamanho=[7, 7, 7], raiz='//CTe/infCte/dest/enderDest')
        self.xMun = TagCaracter(nome='xMun', codigo='E11', tamanho=[2, 60], raiz='//CTe/infCte/dest/enderDest')
        self.CEP = TagCaracter(nome='CEP', codigo='E13', tamanho=[8, 8, 8], raiz='//CTe/infCte/dest/enderDest', obrigatorio=False)
        self.UF = TagCaracter(nome='UF', codigo='E12', tamanho=[2, 2], raiz='//CTe/infCte/dest/enderDest')
        self.cPais = TagInteiro(nome='cPais', codigo='E14', tamanho=[4, 4, 4], raiz='//CTe/infCte/dest/enderDest', obrigatorio=False)
        self.xPais = TagCaracter(nome='xPais', codigo='E15', tamanho=[2, 60], raiz='//CTe/infCte/dest/enderDest', obrigatorio=False)

    def get_xml(self):
        xml = XMLNFe.get_xml(self)
        xml += '<enderDest>'
        xml += self.xLgr.xml
        xml += self.nro.xml
        xml += self.xCpl.xml
        xml += self.xBairro.xml
        xml += self.cMun.xml
        xml += self.xMun.xml
        xml += self.CEP.xml
        xml += self.UF.xml
        xml += self.cPais.xml
        xml += self.xPais.xml
        xml += '</enderDest>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.xLgr.xml = arquivo
            self.nro.xml = arquivo
            self.xCpl.xml = arquivo
            self.xBairro.xml = arquivo
            self.cMun.xml = arquivo
            self.xMun.xml = arquivo
            self.CEP.xml = arquivo
            self.UF.xml = arquivo
            self.cPais.xml = arquivo
            self.xPais.xml = arquivo

    xml = property(get_xml, set_xml)


class Dest(XMLNFe):
    def __init__(self):
        super(Dest, self).__init__()
        self.CNPJ = TagCaracter(nome='CNPJ', codigo='E02', tamanho=[0, 14], raiz='//CTe/infCte/dest', obrigatorio=False)
        self.CPF = TagCaracter(nome='CPF', codigo='E03', tamanho=[11, 11], raiz='//CTe/infCte/dest', obrigatorio=False)
        self.IE = TagCaracter(nome='IE', codigo='E17', tamanho=[2, 14], raiz='//CTe/infCte/dest', obrigatorio=False)
        self.xNome = TagCaracter(nome='xNome', codigo='E04', tamanho=[2, 60], raiz='//CTe/infCte/dest')
        self.fone = TagInteiro(nome='fone', codigo='E16', tamanho=[6, 14], raiz='//CTe/infCte/dest', obrigatorio=False)
        self.ISUF = TagCaracter(nome='ISUF', codigo='E18', tamanho=[8, 9], raiz='//CTe/infCte/dest', obrigatorio=False)
        self.enderDest = EnderDest()
        self.email = TagCaracter(nome='email', codigo='E19', tamanho=[1, 60], raiz='//CTe/infCte/dest', obrigatorio=False)
        #self.locEnt = LocEnt()

    def get_xml(self):
        if self.CNPJ.valor == '' and self.CPF.valor == '':
            return ''

        xml = XMLNFe.get_xml(self)
        xml += '<dest>'

        if self.CPF.valor:
            xml += self.CPF.xml
        else:
            xml += self.CNPJ.xml

        xml += self.IE.xml
        xml += self.xNome.xml
        xml += self.fone.xml
        xml += self.ISUF.xml
        xml += self.enderDest.xml
        xml += self.email.xml
        #xml += self.locEnt.xml
        xml += '</dest>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.CNPJ.xml = arquivo
            self.CPF.xml = arquivo
            self.IE.xml = arquivo
            self.xNome.xml = arquivo
            self.fone.xml = arquivo
            self.ISUF.xml = arquivo
            self.enderDest.xml = arquivo
            self.email.xml = arquivo
            #self.locEnt.xml = arquivo

    xml = property(get_xml, set_xml)


class EnderReceb(XMLNFe):
    def __init__(self):
        super(EnderReceb, self).__init__()
        self.xLgr = TagCaracter(nome='xLgr', codigo='E06', tamanho=[2, 255], raiz='//CTe/infCte/receb/enderReceb')
        self.nro = TagCaracter(nome='nro', codigo='E07', tamanho=[1, 60], raiz='//CTe/infCte/receb/enderReceb')
        self.xCpl = TagCaracter(nome='xCpl', codigo='E08', tamanho=[1, 60], raiz='//CTe/infCte/receb/enderReceb', obrigatorio=False)
        self.xBairro = TagCaracter(nome='xBairro', codigo='E09', tamanho=[2, 60], raiz='//CTe/infCte/receb/enderReceb')
        self.cMun = TagInteiro(nome='cMun', codigo='E10', tamanho=[7, 7, 7], raiz='//CTe/infCte/receb/enderReceb')
        self.xMun = TagCaracter(nome='xMun', codigo='E11', tamanho=[2, 60], raiz='//CTe/infCte/receb/enderReceb')
        self.CEP = TagCaracter(nome='CEP', codigo='E13', tamanho=[8, 8, 8], raiz='//CTe/infCte/receb/enderReceb', obrigatorio=False)
        self.UF = TagCaracter(nome='UF', codigo='E12', tamanho=[2, 2], raiz='//CTe/infCte/receb/enderReceb')
        self.cPais = TagInteiro(nome='cPais', codigo='E14', tamanho=[4, 4, 4], raiz='//CTe/infCte/receb/enderReceb', obrigatorio=False)
        self.xPais = TagCaracter(nome='xPais', codigo='E15', tamanho=[2, 60], raiz='//CTe/infCte/receb/enderReceb', obrigatorio=False)

    def get_xml(self):
        xml = XMLNFe.get_xml(self)
        xml += '<enderReceb>'
        xml += self.xLgr.xml
        xml += self.nro.xml
        xml += self.xCpl.xml
        xml += self.xBairro.xml
        xml += self.cMun.xml
        xml += self.xMun.xml
        xml += self.CEP.xml
        xml += self.UF.xml
        xml += self.cPais.xml
        xml += self.xPais.xml
        xml += '</enderReceb>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.xLgr.xml = arquivo
            self.nro.xml = arquivo
            self.xCpl.xml = arquivo
            self.xBairro.xml = arquivo
            self.cMun.xml = arquivo
            self.xMun.xml = arquivo
            self.CEP.xml = arquivo
            self.UF.xml = arquivo
            self.cPais.xml = arquivo
            self.xPais.xml = arquivo

    xml = property(get_xml, set_xml)


class Receb(XMLNFe):
    def __init__(self):
        super(Receb, self).__init__()
        self.CNPJ = TagCaracter(nome='CNPJ', codigo='E02', tamanho=[0, 14], raiz='//CTe/infCte/receb', obrigatorio=False)
        self.CPF = TagCaracter(nome='CPF', codigo='E03', tamanho=[11, 11], raiz='//CTe/infCte/receb', obrigatorio=False)
        self.IE = TagCaracter(nome='IE', codigo='E17', tamanho=[2, 14], raiz='//CTe/infCte/receb', obrigatorio=False)
        self.xNome = TagCaracter(nome='xNome', codigo='E04', tamanho=[2, 60], raiz='//CTe/infCte/receb')
        self.fone = TagInteiro(nome='fone', codigo='E16', tamanho=[6, 14], raiz='//CTe/infCte/receb', obrigatorio=False)
        self.enderReceb = EnderReceb()
        self.email = TagCaracter(nome='email', codigo='E19', tamanho=[1, 60], raiz='//CTe/infCte/receb', obrigatorio=False)

    def get_xml(self):
        if self.CNPJ.valor == '' and self.CPF.valor == '':
            return ''

        xml = XMLNFe.get_xml(self)
        xml += '<receb>'

        if self.CPF.valor:
            xml += self.CPF.xml
        else:
            xml += self.CNPJ.xml

        xml += self.IE.xml
        xml += self.xNome.xml
        xml += self.fone.xml
        xml += self.enderReceb.xml
        xml += self.email.xml
        xml += '</receb>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.CNPJ.xml = arquivo
            self.CPF.xml = arquivo
            self.IE.xml = arquivo
            self.xNome.xml = arquivo
            self.fone.xml = arquivo
            self.enderReceb.xml = arquivo
            self.email.xml = arquivo

    xml = property(get_xml, set_xml)


class EnderExped(XMLNFe):
    def __init__(self):
        super(EnderExped, self).__init__()
        self.xLgr = TagCaracter(nome='xLgr', codigo='E06', tamanho=[2, 255], raiz='//CTe/infCte/exped/enderExped')
        self.nro = TagCaracter(nome='nro', codigo='E07', tamanho=[1, 60], raiz='//CTe/infCte/exped/enderExped')
        self.xCpl = TagCaracter(nome='xCpl', codigo='E08', tamanho=[1, 60], raiz='//CTe/infCte/exped/enderExped', obrigatorio=False)
        self.xBairro = TagCaracter(nome='xBairro', codigo='E09', tamanho=[2, 60], raiz='//CTe/infCte/exped/enderExped')
        self.cMun = TagInteiro(nome='cMun', codigo='E10', tamanho=[7, 7, 7], raiz='//CTe/infCte/exped/enderExped')
        self.xMun = TagCaracter(nome='xMun', codigo='E11', tamanho=[2, 60], raiz='//CTe/infCte/exped/enderExped')
        self.CEP = TagCaracter(nome='CEP', codigo='E13', tamanho=[8, 8, 8], raiz='//CTe/infCte/exped/enderExped', obrigatorio=False)
        self.UF = TagCaracter(nome='UF', codigo='E12', tamanho=[2, 2], raiz='//CTe/infCte/exped/enderExped')
        self.cPais = TagInteiro(nome='cPais', codigo='E14', tamanho=[4, 4, 4], raiz='//CTe/infCte/exped/enderExped', obrigatorio=False)
        self.xPais = TagCaracter(nome='xPais', codigo='E15', tamanho=[1, 60], raiz='//CTe/infCte/exped/enderExped', obrigatorio=False)

    def get_xml(self):
        xml = XMLNFe.get_xml(self)
        xml += '<enderExped>'
        xml += self.xLgr.xml
        xml += self.nro.xml
        xml += self.xCpl.xml
        xml += self.xBairro.xml
        xml += self.cMun.xml
        xml += self.xMun.xml
        xml += self.CEP.xml
        xml += self.UF.xml
        xml += self.cPais.xml
        xml += self.xPais.xml
        xml += '</enderExped>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.xLgr.xml = arquivo
            self.nro.xml = arquivo
            self.xCpl.xml = arquivo
            self.xBairro.xml = arquivo
            self.cMun.xml = arquivo
            self.xMun.xml = arquivo
            self.CEP.xml = arquivo
            self.UF.xml = arquivo
            self.cPais.xml = arquivo
            self.xPais.xml = arquivo

    xml = property(get_xml, set_xml)


class Exped(XMLNFe):
    def __init__(self):
        super(Exped, self).__init__()
        self.CNPJ = TagCaracter(nome='CNPJ', codigo='E02', tamanho=[0, 14], raiz='//CTe/infCte/exped', obrigatorio=False)
        self.CPF = TagCaracter(nome='CPF', codigo='E03', tamanho=[11, 11], raiz='//CTe/infCte/exped', obrigatorio=False)
        self.IE = TagCaracter(nome='IE', codigo='E17', tamanho=[2, 14], raiz='//CTe/infCte/exped', obrigatorio=False)
        self.xNome = TagCaracter(nome='xNome', codigo='E04', tamanho=[2, 60], raiz='//CTe/infCte/exped')
        self.fone = TagInteiro(nome='fone', codigo='E16', tamanho=[6, 14], raiz='//CTe/infCte/exped', obrigatorio=False)
        self.enderExped = EnderExped()
        self.email = TagCaracter(nome='email', codigo='E19', tamanho=[1, 60], raiz='//CTe/infCte/exped', obrigatorio=False)

    def get_xml(self):
        if self.CNPJ.valor == '' and self.CPF.valor == '':
            return ''

        xml = XMLNFe.get_xml(self)
        xml += '<exped>'

        if self.CPF.valor:
            xml += self.CPF.xml
        else:
            xml += self.CNPJ.xml

        xml += self.IE.xml
        xml += self.xNome.xml
        xml += self.fone.xml
        xml += self.enderExped.xml
        xml += self.email.xml
        xml += '</exped>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.CNPJ.xml = arquivo
            self.CPF.xml = arquivo
            self.IE.xml = arquivo
            self.xNome.xml = arquivo
            self.fone.xml = arquivo
            self.enderExped.xml = arquivo
            self.email.xml = arquivo

    xml = property(get_xml, set_xml)


class EnderReme(XMLNFe):
    def __init__(self):
        super(EnderReme, self).__init__()
        self.xLgr = TagCaracter(nome='xLgr', codigo='E06', tamanho=[2, 255], raiz='//CTe/infCte/rem/enderReme')
        self.nro = TagCaracter(nome='nro', codigo='E07', tamanho=[1, 60], raiz='//CTe/infCte/rem/enderReme')
        self.xCpl = TagCaracter(nome='xCpl', codigo='E08', tamanho=[1, 60], raiz='//CTe/infCte/rem/enderReme', obrigatorio=False)
        self.xBairro = TagCaracter(nome='xBairro', codigo='E09', tamanho=[2, 60], raiz='//CTe/infCte/rem/enderReme')
        self.cMun = TagInteiro(nome='cMun', codigo='E10', tamanho=[7, 7, 7], raiz='//CTe/infCte/rem/enderReme')
        self.xMun = TagCaracter(nome='xMun', codigo='E11', tamanho=[2, 60], raiz='//CTe/infCte/rem/enderReme')
        self.CEP = TagCaracter(nome='CEP', codigo='E13', tamanho=[8, 8, 8], raiz='//CTe/infCte/rem/enderReme', obrigatorio=False)
        self.UF = TagCaracter(nome='UF', codigo='E12', tamanho=[2, 2], raiz='//CTe/infCte/rem/enderReme')
        self.cPais = TagInteiro(nome='cPais', codigo='E14', tamanho=[4, 4, 4], raiz='//CTe/infCte/rem/enderReme', obrigatorio=False)
        self.xPais = TagCaracter(nome='xPais', codigo='E15', tamanho=[1, 60], raiz='//CTe/infCte/rem/enderReme', obrigatorio=False)

    def get_xml(self):
        xml = XMLNFe.get_xml(self)
        xml += '<enderReme>'
        xml += self.xLgr.xml
        xml += self.nro.xml
        xml += self.xCpl.xml
        xml += self.xBairro.xml
        xml += self.cMun.xml
        xml += self.xMun.xml
        xml += self.CEP.xml
        xml += self.UF.xml
        xml += self.cPais.xml
        xml += self.xPais.xml
        xml += '</enderReme>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.xLgr.xml = arquivo
            self.nro.xml = arquivo
            self.xCpl.xml = arquivo
            self.xBairro.xml = arquivo
            self.cMun.xml = arquivo
            self.xMun.xml = arquivo
            self.CEP.xml = arquivo
            self.UF.xml = arquivo
            self.cPais.xml = arquivo
            self.xPais.xml = arquivo

    xml = property(get_xml, set_xml)


class Rem(XMLNFe):
    def __init__(self):
        super(Rem, self).__init__()
        self.CNPJ = TagCaracter(nome='CNPJ', codigo='E02', tamanho=[0, 14], raiz='//CTe/infCte/rem', obrigatorio=False)
        self.CPF = TagCaracter(nome='CPF', codigo='E03', tamanho=[11, 11], raiz='//CTe/infCte/rem', obrigatorio=False)
        self.IE = TagCaracter(nome='IE', codigo='E17', tamanho=[2, 14], raiz='//CTe/infCte/rem', obrigatorio=False)
        self.xNome = TagCaracter(nome='xNome', codigo='E04', tamanho=[2, 60], raiz='//CTe/infCte/rem')
        self.xFant = TagCaracter(nome='xFant', codigo='E04', tamanho=[1, 60], raiz='//CTe/infCte/rem', obrigatorio=False)
        self.fone = TagInteiro(nome='fone', codigo='E16', tamanho=[6, 14], raiz='//CTe/infCte/rem', obrigatorio=False)
        self.enderReme = EnderReme()
        self.email = TagCaracter(nome='email', codigo='E19', tamanho=[1, 60], raiz='//CTe/infCte/rem', obrigatorio=False)
        #self.infNF = []
        #self.infNFe = []
        #self.infOutros = []

    def get_xml(self):
        if self.CNPJ.valor == '' and self.CPF.valor == '':
            return ''

        xml = XMLNFe.get_xml(self)
        xml += '<rem>'

        if self.CPF.valor:
            xml += self.CPF.xml
        else:
            xml += self.CNPJ.xml

        xml += self.IE.xml
        xml += self.xNome.xml
        xml += self.xFant.xml
        xml += self.fone.xml
        xml += self.enderReme.xml
        xml += self.email.xml
        xml += '</rem>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.CNPJ.xml = arquivo
            self.CPF.xml = arquivo
            self.IE.xml = arquivo
            self.xNome.xml = arquivo
            self.xFant.xml = arquivo
            self.fone.xml = arquivo
            self.enderReme.xml = arquivo
            self.email.xml = arquivo
            #self.infNF = self.le_grupo('//CTe/infCte/rem/infNF', InfNF, sigla_ns='cte')
            #self.infNFe = self.le_grupo('//CTe/infCte/rem/infNFe', InfNFe, sigla_ns='cte')
            #self.infOutros = self.le_grupo('//CTe/infCte/rem/infOutros', InfOutros, sigla_ns='cte')

    xml = property(get_xml, set_xml)


class EnderEmit(XMLNFe):
    def __init__(self):
        super(EnderEmit, self).__init__()
        self.xLgr = TagCaracter(nome='xLgr', codigo='C06', tamanho=[2, 60], raiz='//CTe/infCte/emit/enderEmit')
        self.nro = TagCaracter(nome='nro', codigo='C07', tamanho=[1, 60], raiz='//CTe/infCte/emit/enderEmit')
        self.xCpl = TagCaracter(nome='xCpl', codigo='C08', tamanho=[1, 60], raiz='//CTe/infCte/emit/enderEmit', obrigatorio=False)
        self.xBairro = TagCaracter(nome='xBairro', codigo='C09', tamanho=[2, 60], raiz='//CTe/infCte/emit/enderEmit')
        self.cMun = TagInteiro(nome='cMun', codigo='C10', tamanho=[7, 7, 7], raiz='//CTe/infCte/emit/enderEmit')
        self.xMun = TagCaracter(nome='xMun', codigo='C11', tamanho=[2, 60], raiz='//CTe/infCte/emit/enderEmit')
        self.CEP = TagCaracter(nome='CEP', codigo='C13', tamanho=[8, 8, 8], raiz='//CTe/infCte/emit/enderEmit', obrigatorio=False)
        self.UF = TagCaracter(nome='UF', codigo='C12', tamanho=[2, 2], raiz='//CTe/infCte/emit/enderEmit')
        #self.cPais = TagInteiro(nome='cPais', codigo='C14', tamanho=[4, 4, 4], raiz='//CTe/infCte/emit/enderEmit', obrigatorio=False)
        #self.xPais = TagCaracter(nome='xPais', codigo='C15', tamanho=[1, 60], raiz='//CTe/infCte/emit/enderEmit', obrigatorio=False)
        self.fone = TagInteiro(nome='fone', codigo='C16', tamanho=[6, 14], raiz='//CTe/infCte/emit/enderEmit', obrigatorio=False)

    def get_xml(self):
        xml = XMLNFe.get_xml(self)
        xml += '<enderEmit>'
        xml += self.xLgr.xml
        xml += self.nro.xml
        xml += self.xCpl.xml
        xml += self.xBairro.xml
        xml += self.cMun.xml
        xml += self.xMun.xml
        xml += self.CEP.xml
        xml += self.UF.xml
        #xml += self.cPais.xml
        #xml += self.xPais.xml
        xml += self.fone.xml
        xml += '</enderEmit>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.xLgr.xml = arquivo
            self.nro.xml = arquivo
            self.xCpl.xml = arquivo
            self.xBairro.xml = arquivo
            self.cMun.xml = arquivo
            self.xMun.xml = arquivo
            self.CEP.xml = arquivo
            self.UF.xml = arquivo
            #self.cPais.xml = arquivo
            #self.xPais.xml = arquivo
            self.fone.xml = arquivo

    xml = property(get_xml, set_xml)

    def get_txt(self):
        txt = 'C05|'
        txt += self.xLgr.txt + '|'
        txt += self.nro.txt + '|'
        txt += self.xCpl.txt + '|'
        txt += self.xBairro.txt + '|'
        txt += self.cMun.txt + '|'
        txt += self.xMun.txt + '|'
        txt += self.CEP.txt + '|'
        txt += self.UF.txt + '|'
        #txt += self.cPais.txt + '|'
        #txt += self.xPais.txt + '|'
        txt += self.fone.txt + '|'
        txt += '\n'
        return txt

    txt = property(get_txt)


class Emit(XMLNFe):
    def __init__(self):
        super(Emit, self).__init__()
        self.CNPJ = TagCaracter(nome='CNPJ', codigo='C02', tamanho=[14, 14], raiz='//CTe/infCte/emit', obrigatorio=False)
        self.IE = TagCaracter(nome='IE', codigo='C17', tamanho=[2, 14], raiz='//CTe/infCte/emit', obrigatorio=False)
        self.IEST = TagInteiro(nome='IEST', tamanho=[14, 14], raiz='//CTe/infCte/emit', obrigatorio=False)
        self.xNome = TagCaracter(nome='xNome', codigo='C03', tamanho=[2, 60], raiz='//CTe/infCte/emit')
        self.xFant = TagCaracter(nome='xFant', codigo='C04', tamanho=[1, 60], raiz='//CTe/infCte/emit', obrigatorio=False)
        self.enderEmit = EnderEmit()

    def get_xml(self):
        xml = XMLNFe.get_xml(self)
        xml += '<emit>'
        xml += self.CNPJ.xml
        xml += self.IE.xml
        xml += self.IEST.xml
        xml += self.xNome.xml
        xml += self.xFant.xml
        xml += self.enderEmit.xml
        xml += '</emit>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.CNPJ.xml = arquivo
            self.IE.xml = arquivo
            self.IEST.xml = arquivo
            self.xNome.xml = arquivo
            self.xFant.xml = arquivo
            self.enderEmit.xml = arquivo

    xml = property(get_xml, set_xml)


class ObsFisco(XMLNFe):
    def __init__(self):
        super(ObsFisco, self).__init__()
        self.xCampo = TagCaracter(nome='ObsFisco', codigo='Z08', propriedade='xCampo', tamanho=[1, 20], raiz='/', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
        self.xTexto = TagCaracter(nome='xTexto', codigo='Z09', tamanho=[1, 60], raiz='//ObsFisco', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)

    def get_xml(self):
        if not (self.xCampo.valor or self.xTexto.valor):
            return ''

        xml = XMLNFe.get_xml(self)
        xml += '<ObsFisco xCampo="' + self.xCampo.valor + '">'
        xml += self.xTexto.xml
        xml += '</ObsFisco>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.xCampo.xml = arquivo
            self.xTexto.xml = arquivo

    xml = property(get_xml, set_xml)


class ObsCont(XMLNFe):
    def __init__(self):
        super(ObsCont, self).__init__()
        self.xCampo = TagCaracter(nome='ObsCont', propriedade='xCampo', tamanho=[1, 20], raiz='/', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
        self.xTexto = TagCaracter(nome='xTexto', codigo='Z06', tamanho=[1, 160], raiz='//ObsCont', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)

    def get_xml(self):
        if not (self.xCampo.valor or self.xTexto.valor):
            return ''

        xml = XMLNFe.get_xml(self)
        xml += '<ObsCont xCampo="' + self.xCampo.valor + '">'
        xml += self.xTexto.xml
        xml += '</ObsCont>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.xCampo.xml = arquivo
            self.xTexto.xml = arquivo

    xml = property(get_xml, set_xml)


class Entrega(XMLNFe):
    def __init__(self):
        super(Entrega, self).__init__()

        #
        # Delivery date
        #
        self.tpPer = TagCaracter(nome='tpPer', tamanho=[1, 1, 1], raiz='//CTe/infCte/compl/Entrega/semData')
        self.tpPerSemData = TagCaracter(nome='tpPer', tamanho=[1, 1, 1], raiz='//CTe/infCte/compl/Entrega/semData', obrigatorio=False)
        self.tpPerComData = TagCaracter(nome='tpPer', tamanho=[1, 1, 1], raiz='//CTe/infCte/compl/Entrega/comData', obrigatorio=False)
        self.dProg = TagData(nome='dProg', raiz='//CTe/infCte/compl/Entrega/comData')
        self.tpPerNoPeriodo = TagCaracter(nome='tpPer', tamanho=[1, 1, 1], raiz='//CTe/infCte/compl/Entrega/noPeriodo', obrigatorio=False)
        self.dIni = TagData(nome='dIni', raiz='//CTe/infCte/compl/Entrega/noPeriodo')
        self.dFim = TagData(nome='dFim', raiz='//CTe/infCte/compl/Entrega/noPeriodo')

        #
        # Delivery time
        #
        self.tpHor = TagCaracter(nome='tpHor', tamanho=[1, 1, 1], raiz='//CTe/infCte/compl/Entrega/semHora')
        self.tpHorSemHora = TagCaracter(nome='tpHor', tamanho=[1, 1, 1], raiz='//CTe/infCte/compl/Entrega/semHora', obrigatorio=False)
        self.tpHorComHora = TagCaracter(nome='tpHor', tamanho=[1, 1, 1], raiz='//CTe/infCte/compl/Entrega/comHora', obrigatorio=False)
        self.hProg = TagHora(nome='hProg', raiz='//CTe/infCte/compl/Entrega/comHora')
        self.tpHorNoInter = TagCaracter(nome='tpHor', tamanho=[1, 1, 1], raiz='//CTe/infCte/compl/Entrega/noInter', obrigatorio=False)
        self.hIni = TagHora(nome='hIni', raiz='//CTe/infCte/compl/Entrega/noInter')
        self.hFim = TagHora(nome='hFim', raiz='//CTe/infCte/compl/Entrega/noInter')

    def get_xml(self):
        if not (self.tpPer.valor and self.tpHor.valor):
            return ''

        xml = XMLNFe.get_xml(self)
        xml += '<Entrega>'

        if self.tpPer.valor == '0':
            xml += '<semData>'
            xml += self.tpPer.xml
            xml += '</semData>'
        elif self.tpPer.valor <= '3':
            xml += '<comData>'
            xml += self.tpPer.xml
            xml += self.dProg.xml
            xml += '</comData>'
        else:
            xml += '<noPeriodo>'
            xml += self.tpPer.xml
            xml += self.dIni.xml
            xml += self.dFim.xml
            xml += '</noPeriodo>'

        if self.tpHor.valor == '0':
            xml += '<semHora>'
            xml += self.tpHor.xml
            xml += '</semHora>'
        elif self.tpHor.valor <= '3':
            xml += '<comHora>'
            xml += self.tpHor.xml
            xml += self.hProg.xml
            xml += '</comHora>'
        else:
            xml += '<noInter>'
            xml += self.tpHor.xml
            xml += self.hIni.xml
            xml += self.hFim.xml
            xml += '</noInter>'

        xml += '</Entrega>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            if self._le_noh('//CTe/infCte/compl/Entrega/semData') is not None:
                self.tpPerSemData.xml = arquivo
                self.tpPer.valor = self.tpPerSemData.valor
            elif self._le_noh('//CTe/infCte/compl/Entrega/comData') is not None:
                self.tpPerComData.xml = arquivo
                self.tpPer.valor = self.tpPerComData.valor
            else:
                self.tpPerNoPeriodo.xml = arquivo
                self.tpPer.valor = self.tpPerNoPeriodo.valor

            self.dProg.xml = arquivo
            self.dIni.xml = arquivo
            self.dFim.xml = arquivo

            if self._le_noh('//CTe/infCte/compl/Entrega/semHora') is not None:
                self.tpHorSemHora.xml = arquivo
                self.tpHor.valor = self.tpHorSemHora.valor
            elif self._le_noh('//CTe/infCte/compl/Entrega/comHora') is not None:
                self.tpHorComHora.xml = arquivo
                self.tpHor.valor = self.tpHorComHora.valor
            else:
                self.tpHorNoInter.xml = arquivo
                self.tpHor.valor = self.tpHorNoInter.valor

            self.hProg.xml = arquivo
            self.hIni.xml = arquivo
            self.hFim.xml = arquivo

    xml = property(get_xml, set_xml)
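

# In Entrega, tpPer selects the date block ('0' emits <semData>, values up
# to '3' emit <comData> with a programmed date, anything else emits
# <noPeriodo> with a date range) and tpHor does the same for the hour
# blocks. A sketch of a delivery programmed for a specific date with no
# hour, assuming TagData accepts a datetime.date:
#
#     import datetime
#
#     entrega = Entrega()
#     entrega.tpPer.valor = '2'
#     entrega.dProg.valor = datetime.date(2024, 1, 31)
#     entrega.tpHor.valor = '0'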


class Pass(XMLNFe):
    def __init__(self):
        super(Pass, self).__init__()
        self.xPass = TagCaracter(nome='xPass', tamanho=[1, 15], raiz='//pass', obrigatorio=False, namespace=NAMESPACE_CTE, namespace_obrigatorio=False)

    def get_xml(self):
        if not self.xPass.valor:
            return ''

        xml = XMLNFe.get_xml(self)
        xml += '<pass>'
        xml += self.xPass.xml
        xml += '</pass>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.xPass.xml = arquivo

    xml = property(get_xml, set_xml)


class Fluxo(XMLNFe):
    def __init__(self):
        super(Fluxo, self).__init__()
        self.xOrig = TagCaracter(nome='xOrig', tamanho=[1, 60], raiz='//CTe/infCte/compl/fluxo', obrigatorio=False)
        self.passagem = []
        self.xDest = TagCaracter(nome='xDest', tamanho=[1, 60], raiz='//CTe/infCte/compl/fluxo', obrigatorio=False)
        self.xRota = TagCaracter(nome='xRota', tamanho=[1, 10], raiz='//CTe/infCte/compl/fluxo', obrigatorio=False)

    def get_xml(self):
        if not (self.xOrig.valor or self.xDest.valor or self.xRota.valor or len(self.passagem)):
            return ''

        xml = XMLNFe.get_xml(self)
        xml += '<fluxo>'
        xml += self.xOrig.xml

        if len(self.passagem):
            for p in self.passagem:
                xml += p.xml

        xml += self.xDest.xml
        xml += self.xRota.xml
        xml += '</fluxo>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.xOrig.xml = arquivo
            self.passagem = self.le_grupo('//CTe/infCte/compl/fluxo/pass', Pass, sigla_ns='cte')
            self.xDest.xml = arquivo
            self.xRota.xml = arquivo

    xml = property(get_xml, set_xml)


class Compl(XMLNFe):
    def __init__(self):
        super(Compl, self).__init__()
        self.xCaracAd = TagCaracter(nome='xCaracAd', tamanho=[1, 15], raiz='//CTe/infCte/compl', obrigatorio=False)
        self.xCaracSer = TagCaracter(nome='xCaracSer', tamanho=[1, 30], raiz='//CTe/infCte/compl', obrigatorio=False)
        self.xEmi = TagCaracter(nome='xEmi', tamanho=[1, 20], raiz='//CTe/infCte/compl', obrigatorio=False)
        self.fluxo = Fluxo()
        self.Entrega = Entrega()
        self.origCalc = TagCaracter(nome='origCalc', tamanho=[2, 40], raiz='//CTe/infCte/compl', obrigatorio=False)
        self.destCalc = TagCaracter(nome='destCalc', tamanho=[2, 40], raiz='//CTe/infCte/compl', obrigatorio=False)
        self.xObs = TagCaracter(nome='xObs', tamanho=[1, 2000], raiz='//CTe/infCte/compl', obrigatorio=False)
        self.ObsCont = []
        self.ObsFisco = []

    def get_xml(self):
        if not (self.xCaracAd.valor or self.xCaracSer.valor or self.xEmi.valor or self.origCalc.valor or self.destCalc.valor or self.xObs.valor or len(self.ObsCont) or len(self.ObsFisco) or self.fluxo is not None or self.Entrega is not None):
            return ''

        xml = XMLNFe.get_xml(self)
        xml += '<compl>'
        xml += self.xCaracAd.xml
        xml += self.xCaracSer.xml
        xml += self.xEmi.xml
        xml += self.fluxo.xml
        xml += self.Entrega.xml
        xml += self.origCalc.xml
        xml += self.destCalc.xml
        xml += self.xObs.xml

        for o in self.ObsCont:
            xml += o.xml

        for o in self.ObsFisco:
            xml += o.xml

        xml += '</compl>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.xCaracAd.xml = arquivo
            self.xCaracSer.xml = arquivo
            self.xEmi.xml = arquivo
            self.fluxo.xml = arquivo
            self.Entrega.xml = arquivo
            self.origCalc.xml = arquivo
            self.destCalc.xml = arquivo
            self.xObs.xml = arquivo
            self.ObsCont = self.le_grupo('//CTe/infCte/compl/ObsCont', ObsCont, sigla_ns='cte')
            self.ObsFisco = self.le_grupo('//CTe/infCte/compl/ObsFisco', ObsFisco, sigla_ns='cte')

    xml = property(get_xml, set_xml)


class EnderToma(XMLNFe):
    def __init__(self):
        super(EnderToma, self).__init__()
        self.xLgr = TagCaracter(nome='xLgr', tamanho=[2, 255], raiz='//CTe/infCte/ide/toma4/enderToma')
        self.nro = TagCaracter(nome='nro', tamanho=[1, 60], raiz='//CTe/infCte/ide/toma4/enderToma')
        self.xCpl = TagCaracter(nome='xCpl', tamanho=[1, 60], raiz='//CTe/infCte/ide/toma4/enderToma', obrigatorio=False)
        self.xBairro = TagCaracter(nome='xBairro', tamanho=[2, 60], raiz='//CTe/infCte/ide/toma4/enderToma')
        self.cMun = TagInteiro(nome='cMun', tamanho=[7, 7, 7], raiz='//CTe/infCte/ide/toma4/enderToma')
        self.xMun = TagCaracter(nome='xMun', tamanho=[2, 60], raiz='//CTe/infCte/ide/toma4/enderToma')
        self.CEP = TagCaracter(nome='CEP', tamanho=[8, 8, 8], raiz='//CTe/infCte/ide/toma4/enderToma', obrigatorio=False)
        self.UF = TagCaracter(nome='UF', tamanho=[2, 2], raiz='//CTe/infCte/ide/toma4/enderToma')
        self.cPais = TagInteiro(nome='cPais', tamanho=[4, 4, 4], raiz='//CTe/infCte/ide/toma4/enderToma', obrigatorio=False)
        self.xPais = TagCaracter(nome='xPais', tamanho=[2, 60], raiz='//CTe/infCte/ide/toma4/enderToma', obrigatorio=False)

    def get_xml(self):
        xml = XMLNFe.get_xml(self)
        xml += '<enderToma>'
        xml += self.xLgr.xml
        xml += self.nro.xml
        xml += self.xCpl.xml
        xml += self.xBairro.xml
        xml += self.cMun.xml
        xml += self.xMun.xml
        xml += self.CEP.xml
        xml += self.UF.xml
        xml += self.cPais.xml
        xml += self.xPais.xml
        xml += '</enderToma>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.xLgr.xml = arquivo
            self.nro.xml = arquivo
            self.xCpl.xml = arquivo
            self.xBairro.xml = arquivo
            self.cMun.xml = arquivo
            self.xMun.xml = arquivo
            self.CEP.xml = arquivo
            self.UF.xml = arquivo
            self.cPais.xml = arquivo
            self.xPais.xml = arquivo

    xml = property(get_xml, set_xml)


class Tomador(XMLNFe):
    def __init__(self):
        super(Tomador, self).__init__()
        self.toma = TagInteiro(nome='toma', tamanho=[1, 1, 1], raiz='//CTe/infCte/ide/toma3', valor=0)
        self.toma3 = TagInteiro(nome='toma', tamanho=[1, 1, 1], raiz='//CTe/infCte/ide/toma3', valor=0)
        self.toma4 = TagInteiro(nome='toma', tamanho=[1, 1, 1], raiz='//CTe/infCte/ide/toma4', valor=4)
        self.CNPJ = TagCaracter(nome='CNPJ', tamanho=[0, 14], raiz='//CTe/infCte/ide/toma4', obrigatorio=False)
        self.CPF = TagCaracter(nome='CPF', tamanho=[11, 11], raiz='//CTe/infCte/ide/toma4', obrigatorio=False)
        self.IE = TagCaracter(nome='IE', tamanho=[2, 14], raiz='//CTe/infCte/ide/toma4', obrigatorio=False)
        self.xNome = TagCaracter(nome='xNome', tamanho=[2, 60], raiz='//CTe/infCte/ide/toma4')
        self.xFant = TagCaracter(nome='xFant', tamanho=[2, 60], raiz='//CTe/infCte/ide/toma4', obrigatorio=False)
        self.fone = TagInteiro(nome='fone', tamanho=[6, 14], raiz='//CTe/infCte/ide/toma4', obrigatorio=False)
        self.enderToma = EnderToma()
        self.email = TagCaracter(nome='email', tamanho=[1, 60], raiz='//CTe/infCte/ide/toma4', obrigatorio=False)

    def get_xml(self):
        xml = XMLNFe.get_xml(self)

        if self.toma.valor < 4:
            xml += '<toma3>'
            xml += self.toma.xml
            xml += '</toma3>'
        else:
            xml += '<toma4>'
            xml += self.toma.xml

            if self.CPF.valor:
                xml += self.CPF.xml
            else:
                xml += self.CNPJ.xml

            xml += self.IE.xml
            xml += self.xNome.xml
            xml += self.xFant.xml
            xml += self.fone.xml
            xml += self.enderToma.xml
            xml += self.email.xml
            xml += '</toma4>'

        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.CNPJ.xml = arquivo
            self.CPF.xml = arquivo
            self.IE.xml = arquivo
            self.xNome.xml = arquivo
            self.xFant.xml = arquivo
            self.fone.xml = arquivo
            self.enderToma.xml = arquivo
            self.email.xml = arquivo

            if self._le_noh('//CTe/infCte/ide/toma3/toma') is not None:
                self.toma3.xml = arquivo
                self.toma.valor = self.toma3.valor
            else:
                self.toma4.xml = arquivo
                self.toma.valor = self.toma4.valor

    xml = property(get_xml, set_xml)
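

# In Tomador, toma < 4 means the service taker is one of the parties already
# identified in the document (0 = rem, 1 = exped, 2 = receb, 3 = dest) and
# only the <toma3> indicator is emitted; toma == 4 emits the full <toma4>
# registration. InfCTe.set_xml (further below) uses the same mapping to copy
# the taker's data back into the tomador object when reading a file.
# A sketch:
#
#     tomador = Tomador()
#     tomador.toma.valor = 3      # the taker is the dest party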


class Ide(XMLNFe):
    def __init__(self):
        super(Ide, self).__init__()
        self.cUF = TagInteiro(nome='cUF', tamanho=[2, 2, 2], raiz='//CTe/infCte/ide')
        self.cCT = TagCaracter(nome='cCT', tamanho=[8, 8, 8], raiz='//CTe/infCte/ide')
        self.CFOP = TagCaracter(nome='CFOP', tamanho=[4, 4, 4], raiz='//CTe/infCte/ide')
        self.natOp = TagCaracter(nome='natOp', tamanho=[1, 60], raiz='//CTe/infCte/ide')
        self.mod = TagInteiro(nome='mod', tamanho=[2, 2, 2], raiz='//CTe/infCte/ide', valor=57)
        self.serie = TagInteiro(nome='serie', tamanho=[1, 3, 1], raiz='//CTe/infCte/ide')
        self.nCT = TagInteiro(nome='nCT', tamanho=[1, 9, 1], raiz='//CTe/infCte/ide')
        self.dhEmi = TagDataHoraUTC(nome='dhEmi', raiz='//CTe/infCte/ide')
        self.tpImp = TagInteiro(nome='tpImp', tamanho=[1, 1, 1], raiz='//CTe/infCte/ide', valor=1)
        self.tpEmis = TagInteiro(nome='tpEmis', tamanho=[1, 1, 1], raiz='//CTe/infCte/ide', valor=1)
        self.cDV = TagInteiro(nome='cDV', tamanho=[1, 1, 1], raiz='//CTe/infCte/ide')
        self.tpAmb = TagInteiro(nome='tpAmb', tamanho=[1, 1, 1], raiz='//CTe/infCte/ide', valor=2)
        self.tpCTe = TagInteiro(nome='tpCTe', tamanho=[1, 1, 1], raiz='//CTe/infCte/ide', valor=1)
        self.procEmi = TagInteiro(nome='procEmi', tamanho=[1, 1, 1], raiz='//CTe/infCte/ide')
        self.verProc = TagCaracter(nome='verProc', tamanho=[1, 20], raiz='//CTe/infCte/ide')
        self.indGlobalizado = TagInteiro(nome='indGlobalizado', tamanho=[1, 1, 1], raiz='//CTe/infCte/ide', obrigatorio=False)
        self.cMunEnv = TagInteiro(nome='cMunEnv', tamanho=[7, 7, 7], raiz='//CTe/infCte/ide')
        self.xMunEnv = TagCaracter(nome='xMunEnv', tamanho=[1, 60], raiz='//CTe/infCte/ide')
        self.UFEnv = TagCaracter(nome='UFEnv', tamanho=[2, 2, 2], raiz='//CTe/infCte/ide')
        self.modal = TagCaracter(nome='modal', tamanho=[2, 2, 2], raiz='//CTe/infCte/ide', default='01')
        self.tpServ = TagInteiro(nome='tpServ', tamanho=[1, 1, 1], raiz='//CTe/infCte/ide', valor=0)
        self.cMunIni = TagInteiro(nome='cMunIni', tamanho=[7, 7, 7], raiz='//CTe/infCte/ide')
        self.xMunIni = TagCaracter(nome='xMunIni', tamanho=[1, 60], raiz='//CTe/infCte/ide')
        self.UFIni = TagCaracter(nome='UFIni', tamanho=[2, 2, 2], raiz='//CTe/infCte/ide')
        self.cMunFim = TagInteiro(nome='cMunFim', tamanho=[7, 7, 7], raiz='//CTe/infCte/ide')
        self.xMunFim = TagCaracter(nome='xMunFim', tamanho=[1, 60], raiz='//CTe/infCte/ide')
        self.UFFim = TagCaracter(nome='UFFim', tamanho=[2, 2, 2], raiz='//CTe/infCte/ide')
        self.retira = TagInteiro(nome='retira', tamanho=[1, 1, 1], raiz='//CTe/infCte/ide', valor=0)
        self.xDetRetira = TagCaracter(nome='xDetRetira', tamanho=[1, 160], raiz='//CTe/infCte/ide', obrigatorio=False)
        self.indIEToma = TagInteiro(nome='indIEToma', tamanho=[1, 1, 1], raiz='//CTe/infCte/ide')
        self.tomador = Tomador()
        self.dhCont = TagDataHora(nome='dhCont', raiz='//CTe/infCte/ide', obrigatorio=False)
        self.xJust = TagCaracter(nome='xJust', raiz='//CTe/infCte/ide', tamanho=[15, 256], obrigatorio=False)

    def get_xml(self):
        xml = XMLNFe.get_xml(self)
        xml += '<ide>'
        xml += self.cUF.xml
        xml += self.cCT.xml
        xml += self.CFOP.xml
        xml += self.natOp.xml
        xml += self.mod.xml
        xml += self.serie.xml
        xml += self.nCT.xml
        xml += self.dhEmi.xml
        xml += self.tpImp.xml
        xml += self.tpEmis.xml
        xml += self.cDV.xml
        xml += self.tpAmb.xml
        xml += self.tpCTe.xml
        xml += self.procEmi.xml
        xml += self.verProc.xml
        xml += self.indGlobalizado.xml
        xml += self.cMunEnv.xml
        xml += self.xMunEnv.xml
        xml += self.UFEnv.xml
        xml += self.modal.xml
        xml += self.tpServ.xml
        xml += self.cMunIni.xml
        xml += self.xMunIni.xml
        xml += self.UFIni.xml
        xml += self.cMunFim.xml
        xml += self.xMunFim.xml
        xml += self.UFFim.xml
        xml += self.retira.xml
        xml += self.xDetRetira.xml
        xml += self.indIEToma.xml
        xml += self.tomador.xml
        xml += self.dhCont.xml
        xml += self.xJust.xml
        xml += '</ide>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.cUF.xml = arquivo
            self.cCT.xml = arquivo
            self.CFOP.xml = arquivo
            self.natOp.xml = arquivo
            self.mod.xml = arquivo
            self.serie.xml = arquivo
            self.nCT.xml = arquivo
            self.dhEmi.xml = arquivo
            self.tpImp.xml = arquivo
            self.tpEmis.xml = arquivo
            self.cDV.xml = arquivo
            self.tpAmb.xml = arquivo
            self.tpCTe.xml = arquivo
            self.procEmi.xml = arquivo
            self.verProc.xml = arquivo
            self.indGlobalizado.xml = arquivo
            self.cMunEnv.xml = arquivo
            self.xMunEnv.xml = arquivo
            self.UFEnv.xml = arquivo
            self.modal.xml = arquivo
            self.tpServ.xml = arquivo
            self.cMunIni.xml = arquivo
            self.xMunIni.xml = arquivo
            self.UFIni.xml = arquivo
            self.cMunFim.xml = arquivo
            self.xMunFim.xml = arquivo
            self.UFFim.xml = arquivo
            self.retira.xml = arquivo
            self.xDetRetira.xml = arquivo
            self.indIEToma.xml = arquivo
            self.tomador.xml = arquivo
            self.dhCont.xml = arquivo
            self.xJust.xml = arquivo

    xml = property(get_xml, set_xml)
self.tpAmb.xml xml += self.tpCTe.xml xml += self.procEmi.xml xml += self.verProc.xml xml += self.indGlobalizado.xml xml += self.cMunEnv.xml xml += self.xMunEnv.xml xml += self.UFEnv.xml xml += self.modal.xml xml += self.tpServ.xml xml += self.cMunIni.xml xml += self.xMunIni.xml xml += self.UFIni.xml xml += self.cMunFim.xml xml += self.xMunFim.xml xml += self.UFFim.xml xml += self.retira.xml xml += self.xDetRetira.xml xml += self.indIEToma.xml xml += self.tomador.xml xml += self.dhCont.xml xml += self.xJust.xml xml += '</ide>' return xml def set_xml(self, arquivo): if self._le_xml(arquivo): self.cUF.xml = arquivo self.cCT.xml = arquivo self.CFOP.xml = arquivo self.natOp.xml = arquivo self.mod.xml = arquivo self.serie.xml = arquivo self.nCT.xml = arquivo self.dhEmi.xml = arquivo self.tpImp.xml = arquivo self.tpEmis.xml = arquivo self.cDV.xml = arquivo self.tpAmb.xml = arquivo self.tpCTe.xml = arquivo self.procEmi.xml = arquivo self.verProc.xml = arquivo self.indGlobalizado.xml = arquivo self.cMunEnv.xml = arquivo self.xMunEnv.xml = arquivo self.UFEnv.xml = arquivo self.modal.xml = arquivo self.tpServ.xml = arquivo self.cMunIni.xml = arquivo self.xMunIni.xml = arquivo self.UFIni.xml = arquivo self.cMunFim.xml = arquivo self.xMunFim.xml = arquivo self.UFFim.xml = arquivo self.retira.xml = arquivo self.xDetRetira.xml = arquivo self.indIEToma.xml = arquivo self.tomador.xml = arquivo self.dhCont.xml = arquivo self.xJust.xml = arquivo xml = property(get_xml, set_xml) class InfCTe(XMLNFe): def __init__(self): super(InfCTe, self).__init__() self.versao = TagDecimal(nome='infCte', propriedade='versao', raiz='//CTe', namespace=NAMESPACE_CTE, valor='3.00') self.Id = TagCaracter(nome='infCte', propriedade='Id', raiz='//CTe', namespace=NAMESPACE_CTE) self.ide = Ide() self.compl = Compl() self.emit = Emit() self.rem = Rem() self.exped = Exped() self.receb = Receb() self.dest = Dest() self.vPrest = VPrest() self.imp = Imp() self.autXML = [] ##Escolha tipo CT-e: #0 - CT-e Normal ou Substituto #1 - CT-e Complemento de valores #2 - CT-e de Anulação self.tipo_cte = 0 self.infCTeNorm = InfCTeNorm() self.infCTeComp = InfCTeComp() self.infCTeAnu = InfCTeAnu() def get_xml(self): xml = XMLNFe.get_xml(self) xml += '<infCte versao="' + str(self.versao.valor) + '" Id="' + self.Id.valor + '">' xml += self.ide.xml xml += self.compl.xml xml += self.emit.xml xml += self.rem.xml xml += self.exped.xml xml += self.receb.xml xml += self.dest.xml xml += self.vPrest.xml xml += self.imp.xml if self.tipo_cte == 1: xml += self.infCTeComp.xml elif self.tipo_cte == 2: xml += self.infCTeAnu.xml else: xml += self.infCTeNorm.xml xml += '</infCte>' return xml def set_xml(self, arquivo): if self._le_xml(arquivo): self.versao.xml = arquivo self.Id.xml = arquivo self.ide.xml = arquivo self.compl.xml = arquivo self.emit.xml = arquivo self.rem.xml = arquivo self.exped.xml = arquivo self.receb.xml = arquivo self.dest.xml = arquivo self.vPrest.xml = arquivo self.imp.xml = arquivo self.infCTeNorm.xml = arquivo self.infCTeComp.xml = arquivo self.infCTeAnu.xml = arquivo if self.ide.tomador.toma.valor != 4: if self.ide.tomador.toma.valor == 0: tomador = self.rem endertoma = self.rem.enderReme elif self.ide.tomador.toma.valor == 1: tomador = self.exped endertoma = self.exped.enderExped elif self.ide.tomador.toma.valor == 2: tomador = self.receb endertoma = self.receb.enderReceb elif self.ide.tomador.toma.valor == 3: tomador = self.dest endertoma = self.dest.enderDest self.ide.tomador.CNPJ.valor = tomador.CNPJ.valor self.ide.tomador.CPF.valor = 
tomador.CPF.valor
                self.ide.tomador.IE.valor = tomador.IE.valor
                self.ide.tomador.xNome.valor = tomador.xNome.valor

                try:
                    self.ide.tomador.xFant.valor = tomador.xFant.valor
                except AttributeError:
                    pass

                self.ide.tomador.fone.valor = tomador.fone.valor
                self.ide.tomador.email.valor = tomador.email.valor
                self.ide.tomador.enderToma.xLgr.valor = endertoma.xLgr.valor
                self.ide.tomador.enderToma.nro.valor = endertoma.nro.valor
                self.ide.tomador.enderToma.xCpl.valor = endertoma.xCpl.valor
                self.ide.tomador.enderToma.xBairro.valor = endertoma.xBairro.valor
                self.ide.tomador.enderToma.cMun.valor = endertoma.cMun.valor
                self.ide.tomador.enderToma.xMun.valor = endertoma.xMun.valor
                self.ide.tomador.enderToma.CEP.valor = endertoma.CEP.valor
                self.ide.tomador.enderToma.UF.valor = endertoma.UF.valor
                self.ide.tomador.enderToma.cPais.valor = endertoma.cPais.valor
                self.ide.tomador.enderToma.xPais.valor = endertoma.xPais.valor

    xml = property(get_xml, set_xml)


class CTe(XMLNFe):
    def __init__(self):
        super(CTe, self).__init__()
        self.infCte = InfCTe()
        self.Signature = Signature()
        self.caminho_esquema = os.path.join(DIRNAME, 'schema/', ESQUEMA_ATUAL + '/')
        self.arquivo_esquema = 'cte_v3.00.xsd'

    def get_xml(self):
        xml = XMLNFe.get_xml(self)
        xml += ABERTURA
        xml += '<CTe xmlns="' + NAMESPACE_CTE + '">'
        xml += self.infCte.xml

        #
        # Define the URI to be signed
        #
        self.Signature.URI = '#' + self.infCte.Id.valor
        xml += self.Signature.xml
        xml += '</CTe>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.infCte.xml = arquivo
            self.Signature.xml = self._le_noh('//CTe/sig:Signature')

    xml = property(get_xml, set_xml)

    def _calcula_dv(self, valor):
        soma = 0
        m = 2
        for i in range(len(valor) - 1, -1, -1):
            c = valor[i]
            soma += int(c) * m
            m += 1
            if m > 9:
                m = 2

        digito = 11 - (soma % 11)
        if digito > 9:
            digito = 0

        return digito

    def gera_nova_chave(self):
        chave = str(self.infCte.ide.cUF.valor).zfill(2)
        chave += str(self.infCte.ide.dhEmi.valor.strftime('%y%m')).zfill(4)
        chave += str(self.infCte.emit.CNPJ.valor).zfill(14)
        chave += str(self.infCte.ide.mod.valor).zfill(2)
        chave += str(self.infCte.ide.serie.valor).zfill(3)
        chave += str(self.infCte.ide.nCT.valor).zfill(9)
        chave += str(self.infCte.ide.tpEmis.valor).zfill(1)

        #
        # The numeric code is a random number
        #
        #chave += str(random.randint(0, 99999999)).strip().rjust(8, '0')

        #
        # But, for safety, it is preferable that this number not be entirely random
        #
        soma = 0
        for c in chave:
            soma += int(c) ** 3 ** 2

        codigo = str(soma)
        if len(codigo) > 8:
            codigo = codigo[-8:]
        else:
            codigo = codigo.rjust(8, '0')

        chave += codigo

        #
        # Set the cCT field in the XML structure
        #
        #self.infCte.ide.cCT.valor = str(self.infCte.ide.tpEmis.valor).zfill(1) + codigo
        self.infCte.ide.cCT.valor = chave[-8:]

        #
        # Generate the check digit
        #
        digito = self._calcula_dv(chave)

        #
        # Set the cDV field in the XML structure
        #
        self.infCte.ide.cDV.valor = digito
        chave += str(digito)
        self.chave = chave

        #
        # Set the Id
        #
        self.infCte.Id.valor = 'CTe' + chave

    def monta_chave(self):
        self.gera_nova_chave()
        chave = str(self.infCte.ide.cUF.valor).zfill(2)
        chave += str(self.infCte.ide.dhEmi.valor.strftime('%y%m')).zfill(4)
        chave += str(self.infCte.emit.CNPJ.valor).zfill(14)
        chave += str(self.infCte.ide.mod.valor).zfill(2)
        chave += str(self.infCte.ide.serie.valor).zfill(3)
        chave += str(self.infCte.ide.nCT.valor).zfill(9)
        chave += str(self.infCte.ide.tpEmis.valor).zfill(1)
        chave += str(self.infCte.ide.cCT.valor).zfill(8)
        chave += str(self.infCte.ide.cDV.valor).zfill(1)
        self.chave = chave
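
# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch, not part of the original source.
# The access-key check digit computed by CTe._calcula_dv above is a standard
# "modulo 11" routine with weights cycling 2..9 from the rightmost digit.
# A self-contained version of the same algorithm, usable without
# instantiating CTe, could look like this; `modulo11` is a hypothetical
# helper name chosen for illustration.
# ---------------------------------------------------------------------------
def modulo11(digits):
    """Return the modulo-11 check digit for a string of decimal digits."""
    soma = 0
    peso = 2
    for c in reversed(digits):
        soma += int(c) * peso
        # weights run 2, 3, ..., 9 and then wrap back to 2
        peso = 2 if peso == 9 else peso + 1
    digito = 11 - (soma % 11)
    # remainders 0 and 1 would yield 11 and 10; both map to check digit 0
    return 0 if digito > 9 else digito

# Example: a 43-digit key gets its 44th (check) digit appended.
# chave_44 = chave_43 + str(modulo11(chave_43))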
aricaldeira/PySPED
pysped/cte/leiaute/cte_300.py
Python
lgpl-2.1
106,356
# -*- coding: utf-8 -*-

"""
***************************************************************************
    RandomPointsLayer.py
    ---------------------
    Date                 : April 2014
    Copyright            : (C) 2014 by Alexander Bruy
    Email                : alexander dot bruy at gmail dot com
***************************************************************************
*                                                                         *
*   This program is free software; you can redistribute it and/or modify  *
*   it under the terms of the GNU General Public License as published by  *
*   the Free Software Foundation; either version 2 of the License, or     *
*   (at your option) any later version.                                   *
*                                                                         *
***************************************************************************
"""

__author__ = 'Alexander Bruy'
__date__ = 'April 2014'
__copyright__ = '(C) 2014, Alexander Bruy'

# This will get replaced with a git SHA1 when you do a git archive

__revision__ = '$Format:%H$'

import random

from PyQt4.QtCore import QVariant
from qgis.core import (QGis, QgsGeometry, QgsFields, QgsField,
                       QgsSpatialIndex, QgsPoint, QgsFeature,
                       QgsFeatureRequest)

from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.ProcessingLog import ProcessingLog
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterNumber
from processing.core.outputs import OutputVector
from processing.tools import dataobjects, vector


class RandomPointsLayer(GeoAlgorithm):

    VECTOR = 'VECTOR'
    POINT_NUMBER = 'POINT_NUMBER'
    MIN_DISTANCE = 'MIN_DISTANCE'
    OUTPUT = 'OUTPUT'

    def defineCharacteristics(self):
        self.name, self.i18n_name = self.trAlgorithm('Random points in layer bounds')
        self.group, self.i18n_group = self.trAlgorithm('Vector creation tools')
        self.addParameter(ParameterVector(self.VECTOR,
                                          self.tr('Input layer'),
                                          [ParameterVector.VECTOR_TYPE_POLYGON]))
        self.addParameter(ParameterNumber(self.POINT_NUMBER,
                                          self.tr('Points number'), 1, None, 1))
        self.addParameter(ParameterNumber(self.MIN_DISTANCE,
                                          self.tr('Minimum distance'), 0.0, None, 0.0))
        self.addOutput(OutputVector(self.OUTPUT, self.tr('Random points')))

    def processAlgorithm(self, progress):
        layer = dataobjects.getObjectFromUri(
            self.getParameterValue(self.VECTOR))
        pointCount = int(self.getParameterValue(self.POINT_NUMBER))
        minDistance = float(self.getParameterValue(self.MIN_DISTANCE))

        bbox = layer.extent()
        idxLayer = vector.spatialindex(layer)

        fields = QgsFields()
        fields.append(QgsField('id', QVariant.Int, '', 10, 0))
        writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(
            fields, QGis.WKBPoint, layer.dataProvider().crs())

        nPoints = 0
        nIterations = 0
        maxIterations = pointCount * 200
        total = 100.0 / pointCount

        index = QgsSpatialIndex()
        points = dict()

        request = QgsFeatureRequest()
        random.seed()

        while nIterations < maxIterations and nPoints < pointCount:
            rx = bbox.xMinimum() + bbox.width() * random.random()
            ry = bbox.yMinimum() + bbox.height() * random.random()

            pnt = QgsPoint(rx, ry)
            geom = QgsGeometry.fromPoint(pnt)
            ids = idxLayer.intersects(geom.buffer(5, 5).boundingBox())
            if len(ids) > 0 and \
                    vector.checkMinDistance(pnt, index, minDistance, points):
                for i in ids:
                    f = layer.getFeatures(request.setFilterFid(i)).next()
                    tmpGeom = QgsGeometry(f.geometry())
                    if geom.within(tmpGeom):
                        f = QgsFeature(nPoints)
                        f.initAttributes(1)
                        f.setFields(fields)
                        f.setAttribute('id', nPoints)
                        f.setGeometry(geom)
                        writer.addFeature(f)
                        index.insertFeature(f)
                        points[nPoints] = pnt
                        nPoints += 1
                        progress.setPercentage(int(nPoints * total))
            nIterations += 1

        if nPoints < pointCount:
            ProcessingLog.addToLog(ProcessingLog.LOG_INFO,
                                   self.tr('Can not generate requested number of random points. '
                                           'Maximum number of attempts exceeded.'))

        del writer
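
# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch, not part of the original source.
# The loop above is rejection sampling: draw uniform points in the layer's
# bounding box, keep only those that fall inside a polygon and respect the
# minimum-distance constraint, and give up after a bounded number of tries.
# The same idea stripped of the QGIS API (point-in-polygon and the spatial
# index replaced by brute-force checks) could look like this; all names are
# assumptions for illustration.
# ---------------------------------------------------------------------------
import math
import random

def random_points(contains, bbox, count, min_distance, max_factor=200):
    """contains(x, y) -> bool tests membership; bbox = (xmin, ymin, xmax, ymax)."""
    xmin, ymin, xmax, ymax = bbox
    points = []
    for _ in range(count * max_factor):
        if len(points) >= count:
            break
        x = xmin + (xmax - xmin) * random.random()
        y = ymin + (ymax - ymin) * random.random()
        if not contains(x, y):
            continue  # rejected: outside the polygon
        if all(math.hypot(x - px, y - py) >= min_distance for px, py in points):
            points.append((x, y))
    return points  # may hold fewer than `count` points if attempts ran out

# Example: 10 points in the disc inscribed in the unit square, >= 0.1 apart.
# pts = random_points(lambda x, y: (x - .5) ** 2 + (y - .5) ** 2 <= .25,
#                     (0, 0, 1, 1), 10, 0.1)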
sebastic/QGIS
python/plugins/processing/algs/qgis/RandomPointsLayer.py
Python
gpl-2.0
4,815
# -*- coding: utf-8 -*- from __future__ import division from numpy import round, maximum as max_, logical_not as not_, logical_or as or_, vectorize from ...base import * # noqa analysis:ignore from .base_ressource import nb_enf class af_enfant_a_charge(Variable): column = BoolCol entity_class = Individus label = u"Enfant à charge au sens des allocations familiales" def function(self, simulation, period): period = period.this_month est_enfant_dans_famille = simulation.calculate('est_enfant_dans_famille', period) smic55 = simulation.calculate('smic55', period) age = simulation.calculate('age', period) rempli_obligation_scolaire = simulation.calculate('rempli_obligation_scolaire', period) pfam = simulation.legislation_at(period.start).fam condition_enfant = ((age >= pfam.enfants.age_minimal) * (age < pfam.enfants.age_intermediaire) * rempli_obligation_scolaire) condition_jeune = (age >= pfam.enfants.age_intermediaire) * (age < pfam.af.age3) * not_(smic55) return period, or_(condition_enfant, condition_jeune) * est_enfant_dans_famille class af_nbenf(Variable): column = IntCol entity_class = Familles label = u"Nombre d'enfants dans la famille au sens des allocations familiales" def function(self, simulation, period): period_mois = period.this_month af_enfant_a_charge_holder = simulation.compute('af_enfant_a_charge', period_mois) af_nbenf = self.sum_by_entity(af_enfant_a_charge_holder) return period, af_nbenf class af_coeff_garde_alternee(DatedVariable): column = FloatCol(default = 1) entity_class = Familles label = u"Coefficient à appliquer aux af pour tenir compte de la garde alternée" @dated_function(start = date(2007, 5, 1)) def function_2007(self, simulation, period): period = period.this_month nb_enf = simulation.calculate('af_nbenf', period) alt = simulation.compute('alt', period) af_enfant_a_charge = simulation.compute('af_enfant_a_charge', period) # Le nombre d'enfants à charge en garde alternée, qui vérifient donc af_enfant_a_charge = true et alt = true nb_enf_garde_alternee = self.sum_by_entity(alt.array * af_enfant_a_charge.array) # Avoid division by zero. 
If nb_enf == 0, necessarily nb_enf_garde_alternee = 0 so coeff = 1 coeff = 1 - (nb_enf_garde_alternee / (nb_enf + (nb_enf == 0))) * 0.5 return period, coeff class af_forf_nbenf(Variable): column = IntCol entity_class = Familles label = u"Nombre d'enfants dans la famille éligibles à l'allocation forfaitaire des AF" def function(self, simulation, period): period = period.this_month age_holder = simulation.compute('age', period) age = self.split_by_roles(age_holder, roles = ENFS) smic55_holder = simulation.compute('smic55', period) smic55 = self.split_by_roles(smic55_holder, roles = ENFS) pfam = simulation.legislation_at(period.start).fam.af af_forf_nbenf = nb_enf(age, smic55, pfam.age3, pfam.age3) return period, af_forf_nbenf class af_eligibilite_base(Variable): column = BoolCol entity_class = Familles label = u"Allocations familiales - Éligibilité pour la France métropolitaine sous condition de ressources" def function(self, simulation, period): period = period.this_month residence_dom = simulation.calculate('residence_dom', period) af_nbenf = simulation.calculate('af_nbenf', period) return period, not_(residence_dom) * (af_nbenf >= 2) class af_eligibilite_dom(Variable): column = BoolCol entity_class = Familles label = u"Allocations familiales - Éligibilité pour les DOM (hors Mayotte) sous condition de ressources" def function(self, simulation, period): period = period.this_month residence_dom = simulation.calculate('residence_dom', period) residence_mayotte = simulation.calculate('residence_mayotte', period) af_nbenf = simulation.calculate('af_nbenf', period) return period, residence_dom * not_(residence_mayotte) * (af_nbenf >= 1) class af_base(Variable): column = FloatCol entity_class = Familles label = u"Allocations familiales - allocation de base" # prestations familiales (brutes de crds) def function(self, simulation, period): period = period.this_month eligibilite_base = simulation.calculate('af_eligibilite_base', period) eligibilite_dom = simulation.calculate('af_eligibilite_dom', period) af_nbenf = simulation.calculate('af_nbenf', period) pfam = simulation.legislation_at(period.start).fam.af eligibilite = or_(eligibilite_base, eligibilite_dom) un_seul_enfant = eligibilite_dom * (af_nbenf == 1) * pfam.taux.enf_seul plus_de_deux_enfants = (af_nbenf >= 2) * pfam.taux.enf2 plus_de_trois_enfants = max_(af_nbenf - 2, 0) * pfam.taux.enf3 taux_total = un_seul_enfant + plus_de_deux_enfants + plus_de_trois_enfants montant_base = eligibilite * round(pfam.bmaf * taux_total, 2) coeff_garde_alternee = simulation.calculate('af_coeff_garde_alternee', period) montant_base = montant_base * coeff_garde_alternee af_taux_modulation = simulation.calculate('af_taux_modulation', period) montant_base_module = montant_base * af_taux_modulation return period, montant_base_module class af_taux_modulation(DatedVariable): column = FloatCol(default = 1) entity_class = Familles label = u"Taux de modulation à appliquer au montant des AF depuis 2015" @dated_function(start = date(2015, 7, 1)) def function_2015(self, simulation, period): period = period.this_month af_nbenf = simulation.calculate('af_nbenf', period) pfam = simulation.legislation_at(period.start).fam.af br_pf = simulation.calculate('br_pf', period) modulation = pfam.modulation plafond1 = modulation.plafond1 + af_nbenf * modulation.enfant_supp plafond2 = modulation.plafond2 + af_nbenf * modulation.enfant_supp taux = ( (br_pf <= plafond1) * 1 + (br_pf > plafond1) * (br_pf <= plafond2) * modulation.taux1 + (br_pf > plafond2) * modulation.taux2 ) return 
period, taux class af_forf_taux_modulation(DatedVariable): column = FloatCol(default = 1) entity_class = Familles label = u"Taux de modulation à appliquer depuis 2007 à l'allocation forfaitaire des AF depuis 2015" @dated_function(start = date(2015, 7, 1)) def function_2015(self, simulation, period): period = period.this_month pfam = simulation.legislation_at(period.start).fam.af af_nbenf = simulation.calculate('af_nbenf', period) af_forf_nbenf = simulation.calculate('af_forf_nbenf', period) nb_enf_tot = af_nbenf + af_forf_nbenf br_pf = simulation.calculate('br_pf', period) modulation = pfam.modulation plafond1 = modulation.plafond1 + nb_enf_tot * modulation.enfant_supp plafond2 = modulation.plafond2 + nb_enf_tot * modulation.enfant_supp taux = ( (br_pf <= plafond1) * 1 + (br_pf > plafond1) * (br_pf <= plafond2) * modulation.taux1 + (br_pf > plafond2) * modulation.taux2 ) return period, taux class af_age_aine(Variable): column = IntCol entity_class = Familles label = u"Allocations familiales - Âge de l'aîné des enfants éligibles" def function(self, simulation, period): period = period.this_month age_holder = simulation.compute('age', period) age_enfants = self.split_by_roles(age_holder, roles = ENFS) af_enfant_a_charge_holder = simulation.compute('af_enfant_a_charge', period) af_enfants_a_charge = self.split_by_roles(af_enfant_a_charge_holder, roles = ENFS) pfam = simulation.legislation_at(period.start).fam # Calcul de l'âge de l'aîné age_aine = -9999 for key, age in age_enfants.iteritems(): a_charge = af_enfants_a_charge[key] * (age <= pfam.af.age2) aine_potentiel = a_charge * (age > age_aine) age_aine = aine_potentiel * age + not_(aine_potentiel) * age_aine return period, age_aine class af_majoration_enfant(Variable): column = FloatCol entity_class = Individus label = u"Allocations familiales - Majoration pour âge applicable à l'enfant" def function(self, simulation, period): period = period.this_month af_enfant_a_charge = simulation.calculate('af_enfant_a_charge', period) age = simulation.calculate('age', period) age_aine_holder = simulation.compute('af_age_aine', period) age_aine = self.cast_from_entity_to_roles(age_aine_holder, roles = ENFS) af_nbenf_holder = simulation.compute('af_nbenf', period) af_nbenf = self.cast_from_entity_to_roles(af_nbenf_holder, roles = ENFS) af_base_holder = simulation.compute('af_base', period) af_base = self.cast_from_entity_to_roles(af_base_holder, roles = ENFS) pfam = simulation.legislation_at(period.start).fam montant_enfant_seul = pfam.af.bmaf * ( (pfam.af.maj_age_un_enfant.age1 <= age) * (age < pfam.af.maj_age_un_enfant.age2) * pfam.af.maj_age_un_enfant.taux1 + (pfam.af.maj_age_un_enfant.age2 <= age) * pfam.af.maj_age_un_enfant.taux2 ) montant_plusieurs_enfants = pfam.af.bmaf * ( (pfam.af.maj_age_deux_enfants.age1 <= age) * (age < pfam.af.maj_age_deux_enfants.age2) * pfam.af.maj_age_deux_enfants.taux1 + (pfam.af.maj_age_deux_enfants.age2 <= age) * pfam.af.maj_age_deux_enfants.taux2 ) montant = (af_nbenf == 1) * montant_enfant_seul + (af_nbenf > 1) * montant_plusieurs_enfants # Attention ! Ne fonctionne pas pour les enfants du même âge (typiquement les jumeaux...) 
pas_aine = or_(af_nbenf != 2, (af_nbenf == 2) * not_(age == age_aine)) return period, af_enfant_a_charge * (af_base > 0) * pas_aine * montant class af_majo(Variable): column = FloatCol entity_class = Familles label = u"Allocations familiales - majoration pour âge" def function(self, simulation, period): period = period.this_month af_majoration_enfant_holder = simulation.compute('af_majoration_enfant', period) af_majoration_enfants = self.sum_by_entity(af_majoration_enfant_holder, roles = ENFS) af_taux_modulation = simulation.calculate('af_taux_modulation', period) af_majoration_enfants_module = af_majoration_enfants * af_taux_modulation return period, af_majoration_enfants_module class af_complement_degressif(DatedVariable): column = FloatCol entity_class = Familles label = u"AF - Complément dégressif en cas de dépassement du plafond" @dated_function(start = date(2015, 7, 1)) def function_2015(self, simulation, period): period = period.this_month af_nbenf = simulation.calculate('af_nbenf', period) br_pf = simulation.calculate('br_pf', period) af_base = simulation.calculate('af_base', period) af_majo = simulation.calculate('af_majo', period) pfam = simulation.legislation_at(period.start).fam.af modulation = pfam.modulation plafond1 = modulation.plafond1 + af_nbenf * modulation.enfant_supp plafond2 = modulation.plafond2 + af_nbenf * modulation.enfant_supp depassement_plafond1 = max_(0, br_pf - plafond1) depassement_plafond2 = max_(0, br_pf - plafond2) depassement_mensuel = ( (depassement_plafond2 == 0) * depassement_plafond1 + (depassement_plafond2 > 0) * depassement_plafond2 ) / 12 af = af_base + af_majo return period, max_(0, af - depassement_mensuel) * (depassement_mensuel > 0) class af_forf_complement_degressif(DatedVariable): column = FloatCol entity_class = Familles label = u"AF - Complément dégressif pour l'allocation forfaitaire en cas de dépassement du plafond" @dated_function(start = date(2015, 7, 1)) def function_2015(self, simulation, period): period = period.this_month af_nbenf = simulation.calculate('af_nbenf', period) af_forf_nbenf = simulation.calculate('af_forf_nbenf', period) pfam = simulation.legislation_at(period.start).fam.af nb_enf_tot = af_nbenf + af_forf_nbenf br_pf = simulation.calculate('br_pf', period) af_forf = simulation.calculate('af_forf', period) modulation = pfam.modulation plafond1 = modulation.plafond1 + nb_enf_tot * modulation.enfant_supp plafond2 = modulation.plafond2 + nb_enf_tot * modulation.enfant_supp depassement_plafond1 = max_(0, br_pf - plafond1) depassement_plafond2 = max_(0, br_pf - plafond2) depassement_mensuel = ( (depassement_plafond2 == 0) * depassement_plafond1 + (depassement_plafond2 > 0) * depassement_plafond2 ) / 12 return period, max_(0, af_forf - depassement_mensuel) * (depassement_mensuel > 0) class af_forf(Variable): column = FloatCol entity_class = Familles label = u"Allocations familiales - forfait" def function(self, simulation, period): period = period.this_month af_nbenf = simulation.calculate('af_nbenf', period) af_forf_nbenf = simulation.calculate('af_forf_nbenf', period) P = simulation.legislation_at(period.start).fam.af bmaf = P.bmaf af_forfait = round(bmaf * P.taux.forfait, 2) af_forf = ((af_nbenf >= 2) * af_forf_nbenf) * af_forfait af_forf_taux_modulation = simulation.calculate('af_forf_taux_modulation', period) af_forf_module = af_forf * af_forf_taux_modulation return period, af_forf_module class af(Variable): calculate_output = calculate_output_add column = FloatCol entity_class = Familles label = u"Allocations familiales 
- total des allocations" def function(self, simulation, period): period = period.this_month af_base = simulation.calculate('af_base', period) af_majo = simulation.calculate('af_majo', period) af_forf = simulation.calculate('af_forf', period) af_complement_degressif = simulation.calculate('af_complement_degressif', period) af_forf_complement_degressif = simulation.calculate('af_forf_complement_degressif', period) return period, af_base + af_majo + af_forf + af_complement_degressif + af_forf_complement_degressif
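
# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch, not part of the original source.
# The modulation logic above picks one of three rates by comparing the
# family's resource base (br_pf) against two ceilings that grow with the
# number of children. The function below isolates that three-band formula;
# the ceilings and rates in the example are placeholder values, not the
# legislated ones (those come from `simulation.legislation_at`).
# ---------------------------------------------------------------------------
from numpy import array

def taux_modulation(br_pf, nb_enf, plafond1, plafond2, enfant_supp,
                    taux1, taux2):
    """Vectorised three-band rate: 1 below plafond1, taux1 between the two
    ceilings, taux2 above plafond2 (ceilings raised per extra child)."""
    p1 = plafond1 + nb_enf * enfant_supp
    p2 = plafond2 + nb_enf * enfant_supp
    return ((br_pf <= p1) * 1.0
            + (br_pf > p1) * (br_pf <= p2) * taux1
            + (br_pf > p2) * taux2)

# Example with made-up ceilings: families at three resource levels, 2 children.
# taux_modulation(array([50000., 80000., 120000.]), 2,
#                 plafond1=67000., plafond2=90000., enfant_supp=5600.,
#                 taux1=0.5, taux2=0.25)  # -> [1., 0.5, 0.25]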
adrienpacifico/openfisca-france
openfisca_france/model/prestations/prestations_familiales/af.py
Python
agpl-3.0
14,785
#!/usr/bin/env python

# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Tests for detecting function memory leaks (typically the ones
implemented in C). It does so by calling a function many times and
checking whether process memory usage keeps increasing between
calls or over time.
Note that this may produce false positives (especially on Windows
for some reason).
"""

from __future__ import print_function
import errno
import functools
import gc
import os
import sys
import threading
import time

import psutil
import psutil._common
from psutil import LINUX
from psutil import OPENBSD
from psutil import OSX
from psutil import POSIX
from psutil import SUNOS
from psutil import WINDOWS
from psutil._compat import xrange
from psutil.tests import create_sockets
from psutil.tests import get_test_subprocess
from psutil.tests import HAS_CPU_AFFINITY
from psutil.tests import HAS_CPU_FREQ
from psutil.tests import HAS_ENVIRON
from psutil.tests import HAS_IONICE
from psutil.tests import HAS_MEMORY_MAPS
from psutil.tests import HAS_PROC_CPU_NUM
from psutil.tests import HAS_PROC_IO_COUNTERS
from psutil.tests import HAS_RLIMIT
from psutil.tests import HAS_SENSORS_BATTERY
from psutil.tests import HAS_SENSORS_FANS
from psutil.tests import HAS_SENSORS_TEMPERATURES
from psutil.tests import reap_children
from psutil.tests import run_test_module_by_name
from psutil.tests import safe_rmpath
from psutil.tests import skip_on_access_denied
from psutil.tests import TESTFN
from psutil.tests import TRAVIS
from psutil.tests import unittest


LOOPS = 1000
MEMORY_TOLERANCE = 4096
RETRY_FOR = 3
SKIP_PYTHON_IMPL = True if TRAVIS else False

cext = psutil._psplatform.cext
thisproc = psutil.Process()


# ===================================================================
# utils
# ===================================================================


def skip_if_linux():
    return unittest.skipIf(LINUX and SKIP_PYTHON_IMPL,
                           "worthless on LINUX (pure python)")


def bytes2human(n):
    """
    http://code.activestate.com/recipes/578019
    >>> bytes2human(10000)
    '9.8K'
    >>> bytes2human(100001221)
    '95.4M'
    """
    symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    prefix = {}
    for i, s in enumerate(symbols):
        prefix[s] = 1 << (i + 1) * 10
    for s in reversed(symbols):
        if n >= prefix[s]:
            value = float(n) / prefix[s]
            return '%.2f%s' % (value, s)
    return "%sB" % n


class TestMemLeak(unittest.TestCase):
    """Base framework class which calls a function many times and
    produces a failure if process memory usage keeps increasing
    between calls or over time.
    """
    tolerance = MEMORY_TOLERANCE
    loops = LOOPS
    retry_for = RETRY_FOR

    def setUp(self):
        gc.collect()

    def execute(self, fun, *args, **kwargs):
        """Test a callable."""
        def call_many_times():
            for x in xrange(loops):
                self._call(fun, *args, **kwargs)
            del x
            gc.collect()

        tolerance = kwargs.pop('tolerance_', None) or self.tolerance
        loops = kwargs.pop('loops_', None) or self.loops
        retry_for = kwargs.pop('retry_for_', None) or self.retry_for

        # warm up
        for x in range(10):
            self._call(fun, *args, **kwargs)
        self.assertEqual(gc.garbage, [])
        self.assertEqual(threading.active_count(), 1)
        self.assertEqual(thisproc.children(), [])

        # Get 2 distinct memory samples, before and after having
        # called fun repeatedly.
        # step 1
        call_many_times()
        mem1 = self._get_mem()
        # step 2
        call_many_times()
        mem2 = self._get_mem()

        diff1 = mem2 - mem1
        if diff1 > tolerance:
            # This doesn't necessarily mean we have a leak yet.
            # At this point we assume that after having called the
            # function so many times the memory usage is stabilized
            # and if there are no leaks it should not increase
            # anymore.
            # Let's keep calling fun for 3 more seconds and fail if
            # we notice any difference.
            ncalls = 0
            stop_at = time.time() + retry_for
            while time.time() <= stop_at:
                self._call(fun, *args, **kwargs)
                ncalls += 1

            del stop_at
            gc.collect()
            mem3 = self._get_mem()
            diff2 = mem3 - mem2

            if mem3 > mem2:
                # failure
                extra_proc_mem = bytes2human(diff1 + diff2)
                print("extra proc mem: %s" % extra_proc_mem, file=sys.stderr)
                msg = "+%s after %s calls, +%s after another %s calls, "
                msg += "+%s extra proc mem"
                msg = msg % (
                    bytes2human(diff1), loops, bytes2human(diff2), ncalls,
                    extra_proc_mem)
                self.fail(msg)

    def execute_w_exc(self, exc, fun, *args, **kwargs):
        """Convenience function which tests a callable raising
        an exception.
        """
        def call():
            self.assertRaises(exc, fun, *args, **kwargs)

        self.execute(call)

    @staticmethod
    def _get_mem():
        # By using USS memory it seems it's less likely to bump
        # into false positives.
        if LINUX or WINDOWS or OSX:
            return thisproc.memory_full_info().uss
        else:
            return thisproc.memory_info().rss

    @staticmethod
    def _call(fun, *args, **kwargs):
        fun(*args, **kwargs)


# ===================================================================
# Process class
# ===================================================================


class TestProcessObjectLeaks(TestMemLeak):
    """Test leaks of Process class methods."""

    proc = thisproc

    def test_coverage(self):
        skip = set((
            "pid", "as_dict", "children", "cpu_affinity", "cpu_percent",
            "ionice", "is_running", "kill", "memory_info_ex",
            "memory_percent", "nice", "oneshot", "parent", "rlimit",
            "send_signal", "suspend", "terminate", "wait"))
        for name in dir(psutil.Process):
            if name.startswith('_'):
                continue
            if name in skip:
                continue
            self.assertTrue(hasattr(self, "test_" + name), msg=name)

    @skip_if_linux()
    def test_name(self):
        self.execute(self.proc.name)

    @skip_if_linux()
    def test_cmdline(self):
        self.execute(self.proc.cmdline)

    @skip_if_linux()
    def test_exe(self):
        self.execute(self.proc.exe)

    @skip_if_linux()
    def test_ppid(self):
        self.execute(self.proc.ppid)

    @unittest.skipIf(not POSIX, "POSIX only")
    @skip_if_linux()
    def test_uids(self):
        self.execute(self.proc.uids)

    @unittest.skipIf(not POSIX, "POSIX only")
    @skip_if_linux()
    def test_gids(self):
        self.execute(self.proc.gids)

    @skip_if_linux()
    def test_status(self):
        self.execute(self.proc.status)

    def test_nice_get(self):
        self.execute(self.proc.nice)

    def test_nice_set(self):
        niceness = thisproc.nice()
        self.execute(self.proc.nice, niceness)

    @unittest.skipIf(not HAS_IONICE, "not supported")
    def test_ionice_get(self):
        self.execute(self.proc.ionice)

    @unittest.skipIf(not HAS_IONICE, "not supported")
    def test_ionice_set(self):
        if WINDOWS:
            value = thisproc.ionice()
            self.execute(self.proc.ionice, value)
        else:
            self.execute(self.proc.ionice, psutil.IOPRIO_CLASS_NONE)
            fun = functools.partial(cext.proc_ioprio_set, os.getpid(), -1, 0)
            self.execute_w_exc(OSError, fun)

    @unittest.skipIf(not HAS_PROC_IO_COUNTERS, "not supported")
    @skip_if_linux()
    def test_io_counters(self):
        self.execute(self.proc.io_counters)

    @unittest.skipIf(POSIX, "worthless on POSIX")
    def test_username(self):
        self.execute(self.proc.username)

    @skip_if_linux()
    def test_create_time(self):
self.execute(self.proc.create_time) @skip_if_linux() @skip_on_access_denied(only_if=OPENBSD) def test_num_threads(self): self.execute(self.proc.num_threads) @unittest.skipIf(not WINDOWS, "WINDOWS only") def test_num_handles(self): self.execute(self.proc.num_handles) @unittest.skipIf(not POSIX, "POSIX only") @skip_if_linux() def test_num_fds(self): self.execute(self.proc.num_fds) @skip_if_linux() def test_num_ctx_switches(self): self.execute(self.proc.num_ctx_switches) @skip_if_linux() @skip_on_access_denied(only_if=OPENBSD) def test_threads(self): self.execute(self.proc.threads) @skip_if_linux() def test_cpu_times(self): self.execute(self.proc.cpu_times) @skip_if_linux() @unittest.skipIf(not HAS_PROC_CPU_NUM, "not supported") def test_cpu_num(self): self.execute(self.proc.cpu_num) @skip_if_linux() def test_memory_info(self): self.execute(self.proc.memory_info) @skip_if_linux() def test_memory_full_info(self): self.execute(self.proc.memory_full_info) @unittest.skipIf(not POSIX, "POSIX only") @skip_if_linux() def test_terminal(self): self.execute(self.proc.terminal) @unittest.skipIf(POSIX and SKIP_PYTHON_IMPL, "worthless on POSIX (pure python)") def test_resume(self): self.execute(self.proc.resume) @skip_if_linux() def test_cwd(self): self.execute(self.proc.cwd) @unittest.skipIf(not HAS_CPU_AFFINITY, "not supported") def test_cpu_affinity_get(self): self.execute(self.proc.cpu_affinity) @unittest.skipIf(not HAS_CPU_AFFINITY, "not supported") def test_cpu_affinity_set(self): affinity = thisproc.cpu_affinity() self.execute(self.proc.cpu_affinity, affinity) if not TRAVIS: self.execute_w_exc(ValueError, self.proc.cpu_affinity, [-1]) @skip_if_linux() def test_open_files(self): safe_rmpath(TESTFN) # needed after UNIX socket test has run with open(TESTFN, 'w'): self.execute(self.proc.open_files) # OSX implementation is unbelievably slow @unittest.skipIf(OSX, "too slow on OSX") @unittest.skipIf(not HAS_MEMORY_MAPS, "not supported") @skip_if_linux() def test_memory_maps(self): self.execute(self.proc.memory_maps) @unittest.skipIf(not LINUX, "LINUX only") @unittest.skipIf(not HAS_RLIMIT, "not supported") def test_rlimit_get(self): self.execute(self.proc.rlimit, psutil.RLIMIT_NOFILE) @unittest.skipIf(not LINUX, "LINUX only") @unittest.skipIf(not HAS_RLIMIT, "not supported") def test_rlimit_set(self): limit = thisproc.rlimit(psutil.RLIMIT_NOFILE) self.execute(self.proc.rlimit, psutil.RLIMIT_NOFILE, limit) self.execute_w_exc(OSError, self.proc.rlimit, -1) @skip_if_linux() # Windows implementation is based on a single system-wide # function (tested later). @unittest.skipIf(WINDOWS, "worthless on WINDOWS") def test_connections(self): # TODO: UNIX sockets are temporarily implemented by parsing # 'pfiles' cmd output; we don't want that part of the code to # be executed. with create_sockets(): kind = 'inet' if SUNOS else 'all' self.execute(self.proc.connections, kind) @unittest.skipIf(not HAS_ENVIRON, "not supported") def test_environ(self): self.execute(self.proc.environ) @unittest.skipIf(not WINDOWS, "WINDOWS only") def test_proc_info(self): self.execute(cext.proc_info, os.getpid()) class TestTerminatedProcessLeaks(TestProcessObjectLeaks): """Repeat the tests above looking for leaks occurring when dealing with terminated processes raising NoSuchProcess exception. The C functions are still invoked but will follow different code paths. We'll check those code paths. 
""" @classmethod def setUpClass(cls): super(TestTerminatedProcessLeaks, cls).setUpClass() p = get_test_subprocess() cls.proc = psutil.Process(p.pid) cls.proc.kill() cls.proc.wait() @classmethod def tearDownClass(cls): super(TestTerminatedProcessLeaks, cls).tearDownClass() reap_children() def _call(self, fun, *args, **kwargs): try: fun(*args, **kwargs) except psutil.NoSuchProcess: pass if WINDOWS: def test_kill(self): self.execute(self.proc.kill) def test_terminate(self): self.execute(self.proc.terminate) def test_suspend(self): self.execute(self.proc.suspend) def test_resume(self): self.execute(self.proc.resume) def test_wait(self): self.execute(self.proc.wait) def test_proc_info(self): # test dual implementation def call(): try: return cext.proc_info(self.proc.pid) except OSError as err: if err.errno != errno.ESRCH: raise self.execute(call) # =================================================================== # system APIs # =================================================================== class TestModuleFunctionsLeaks(TestMemLeak): """Test leaks of psutil module functions.""" def test_coverage(self): skip = set(( "version_info", "__version__", "process_iter", "wait_procs", "cpu_percent", "cpu_times_percent", "cpu_count")) for name in psutil.__all__: if not name.islower(): continue if name in skip: continue self.assertTrue(hasattr(self, "test_" + name), msg=name) # --- cpu @skip_if_linux() def test_cpu_count_logical(self): self.execute(psutil.cpu_count, logical=True) @skip_if_linux() def test_cpu_count_physical(self): self.execute(psutil.cpu_count, logical=False) @skip_if_linux() def test_cpu_times(self): self.execute(psutil.cpu_times) @skip_if_linux() def test_per_cpu_times(self): self.execute(psutil.cpu_times, percpu=True) def test_cpu_stats(self): self.execute(psutil.cpu_stats) @skip_if_linux() @unittest.skipIf(not HAS_CPU_FREQ, "not supported") def test_cpu_freq(self): self.execute(psutil.cpu_freq) # --- mem def test_virtual_memory(self): self.execute(psutil.virtual_memory) # TODO: remove this skip when this gets fixed @unittest.skipIf(SUNOS, "worthless on SUNOS (uses a subprocess)") def test_swap_memory(self): self.execute(psutil.swap_memory) @unittest.skipIf(POSIX and SKIP_PYTHON_IMPL, "worthless on POSIX (pure python)") def test_pid_exists(self): self.execute(psutil.pid_exists, os.getpid()) # --- disk @unittest.skipIf(POSIX and SKIP_PYTHON_IMPL, "worthless on POSIX (pure python)") def test_disk_usage(self): self.execute(psutil.disk_usage, '.') def test_disk_partitions(self): self.execute(psutil.disk_partitions) @unittest.skipIf(LINUX and not os.path.exists('/proc/diskstats'), '/proc/diskstats not available on this Linux version') @skip_if_linux() def test_disk_io_counters(self): self.execute(psutil.disk_io_counters, nowrap=False) # --- proc @skip_if_linux() def test_pids(self): self.execute(psutil.pids) # --- net @skip_if_linux() def test_net_io_counters(self): self.execute(psutil.net_io_counters, nowrap=False) @unittest.skipIf(LINUX, "worthless on Linux (pure python)") @unittest.skipIf(OSX and os.getuid() != 0, "need root access") def test_net_connections(self): with create_sockets(): self.execute(psutil.net_connections) def test_net_if_addrs(self): # Note: verified that on Windows this was a false positive. 
self.execute(psutil.net_if_addrs, tolerance_=80 * 1024 if WINDOWS else None) @unittest.skipIf(TRAVIS, "EPERM on travis") def test_net_if_stats(self): self.execute(psutil.net_if_stats) # --- sensors @skip_if_linux() @unittest.skipIf(not HAS_SENSORS_BATTERY, "not supported") def test_sensors_battery(self): self.execute(psutil.sensors_battery) @skip_if_linux() @unittest.skipIf(not HAS_SENSORS_TEMPERATURES, "not supported") def test_sensors_temperatures(self): self.execute(psutil.sensors_temperatures) @skip_if_linux() @unittest.skipIf(not HAS_SENSORS_FANS, "not supported") def test_sensors_fans(self): self.execute(psutil.sensors_fans) # --- others @skip_if_linux() def test_boot_time(self): self.execute(psutil.boot_time) # XXX - on Windows this produces a false positive @unittest.skipIf(WINDOWS, "XXX produces a false positive on Windows") def test_users(self): self.execute(psutil.users) if WINDOWS: # --- win services def test_win_service_iter(self): self.execute(cext.winservice_enumerate) def test_win_service_get(self): pass def test_win_service_get_config(self): name = next(psutil.win_service_iter()).name() self.execute(cext.winservice_query_config, name) def test_win_service_get_status(self): name = next(psutil.win_service_iter()).name() self.execute(cext.winservice_query_status, name) def test_win_service_get_description(self): name = next(psutil.win_service_iter()).name() self.execute(cext.winservice_query_descr, name) if __name__ == '__main__': run_test_module_by_name(__file__)
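
# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch, not part of the original source.
# The framework above boils down to: warm a function up, call it in two
# equal batches, and flag a leak when memory keeps growing across both
# batches beyond a tolerance. A bare-bones version of that idea (RSS only,
# no retry window), with hypothetical names and default values, could look
# like this:
# ---------------------------------------------------------------------------
import gc

import psutil

def looks_leaky(fun, loops=1000, tolerance=4096):
    """Return True if calling `fun` in two batches keeps raising RSS."""
    proc = psutil.Process()
    for _ in range(10):      # warm-up: populate caches, import lazily, etc.
        fun()
    gc.collect()
    mem1 = proc.memory_info().rss
    for _ in range(loops):   # batch 1
        fun()
    gc.collect()
    mem2 = proc.memory_info().rss
    for _ in range(loops):   # batch 2
        fun()
    gc.collect()
    mem3 = proc.memory_info().rss
    # growth beyond tolerance after *both* batches suggests a real leak
    return (mem2 - mem1) > tolerance and (mem3 - mem2) > tolerance

# Example: looks_leaky(psutil.cpu_times) should normally return False.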
Haynie-Research-and-Development/jarvis
deps/lib/python3.4/site-packages/psutil/tests/test_memory_leaks.py
Python
gpl-2.0
18,259
#
# Copyright (C) 2012 Uninett AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 3 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details. You should have received a copy of the GNU General Public License
# along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""Django URL configuration"""

from django.conf.urls import url

from nav.web.info.location import views

urlpatterns = [
    url(r'^$', views.search, name='location-search'),
    url(
        r'^(?P<locationid>.+)/upload/', views.upload_image,
        name='location-info-upload'
    ),
    url(r'^(?P<locationid>.+)/$', views.locationinfo, name='location-info'),
]
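
# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch, not part of the original source.
# The `name=` arguments make these routes reversible. Assuming a modern
# Django layout (`django.urls.reverse`; very old versions import it from
# `django.core.urlresolvers`), the named patterns resolve as below, relative
# to wherever this URLconf is included. Kept as comments since resolving
# requires configured Django settings.
# ---------------------------------------------------------------------------
# from django.urls import reverse
#
# reverse('location-search')                                    # -> '.../'
# reverse('location-info', kwargs={'locationid': 'oslo'})       # -> '.../oslo/'
# reverse('location-info-upload', kwargs={'locationid': 'oslo'})
#                                                               # -> '.../oslo/upload/'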
hmpf/nav
python/nav/web/info/location/urls.py
Python
gpl-3.0
1,011
# -*- coding: utf-8 -*- #------------------------------------------------------------ # JAVIERTV - XBMC Add-on by J (j@gmail.com) # Version 0.2.99 (17.10.2014) #------------------------------------------------------------ # License: GPL (http://www.gnu.org/licenses/gpl-3.0.html) # Gracias a la librería plugintools de Jesús (www.mimediacenter.info import os import sys import urllib import urllib2 import re import shutil import zipfile import xbmc import xbmcgui import xbmcaddon import xbmcplugin import plugintools from resources.tools.resolvers import * from resources.tools.update import * from resources.tools.torrentvru import * from resources.tools.vaughnlive import * from resources.tools.ninestream import * from resources.tools.vercosas import * from resources.tools.torrent1 import * from resources.tools.directwatch import * from resources.tools.freetvcast import * from resources.tools.freebroadcast import * from resources.tools.shidurlive import * from resources.tools.latuerka import * from resources.tools.laligatv import * from resources.tools.updater import * from resources.tools.castalba import * from resources.tools.castdos import * from resources.tools.updater import * from resources.tools.new_regex import * home = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.javiertv/', '')) tools = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.javiertv/resources/tools', '')) addons = xbmc.translatePath(os.path.join('special://home/addons/', '')) art = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.javiertv/art', '')) tmp = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.javiertv/tmp', '')) playlists = xbmc.translatePath(os.path.join('special://home/addons/playlists', '')) icon = art + 'icon.png' fanart = 'fanart.jpg' # Entry point def run(): plugintools.log("---> javierTV.run <---") # Obteniendo parámetros... 
params = plugintools.get_params() if params.get("action") is None: main_list(params) else: action = params.get("action") url = params.get("url") exec action+"(params)" if not os.path.exists(playlists) : os.makedirs(playlists) plugintools.close_item_list() # Main menu def main_list(params): plugintools.log("[JavierTV-0.3.0].main_list "+repr(params)) # Control del skin de JavierTV mastermenu = xml_skin() plugintools.log("XML menu: "+mastermenu) try: data = plugintools.read(mastermenu) except: mastermenu = 'http://pastebin.com/raw.php?i=5piRTXuq' data = plugintools.read(mastermenu) xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('JavierTV', "XML no reconocido...", 3 , art+'icon.png')) matches = plugintools.find_multiple_matches(data,'<menu_info>(.*?)</menu_info>') for entry in matches: title = plugintools.find_single_match(entry,'<title>(.*?)</title>') date = plugintools.find_single_match(entry,'<date>(.*?)</date>') thumbnail = plugintools.find_single_match(entry,'<thumbnail>(.*?)</thumbnail>') fanart = plugintools.find_single_match(entry,'<fanart>(.*?)</fanart>') plugintools.add_item( action="" , title = title + date , fanart = fanart , thumbnail=thumbnail , folder = False , isPlayable = False ) data = plugintools.read(mastermenu) matches = plugintools.find_multiple_matches(data,'<channel>(.*?)</channel>') for entry in matches: title = plugintools.find_single_match(entry,'<name>(.*?)</name>') thumbnail = plugintools.find_single_match(entry,'<thumbnail>(.*?)</thumbnail>') fanart = plugintools.find_single_match(entry,'<fanart>(.*?)</fanart>') action = plugintools.find_single_match(entry,'<action>(.*?)</action>') last_update = plugintools.find_single_match(entry,'<last_update>(.*?)</last_update>') url = plugintools.find_single_match(entry,'<url>(.*?)</url>') date = plugintools.find_single_match(entry,'<last_update>(.*?)</last_update>') # Control paternal pekes_no = plugintools.get_setting("pekes_no") if pekes_no == "true" : print "Control paternal en marcha" if title.find("Adultos") >= 0 : plugintools.log("Activando control paternal...") else: fixed = title plugintools.log("fixed= "+fixed) if fixed == "Actualizaciones": plugintools.add_item( action = action , plot = fixed , title = '[COLOR red]' + fixed + '[/COLOR]' , fanart = fanart , thumbnail = thumbnail , url = url , folder = True , isPlayable = False ) elif fixed == 'Agenda TV': plugintools.add_item( action = action , plot = fixed , title = '[COLOR red]' + fixed + '[/COLOR]' , fanart = fanart , thumbnail = thumbnail , url = url , folder = True , isPlayable = False ) else: plugintools.add_item( action = action , plot = fixed , title = '[COLOR lightyellow]' + fixed + '[/COLOR]' , fanart = fanart , thumbnail = thumbnail , url = url , folder = True , isPlayable = False ) else: fixed = title if fixed == "Actualizaciones": plugintools.add_item( action = action , plot = fixed , title = '[COLOR red]' + fixed + '[/COLOR]' , fanart = fanart , thumbnail = thumbnail , url = url , folder = True , isPlayable = False ) elif fixed == "Agenda TV": plugintools.add_item( action = action , plot = fixed , title = '[COLOR red]' + fixed + '[/COLOR]' , fanart = fanart , thumbnail = thumbnail , url = url , folder = True , isPlayable = False ) else: plugintools.add_item( action = action , plot = fixed , title = '[COLOR lightyellow]' + fixed + '[/COLOR]' , fanart = fanart , thumbnail = thumbnail , url = url , folder = True , isPlayable = False ) def play(params): plugintools.log("[JavierTV-0.3.0].play "+repr(params)) # plugintools.direct_play(params.get("url")) # 
xbmc.Player(xbmc.PLAYER_CORE_AUTO).play(params.get("url")) plugintools.log("[JavierTV 0.2.85]: Playing file...") url = params.get("url") # Notificación de inicio de resolver en caso de enlace RTMP if url.startswith("http") == True: if url.find("allmyvideos") >= 0 : allmyvideos(params) elif url.find("streamcloud") >= 0 : streamcloud(params) elif url.find("vidspot") >= 0 : vidspot(params) elif url.find("played.to") >= 0 : playedto(params) elif url.find("vk.com") >= 0 : vk(params) elif url.find("nowvideo") >= 0 : nowvideo(params) else: url = params.get("url") plugintools.play_resolved_url(url) elif url.startswith("rtp") >= 0: # Control para enlaces de Movistar TV plugintools.play_resolved_url(url) else: plugintools.play_resolved_url(url) def runPlugin(url): xbmc.executebuiltin('XBMC.RunPlugin(' + url +')') def live_items_withlink(params): plugintools.log("[JavierTV-0.3.0].live_items_withlink "+repr(params)) data = plugintools.read(params.get("url")) # ToDo: Agregar función lectura de cabecera (fanart, thumbnail, título, últ. actualización) header_xml(params) fanart = plugintools.find_single_match(data, '<fanart>(.*?)</fanart>') # Localizamos fanart de la lista if fanart == "": fanart = art + 'fanart.jpg' author = plugintools.find_single_match(data, '<poster>(.*?)</poster>') # Localizamos autor de la lista (encabezado) matches = plugintools.find_multiple_matches(data,'<item>(.*?)</item>') for entry in matches: title = plugintools.find_single_match(entry,'<title>(.*?)</title>') title = title.replace("<![CDATA[", "") title = title.replace("]]>", "") thumbnail = plugintools.find_single_match(entry,'<thumbnail>(.*?)</thumbnail>') url = plugintools.find_single_match(entry,'<link>(.*?)</link>') url = url.replace("<![CDATA[", "") url = url.replace("]]>", "") plugintools.add_item(action = "play" , title = title , url = url , fanart = fanart , thumbnail = thumbnail , folder = False , isPlayable = True ) def xml_lists(params): plugintools.log("[JavierTV-0.3.0].xml_lists "+repr(params)) data = plugintools.read( params.get("url") ) name_channel = params.get("title") name_channel = parser_title(name_channel) plugintools.log("name_channel= "+name_channel) pattern = '<name>'+name_channel+'(.*?)</channel>' data = plugintools.find_single_match(data, pattern) plugintools.add_item( action="" , title='[B][COLOR yellow]'+name_channel+'[/B][/COLOR]' , thumbnail= art + 'special.png' , fanart = fanart , folder = False , isPlayable = False ) # Control paternal pekes_no = plugintools.get_setting("pekes_no") subchannel = re.compile('<subchannel>([^<]+)<name>([^<]+)</name>([^<]+)<thumbnail>([^<]+)</thumbnail>([^<]+)<fanart>([^<]+)</fanart>([^<]+)<action>([^<]+)</action>([^<]+)<url>([^<]+)</url>([^<]+)</subchannel>').findall(data) for biny, ciny, diny, winy, pixy, dixy, boxy, susy, lexy, muny, kiny in subchannel: if pekes_no == "true" : print "Control paternal en marcha" if ciny.find("XXX") >= 0 : plugintools.log("Activando control paternal...") else: plugintools.add_item( action = susy , title = ciny , url= muny , thumbnail = winy , fanart = dixy , extra = dixy , page = dixy , folder = True , isPlayable = False ) params["fanart"]=dixy # params["thumbnail"]=pixy else: plugintools.add_item( action = susy , title = ciny , url= muny , thumbnail = winy , fanart = dixy , extra = dixy , page = dixy , folder = True , isPlayable = False ) params["fanart"]=dixy # params["thumbnail"]=pixy def getstreams_now(params): plugintools.log("[JavierTV-0.3.0].getstreams_now "+repr(params)) data = plugintools.read( params.get("url") ) poster 
= plugintools.find_single_match(data, '<poster>(.*?)</poster>') plugintools.add_item(action="" , title='[COLOR blue][B]'+poster+'[/B][/COLOR]', url="", folder =False, isPlayable=False) matches = plugintools.find_multiple_matches(data,'<title>(.*?)</link>') for entry in matches: title = plugintools.find_single_match(entry,'(.*?)</title>') url = plugintools.find_single_match(entry,'<link> ([^<]+)') plugintools.add_item( action="play" , title=title , url=url , folder = False , isPlayable = True ) # Soporte de listas de canales por categorías (Livestreams, XBMC México, Motor SportsTV, etc.). def livestreams_channels(params): plugintools.log("[PalcoTV-0.3.0].livestreams_channels "+repr(params)) data = plugintools.read( params.get("url") ) # Extract directory list thumbnail = params.get("thumbnail") if thumbnail == "": thumbnail = 'icon.jpg' plugintools.log(thumbnail) else: plugintools.log(thumbnail) if thumbnail == art + 'icon.png': matches = plugintools.find_multiple_matches(data,'<channel>(.*?)</items>') for entry in matches: title = plugintools.find_single_match(entry,'<name>(.*?)</name>') thumbnail = plugintools.find_single_match(entry,'<thumbnail>(.*?)</thumbnail>') fanart = plugintools.find_single_match(entry,'<fanart>(.*?)</fanart>') plugintools.add_item( action="livestreams_subchannels" , title=title , url=params.get("url") , thumbnail=thumbnail , fanart=fanart , folder = True , isPlayable = False ) else: matches = plugintools.find_multiple_matches(data,'<channel>(.*?)</items>') for entry in matches: title = plugintools.find_single_match(entry,'<name>(.*?)</name>') thumbnail = plugintools.find_single_match(entry,'<thumbnail>(.*?)</thumbnail>') fanart = plugintools.find_single_match(entry,'<fanart>(.*?)</fanart>') plugintools.add_item( action="livestreams_items" , title=title , url=params.get("url") , fanart=fanart , thumbnail=thumbnail , folder = True , isPlayable = False ) def livestreams_subchannels(params): plugintools.log("[JavierTV-0.3.0].livestreams_subchannels "+repr(params)) data = plugintools.read( params.get("url") ) # title_channel = params.get("title") title_channel = params.get("title") name_subchannel = '<name>'+title_channel+'</name>' data = plugintools.find_single_match(data, name_subchannel+'(.*?)</channel>') info = plugintools.find_single_match(data, '<info>(.*?)</info>') title = params.get("title") plugintools.add_item( action="" , title='[B]'+title+'[/B] [COLOR yellow]'+info+'[/COLOR]' , folder = False , isPlayable = False ) subchannel = plugintools.find_multiple_matches(data , '<name>(.*?)</name>') for entry in subchannel: plugintools.add_item( action="livestreams_subitems" , title=entry , url=params.get("url") , thumbnail=art+'motorsports-xbmc.jpg' , folder = True , isPlayable = False ) # Pendiente de cargar thumbnail personalizado y fanart... 
def livestreams_subitems(params): plugintools.log("[JavierTV-0.3.0].livestreams_subitems "+repr(params)) title_subchannel = params.get("title") data = plugintools.read( params.get("url") ) source = plugintools.find_single_match(data , title_subchannel+'(.*?)<subchannel>') titles = re.compile('<title>([^<]+)</title>([^<]+)<link>([^<]+)</link>').findall(source) url = params.get("url") title = params.get("title") thumbnail = params.get("thumbnail") for entry, quirry, winy in titles: winy = winy.replace("amp;","") plugintools.add_item( action="play" , title = entry , url = winy , thumbnail = thumbnail , folder = False , isPlayable = True ) def livestreams_items(params): plugintools.log("[JavierTV-0.3.0].livestreams_items "+repr(params)) title_subchannel = params.get("title") plugintools.log("title= "+title_subchannel) title_subchannel_fixed = title_subchannel.replace("ñ", "ñ") title_subchannel_fixed = title_subchannel_fixed.replace("\\xc3\\xb1", "ñ") title_subchannel_fixed = plugintools.find_single_match(title_subchannel_fixed, '([^[]+)') title_subchannel_fixed = title_subchannel_fixed.encode('utf-8', 'ignore') plugintools.log("subcanal= "+title_subchannel_fixed) if title_subchannel_fixed.find("+") >= 0: title_subchannel_fixed = title_subchannel_fixed.split("+") title_subchannel_fixed = title_subchannel_fixed[1] title_subchannel_fixxed = title_subchannel_fixed[0] if title_subchannel_fixed == "": title_subchannel_fixed = title_subchannel_fixxed data = plugintools.read( params.get("url") ) source = plugintools.find_single_match(data , title_subchannel_fixed+'(.*?)</channel>') plugintools.log("source= "+source) fanart_channel = plugintools.find_single_match(source, '<fanart>(.*?)</fanart>') titles = re.compile('<title>([^<]+)</title>([^<]+)<link>([^<]+)</link>([^<]+)<thumbnail>([^<]+)</thumbnail>').findall(source) url = params.get("url") title = params.get("title") thumbnail = params.get("thumbnail") for entry, quirry, winy, xiry, miry in titles: plugintools.log("title= "+entry) plugintools.log("url= "+winy) winy = winy.replace("amp;","") plugintools.add_item( action="play" , title = entry , url = winy , thumbnail = miry , fanart = fanart_channel , folder = False , isPlayable = True ) def xml_items(params): plugintools.log("[JavierTV-0.3.0].xml_items "+repr(params)) data = plugintools.read( params.get("url") ) thumbnail = params.get("thumbnail") #Todo: Implementar una variable que permita seleccionar qué tipo de parseo hacer if thumbnail == "title_link.png": matches = plugintools.find_multiple_matches(data,'<item>(.*?)</item>') for entry in matches: title = plugintools.find_single_match(entry,'<title>(.*?)</title>') thumbnail = plugintools.find_single_match(entry,'<thumbnail>(.*?)</thumbnail>') url = plugintools.find_single_match(entry,'<link>([^<]+)</link>') fanart = plugintools.find_single_match(entry,'<fanart>([^<]+)</fanart>') plugintools.add_item( action = "play" , title = title , url = url , thumbnail = thumbnail , fanart = fanart , plot = title , folder = False , isPlayable = True ) if thumbnail == "name_rtmp.png": matches = plugintools.find_multiple_matches(data,'<channel>(.*?)</channel>') for entry in matches: title = plugintools.find_single_match(entry,'<name>(.*?)</name>') url = plugintools.find_single_match(entry,'<rtmp>([^<]+)</rtmp>') plugintools.add_item( action = "play" , title = title , url = url , fanart = art + 'fanart.jpg' , plot = title , folder = False , isPlayable = True ) def simpletv_items(params): plugintools.log("[JavierTV-0.3.0].simpletv_items "+repr(params)) # 
def simpletv_items(params):
    plugintools.log("[JavierTV-0.3.0].simpletv_items "+repr(params))
    # Get fanart and thumbnail from the params dict
    thumbnail = params.get("thumbnail")
    plugintools.log("thumbnail= "+thumbnail)
    if thumbnail == "":
        thumbnail = art + 'icon.png'
    # Workaround for a bug where the params dict does not return the fanart variable
    fanart = params.get("extra")
    if fanart == " ":
        fanart = params.get("fanart")
    if fanart == " ":
        fanart = art + 'fanart.png'
    title = params.get("plot")
    texto = params.get("texto")
    busqueda = ""
    if title == 'search':
        title = title + '.txt'
        plugintools.log("title= "+title)
    else:
        title = title + '.m3u'
    if title == 'search.txt':
        busqueda = 'search.txt'
        filename = title
        file = open(tmp + 'search.txt', "r")
        file.seek(0)
        data = file.readline()
        if data == "":
            ok = plugintools.message("PalcoTV", "Sin resultados")
            return ok
    else:
        title = params.get("title")
        title = parser_title(title)
        ext = params.get("ext")
        title_plot = params.get("plot")
        if title_plot == "":
            filename = title + "." + ext
        if ext is None:
            filename = title
        else:
            plugintools.log("ext= "+ext)
            filename = title + "." + ext
        file = open(playlists + filename, "r")
    file.seek(0)
    data = file.readline()
    plugintools.log("data= "+data)
    if data == "":
        print "No es posible leer el archivo!"
        data = file.readline()
        plugintools.log("data= "+data)
    else:
        file.seek(0)
    num_items = len(file.readlines())
    print num_items
    plugintools.log("filename= "+filename)
    plugintools.add_item(action="" , title = '[COLOR lightyellow][B][I]playlist / '+ filename + '[/B][/I][/COLOR]' , url = playlists + title , fanart = fanart , thumbnail = thumbnail , folder = False , isPlayable = False)
    # Reading of the m3u items. TODO: error handling, read per-item fanart and thumbnail
    # Guard to avoid an error in searches (cat is null)
    cat = ""
    i = -1
    file.seek(0)
    data = file.readline()
    while i <= num_items:
        if data.startswith("#EXTINF:-1"):
            title = data.replace("#EXTINF:-1", "")
            title = title.replace(",", "")
            title = title.replace("-AZBOX *", "")
            title = title.replace("-AZBOX-*", "")
            # Handling of the title line in case of a search
            if busqueda == 'search.txt':
                title_search = title.split('"')
                print 'title',title
                titulo = title_search[0]
                titulo = titulo.strip()
                origen = title_search[1]
                origen = origen.strip()
                print 'origen',origen
                i = i + 1
                data = file.readline()
            else:
                images = m3u_items(title)
                print 'images',images
                thumbnail = images[0]
                fanart = images[1]
                cat = images[2]
                title = images[3]
                origen = title.split(",")
                title = title.strip()
                plugintools.log("title= "+title)
                data = file.readline()
                i = i + 1
                print data
            if title.startswith("#"):
                title = title.replace("#", "")
                plugintools.add_item(action="", title = title , url = "", thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = False)
                data = file.readline()
                print data
                i = i + 1
                continue
            # Handling for certain sat-decoder playlists
            if title.startswith(' $ExtFilter="'):
                if busqueda == 'search.txt':
                    title = title.replace('$ExtFilter="', "")
                    title_search = title.split('"')
                    titulo = title_search[1]
                    origen = title_search[2]
                    origen = origen.strip()
                    data = file.readline()
                    i = i + 1
                else:
                    title = title.replace('$ExtFilter="', "")
                    category = title.split('"')
                    tipo = category[0]
                    tipo = tipo.strip()
                    title = category[1]
                    title = title.strip()
                    print title
                    data = file.readline()
                    i = i + 1
            if data != "":
                title = title.replace("radio=true", "")
                url = data.strip()
                if url.startswith("serie"):
                    url = data.strip()
                    if cat == "":
                        if busqueda == 'search.txt':
                            url = url.replace("serie:", "")
                            params["fanart"] = fanart
                            plugintools.log("fanart= "+fanart)
                            plugintools.add_item( action = "seriecatcher" , title = '[COLOR white]' + title + ' [COLOR purple][Serie online][/COLOR][COLOR white][I] (' + origen + ')[/I][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False )
                            data = file.readline()
                            i = i + 1
                            continue
                        else:
                            url = url.replace("serie:", "")
                            params["fanart"] = fanart
                            plugintools.log("fanart= "+fanart)
                            plugintools.add_item( action = "seriecatcher" , title = '[COLOR white]' + title + ' [COLOR purple][Serie online][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False )
                            data = file.readline()
                            i = i + 1
                            continue
                    else:
                        if busqueda == 'search.txt':
                            plugintools.add_item( action = "longurl" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + ' [COLOR purple][Serie online][/COLOR][COLOR white][I] (' + origen + ')[/I][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            data = file.readline()
                            i = i + 1
                            continue
                        else:
                            plugintools.add_item( action = "longurl" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + ' [COLOR purple][Serie online][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            data = file.readline()
                            i = i + 1
                            continue
            if data.startswith("http"):
                url = data.strip()
                if cat != "":   # handle the channel-subcategory case
                    if busqueda == 'search.txt':
                        if url.find("allmyvideos") >= 0:
                            title = title.split('"')
                            title = title[0]
                            title = title.strip()
                            plugintools.add_item( action = "allmyvideos" , title = '[COLOR white]' + title + '[COLOR lightyellow] [Allmyvideos][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            data = file.readline()
                            i = i + 1
                            continue
                        elif url.find("streamcloud") >= 0:
                            title = title.split('"')
                            title = title[0]
                            title = title.strip()
                            plugintools.add_item( action = "streamcloud" , title = '[COLOR white]' + title + '[COLOR lightskyblue] [Streamcloud][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            data = file.readline()
                            i = i + 1
                            continue
                        elif url.find("vidspot") >= 0:
                            title = title.split('"')
                            title = title[0]
                            title = title.strip()
                            plugintools.add_item( action = "vidspot" , title = '[COLOR white]' + title + '[COLOR palegreen] [Vidspot][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            data = file.readline()
                            i = i + 1
                            continue
                        elif url.find("played.to") >= 0:
                            title = title.split('"')
                            title = title[0]
                            title = title.strip()
                            plugintools.add_item( action = "playedto" , title = '[COLOR white]' + title + '[COLOR lavender] [Played.to][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            data = file.readline()
                            i = i + 1
                            continue
                        elif url.find("vk.com") >= 0:
                            title = title.split('"')
                            title = title[0]
                            title = title.strip()
                            plugintools.add_item( action = "vk" , title = '[COLOR white]' + title + '[COLOR lavender] [Vk][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            data = file.readline()
                            i = i + 1
                            continue
                        elif url.find("nowvideo") >= 0:
                            title = title.split('"')
                            title = title[0]
                            title = title.strip()
                            plugintools.add_item( action = "nowvideo" , title = '[COLOR white]' + title + '[COLOR red] [Nowvideo][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            data = file.readline()
                            i = i + 1
                            continue
                        elif url.find("video?=") >= 0:
                            # YouTube video
                            plugintools.log("linea titulo= "+repr(title_search))  # title_search is a list; was str-concatenated in the original
                            title = title.split('"')
                            title = title[0]
                            title = title.strip()
                            # plugin://plugin.video.youtube/?path=/root/video&action=play_video&videoid=n1jS-esrBU4
                            # https://www.youtube.com/video?=n1jS-esrBU4
                            videoid = url.replace("https://www.youtube.com/video?=", "")
                            url = 'plugin://plugin.video.youtube/?path=/root/video&action=play_video&videoid=' + videoid
                            plugintools.add_item( action = "play" , title = '[COLOR white]' + title + ' [[COLOR red]You[COLOR white]tube Video][I] (' + origen + ')[/I][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            data = file.readline()
                            i = i + 1
                            continue
                        elif url.endswith("m3u8"):  # was 'endswith(...) >= 0', which is always true
                            title = title.split('"')
                            title = title[0]
                            title = title.strip()
                            plugintools.add_item( action = "playedto" , title = '[COLOR white]' + title + '[COLOR purple] [m3u8][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            data = file.readline()
                            i = i + 1
                            continue
                        else:
                            title = title.split('"')  # was title_search.split('"'), a list; split the raw title as in the sibling branches
                            title = title[0]
                            title = title.strip()
                            plugintools.add_item( action = "longurl" , title = '[COLOR white]' + title + '[COLOR lightblue] [HTTP][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            data = file.readline()
                            i = i + 1
                            continue
                    else:
                        if url.startswith("serie"):
                            url = url.replace("serie:", "")
                            params["fanart"] = fanart
                            plugintools.log("fanart= "+fanart)
                            plugintools.add_item( action = "seriecatcher" , title = '[COLOR white]' + title + ' [COLOR purple][Serie online][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False )
                            data = file.readline()
                            i = i + 1
                            continue
                        elif url.find("allmyvideos") >= 0:
                            plugintools.add_item( action = "allmyvideos" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR lightyellow] [Allmyvideos][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            data = file.readline()
                            i = i + 1
                            continue
                        elif url.find("streamcloud") >= 0:
                            plugintools.add_item( action = "streamcloud" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR lightskyblue] [Streamcloud][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            data = file.readline()
                            i = i + 1
                            continue
                        elif url.find("vidspot") >= 0:  # was '== True' (find returns an index)
                            plugintools.add_item( action = "vidspot" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR palegreen] [Vidspot][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            data = file.readline()
                            i = i + 1
                            continue
                        elif url.find("played.to") >= 0:
                            plugintools.add_item( action = "playedto" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR lavender] [Played.to][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            data = file.readline()
                            i = i + 1
                            continue
                        elif url.find("9stream") >= 0:
                            plugintools.add_item( action = "ninestreams" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR green] [9stream][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            data = file.readline()
                            i = i + 1
                            continue
                        elif url.find("video?=") >= 0:
                            # YouTube video
                            plugintools.log("linea titulo= "+title)  # title_search is not defined in this branch
                            title = title.split('"')
                            title = title[0]
                            title = title.strip()
                            # plugin://plugin.video.youtube/?path=/root/video&action=play_video&videoid=n1jS-esrBU4
                            # https://www.youtube.com/video?=n1jS-esrBU4
                            videoid = url.replace("https://www.youtube.com/video?=", "")
                            url = 'plugin://plugin.video.youtube/?path=/root/video&action=play_video&videoid=' + videoid
                            plugintools.add_item( action = "play" , title = '[COLOR white]' + title + ' [[COLOR red]You[COLOR white]tube Video][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            data = file.readline()
                            i = i + 1
                            continue
                        else:
                            plugintools.add_item( action = "longurl" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR blue] [HTTP][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            data = file.readline()
                            i = i + 1
                            continue
                else:
                    if busqueda == 'search.txt':
                        if url.find("allmyvideos") >= 0:
                            title = title.split('"')
                            title = title[0]
                            title = title.strip()
                            plugintools.add_item( action = "allmyvideos" , title = '[COLOR white]' + title + '[COLOR lightyellow] [Allmyvideos][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            data = file.readline()
                            i = i + 1
                            continue
                        elif url.find("streamcloud") >= 0:
                            title = title.split('"')
                            title = title[0]
                            title = title.strip()
                            plugintools.add_item( action = "streamcloud" , title = '[COLOR white]' + titulo + '[COLOR lightskyblue] [Streamcloud][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            data = file.readline()
                            i = i + 1
                            continue
                        elif url.find("vidspot") >= 0:
                            title = title.split('"')
                            title = title[0]
                            title = title.strip()
                            plugintools.add_item( action = "vidspot" , title = '[COLOR white]' + title + '[COLOR palegreen] [Vidspot][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            data = file.readline()
                            i = i + 1
                            continue
                        elif url.find("played.to") >= 0:
                            title = title.split('"')
                            title = title[0]
                            title = title.strip()
                            plugintools.add_item( action = "playedto" , title = '[COLOR white]' + title + '[COLOR lavender] [Played.to][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            data = file.readline()
                            i = i + 1
                            continue
                        elif url.find("vk.com") >= 0:
                            title = title.split('"')
                            title = title[0]
                            title = title.strip()
                            plugintools.add_item( action = "vk" , title = '[COLOR white]' + title + '[COLOR royalblue] [Vk][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            data = file.readline()
                            i = i + 1
                            continue
                        elif url.find("nowvideo") >= 0:
                            title = title.split('"')
                            title = title[0]
                            title = title.strip()
                            plugintools.add_item( action = "nowvideo" , title = '[COLOR white]' + title + '[COLOR red] [Nowvideo][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            data = file.readline()
                            i = i + 1
                            continue
                        elif url.find("video?=") >= 0:
                            title = title.split('"')
                            title = title[0]
                            title = title.strip()
                            videoid = url.replace("https://www.youtube.com/video?=", "")
                            url = 'plugin://plugin.video.youtube/?path=/root/video&action=play_video&videoid=' + videoid
                            plugintools.add_item( action = "youtube_videos" , title = '[COLOR white][' + title + ' [[COLOR red]You[/COLOR][COLOR white]tube Video][I] (' + origen + ')[/I][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            data = file.readline()
                            i = i + 1
                            continue
                        else:
                            title = title_search[0]
                            title = title.strip()
                            plugintools.add_item( action = "longurl" , title = '[COLOR white]' + title + '[COLOR blue] [HTTP][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            data = file.readline()
                            i = i + 1
                            continue
                    else:
                        if url.find("allmyvideos") >= 0:
                            plugintools.add_item( action = "allmyvideos" , title = '[COLOR white]' + title + '[COLOR lightyellow] [Allmyvideos][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            data = file.readline()
                            i = i + 1
                            continue
                        elif url.find("streamcloud") >= 0:
                            plugintools.add_item( action = "streamcloud" , title = '[COLOR white]' + title + '[COLOR lightskyblue] [Streamcloud][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            data = file.readline()
                            i = i + 1
                            continue
                        elif url.find("vidspot") >= 0:
                            plugintools.add_item( action = "vidspot" , title = '[COLOR white]' + title + '[COLOR palegreen] [Vidspot][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            data = file.readline()
                            i = i + 1
                            continue
                        elif url.find("played.to") >= 0:
                            plugintools.add_item( action = "playedto" , title = '[COLOR white]' + title + '[COLOR lavender] [Played.to][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            data = file.readline()
                            i = i + 1
                            continue
                        elif url.find("vk.com") >= 0:
                            plugintools.add_item( action = "vk" , title = '[COLOR white]' + title + '[COLOR royalblue] [Vk][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            data = file.readline()
                            i = i + 1
                            continue
                        elif url.find("nowvideo") >= 0:
                            plugintools.add_item( action = "nowvideo" , title = '[COLOR white]' + title + '[COLOR red] [Nowvideo][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            data = file.readline()
                            i = i + 1
                            continue
                        elif url.find("youtube") >= 0:
                            print "youtube video"
                            videoid = url.split("=")
                            if len(videoid) == 0:
                                videoid = ""
                            else:
                                videoid = videoid[-1]  # the id follows the '='; the original took [0], which kept the prefix
                            url = 'plugin://plugin.video.youtube/?path=/root/video&action=play_video&videoid=' + videoid
                            plugintools.add_item( action = "youtube_videos" , title = '[COLOR white]' + title + ' [COLOR white][[COLOR red]You[/COLOR][COLOR white]tube Video][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            data = file.readline()
                            i = i + 1
                            continue
                        else:
                            plugintools.add_item( action = "longurl" , title = '[COLOR red][I]' + '[/I][/COLOR][COLOR white]' + title + '[COLOR blue] [HTTP][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            data = file.readline()
                            i = i + 1
                            continue
            if data.startswith("rtmp") or data.startswith("rtsp"):
                url = data
                url = parse_url(url)
                if cat != "":   # handle the channel-subcategory case
                    if busqueda == 'search.txt':
                        params["url"] = url
                        server_rtmp(params)
                        server = params.get("server")
                        plugintools.log("params en simpletv" +repr(params) )
                        url = params.get("url")
                        plugintools.add_item( action = "launch_rtmp" , title = '[COLOR white]' + titulo + '[COLOR green] [' + server + '][/COLOR][I][COLOR lightgreen] (' + origen + ')[/COLOR][/I]', url = params.get("url") , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                        params["server"] = server
                        print url
                        data = file.readline()
                        i = i + 1
                        continue
                    else:
                        params["url"] = url
                        server_rtmp(params)
                        server = params.get("server")
                        plugintools.log("params en simpletv" +repr(params) )
                        plugintools.log("fanart= "+fanart)
                        url = params.get("url")
                        plugintools.add_item( action = "launch_rtmp" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR green] [' + server + '][/COLOR]' , url = params.get("url") , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                        print url
                        data = file.readline()
                        i = i + 1
                        continue
                else:
                    if busqueda == 'search.txt':
                        params["url"] = url
                        server_rtmp(params)
                        server = params.get("server")
                        plugintools.log("params en simpletv" +repr(params) )
                        url = params.get("url")
                        plugintools.add_item( action = "launch_rtmp" , title = '[COLOR white]' + titulo + '[COLOR green] [' + server + '][/COLOR][I][COLOR lightgreen] (' + origen + ')[/COLOR][/I]' , url = params.get("url") , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                        print url
                        data = file.readline()
                        i = i + 1
                        continue
                    else:
                        params["url"] = url
                        server_rtmp(params)
                        server = params.get("server")
                        plugintools.log("fanart= "+fanart)
                        plugintools.log("params en simpletv" +repr(params) )
                        url = params.get("url")
                        plugintools.add_item( action = "launch_rtmp" , title = '[COLOR white]' + title + '[COLOR green] ['+ server + '][/COLOR]' , url = params.get("url") , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                        print url
                        data = file.readline()
                        i = i + 1
                        continue
            if data.startswith("udp") or data.startswith("rtp"):
                # print "udp"
                url = data
                url = parse_url(url)
                plugintools.log("url retornada= "+url)
                if cat != "":   # handle the channel-subcategory case
                    if busqueda == 'search.txt':
                        plugintools.add_item( action = "play" , title = '[COLOR white]' + titulo + '[COLOR red] [UDP][/COLOR][I][COLOR lightgreen] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                        data = file.readline()
                        i = i + 1
                        continue
                    else:
                        plugintools.add_item( action = "play" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR red] [UDP][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                        data = file.readline()
                        i = i + 1
                        continue
                else:
                    if busqueda == 'search.txt':
                        plugintools.add_item( action = "play" , title = '[COLOR white]' + titulo + '[COLOR red] [UDP][/COLOR][I][COLOR lightgreen] (' + origen + ')[/COLOR][/I]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                        data = file.readline()
                        i = i + 1
                        continue
                    else:
                        plugintools.add_item( action = "play" , title = '[COLOR white]' + title + '[COLOR red] [UDP][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                        data = file.readline()
                        i = i + 1
                        continue
            if data.startswith("mms") or data.startswith("rtp"):
                # print "udp"
                url = data
                url = parse_url(url)
                plugintools.log("url retornada= "+url)
                if cat != "":   # handle the channel-subcategory case
                    if busqueda == 'search.txt':
                        plugintools.add_item( action = "play" , title = '[COLOR white]' + titulo + '[COLOR red] [MMS][/COLOR][I][COLOR lightgreen] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                        data = file.readline()
                        i = i + 1
                        continue
                    else:
                        plugintools.add_item( action = "play" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR red] [MMS][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                        data = file.readline()
                        i = i + 1
                        continue
                else:
                    if busqueda == 'search.txt':
                        plugintools.add_item( action = "play" , title = '[COLOR white]' + titulo + '[COLOR red] [MMS][/COLOR][I][COLOR lightgreen] (' + origen + ')[/COLOR][/I]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                        data = file.readline()
                        i = i + 1
                        continue
                    else:
                        plugintools.add_item( action = "play" , title = '[COLOR white]' + title + '[COLOR red] [MMS][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                        data = file.readline()
                        i = i + 1
                        continue
            if data.startswith("plugin"):
                title = title.split('"')
                title = title[0]
                title = title.strip()
                title = title.replace("#EXTINF:-1,", "")
                url = data
                if url.find("youtube") >= 0 :
                    if busqueda == 'search.txt':
                        plugintools.add_item( action = "play" , title = '[COLOR white]' + title + '[[COLOR white] You[COLOR red]Tube[/COLOR][COLOR white] Video[/COLOR][I][COLOR lightblue] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = art + "icon.png" , fanart = art + 'fanart.jpg' , folder = False , isPlayable = True )
                        data = file.readline()
                        i = i + 1
                        continue
                    else:
                        plugintools.add_item( action = "play" , title = '[COLOR white]' + title + '[[COLOR white] You[COLOR red]Tube[/COLOR][COLOR white] Video[/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                        data = file.readline()
                        i = i + 1
                        continue
                elif url.find("mode=1") >= 0 :
                    if busqueda == 'search.txt':
                        plugintools.add_item( action = "play" , title = '[COLOR white]' + title + ' [COLOR lightblue] [Acestream][/COLOR][I][COLOR lightblue] (' + origen + ')[/COLOR][/I]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                        data = file.readline()
                        i = i + 1
                        continue
                    else:
                        plugintools.add_item( action = "play" , title = '[COLOR white]' + title + '[COLOR lightblue] [Acestream][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                        data = file.readline()
                        i = i + 1
                        continue
                elif url.find("mode=2") >= 0 :
                    if busqueda == 'search.txt':
                        plugintools.add_item( action = "play" , title = '[COLOR white]' + title + ' [COLOR darkorange] [Sopcast][/COLOR][I][COLOR lightblue] (' + origen + ')[/COLOR][/I]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                        data = file.readline()
                        i = i + 1
                        continue
                    else:
                        plugintools.add_item( action = "play" , title = '[COLOR white]' + title + '[COLOR darkorange] [Sopcast][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                        data = file.readline()
                        i = i + 1
                        continue
            elif data.startswith("magnet"):
                if busqueda == 'search.txt':
                    # plugin://plugin.video.xbmctorrent/play/ + <magnet_link>
                    url_fixed = urllib.quote_plus(data)
                    title = parser_title(title)
                    url = 'plugin://plugin.video.xbmctorrent/play/' + url_fixed
                    plugintools.add_item( action = "play" , title = '[COLOR white]' + title + '[COLOR orangered] [Torrent][/COLOR][I][COLOR lightblue] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                    data = file.readline()
                    i = i + 1
                    continue
                else:
                    # plugin://plugin.video.xbmctorrent/play/ + <magnet_link>
                    title = parser_title(title)
                    data = data.strip()
                    url_fixed = urllib.quote_plus(data)
                    title = parser_title(title)
                    url = 'plugin://plugin.video.xbmctorrent/play/' + url_fixed
                    plugintools.add_item( action = "play" , title = '[COLOR white]' + title + ' [COLOR orangered][Torrent][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                    data = file.readline()
                    i = i + 1
                    continue
            elif data.startswith("sop"):
                if busqueda == 'search.txt':
                    title = title.split('"')
                    title = title[0]
                    title = title.replace("#EXTINF:-1,", "")
                    # plugin://plugin.video.p2p-streams/?url=sop://124.232.150.188:3912/11265&mode=2&name=Titulo+canal+Sopcast
                    title_fixed = title.replace(" ", "+")  # was undefined in the original line (assumed intent, mirroring search_channel)
                    url = 'plugin://plugin.video.p2p-streams/?url=' + data + '&mode=2&name=' + title_fixed
                    url = data  # the plain sop:// url is what is actually used
                    plugintools.add_item( action = "play" , title = '[COLOR white]' + title + '[COLOR darkorange] [Sopcast][/COLOR][I][COLOR lightblue] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                    data = file.readline()
                    i = i + 1
                    continue
                else:
                    title = title.split('"')
                    title = title[0]
                    title = title.replace("#EXTINF:-1,", "")
                    url = data
                    plugintools.add_item( action = "play" , title = '[COLOR white]' + title + ' [COLOR darkorange][Sopcast][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                    data = file.readline()
                    i = i + 1
                    continue
            elif data.startswith("ace"):
                if busqueda == 'search.txt':
                    # plugin://plugin.video.p2p-streams/?url=a55f96dd386b7722380802b6afffc97ff98903ac&mode=1&name=Sky+Sports+title
                    title = parser_title(title)
                    url = data.replace("ace:", "")
                    url = url.strip()
                    url = 'plugin://plugin.video.p2p-streams/?url=' + url + '&mode=1&name='
                    plugintools.add_item(action="play" , title = '[COLOR white]' + title + ' [COLOR lightblue][Acestream][/COLOR] [COLOR lightblue][I](' + origen + ')[/COLOR][/I]' , url = url, thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)
                    data = file.readline()
                    data = data.strip()
                    i = i + 1
                    continue
                else:
                    title = parser_title(title)
                    print 'data',data
                    url = data.replace("ace:", "")
                    url = url.strip()
                    print 'url',url
                    url = 'plugin://plugin.video.p2p-streams/?url=' + url + '&mode=1&name='
                    plugintools.add_item(action="play" , title = '[COLOR white]' + title + ' [COLOR lightblue][Acestream][/COLOR]' , url = url, thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)
                    data = file.readline()
                    data = data.strip()
                    i = i + 1
                    continue
            # YouTube playlist & channel
            elif data.startswith("yt"):
                if data.startswith("yt_playlist"):
                    if busqueda == 'search.txt':
                        title = title.split('"')
                        title = title[0]
                        title = title.replace("#EXTINF:-1,", "")
                        youtube_playlist = data.replace("yt_playlist(", "")
                        youtube_playlist = youtube_playlist.replace(")", "")
                        plugintools.log("youtube_playlist= "+youtube_playlist)
                        url = 'http://gdata.youtube.com/feeds/api/playlists/' + youtube_playlist
                        plugintools.add_item( action = "youtube_videos" , title = '[[COLOR white]' + title + ' [COLOR red][You[COLOR white]Tube Playlist][/COLOR] [I][COLOR lightblue](' + origen + ')[/I][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False )
                        data = file.readline()
                        i = i + 1
                        continue
                    else:
                        title = title.split('"')
                        title = title[0]
                        title = title.replace("#EXTINF:-1,", "")
                        plugintools.log("title= "+title)
                        youtube_playlist = data.replace("yt_playlist(", "")
                        youtube_playlist = youtube_playlist.replace(")", "")
                        plugintools.log("youtube_playlist= "+youtube_playlist)
                        url = 'http://gdata.youtube.com/feeds/api/playlists/' + youtube_playlist
                        plugintools.add_item( action = "youtube_videos" , title = '[COLOR white]' + title + ' [COLOR red][You[COLOR white]Tube Playlist][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False )
                        data = file.readline()
                        i = i + 1
                        continue
                elif data.startswith("yt_channel"):
                    if busqueda == 'search.txt':
                        title = title.split('"')
                        title = title[0]
                        title = title.replace("#EXTINF:-1,", "")
                        youtube_channel = data.replace("yt_channel(", "")
                        youtube_channel = youtube_channel.replace(")", "")
                        plugintools.log("youtube_user= "+youtube_channel)
                        url = 'http://gdata.youtube.com/feeds/api/users/' + youtube_channel + '/playlists?v=2&start-index=1&max-results=30'
                        plugintools.add_item( action = "youtube_playlists" , title = '[[COLOR white]' + title + ' [COLOR red][You[COLOR white]Tube Channel][/COLOR] [I][COLOR lightblue](' + origen + ')[/I][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False )
                        data = file.readline()
                        i = i + 1
                        continue
                    else:
                        title = title.split('"')
                        title = title[0]
                        title = title.replace("#EXTINF:-1,", "")
                        plugintools.log("title= "+title)
                        youtube_channel = data.replace("yt_channel(", "")
                        youtube_channel = youtube_channel.replace(")", "")
                        plugintools.log("youtube_user= "+youtube_channel)
                        url = 'http://gdata.youtube.com/feeds/api/users/' + youtube_channel + '/playlists?v=2&start-index=1&max-results=30'
                        plugintools.add_item( action = "youtube_playlists" , title = '[COLOR white]' + title + ' [COLOR red][You[COLOR white]Tube Channel][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False )
                        data = file.readline()
                        i = i + 1
                        continue
            elif data.startswith("m3u"):
                if busqueda == 'search.txt':
                    url = data.replace("m3u:", "")
                    plugintools.add_item( action = "getfile_http" , title = '[[COLOR white]' + title + ' [I][COLOR lightblue](' + origen + ')[/I][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False )
                    data = file.readline()
                    i = i + 1
                    continue
                else:
                    url = data.replace("m3u:", "")
                    plugintools.add_item( action = "getfile_http" , title = '[COLOR white]' + title, url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False )
                    data = file.readline()
                    i = i + 1
                    continue
            elif data.startswith("plx"):
                if busqueda == 'search.txt':
                    url = data.replace("plx:", "")
                    # The plot parameter is added because PLX downloads are not yet in a separate function (FIX IT!)
plugintools.add_item( action = "plx_items" , plot = "" , title = '[COLOR white]' + title + ' [I][/COLOR][COLOR lightblue](' + origen + ')[/I][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False ) data = file.readline() i = i + 1 continue else: url = data.replace("plx:", "") # Se añade parámetro plot porque en las listas PLX no tengo en una función separada la descarga (FIX IT!) plugintools.add_item( action = "plx_items" , plot = "" , title = '[COLOR white]' + title + '[/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False ) data = file.readline() i = i + 1 continue else: data = file.readline() i = i + 1 continue else: data = file.readline() i = i + 1 file.close() if title == 'search.txt': os.remove(tmp + title) def myplaylists_m3u(params): # Mis listas M3U plugintools.log("[JavierTV-0.3.0].myplaylists_m3u "+repr(params)) thumbnail = params.get("thumbnail") plugintools.add_item(action="play" , title = "[COLOR red][B][Tutorial][/B][COLOR lightyellow]: Importar listas M3U a mi biblioteca [/COLOR][COLOR blue][I][Youtube][/I][/COLOR]" , thumbnail = art + "icon.png" , url = "plugin://plugin.video.youtube/?path=/root/video&action=play_video&videoid=8i0KouM-4-U" , folder = False , isPlayable = True ) plugintools.add_item(action="search_channel" , title = "[B][COLOR lightyellow]Buscador de canales[/COLOR][/B][COLOR lightblue][I] Nuevo![/I][/COLOR]" , thumbnail = art + "search.png" , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False ) ficheros = os.listdir(playlists) # Lectura de archivos en carpeta /playlists. Cuidado con las barras inclinadas en Windows # Control paternal pekes_no = plugintools.get_setting("pekes_no") for entry in ficheros: plot = entry.split(".") plot = plot[0] plugintools.log("entry= "+entry) if pekes_no == "true" : print "Control paternal en marcha" if entry.find("XXX") >= 0 : plugintools.log("Activando control paternal...") else: if entry.endswith("plx") == True: # Control para según qué extensión del archivo se elija thumbnail y función a ejecutar entry = entry.replace(".plx", "") plugintools.add_item(action="plx_items" , plot = plot , title = '[COLOR white]' + entry + '[/COLOR][COLOR green][B][I].plx[/I][/B][/COLOR]' , url = playlists + entry , thumbnail = art + 'plx3.png' , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False ) if entry.endswith("p2p") == True: entry = entry.replace(".p2p", "") plugintools.add_item(action="p2p_items" , plot = plot , title = '[COLOR white]' + entry + '[COLOR blue][B][I].p2p[/I][/B][/COLOR]', url = playlists + entry , thumbnail = art + 'p2p.png' , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False ) if entry.endswith("m3u") == True: entry = entry.replace(".m3u", "") plugintools.add_item(action="simpletv_items" , plot = plot , title = '[COLOR white]' + entry + '[COLOR red][B][I].m3u[/I][/B][/COLOR]', url = playlists + entry , thumbnail = art + 'm3u7.png' , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False ) if entry.endswith("jsn") == True: entry = entry.replace(".jsn", "") plugintools.add_item(action="json_items" , plot = plot , title = '[COLOR white]' + entry + '[COLOR red][B][I].m3u[/I][/B][/COLOR]', url = playlists + entry , thumbnail = art + 'm3u7.png' , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False ) else: if entry.endswith("plx") == True: # Control para según qué extensión del archivo se elija thumbnail y función a ejecutar entry = entry.replace(".plx", "") 
plugintools.add_item(action="plx_items" , plot = plot , title = '[COLOR white]' + entry + '[/COLOR][COLOR green][B][I].plx[/I][/B][/COLOR]' , url = playlists + entry , thumbnail = art + 'plx3.png' , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False ) if entry.endswith("p2p") == True: entry = entry.replace(".p2p", "") plugintools.add_item(action="p2p_items" , plot = plot , title = '[COLOR white]' + entry + '[COLOR blue][B][I].p2p[/I][/B][/COLOR]', url = playlists + entry , thumbnail = art + 'p2p.png' , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False ) if entry.endswith("m3u") == True: entry = entry.replace(".m3u", "") plugintools.add_item(action="simpletv_items" , plot = plot , title = '[COLOR white]' + entry + '[COLOR red][B][I].m3u[/I][/B][/COLOR]', url = playlists + entry , thumbnail = art + 'm3u7.png' , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False ) if entry.endswith("jsn") == True: entry = entry.replace(".jsn", "") plugintools.add_item(action="json_items" , plot = plot , title = '[COLOR white]' + entry + '[COLOR red][B][I].m3u[/I][/B][/COLOR]', url = playlists + entry , thumbnail = art + 'm3u7.png' , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False ) def playlists_m3u(params): # Biblioteca online plugintools.log("[Javiertv-0.3.0].playlists_m3u "+repr(params)) data = plugintools.read( params.get("url") ) name_channel = params.get("plot") pattern = '<name>'+name_channel+'(.*?)</channel>' data = plugintools.find_single_match(data, pattern) online = '[COLOR yellowgreen][I][Auto][/I][/COLOR]' params["ext"] = 'm3u' plugintools.add_item( action="" , title='[B][COLOR yellow]'+name_channel+'[/B][/COLOR] - [B][I][COLOR lightyellow]j@gmail.com [/COLOR][/B][/I]' , thumbnail= art + 'icon.png' , folder = False , isPlayable = False ) subchannel = re.compile('<subchannel>([^<]+)<name>([^<]+)</name>([^<]+)<thumbnail>([^<]+)</thumbnail>([^<]+)<url>([^<]+)</url>([^<]+)</subchannel>').findall(data) # Sustituir por una lista!!! 
def playlists_m3u(params):
    # Online library
    plugintools.log("[Javiertv-0.3.0].playlists_m3u "+repr(params))
    data = plugintools.read( params.get("url") )
    name_channel = params.get("plot")
    pattern = '<name>'+name_channel+'(.*?)</channel>'
    data = plugintools.find_single_match(data, pattern)
    online = '[COLOR yellowgreen][I][Auto][/I][/COLOR]'
    params["ext"] = 'm3u'
    plugintools.add_item( action="" , title='[B][COLOR yellow]'+name_channel+'[/B][/COLOR] - [B][I][COLOR lightyellow]j@gmail.com [/COLOR][/B][/I]' , thumbnail= art + 'icon.png' , folder = False , isPlayable = False )
    subchannel = re.compile('<subchannel>([^<]+)<name>([^<]+)</name>([^<]+)<thumbnail>([^<]+)</thumbnail>([^<]+)<url>([^<]+)</url>([^<]+)</subchannel>').findall(data)
    # TODO: replace with a list!!!
    for biny, ciny, diny, winy, pixy, dixy, boxy in subchannel:
        if ciny == "Vcx7 IPTV":
            plugintools.add_item( action="getfile_http" , plot = ciny , title = '[COLOR lightyellow]' + ciny + '[/COLOR] ' + online , url= dixy , thumbnail = art + winy , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False )
            params["ext"] = "m3u"
            title = ciny
            params["title"] = title
        elif ciny == "Largo Barbate M3U":
            plugintools.add_item( action="getfile_http" , plot = ciny , title = '[COLOR lightyellow]' + ciny + '[/COLOR] ' + online , url= dixy , thumbnail = art + winy , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False )
            title = ciny
            params["title"] = title
        elif ciny == "XBMC Mexico":
            plugintools.add_item( action="getfile_http" , plot = ciny , title = '[COLOR lightyellow]' + ciny + '[/COLOR] ' + online , url= dixy , thumbnail = art + winy , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False )
            title = ciny
            params["title"] = title
        elif ciny == "allSat":
            plugintools.add_item( action="getfile_http" , plot = ciny , title = '[COLOR lightyellow]' + ciny + '[/COLOR] ' + online , url= dixy , thumbnail = art + winy , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False )
            title = ciny
            params["title"] = title
        elif ciny == "AND Wonder":
            plugintools.add_item( action="getfile_http" , plot = ciny , title = '[COLOR lightyellow]' + ciny + '[/COLOR] ' + online , url= dixy , thumbnail = art + winy , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False )
            title = ciny
            params["title"] = title
        elif ciny == "FenixTV":
            plugintools.add_item( action="getfile_http" , plot = ciny , title = '[COLOR lightyellow]' + ciny + '[/COLOR] ' + online , url= dixy , thumbnail = art + winy , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False )
            title = ciny
            params["title"] = title
        else:
            plot = ciny.split("[")
            plot = plot[0]
            plugintools.add_item( action="getfile_http" , plot = plot , title = '[COLOR lightyellow]' + ciny + '[/COLOR] ' , url= dixy , thumbnail = art + winy , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False )
    plugintools.log("[JavierTV-0.3.0].playlists_m3u "+repr(params))

def getfile_http(params):
    # Download the M3U list and call simpletv_items to render the items
    plugintools.log("[JavierTV-0.3.0].getfile_http "+repr(params))
    url = params.get("url")
    params["ext"] = "m3u"
    getfile_url(params)
    simpletv_items(params)

def parse_url(url):
    # plugintools.log("url entrante= "+url)
    if url != "":
        url = url.strip()
        url = url.replace("rtmp://$OPT:rtmp-raw=", "")
        return url
    else:
        plugintools.log("error en url= ")
        # TODO: show an error dialog when the url cannot be parsed (e.g. missing)
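# parse_url strips whitespace and the rtmp raw-option prefix, e.g.
# (illustrative values):
#   parse_url("rtmp://$OPT:rtmp-raw=rtmp://host/live/stream ")
#   -> "rtmp://host/live/stream"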
def getfile_url(params):
    plugintools.log("[JavierTV-0.3.0].getfile_url " +repr(params))
    ext = params.get("ext")
    title = params.get("title")
    if ext == 'plx':
        filename = parser_title(title)
        params["plot"] = filename
        filename = title + ".plx"  # Filename with extension (m3u, p2p, plx)
    elif ext == 'm3u':
        filename = params.get("plot")
        # Strip formatting from the text so it can be used as the filename
        filename = parser_title(title)
        filename = filename + ".m3u"  # Filename with extension (m3u, p2p, plx)
    else:
        # ext == 'p2p' (the original left this comparison as a stray statement)
        filename = parser_title(title)
        filename = filename + ".p2p"  # Filename with extension (m3u, p2p, plx)
    if filename.endswith("plx"):
        filename = parser_title(filename)
    plugintools.log("filename= "+filename)
    url = params.get("url")
    plugintools.log("url= "+url)
    try:
        response = urllib2.urlopen(url)
        body = response.read()
    except:
        # Fallback when the list is in the body of the HTTP response
        request_headers = []
        request_headers.append(["User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.65 Safari/537.31"])
        body, response_headers = plugintools.read_body_and_headers(url, headers=request_headers)
    # open the file for writing
    fh = open(playlists + filename, "wb")
    # read from request while writing to file
    fh.write(body)
    fh.close()
    file = open(playlists + filename, "r")
    file.seek(0)
    data = file.readline()
    data = data.strip()
    lista_items = {'linea': data}
    file.seek(0)
    lista_items = {'plot': data}
    file.seek(0)

def header_xml(params):
    plugintools.log("[JavierTV-0.3.0].header_xml "+repr(params))
    url = params.get("url")
    params.get("title")
    data = plugintools.read(url)
    # plugintools.log("data= "+data)
    author = plugintools.find_single_match(data, '<poster>(.*?)</poster>')
    author = author.strip()
    fanart = plugintools.find_single_match(data, '<fanart>(.*?)</fanart>')
    message = plugintools.find_single_match(data, '<message>(.*?)</message>')
    desc = plugintools.find_single_match(data, '<description>(.*?)</description>')
    thumbnail = plugintools.find_single_match(data, '<thumbnail>(.*?)</thumbnail>')
    if author != "":
        if message != "":
            plugintools.add_item(action="" , plot = author , title = '[COLOR green][B]' + author + '[/B][/COLOR][I] ' + message + '[/I]', url = "" , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = False )
            return fanart
        else:
            plugintools.add_item(action="" , plot = author , title = '[COLOR green][B]' + author + '[/B][/COLOR]', url = "" , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = False )
            return fanart
    else:
        if desc != "":
            plugintools.add_item(action="" , plot = author , title = '[COLOR green][B]' + desc + '[/B][/COLOR]', url = "" , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = False )
            return fanart
        else:
            return fanart
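# ---------------------------------------------------------------------------
# search_channel scans every playlist under /playlists (.m3u, .p2p and .plx)
# for titles containing the typed text and collects the hits in tmp/search.txt
# using pseudo-m3u records:
#     #EXTINF:-1,<title>"<source playlist filename>
#     <resolved url>
# simpletv_items then renders that file like a normal playlist.
# ---------------------------------------------------------------------------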
def search_channel(params):
    plugintools.log("[JavierTV-0.3.0].search " + repr(params))
    buscar = params.get("plot")
    # plugintools.log("buscar texto: "+buscar)
    if buscar == "":
        last_search = plugintools.get_setting("last_search")
        texto = plugintools.keyboard_input(last_search)
        plugintools.set_setting("last_search", texto)
        params["texto"] = texto
        texto = texto.lower()
        cat = ""
        if texto == "":
            errormsg = plugintools.message("palcoTV","Por favor, introduzca el canal a buscar")
            return errormsg
    else:
        texto = buscar
        texto = texto.lower()
        plugintools.log("texto a buscar= "+texto)
        cat = ""
    results = open(tmp + 'search.txt', "wb")
    results.seek(0)
    results.close()
    # List the files of the local library
    ficheros = os.listdir(playlists)  # Read the files in the /playlists folder. Beware of path separators on Windows
    for entry in ficheros:
        if entry.endswith("m3u"):
            print "Archivo tipo m3u"
            plot = entry.split(".")
            plot = plot[0]  # plot holds the filename (without extension)
            # Open the file
            filename = plot + '.m3u'
            plugintools.log("Archivo M3U: "+filename)
            arch = open(playlists + filename, "r")
            num_items = len(arch.readlines())
            print num_items
            i = 0  # Make sure the while loop reads up to the last record of the list
            arch.seek(0)
            data = arch.readline()
            data = data.strip()
            plugintools.log("data linea= "+data)
            texto = texto.strip()
            plugintools.log("data_antes= "+data)
            plugintools.log("texto a buscar= "+texto)
            data = arch.readline()
            data = data.strip()
            i = i + 1
            while i <= num_items :
                if data.startswith('#EXTINF:-1'):
                    data = data.replace('#EXTINF:-1,', "")  # Skip the first part of the line
                    data = data.replace(",", "")
                    title = data.strip()  # We now have the title
                    if data.find('$ExtFilter="') >= 0:
                        data = data.replace('$ExtFilter="', "")
                    if data.find(' $ExtFilter="') >= 0:
                        data = data.replace('$ExtFilter="', "")
                    title = title.replace("-AZBOX*", "")
                    title = title.replace("AZBOX *", "")
                    images = m3u_items(title)
                    print 'images',images
                    thumbnail = images[0]
                    fanart = images[1]
                    cat = images[2]
                    title = images[3]
                    plugintools.log("title= "+title)
                    minus = title.lower()
                    data = arch.readline()
                    data = data.strip()
                    i = i + 1
                    if minus.find(texto) >= 0:
                        # if re.match(texto, title, re.IGNORECASE):
                        # plugintools.log("Concidencia hallada. Obtenemos url del canal: " + texto)
                        if data.startswith("http"):
                            url = data.strip()
                            if cat != "":   # handle the channel-subcategory case
                                results = open(tmp + 'search.txt', "a")
                                results.write("#EXTINF:-1," + title + '"' + filename + '\n')
                                results.write(url + '\n\n')
                                results.close()
                                data = arch.readline()
                                i = i + 1
                                continue
                            else:
                                results = open(tmp + 'search.txt', "a")
                                results.write("#EXTINF:-1," + title + '"' + filename + '\n')
                                results.write(url + '\n\n')
                                results.close()
                                data = arch.readline()
                                i = i + 1
                                continue
                        if data.startswith("rtmp"):
                            url = data
                            url = parse_url(url)
                            if cat != "":   # handle the channel-subcategory case
                                results = open(tmp + 'search.txt', "a")
                                results.write("#EXTINF:-1," + title + '"' + filename + '\n')
                                results.write(url + '\n\n')
                                results.close()
                                data = arch.readline()
                                i = i + 1
                                continue
                            else:
                                results = open(tmp + 'search.txt', "a")
                                results.write("#EXTINF:-1," + title + '"' + filename + '\n')
                                results.write(url + '\n\n')
                                results.close()
                                data = arch.readline()
                                i = i + 1
                                continue
                        if data.startswith("yt"):
                            print "CORRECTO"
                            url = data
                            results = open(tmp + 'search.txt', "a")
                            results.write("#EXTINF:-1," + title + '"' + filename + '\n')
                            results.write(url + '\n\n')
                            results.close()
                            data = arch.readline()
                            i = i + 1
                            continue
                    else:
                        data = arch.readline()
                        data = data.strip()
                        plugintools.log("data_buscando_title= "+data)
                        i = i + 1
                else:
                    data = arch.readline()
                    data = data.strip()
                    plugintools.log("data_final_while= "+data)
                    i = i + 1
                    continue
    # List the files of the local library
    ficheros = os.listdir(playlists)  # Read the files in the /playlists folder. Beware of path separators on Windows
    for entry in ficheros:
        if entry.endswith('p2p'):
            plot = entry.split(".")
            plot = plot[0]  # plot holds the filename (without extension)
            # Open the file
            plugintools.log("texto a buscar= "+texto)
            filename = plot + '.p2p'
            arch = open(playlists + filename, "r")
            num_items = len(arch.readlines())
            plugintools.log("archivo= "+filename)
            i = 0  # Make sure the while loop reads up to the last record of the list
            arch.seek(0)
            while i <= num_items:
                data = arch.readline()
                data = data.strip()
                title = data
                texto = texto.strip()
                plugintools.log("linea a buscar title= "+data)
                i = i + 1
                if data.startswith("#"):
                    data = arch.readline()
                    data = data.strip()
                    i = i + 1
                    continue
                if data.startswith("default="):
                    data = arch.readline()
                    data = data.strip()
                    i = i + 1
                    continue
                if data.startswith("art="):
                    data = arch.readline()
                    data = data.strip()
                    i = i + 1
                    continue
                if data != "":
                    title = data.strip()  # We now have the title
                    plugintools.log("title= "+title)
                    minus = title.lower()
                    if minus.find(texto) >= 0:
                        plugintools.log("title= "+title)
                        data = arch.readline()
                        i = i + 1
                        # print i
                        plugintools.log("linea a comprobar url= "+data)
                        if data.startswith("sop"):
                            # plugin://plugin.video.p2p-streams/?url=sop://124.232.150.188:3912/11265&mode=2&name=Titulo+canal+Sopcast
                            title_fixed = title.replace(" " , "+")
                            url = 'plugin://plugin.video.p2p-streams/?url=' + data + '&mode=2&name=' + title_fixed
                            plugintools.log("url sopcast= "+url)
                            results = open(tmp + 'search.txt', "a")
                            results.write("#EXTINF:-1," + title + '"' + filename + '\n')  # TODO: change this! It must not prepend #EXTINF:-1, when it is not an m3u list
                            results.write(url + '\n\n')
                            results.close()
                            data = arch.readline()
                            i = i + 1
                            continue
                        elif data.startswith("magnet"):
                            # magnet:?xt=urn:btih:6CE983D676F2643430B177E2430042E4E65427...
                            title_fixed = title.split('"')
                            title = title_fixed[0]
                            plugintools.log("title magnet= "+title)
                            url = data
                            plugintools.log("url magnet= "+url)
                            results = open(tmp + 'search.txt', "a")
                            results.write("#EXTINF:-1," + title + '"' + filename + '\n')
                            results.write(url + '\n\n')
                            results.close()
                            data = arch.readline()
                            i = i + 1
                            continue
                        elif data.find("://") == -1:
                            # plugin://plugin.video.p2p-streams/?url=a55f96dd386b7722380802b6afffc97ff98903ac&mode=1&name=Sky+Sports+title
                            title_fixed = title.split('"')
                            title = title_fixed[0]
                            title_fixed = title.replace(" " , "+")
                            url = 'plugin://plugin.video.p2p-streams/?url=' + data + '&mode=1&name=' + title_fixed
                            results = open(tmp + 'search.txt', "a")
                            results.write("#EXTINF:-1," + title + '"' + filename + '\n')  # TODO: change this! It must not prepend #EXTINF:-1, when it is not an m3u list
                            results.write(url + '\n\n')
                            results.close()
                            data = arch.readline()
                            i = i + 1
                            continue
                    else:
                        plugintools.log("no coinciden titulo y texto a buscar")
    for entry in ficheros:
        if entry.endswith('plx'):
            plot = entry.split(".")
            plot = plot[0]  # plot holds the filename (without extension)
            # Open the file
            plugintools.log("texto a buscar= "+texto)
            filename = plot + '.plx'
            plugintools.log("archivo PLX: "+filename)
            arch = open(playlists + filename, "r")
            num_items = len(arch.readlines())
            print num_items
            i = 0
            arch.seek(0)
            while i <= num_items:
                data = arch.readline()
                data = data.strip()
                i = i + 1
                print i
                if data.startswith("#"):
                    continue
                if data == 'type=video' or data == 'type=audio':
                    data = arch.readline()
                    i = i + 1
                    print i
                    data = data.replace("name=", "")
                    data = data.strip()
                    title = data
                    minus = title.lower()
                    if minus.find(texto) >= 0:
                        plugintools.log("Título coincidente= "+title)
                        data = arch.readline()
                        plugintools.log("Siguiente linea= "+data)
                        i = i + 1
                        print i
                        print "Analizamos..."
                        while data != "":
                            if data.startswith("thumb"):
                                data = arch.readline()
                                plugintools.log("data_plx= "+data)
                                i = i + 1
                                print i
                                continue
                            if data.startswith("date"):
                                data = arch.readline()
                                plugintools.log("data_plx= "+data)
                                i = i + 1
                                print i
                                continue
                            if data.startswith("background"):
                                data = arch.readline()
                                plugintools.log("data_plx= "+data)
                                i = i + 1
                                print i
                                continue
                            if data.startswith("URL"):
                                data = data.replace("URL=", "")
                                data = data.strip()
                                url = data
                                parse_url(url)
                                plugintools.log("URL= "+url)
                                results = open(tmp + 'search.txt', "a")
                                results.write("#EXTINF:-1," + title + '"' + filename + '\n')
                                results.write(url + '\n\n')
                                results.close()
                                data = arch.readline()
                                i = i + 1
                                break
                            data = arch.readline()  # guard added during reconstruction: advance past unrecognised keys
                            i = i + 1
            arch.close()
    results.close()
    params["plot"] = 'search'  # Pass the results file name through params so simpletv_items opens it
    params['texto'] = texto  # Add the search text to the params dict
    simpletv_items(params)
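# agendatv below repeats long chains of .replace('\xfa', 'ú')-style calls to
# repair Latin-1 accents. A minimal sketch of the same repair as one helper
# (the name fix_latin1 is an assumption; agendatv keeps its original inline
# chains):
def fix_latin1(text):
    # Map the Latin-1 bytes seen in the scraped guide to their accented forms.
    accents = {'\xfa': 'ú', '\xe9': 'é', '\xf3': 'ó', '\xaa': 'ª',
               '\xe1': 'á', '\xf1': 'ñ'}
    for raw, fixed in accents.items():
        text = text.replace(raw, fixed)
    return text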
plugintools.log("torneo= "+torneo) campeonato.append(torneo) # ERROR! Hay que añadir las jornadas, tal como estaba antes!! # Vamos a crear dos listas; una de los equipos que se enfrentan cada partido y otra de las horas de juego for dato in txt: lista_equipos.append(dato) for tiempo in horas: hora_partidos.append(tiempo) # <td align="left"><font face="Verdana, Arial, Helvetica, sans-serif" size="1" ><b>&nbsp;&nbsp; Canal + Fútbol</b></font></td> # <td align="left"><font face="Verdana, Arial, Helvetica, sans-serif" size="1" ><b>&nbsp;&nbsp; IB3</b></font></td> for kanal in tv: kanal = kanal.replace("&nbsp;&nbsp;", "") kanal = kanal.strip() kanal = kanal.replace('\xfa', 'ú') kanal = kanal.replace('\xe9', 'é') kanal = kanal.replace('\xf3', 'ó') kanal = kanal.replace('\xfa', 'ú') kanal = kanal.replace('\xaa', 'ª') kanal = kanal.replace('\xe1', 'á') kanal = kanal.replace('\xf1', 'ñ') canales.append(kanal) print lista_equipos print hora_partidos # Casualmente en esta lista se nos ha añadido los días de partido print campeonato print canales i = 0 # Contador de equipos j = 0 # Contador de horas k = 0 # Contador de competición max_equipos = len(lista_equipos) - 2 print max_equipos for entry in matches: while j <= max_equipos: # plugintools.log("entry= "+entry) fecha = plugintools.find_single_match(entry, 'color=#990000><b>(.*?)</b></td>') fecha = fecha.replace("&#225;", "á") fecha = fecha.strip() gametime = hora_partidos[i] gametime = gametime.replace("<b>", "") gametime = gametime.replace("</b>", "") gametime = gametime.strip() gametime = gametime.replace('&#233;', 'é') gametime = gametime.replace('&#225;', 'á') gametime = gametime.replace('&#233;', 'é') gametime = gametime.replace('&#225;', 'á') print gametime.find(":") if gametime.find(":") == 2: i = i + 1 #print i local = lista_equipos[j] local = local.strip() local = local.replace('\xfa', 'ú') local = local.replace('\xe9', 'é') local = local.replace('\xf3', 'ó') local = local.replace('\xfa', 'ú') local = local.replace('\xaa', 'ª') local = local.replace('\xe1', 'á') local = local.replace('\xf1', 'ñ') j = j + 1 print j visitante = lista_equipos[j] visitante = visitante.strip() visitante = visitante.replace('\xfa', 'ú') visitante = visitante.replace('\xe9', 'é') visitante = visitante.replace('\xf3', 'ó') visitante = visitante.replace('\xfa', 'ú') visitante = visitante.replace('\xaa', 'ª') visitante = visitante.replace('\xe1', 'á') visitante = visitante.replace('\xf1', 'ñ') local = local.replace('&#233;', 'é') local = local.replace('&#225;', 'á') j = j + 1 print j tipo = campeonato[k] channel = canales[k] channel = channel.replace('\xfa', 'ú') channel = channel.replace('\xe9', 'é') channel = channel.replace('\xf3', 'ó') channel = channel.replace('\xfa', 'ú') channel = channel.replace('\xaa', 'ª') channel = channel.replace('\xe1', 'á') channel = channel.replace('\xf1', 'ñ') channel = channel.replace('\xc3\xba', 'ú') channel = channel.replace('Canal +', 'Canal+') title = '[B][COLOR khaki]' + tipo + ':[/B][/COLOR] ' + '[COLOR lightyellow]' + '(' + gametime + ')[COLOR white] ' + local + ' vs ' + visitante + '[/COLOR][COLOR lightblue][I] (' + channel + ')[/I][/COLOR]' plugintools.add_item(plot = channel , action="contextMenu", title=title , url = "", fanart = art + 'agendatv.jpg', thumbnail = art + 'icon.png' , folder = True, isPlayable = False) # diccionario[clave] = valor plugintools.log("channel= "+channel) params["plot"] = channel # plugintools.add_item(plot = channel , action = "search_channel", title = '[COLOR lightblue]' + channel + '[/COLOR]', 
url= "", thumbnail = art + 'icon.png', fanart = fanart , folder = True, isPlayable = False) k = k + 1 print k plugintools.log("title= "+title) else: plugintools.add_item(action="", title='[B][COLOR red]' + gametime + '[/B][/COLOR]', thumbnail = art + 'icon.png' , fanart = art + 'agendatv.jpg' , folder = True, isPlayable = False) i = i + 1 def encode_string(url): d = { '\xc1':'A', '\xc9':'E', '\xcd':'I', '\xd3':'O', '\xda':'U', '\xdc':'U', '\xd1':'N', '\xc7':'C', '\xed':'i', '\xf3':'o', '\xf1':'n', '\xe7':'c', '\xba':'', '\xb0':'', '\x3a':'', '\xe1':'a', '\xe2':'a', '\xe3':'a', '\xe4':'a', '\xe5':'a', '\xe8':'e', '\xe9':'e', '\xea':'e', '\xeb':'e', '\xec':'i', '\xed':'i', '\xee':'i', '\xef':'i', '\xf2':'o', '\xf3':'o', '\xf4':'o', '\xf5':'o', '\xf0':'o', '\xf9':'u', '\xfa':'u', '\xfb':'u', '\xfc':'u', '\xe5':'a' } nueva_cadena = url for c in d.keys(): plugintools.log("caracter= "+c) nueva_cadena = nueva_cadena.replace(c,d[c]) auxiliar = nueva_cadena.encode('utf-8') url = nueva_cadena return nueva_cadena def plx_items(params): plugintools.log("[PalcoTV-0.3.0].plx_items" +repr(params)) fanart = "" thumbnail = "" # Control para elegir el título (plot, si formateamos el título / title , si no existe plot) if params.get("plot") == "": title = params.get("title").strip() + '.plx' title = parser_title(title) title = title.strip() filename = title params["plot"]=filename params["ext"] = 'plx' getfile_url(params) title = params.get("title") else: title = params.get("plot") title = title.strip() title = parser_title(title) plugintools.log("Lectura del archivo PLX") title = title.replace(" .plx", ".plx") title = title.strip() file = open(playlists + parser_title(title) + '.plx', "r") file.seek(0) num_items = len(file.readlines()) print num_items file.seek(0) # Lectura del título y fanart de la lista background = art + 'fanart.jpg' logo = art + 'plx3.png' file.seek(0) data = file.readline() while data <> "": plugintools.log("data= "+data) if data.startswith("background=") == True: data = data.replace("background=", "") background = data.strip() plugintools.log("background= "+background) if background == "": background = params.get("extra") if background == "": background = art + 'fanart.jpg' data = file.readline() continue if data.startswith("title=") == True: name = data.replace("title=", "") name = name.strip() plugintools.log("name= "+name) if name == "Select sort order for this list": name = "Seleccione criterio para ordenar ésta lista... " data = file.readline() continue if data.startswith("logo=") == True: data = data.replace("logo=", "") logo = data.strip() plugintools.log("logo= "+logo) title = parser_title(title) if thumbnail == "": thumbnail = art + 'plx3.png' plugintools.add_item(action="" , title = '[COLOR lightyellow][B][I]playlist / '+ title + '[/B][/I][/COLOR]', url = playlists + title , thumbnail = logo , fanart = background , folder = False , isPlayable = False) plugintools.log("fanart= "+fanart) plugintools.add_item(action="" , title = '[I][B]' + name + '[/B][/I]' , url = "" , thumbnail = logo , fanart = background , folder = False , isPlayable = False) data = file.readline() break else: data = file.readline() try: data = file.readline() plugintools.log("data= "+data) if data.startswith("background=") == True: data = data.replace("background=", "") data = data.strip() fanart = data background = fanart plugintools.log("fanart= "+fanart) else: # data = file.readline() if data.startswith("background=") == True: print "Archivo plx!" 
data = data.replace("background=", "") fanart = data.strip() plugintools.log("fanart= "+fanart) else: if data.startswith("title=") == True: name = data.replace("title=", "") name = name.strip() plugintools.log("name= "+name) except: plugintools.log("ERROR: Unable to load PLX file") data = file.readline() try: if data.startswith("title=") == True: data = data.replace("title=", "") name = data.strip() plugintools.log("title= "+title) plugintools.add_item(action="" , title = '[COLOR lightyellow][B][I]playlist / '+ title +'[/I][/B][/COLOR]' , url = playlists + title , thumbnail = logo , fanart = fanart , folder = False , isPlayable = False) plugintools.add_item(action="" , title = '[I][B]' + name + '[/B][/I]' , url = "" , thumbnail = art + "icon.png" , fanart = fanart , folder = False , isPlayable = False) except: plugintools.log("Unable to read PLX title") # Lectura de items i = 0 file.seek(0) while i <= num_items: data = file.readline() data = data.strip() i = i + 1 print i if data.startswith("#") == True: continue elif data.startswith("rating") == True: continue elif data.startswith("description") == True: continue if (data == 'type=comment') == True: data = file.readline() i = i + 1 print i while data <> "" : if data.startswith("name") == True: title = data.replace("name=", "") data = file.readline() data = data.strip() i = i + 1 print i continue elif data.startswith("thumb") == True: data = data.replace("thumb=", "") data = data.strip() thumbnail = data if thumbnail == "": thumbnail = logo data = file.readline() data = data.strip() i = i + 1 print i continue elif data.startswith("background") == True: data = data.replace("background=", "") fanart = data.strip() if fanart == "": fanart = background data = file.readline() data = data.strip() i = i + 1 print i continue plugintools.add_item(action="", title = title , url = "", thumbnail = thumbnail , fanart = fanart , folder = False, isPlayable = False) if (data == 'type=video') or (data == 'type=audio') == True: data = file.readline() i = i + 1 print i while data <> "" : if data.startswith("#") == True: data = file.readline() data = data.strip() i = i + 1 print i continue elif data.startswith("description") == True: data = file.readline() data = data.strip() i = i + 1 print i continue elif data.startswith("rating") == True: data = file.readline() data = data.strip() i = i + 1 print i continue elif data.startswith("name") == True: data = data.replace("name=", "") data = data.strip() title = data if title == "[COLOR=FF00FF00]by user-assigned order[/COLOR]" : title = "Seleccione criterio para ordenar ésta lista... 
" if title == "by user-assigned order" : title = "Según se han agregado en la lista" if title == "by date added, oldest first" : title = "Por fecha de agregación, las más antiguas primero" if title == "by date added, newest first" : title = "Por fecha de agregación, las más nuevas primero" data = file.readline() data = data.strip() i = i + 1 print i elif data.startswith("thumb") == True: data = data.replace("thumb=", "") data = data.strip() thumbnail = data if thumbnail == "": thumbnail = logo data = file.readline() data = data.strip() i = i + 1 print i continue elif data.startswith("date") == True: data = file.readline() i = i + 1 print i continue elif data.startswith("background") == True: data = data.replace("background=", "") fanart = data.strip() if fanart == "": fanart = background data = file.readline() data = data.strip() i = i + 1 print i continue elif data.startswith("URL") == True: # Control para el caso de que no se haya definido fanart en cada entrada de la lista => Se usa el fanart general if fanart == "": fanart = background data = data.replace("URL=", "") data = data.strip() url = data parse_url(url) if url.startswith("yt_channel") == True: youtube_channel = url.replace("yt_channel(", "") youtube_channel = youtube_channel.replace(")", "") url = 'http://gdata.youtube.com/feeds/api/users/' + youtube_channel + '/playlists?v=2&start-index=1&max-results=30' plugintools.add_item(action="youtube_playlists" , title = title + ' [[COLOR red]You[COLOR white]tube Channel][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False) break elif url.startswith("yt_playlist") == True: youtube_playlist = url.replace("yt_playlist(", "") youtube_playlist = youtube_playlist.replace(")", "") plugintools.log("youtube_playlist= "+youtube_playlist) url = 'http://gdata.youtube.com/feeds/api/playlists/' + youtube_playlist + '?v=2' plugintools.add_item( action = "youtube_videos" , title = title + ' [COLOR red][You[COLOR white]tube Playlist][/COLOR] [I][COLOR lightblue](' + origen + ')[/I][/COLOR]', url = url , thumbnail = art + "icon.png" , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False ) data = file.readline() i = i + 1 break # Sintaxis yt(...) 
                elif data.startswith("URL"):
                    # If no per-entry fanart was defined, fall back to the list-wide fanart
                    if fanart == "":
                        fanart = background
                    url = data.replace("URL=", "").strip()
                    parse_url(url)
                    if url.startswith("yt_channel"):
                        youtube_channel = url.replace("yt_channel(", "").replace(")", "")
                        url = 'http://gdata.youtube.com/feeds/api/users/' + youtube_channel + '/playlists?v=2&start-index=1&max-results=30'
                        plugintools.add_item(action="youtube_playlists", title=title + ' [[COLOR red]You[COLOR white]tube Channel][/COLOR]',
                                             url=url, thumbnail=thumbnail, fanart=fanart, folder=True, isPlayable=False)
                        break
                    elif url.startswith("yt_playlist"):
                        youtube_playlist = url.replace("yt_playlist(", "").replace(")", "")
                        plugintools.log("youtube_playlist= " + youtube_playlist)
                        url = 'http://gdata.youtube.com/feeds/api/playlists/' + youtube_playlist + '?v=2'
                        # NOTE: the original label also interpolated "origen", which is never defined
                        # in this function (NameError); dropped from the label
                        plugintools.add_item(action="youtube_videos", title=title + ' [COLOR red][You[COLOR white]tube Playlist][/COLOR]',
                                             url=url, thumbnail=art + "icon.png", fanart=art + 'fanart.jpg', folder=True, isPlayable=False)
                        break
                    # yt(...) syntax: deprecated, kept for Darío's lists
                    elif url.startswith("yt"):
                        youtube_user = url.replace("yt(", "").replace(")", "")
                        url = 'http://gdata.youtube.com/feeds/api/users/' + youtube_user + '/playlists?v=2&start-index=1&max-results=30'
                        plugintools.log("URL= " + url)
                        plugintools.add_item(action="youtube_playlists", title=title + ' [COLOR red][You[COLOR white]tube Playlist][/COLOR]',
                                             url=url, thumbnail=thumbnail, fanart=fanart, folder=True, isPlayable=False)
                        break
                    elif url.startswith("serie"):
                        url = url.replace("serie:", "")
                        plugintools.log("URL= " + url)
                        plugintools.add_item(action="seriecatcher", title=title + ' [COLOR purple][Serie online][/COLOR]',
                                             url=url, thumbnail=thumbnail, fanart=fanart, extra=fanart, folder=True, isPlayable=False)
                        break
                    elif url.startswith("http"):
                        # Known hosters, dispatched from one table instead of an elif per hoster
                        hosters = [("allmyvideos", "allmyvideos", '[COLOR lightyellow][Allmyvideos][/COLOR]'),
                                   ("streamcloud", "streamcloud", '[COLOR lightskyblue][Streamcloud][/COLOR]'),
                                   ("played.to", "playedto", '[COLOR lavender][Played.to][/COLOR]'),
                                   ("vidspot", "vidspot", '[COLOR palegreen][Vidspot][/COLOR]'),
                                   ("vk.com", "vk", '[COLOR royalblue][Vk][/COLOR]'),
                                   ("nowvideo", "nowvideo", '[COLOR red][Nowvideo][/COLOR]')]
                        for needle, action, label in hosters:
                            if url.find(needle) >= 0:
                                plugintools.log("URL= " + url)
                                plugintools.add_item(action=action, title=title + ' ' + label, url=url,
                                                     thumbnail=thumbnail, fanart=fanart, folder=False, isPlayable=True)
                                break
                        else:
                            plugintools.log("URL= " + url)
                            if url.endswith("flv"):
                                plugintools.add_item(action="play", title=title + ' [COLOR cyan][Flash][/COLOR]', url=url,
                                                     thumbnail=thumbnail, fanart=fanart, folder=False, isPlayable=True)
                            elif url.endswith("m3u8"):
                                plugintools.add_item(action="play", title=title + ' [COLOR purple][m3u8][/COLOR]', url=url,
                                                     thumbnail=thumbnail, fanart=fanart, folder=False, isPlayable=True)
                            else:
                                plugintools.add_item(action="play", title=title + ' [COLOR white][HTTP][/COLOR]', url=url,
                                                     thumbnail=thumbnail, fanart=fanart, folder=False, isPlayable=True)
                        break
"launch_rtmp" , title = title + '[COLOR green] [' + server + '][/COLOR]' , url = params.get("url") , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True ) break elif url.startswith("plugin") == True: if url.find("plugin.video.youtube") >= 0: plugintools.log("URL= "+url) plugintools.add_item( action = "play" , title = title + ' [COLOR white] [[COLOR red]You[COLOR white]tube Video][/COLOR][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True ) break else: plugintools.add_item(action="play" , title = title , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True) plugintools.log("URL = "+url) break elif data == "" : break else: data = file.readline() data = data.strip() i = i + 1 print i if (data == 'type=playlist') == True: # Control si no se definió fanart en cada entrada de la lista => Se usa fanart global de la lista if fanart == "": fanart = background data = file.readline() i = i + 1 print i while data <> "" : if data.startswith("name") == True : data = data.replace("name=", "") title = data.strip() if title == '>>>' : title = title.replace(">>>", "[I][COLOR lightyellow]Siguiente[/I][/COLOR]") data = file.readline() data = data.strip() i = i + 1 elif title == '<<<' : title = title.replace("<<<", "[I][COLOR lightyellow]Anterior[/I][/COLOR]") data = file.readline() data = data.strip() i = i + 1 elif title.find("Sorted by user-assigned order") >= 0: title = "[I][COLOR lightyellow]Ordenar listas por...[/I][/COLOR]" data = file.readline() data = data.strip() i = i + 1 elif title.find("Sorted A-Z") >= 0: title = "[I][COLOR lightyellow][COLOR lightyellow]De la A a la Z[/I][/COLOR]" data = file.readline() data = data.strip() i = i + 1 elif title.find("Sorted Z-A") >= 0: title = "[I][COLOR lightyellow]De la Z a la A[/I][/COLOR]" data = file.readline() data = data.strip() i = i + 1 elif title.find("Sorted by date added, newest first") >= 0: title = "Ordenado por: Las + recientes primero..." data = file.readline() data = data.strip() i = i + 1 elif title.find("Sorted by date added, oldest first") >= 0: title = "Ordenado por: Las + antiguas primero..." data = file.readline() data = data.strip() i = i + 1 elif title.find("by user-assigned order") >= 0: title = "[COLOR lightyellow]Ordenar listas por...[/COLOR]" data = file.readline() data = data.strip() i = i + 1 elif title.find("by date added, newest first") >= 0 : title = "Las + recientes primero..." data = file.readline() data = data.strip() i = i + 1 elif title.find("by date added, oldest first") >= 0 : title = "Las + antiguas primero..." data = file.readline() data = data.strip() i = i + 1 elif data.startswith("thumb") == True: data = data.replace("thumb=", "") data = data.strip() thumbnail = data data = file.readline() data = data.strip() i = i + 1 print i continue elif data.startswith("URL") == True: data = data.replace("URL=", "") data = data.strip() url = data parse_url(url) plugintools.add_item(action="plx_items" , title = title , url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False) break elif data == "" : break else: data = file.readline() data = data.strip() i = i + 1 print i continue file.close() # Purga de listas erróneas creadas al abrir listas PLX (por los playlists de ordenación que crea Navixtreme) if os.path.isfile(playlists + 'Siguiente.plx'): os.remove(playlists + 'Siguiente.plx') print "Correcto!" 
    # Purge the bogus playlists created when opening PLX lists
    # (the sort pseudo-playlists that Navixtreme generates)
    leftovers = ['Siguiente.plx', 'Ordenar listas por....plx', 'A-Z.plx', 'De la A a la Z.plx',
                 'Z-A.plx', 'De la Z a la A.plx', 'Las + antiguas primero....plx',
                 'by date added, oldest first.plx', 'Las + recientes primero....plx',
                 'by date added, newest first.plx', 'Sorted by user-assigned order.plx',
                 'Ordenado por.plx', 'Ordenado por']
    for leftover in leftovers:
        if os.path.isfile(playlists + leftover):
            os.remove(playlists + leftover)
            print leftover + " eliminado!"
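# For reference, a minimal PLX entry in the shape plx_items() above consumes
# (hypothetical sample; the field names are taken from the parser itself):
#
#   background=http://example.com/fanart.jpg
#   title=My list
#   type=video
#   name=Some channel
#   thumb=http://example.com/logo.png
#   URL=rtmp://example.com/live/stream1
#
# A blank line terminates an entry; "type=playlist" entries link to other
# PLX files and "type=comment" entries render as non-playable rows.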
def futbolenlatv(params):
    plugintools.log("[PalcoTV-0.3.0].futbolenlatv " + repr(params))
    url = params.get("url")
    print url
    fecha = get_fecha()
    dia_manana = params.get("plot")
    data = plugintools.read(url)
    if dia_manana == "":  # today's agenda vs. tomorrow's
        plugintools.add_item(action="", title='[COLOR green][B]FutbolenlaTV.com[/B][/COLOR] - [COLOR lightblue][I]Agenda para el día ' + fecha + '[/I][/COLOR]',
                             folder=False, isPlayable=False)
    else:
        dia_manana = dia_manana.split("-")
        dia_manana = dia_manana[2] + "/" + dia_manana[1] + "/" + dia_manana[0]
        plugintools.add_item(action="", title='[COLOR green][B]FutbolenlaTV.com[/B][/COLOR] - [COLOR lightblue][I]Agenda para el día ' + dia_manana + '[/I][/COLOR]',
                             folder=False, isPlayable=False)

    bloque = plugintools.find_multiple_matches(data, '<span class="cuerpo-partido">(.*?)</div>')
    for entry in bloque:
        category = plugintools.find_single_match(entry, '<i class=(.*?)</i>')
        category = category.replace("ftvi-", "").replace('comp">', '').replace('"', '').replace("-", " ")
        category = category.replace("Futbol", "Fútbol").strip().capitalize()
        plugintools.log("cat= " + category)
        champ = encode_string(plugintools.find_single_match(entry, '<span class="com-detalle">(.*?)</span>')).strip()
        event = encode_string(plugintools.find_single_match(entry, '<span class="bloque">(.*?)</span>')).strip()
        momentum = plugintools.find_single_match(entry, '<time itemprop="startDate" datetime=([^<]+)</time>')
        momentum = momentum.split(">")[1]
        sport = plugintools.find_single_match(entry, '<meta itemprop="eventType" content=(.*?)/>').replace('"', '').strip()
        if sport == "Partido de fútbol":
            sport = "Fútbol"
        gameday = plugintools.find_single_match(entry, '<span class="dia">(.*?)</span>')
        # Collect the two team names as "A vs B", skipping empty cells and dashes
        rivals = plugintools.find_multiple_matches(entry, '<span>([^<]+)</span>([^<]+)<span>([^<]+)</span>')
        rivales = ""
        for diny in rivals:
            for rival in diny:
                rival = encode_string(rival).strip()
                if rival in ("", "-"):
                    continue
                plugintools.log("rival= " + rival)
                if rivales != "":
                    rivales = rivales + " vs " + rival
                    break
                rivales = rival
tv_b.replace("(Bar+ dial 333-334", "") tv_b = tv_b.replace("(Canal+", "") tv_c = tv[2] tv_c = tv_c.lstrip() tv_c = tv_c.rstrip() tv_c = tv_c.replace(")", "") tv_c = tv_c.replace("(Bar+ dial 333-334", "") tv_c = tv_c.replace("(Canal+", "") tv = tv_a + " / " + tv_b + " / " + tv_c plot = tv plugintools.log("plot= "+plot) elif len(tv) == 4: tv_b = tv[1] tv_b = tv_b.lstrip() tv_b = tv_b.rstrip() tv_b = tv_b.replace(")", "") tv_b = tv_b.replace("(Bar+ dial 333-334", "") tv_b = tv_b.replace("(Canal+", "") tv_c = tv[2] tv_c = tv_c.lstrip() tv_c = tv_c.rstrip() tv_c = tv_c.replace(")", "") tv_c = tv_c.replace("(Bar+ dial 333-334", "") tv_c = tv_c.replace("(Canal+", "") tv_d = tv[3] tv_d = tv_d.lstrip() tv_d = tv_d.rstrip() tv_d = tv_d.replace(")", "") tv_d = tv_d.replace("(Bar+ dial 333-334", "") tv_d = tv_d.replace("(Canal+", "") tv = tv_a + " / " + tv_b + " / " + tv_c + " / " + tv_d plot = tv plugintools.log("plot= "+plot) elif len(tv) == 5: tv_b = tv[1] tv_b = tv_b.lstrip() tv_b = tv_b.rstrip() tv_b = tv_b.replace(")", "") tv_b = tv_b.replace("(Bar+ dial 333-334", "") tv_b = tv_b.replace("(Canal+", "") tv_c = tv[2] tv_c = tv_c.lstrip() tv_c = tv_c.rstrip() tv_c = tv_c.replace(")", "") tv_c = tv_c.replace("(Bar+ dial 333-334", "") tv_c = tv_c.replace("(Canal+", "") tv_d = tv[3] tv_d = tv_d.lstrip() tv_d = tv_d.rstrip() tv_d = tv_d.replace(")", "") tv_d = tv_d.replace("(Bar+ dial 333-334", "") tv_d = tv_d.replace("(Canal+", "") tv_e = tv[4] tv_e = tv_e.lstrip() tv_e = tv_e.rstrip() tv_e = tv_e.replace(")", "") tv_e = tv_e.replace("(Bar+ dial 333-334", "") tv_e = tv_e.replace("(Canal+", "") tv = tv_a + " / " + tv_b + " / " + tv_c + " / " + tv_d + " / " + tv_e # tv = tv.replace(")", "") plot = tv plugintools.log("plot= "+plot) elif len(tv) == 6: tv_b = tv[1] tv_b = tv_b.lstrip() tv_b = tv_b.rstrip() tv_b = tv_b.replace(")", "") tv_b = tv_b.replace("(Bar+ dial 333-334", "") tv_b = tv_b.replace("(Canal+", "") tv_c = tv[2] tv_c = tv_c.lstrip() tv_c = tv_c.rstrip() tv_c = tv_c.replace(")", "") tv_c = tv_c.replace("(Bar+ dial 333-334", "") tv_c = tv_c.replace("(Canal+", "") tv_d = tv[3] tv_d = tv_d.lstrip() tv_d = tv_d.rstrip() tv_d = tv_d.replace(")", "") tv_d = tv_d.replace("(Bar+ dial 333-334", "") tv_d = tv_d.replace("(Canal+", "") tv_e = tv[4] tv_e = tv_e.lstrip() tv_e = tv_e.rstrip() tv_e = tv_e.replace(")", "") tv_e = tv_e.replace("(Bar+ dial 333-334", "") tv_e = tv_e.replace("(Canal+", "") tv_f = tv[5] tv_f = tv_f.lstrip() tv_f = tv_f.rstrip() tv_f = tv_f.replace(")", "") tv_f = tv_f.replace("(Bar+ dial 333-334", "") tv_f = tv_f.replace("(Canal+", "") tv = tv_a + " / " + tv_b + " / " + tv_c + " / " + tv_d + " / " + tv_e + " / " + tv_f # tv = tv.replace(")", "") plot = tv plugintools.log("plot= "+plot) elif len(tv) == 7: tv_b = tv[1] tv_b = tv_b.lstrip() tv_b = tv_b.rstrip() tv_b = tv_b.replace(")", "") tv_b = tv_b.replace("(Bar+ dial 333-334", "") tv_b = tv_b.replace("(Canal+", "") tv_c = tv[2] tv_c = tv_c.lstrip() tv_c = tv_c.rstrip() tv_c = tv_c.replace(")", "") tv_c = tv_c.replace("(Bar+ dial 333-334", "") tv_c = tv_c.replace("(Canal+", "") tv_d = tv[3] tv_d = tv_d.lstrip() tv_d = tv_d.rstrip() tv_d = tv_d.replace(")", "") tv_d = tv_d.replace("(Bar+ dial 333-334", "") tv_d = tv_d.replace("(Canal+", "") tv_e = tv[4] tv_e = tv_e.lstrip() tv_e = tv_e.rstrip() tv_e = tv_e.replace(")", "") tv_e = tv_e.replace("(Bar+ dial 333-334", "") tv_e = tv_e.replace("(Canal+", "") tv_f = tv[5] tv_f = tv_f.lstrip() tv_f = tv_f.rstrip() tv_f = tv_f.replace(")", "") tv_f = tv_f.replace("(Bar+ dial 333-334", "") 
tv_f = tv_f.replace("(Canal+", "") tv_g = tv[6] tv_g = tv_g.lstrip() tv_g = tv_g.rstrip() tv_g = tv_g.replace(")", "") tv_g = tv_g.replace("(Bar+ dial 333-334", "") tv_g = tv_g.replace("(Canal+", "") tv = tv_a + " / " + tv_b + " / " + tv_c + " / " + tv_d + " / " + tv_e + " / " + tv_f + " / " + tv_g plot = tv plugintools.log("plot= "+plot) else: tv = tv_a plot = tv_a plugintools.log("plot= "+plot) plugintools.add_item(action="contextMenu", plot = plot , title = momentum + "h " + '[COLOR lightyellow][B]' + category + '[/B][/COLOR] ' + '[COLOR green]' + champ + '[/COLOR]' + " " + '[COLOR lightyellow][I]' + rivales + '[/I][/COLOR] [I][COLOR red]' + plot + '[/I][/COLOR]' , thumbnail = 'http://i2.bssl.es/telelocura/2009/05/futbol-tv.jpg' , fanart = art + 'agenda2.jpg' , folder = True, isPlayable = False) # plugintools.add_item(action="contextMenu", title = '[COLOR yellow][I]' + tv + '[/I][/COLOR]', thumbnail = 'http://i2.bssl.es/telelocura/2009/05/futbol-tv.jpg' , fanart = art + 'agenda2.jpg' , plot = plot , folder = True, isPlayable = False) # plugintools.add_item(action="contextMenu", title = gameday + '/' + day + "(" + momentum + ") " + '[COLOR lightyellow][B]' + category + '[/B][/COLOR] ' + champ + ": " + rivales , plot = plot , thumbnail = 'http://i2.bssl.es/telelocura/2009/05/futbol-tv.jpg' , fanart = art + 'agenda2.jpg' , folder = True, isPlayable = False) # plugintools.add_item(action="contextMenu", title = '[COLOR yellow][I]' + tv + '[/I][/COLOR]' , thumbnail = 'http://i2.bssl.es/telelocura/2009/05/futbol-tv.jpg' , fanart = art + 'agenda2.jpg' , plot = plot , folder = True, isPlayable = False) def encode_string(txt): plugintools.log("[JavierTV-0.3.0].encode_string: "+txt) txt = txt.replace("&#231;", "ç") txt = txt.replace('&#233;', 'é') txt = txt.replace('&#225;', 'á') txt = txt.replace('&#233;', 'é') txt = txt.replace('&#225;', 'á') txt = txt.replace('&#241;', 'ñ') txt = txt.replace('&#250;', 'ú') txt = txt.replace('&#237;', 'í') txt = txt.replace('&#243;', 'ó') txt = txt.replace('&#39;', "'") txt = txt.replace("&nbsp;", "") txt = txt.replace("&nbsp;", "") txt = txt.replace('&#39;', "'") return txt def splive_items(params): plugintools.log("[JavierTV-0.3.0].SPlive_items "+repr(params)) data = plugintools.read( params.get("url") ) channel = plugintools.find_multiple_matches(data,'<channel>(.*?)</channel>') for entry in channel: # plugintools.log("channel= "+channel) title = plugintools.find_single_match(entry,'<name>(.*?)</name>') category = plugintools.find_single_match(entry,'<category>(.*?)</category>') thumbnail = plugintools.find_single_match(entry,'<link_logo>(.*?)</link_logo>') rtmp = plugintools.find_single_match(entry,'<rtmp>([^<]+)</rtmp>') isIliveTo = plugintools.find_single_match(entry,'<isIliveTo>([^<]+)</isIliveTo>') rtmp = rtmp.strip() pageurl = plugintools.find_single_match(entry,'<url_html>([^<]+)</url_html>') link_logo = plugintools.find_single_match(entry,'<link_logo>([^<]+)</link_logo>') if pageurl == "SinProgramacion": pageurl = "" playpath = plugintools.find_single_match(entry, '<playpath>([^<]+)</playpath>') playpath = playpath.replace("Referer: ", "") token = plugintools.find_single_match(entry, '<token>([^<]+)</token>') iliveto = 'rtmp://188.122.91.73/edge' if isIliveTo == "0": if token == "0": url = rtmp url = url.replace("&amp;", "&") parse_url(url) plugintools.add_item( action = "play" , title = title , url = url , thumbnail = thumbnail , fanart = fanart , plot = title , folder = False , isPlayable = True ) plugintools.log("url= "+url) else: url = rtmp + " 
pageUrl=" + pageurl + " " + 'token=' + token + playpath + " live=1" parse_url(url) plugintools.add_item( action = "play" , title = title , url = url , thumbnail = thumbnail , fanart = fanart , plot = title , folder = False , isPlayable = True ) plugintools.log("url= "+url) if isIliveTo == "1": if token == "1": url = iliveto + " pageUrl=" + pageurl + " " + 'token=' + token + playpath + " live=1" url = url.replace("&amp;", "&") parse_url(url) plugintools.add_item( action = "play" , title = title , url = url , thumbnail = thumbnail , fanart = fanart , plot = title , folder = False , isPlayable = True ) plugintools.log("url= "+url) else: url = iliveto + ' swfUrl=' + rtmp + " playpath=" + playpath + " pageUrl=" + pageurl url = url.replace("&amp;", "&") parse_url(url) plugintools.add_item( action = "play" , title = title , url = url , thumbnail = thumbnail , fanart = fanart , plot = title , folder = False , isPlayable = True ) plugintools.log("url= "+url) def get_fecha(): from datetime import datetime ahora = datetime.now() anno_actual = ahora.year mes_actual = ahora.month dia_actual = ahora.day fecha = str(dia_actual) + "/" + str(mes_actual) + "/" + str(anno_actual) plugintools.log("fecha de hoy= "+fecha) return fecha def p2p_items(params): plugintools.log("[JavierTV-0.3.0].p2p_items" +repr(params)) # Vamos a localizar el título title = params.get("plot") if title == "": title = params.get("title") data = plugintools.read("http://pastebin.com/raw.php?i=bjCUnJjG") subcanal = plugintools.find_single_match(data,'<name>' + title + '(.*?)</subchannel>') thumbnail = plugintools.find_single_match(subcanal, '<thumbnail>(.*?)</thumbnail>') fanart = plugintools.find_single_match(subcanal, '<fanart>(.*?)</fanart>') plugintools.log("thumbnail= "+thumbnail) # Controlamos el caso en que no haya thumbnail en el menú de PalcoTV if thumbnail == "": thumbnail = art + 'p2p.png' elif thumbnail == 'name_rtmp.png': thumbnail = art + 'p2p.png' if fanart == "": fanart = art + 'p2p.png' # Comprobamos si la lista ha sido descargada o no plot = params.get("plot") if plot == "": title = params.get("title") title = parser_title(title) filename = title + '.p2p' getfile_url(params) else: print "Lista ya descargada (plot no vacío)" filename = params.get("plot") params["ext"] = 'p2p' params["plot"]=filename filename = filename + '.p2p' plugintools.log("Lectura del archivo P2P") plugintools.add_item(action="" , title='[COLOR lightyellow][I][B]' + title + '[/B][/I][/COLOR]' , thumbnail=thumbnail , fanart=fanart , folder=False, isPlayable=False) # Abrimos el archivo P2P y calculamos número de líneas file = open(playlists + filename, "r") file.seek(0) data = file.readline() num_items = len(file.readlines()) print num_items file.seek(0) data = file.readline() if data.startswith("default") == True: data = data.replace("default=", "") data = data.split(",") thumbnail = data[0] fanart = data[1] plugintools.log("fanart= "+fanart) # Leemos entradas i = 0 file.seek(0) data = file.readline() data = data.strip() while i <= num_items: if data == "": data = file.readline() data = data.strip() # plugintools.log("linea vacia= "+data) i = i + 1 #print i continue elif data.startswith("default") == True: data = file.readline() data = data.strip() i = i + 1 #print i continue elif data.startswith("#") == True: title = data.replace("#", "") plugintools.log("title comentario= "+title) plugintools.add_item(action="play" , title = title , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True) data = file.readline() data = 
def p2p_items(params):
    plugintools.log("[JavierTV-0.3.0].p2p_items " + repr(params))

    # Locate the title
    title = params.get("plot")
    if title == "":
        title = params.get("title")
    data = plugintools.read("http://pastebin.com/raw.php?i=bjCUnJjG")
    subcanal = plugintools.find_single_match(data, '<name>' + title + '(.*?)</subchannel>')
    thumbnail = plugintools.find_single_match(subcanal, '<thumbnail>(.*?)</thumbnail>')
    fanart = plugintools.find_single_match(subcanal, '<fanart>(.*?)</fanart>')
    plugintools.log("thumbnail= " + thumbnail)
    # Fall back when the PalcoTV menu has no thumbnail for this entry
    if thumbnail == "" or thumbnail == 'name_rtmp.png':
        thumbnail = art + 'p2p.png'
    if fanart == "":
        fanart = art + 'p2p.png'

    # Check whether the list has already been downloaded
    plot = params.get("plot")
    if plot == "":
        title = parser_title(params.get("title"))
        filename = title + '.p2p'
        getfile_url(params)
    else:
        print "List already downloaded (plot is not empty)"
        filename = params.get("plot")
        params["ext"] = 'p2p'
        params["plot"] = filename
        filename = filename + '.p2p'

    plugintools.log("Reading the P2P file")
    plugintools.add_item(action="", title='[COLOR lightyellow][I][B]' + title + '[/B][/I][/COLOR]',
                         thumbnail=thumbnail, fanart=fanart, folder=False, isPlayable=False)

    # Open the P2P file and count its lines
    file = open(playlists + filename, "r")
    file.seek(0)
    data = file.readline()
    num_items = len(file.readlines())
    print num_items
    file.seek(0)
    data = file.readline()
    if data.startswith("default"):
        data = data.replace("default=", "").split(",")
        thumbnail = data[0]
        fanart = data[1]
        plugintools.log("fanart= " + fanart)

    # Read the entries
    i = 0
    file.seek(0)
    data = file.readline().strip()
    while i <= num_items:
        if data == "" or data.startswith("default"):
            data = file.readline().strip()
            i = i + 1
            continue
        elif data.startswith("#"):
            title = data.replace("#", "")
            plugintools.log("title comentario= " + title)
            plugintools.add_item(action="play", title=title, thumbnail=thumbnail, fanart=fanart,
                                 folder=False, isPlayable=True)
            data = file.readline().strip()
            i = i + 1
            continue
        else:
            title = data.strip()
            plugintools.log("title= " + title)
            data = file.readline().strip()
            i = i + 1
            plugintools.log("URL line= " + data)
            if data.startswith("sop"):
                # Sopcast: plugin://plugin.video.p2p-streams/?url=sop://host:port/id&mode=2&name=...
                title_fixed = parser_title(title)
                url = 'plugin://plugin.video.p2p-streams/?url=' + data + '&mode=2&name=' + title_fixed
                plugintools.add_item(action="play", title=title_fixed + ' [COLOR lightgreen][Sopcast][/COLOR]',
                                     url=url, thumbnail=thumbnail, fanart=fanart, folder=False, isPlayable=True)
                data = file.readline().strip()
                i = i + 1
                continue
            elif data.startswith("magnet"):
                # Torrent: plugin://plugin.video.xbmctorrent/play/<quoted magnet link>
                url_fixed = urllib.quote_plus(data)
                title = parser_title(title)
                url = 'plugin://plugin.video.xbmctorrent/play/' + url_fixed
                data = file.readline().strip()
                i = i + 1
                if data.startswith("art"):
                    # Optional per-entry artwork: art=<icon>,<wall>
                    data = data.replace("art=", "").split(",")
                    icon = data[0]
                    wall = data[1]
                    plugintools.add_item(action="play", title=title + ' [COLOR lightyellow][Torrent][/COLOR]',
                                         url=url, thumbnail=icon, fanart=wall, folder=False, isPlayable=True)
                    data = file.readline().strip()
                    i = i + 1
                    continue
                else:
                    plugintools.add_item(action="play", title=title + ' [COLOR lightyellow][Torrent][/COLOR]',
                                         url=url, thumbnail=thumbnail, fanart=fanart, folder=False, isPlayable=True)
                    plugintools.log("fanart= " + fanart)
                    data = file.readline().strip()
                    i = i + 1
                    continue
            else:
                # Acestream: plugin://plugin.video.p2p-streams/?url=<hash>&mode=1&name=<title>
                title_fixed = title
                title = parser_title(title).replace(" ", "+")
                # the original left "&name=" empty; append the title the sample URL expects
                url = 'plugin://plugin.video.p2p-streams/?url=' + data + '&mode=1&name=' + title
                plugintools.add_item(action="play", title=title_fixed + ' [COLOR lightblue][Acestream][/COLOR]',
                                     url=url, thumbnail=thumbnail, fanart=fanart, folder=False, isPlayable=True)
                data = file.readline().strip()
                i = i + 1
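# A small sketch of the plugin:// URL scheme used above (parameter names
# taken from the sample URLs in the comments; for the p2p-streams addon
# this code targets, mode=1 is Acestream and mode=2 is Sopcast):
def _p2p_url_sketch(stream, name):
    import urllib
    mode = '2' if stream.startswith('sop://') else '1'
    return ('plugin://plugin.video.p2p-streams/?url=' + urllib.quote_plus(stream) +
            '&mode=' + mode + '&name=' + urllib.quote_plus(name))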
plugintools.log("tv= "+tv_f) search_channel(params) elif selector == 6: print selector if tv_g.startswith("Gol") == True: tv_g = "Gol" params["plot"] = tv_g plugintools.log("tv= "+tv_g) search_channel(params) else: pass def magnet_items(params): plugintools.log("[JavierTV-0.3.0].magnet_items" +repr(params)) plot = params.get("plot") title = params.get("title") fanart = "" thumbnail = "" if plot != "": filename = params.get("plot") params["ext"] = 'p2p' params["plot"]=filename title = plot + '.p2p' else: getfile_url(params) title = params.get("title") title = title + '.p2p' # Abrimos el archivo P2P y calculamos número de líneas file = open(playlists + title, "r") file.seek(0) data = file.readline() num_items = len(file.readlines()) # Leemos entradas file.seek(0) i = 0 while i <= num_items: data = file.readline() i = i + 1 #print i if data != "": data = data.strip() title = data data = file.readline() i = i + 1 #print i data = data.strip() if data.startswith("magnet:") == True: # plugin://plugin.video.p2p-streams/?url=sop://124.232.150.188:3912/11265&mode=2&name=Titulo+canal+Sopcast title_fixed = title.replace(" " , "+") url_fixed = urllib.quote_plus(link) url = 'plugin://plugin.video.xbmctorrent/play/' + url_fixed plugintools.add_item(action="play" , title = data + ' [COLOR indianred][Torrent][/COLOR]' , url = url, thumbnail = art + 'p2p.png' , fanart = art + 'fanart.jpg' , folder = False , isPlayable = True) else: data = file.readline() i = i + 1 #print i else: data = file.readline() i = i + 1 #print i def parse_channel(txt): plugintools.log("[JavierTV-0.3.0].encode_string: "+txt) txt = txt.rstrip() txt = txt.lstrip() return txt def futbolenlatv_manana(params): plugintools.log("[JavierTV-0.3.0].futbolenlatv " + repr(params)) # Fecha de mañana import datetime today = datetime.date.today() manana = today + datetime.timedelta(days=1) anno_manana = manana.year mes_manana = manana.month if mes_manana == 1: mes_manana = "enero" elif mes_manana == 2: mes_manana = "febrero" elif mes_manana == 3: mes_manana = "marzo" elif mes_manana == 4: mes_manana = "abril" elif mes_manana == 5: mes_manana = "mayo" elif mes_manana == 6: mes_manana = "junio" elif mes_manana == 7: mes_manana = "julio" elif mes_manana == 8: mes_manana = "agosto" elif mes_manana == 9: mes_manana = "septiembre" elif mes_manana == 10: mes_manana = "octubre" elif mes_manana == 11: mes_manana = "noviembre" elif mes_manana == 12: mes_manana = "diciembre" dia_manana = manana.day plot = str(anno_manana) + "-" + str(mes_manana) + "-" + str(dia_manana) print manana url = 'http://www.futbolenlatv.com/m/Fecha/' + plot + '/agenda/false/false' plugintools.log("URL mañana= "+url) params["url"] = url params["plot"] = plot futbolenlatv(params) def parser_title(title): plugintools.log("[JavierTV-0.3.0].parser_title " + title) cyd = title cyd = cyd.replace("[COLOR lightyellow]", "") cyd = cyd.replace("[COLOR green]", "") cyd = cyd.replace("[COLOR red]", "") cyd = cyd.replace("[COLOR blue]", "") cyd = cyd.replace("[COLOR royalblue]", "") cyd = cyd.replace("[COLOR white]", "") cyd = cyd.replace("[COLOR pink]", "") cyd = cyd.replace("[COLOR cyan]", "") cyd = cyd.replace("[COLOR steelblue]", "") cyd = cyd.replace("[COLOR forestgreen]", "") cyd = cyd.replace("[COLOR olive]", "") cyd = cyd.replace("[COLOR khaki]", "") cyd = cyd.replace("[COLOR lightsalmon]", "") cyd = cyd.replace("[COLOR orange]", "") cyd = cyd.replace("[COLOR lightgreen]", "") cyd = cyd.replace("[COLOR lightblue]", "") cyd = cyd.replace("[COLOR lightpink]", "") cyd = cyd.replace("[COLOR 
skyblue]", "") cyd = cyd.replace("[COLOR darkorange]", "") cyd = cyd.replace("[COLOR greenyellow]", "") cyd = cyd.replace("[COLOR yellow]", "") cyd = cyd.replace("[COLOR yellowgreen]", "") cyd = cyd.replace("[COLOR orangered]", "") cyd = cyd.replace("[COLOR grey]", "") cyd = cyd.replace("[COLOR gold]", "") cyd = cyd.replace("[COLOR=FF00FF00]", "") cyd = cyd.replace("[/COLOR]", "") cyd = cyd.replace("[B]", "") cyd = cyd.replace("[/B]", "") cyd = cyd.replace("[I]", "") cyd = cyd.replace("[/I]", "") cyd = cyd.replace("[Auto]", "") cyd = cyd.replace("[Parser]", "") cyd = cyd.replace("[TinyURL]", "") cyd = cyd.replace("[Auto]", "") # Control para evitar filenames con corchetes cyd = cyd.replace(" [Lista M3U]", "") cyd = cyd.replace(" [Lista PLX]", "") title = cyd title = title.strip() if title.endswith(" .plx") == True: title = title.replace(" .plx", ".plx") plugintools.log("title_parsed= "+title) return title def json_items(params): plugintools.log("[JavierTV-0.3.0].json_items "+repr(params)) data = plugintools.read(params.get("url")) # Título y autor de la lista match = plugintools.find_single_match(data, '"name"(.*?)"url"') match = match.split(",") namelist = match[0].strip() author = match[1].strip() namelist = namelist.replace('"', "") namelist = namelist.replace(": ", "") author = author.replace('"author":', "") author = author.replace('"', "") fanart = params.get("extra") thumbnail = params.get("thumbnail") plugintools.log("title= "+namelist) plugintools.log("author= "+author) plugintools.add_item(action="", title = '[B][COLOR lightyellow]' + namelist + '[/B][/COLOR]' , url = "" , thumbnail = thumbnail , fanart = fanart, isPlayable = False , folder = False) # Items de la lista data = plugintools.find_single_match(data, '"stations"(.*?)]') matches = plugintools.find_multiple_matches(data, '"name"(.*?)}') for entry in matches: if entry.find("isHost") <= 0: title = plugintools.find_single_match(entry,'(.*?)\n') title = title.replace(": ", "") title = title.replace('"', "") title = title.replace(",", "") url = plugintools.find_single_match(entry,'"url":(.*?)\n') url = url.replace('"', "") url = url.strip() params["url"]=url server_rtmp(params) thumbnail = plugintools.find_single_match(entry,'"image":(.*?)\n') thumbnail = thumbnail.replace('"', "") thumbnail = thumbnail.replace(',', "") thumbnail = thumbnail.strip() plugintools.log("thumbnail= "+thumbnail) # Control por si en la lista no aparece el logo en cada entrada if thumbnail == "" : thumbnail = params.get("thumbnail") plugintools.add_item( action="play" , title = '[COLOR white] ' + title + '[COLOR green] ['+ params.get("server") + '][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True ) else: title = plugintools.find_single_match(entry,'(.*?)\n') title = title.replace(": ", "") title = title.replace('"', "") title = title.replace(",", "") url = plugintools.find_single_match(entry,'"url":(.*?)\n') url = url.replace('"', "") url = url.strip() if url.find("allmyvideos")>= 0: url = url.replace(",", "") plugintools.log("url= "+url) fanart = params.get("extra") thumbnail = plugintools.find_single_match(entry,'"image":(.*?)\n') thumbnail = thumbnail.replace('"', "") thumbnail = thumbnail.replace(',', "") thumbnail = thumbnail.strip() plugintools.log("thumbnail= "+thumbnail) if thumbnail == "": thumbnail = params.get("thumbnail") plugintools.add_item( action="allmyvideos" , title = title + ' [COLOR lightyellow][Allmyvideos][/COLOR]' , url = url , fanart = fanart , thumbnail = thumbnail , folder = 
def json_items(params):
    plugintools.log("[JavierTV-0.3.0].json_items " + repr(params))
    data = plugintools.read(params.get("url"))

    # List title and author
    match = plugintools.find_single_match(data, '"name"(.*?)"url"').split(",")
    namelist = match[0].strip().replace('"', "").replace(": ", "")
    author = match[1].strip().replace('"author":', "").replace('"', "")
    fanart = params.get("extra")
    thumbnail = params.get("thumbnail")
    plugintools.log("title= " + namelist)
    plugintools.log("author= " + author)
    plugintools.add_item(action="", title='[B][COLOR lightyellow]' + namelist + '[/B][/COLOR]',
                         url="", thumbnail=thumbnail, fanart=fanart, isPlayable=False, folder=False)

    # List items (the "JSON" is parsed line-oriented with regexes)
    data = plugintools.find_single_match(data, '"stations"(.*?)]')
    matches = plugintools.find_multiple_matches(data, '"name"(.*?)}')
    hosters = [("allmyvideos", "allmyvideos", '[COLOR lightyellow][Allmyvideos][/COLOR]'),
               ("streamcloud", "streamcloud", '[COLOR lightskyblue][Streamcloud][/COLOR]'),
               ("played.to", "playedto", '[COLOR lavender][Played.to][/COLOR]'),
               ("vidspot", "vidspot", '[COLOR palegreen][Vidspot][/COLOR]'),
               ("vk.com", "vk", '[COLOR royalblue][Vk][/COLOR]'),
               ("nowvideo", "nowvideo", '[COLOR red][Nowvideo][/COLOR]')]
    for entry in matches:
        title = plugintools.find_single_match(entry, '(.*?)\n')
        title = title.replace(": ", "").replace('"', "").replace(",", "")
        url = plugintools.find_single_match(entry, '"url":(.*?)\n').replace('"', "").strip()
        thumbnail = plugintools.find_single_match(entry, '"image":(.*?)\n')
        thumbnail = thumbnail.replace('"', "").replace(',', "").strip()
        plugintools.log("thumbnail= " + thumbnail)
        # Fall back when the list has no per-entry logo
        if thumbnail == "":
            thumbnail = params.get("thumbnail")
        if entry.find("isHost") <= 0:
            # Direct (RTMP-style) channel
            params["url"] = url
            server_rtmp(params)
            plugintools.add_item(action="play",
                                 title='[COLOR white] ' + title + '[COLOR green] [' + params.get("server") + '][/COLOR]',
                                 url=url, thumbnail=thumbnail, fanart=fanart, folder=False, isPlayable=True)
            continue
        # Hoster-backed entry
        url = url.replace(",", "")
        plugintools.log("url= " + url)
        for needle, action, label in hosters:
            if url.find(needle) >= 0:
                plugintools.add_item(action=action, title=title + ' ' + label, url=url,
                                     fanart=fanart, thumbnail=thumbnail, folder=False, isPlayable=True)
                break
        else:
            # Channels XBMC can't play yet
            params["url"] = url
            server_rtmp(params)
            plugintools.add_item(action="play",
                                 title='[COLOR red] ' + title + ' [' + params.get("server") + '][/COLOR]',
                                 url=url, fanart=fanart, thumbnail=thumbnail, folder=False, isPlayable=True)
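# If the playlist is actually valid JSON, the regex parsing above can be
# replaced wholesale (illustrative sketch; "name"/"url"/"image"/"stations"
# are the field names the regexes already look for):
def _json_stations_sketch(raw):
    import json
    doc = json.loads(raw)
    return [(s.get("name", ""), s.get("url", ""), s.get("image", ""))
            for s in doc.get("stations", [])]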
plugintools.find_single_match(entry,"<media\:descriptio[^>]+>([^<]+)</media\:description>") thumbnail = plugintools.find_single_match(entry,"<media\:thumbnail url='([^']+)'") url = plugintools.find_single_match(entry,"<content type\='application/atom\+xml\;type\=feed' src='([^']+)'/>") fanart = art + 'youtube.png' plugintools.add_item( action="youtube_videos" , title=title , plot=plot , url=url , thumbnail=thumbnail , fanart=fanart , folder=True ) plugintools.log("fanart= "+fanart) # Muestra todos los vídeos del playlist de Youtube def youtube_videos(params): plugintools.log("[Javier-0.3.0].youtube_videos "+repr(params)) # Fetch video list from YouTube feed data = plugintools.read( params.get("url") ) plugintools.log("data= "+data) # Extract items from feed pattern = "" matches = plugintools.find_multiple_matches(data,"<entry(.*?)</entry>") for entry in matches: plugintools.log("entry="+entry) # Not the better way to parse XML, but clean and easy title = plugintools.find_single_match(entry,"<titl[^>]+>([^<]+)</title>") title = title.replace("I Love Handball | ","") plot = plugintools.find_single_match(entry,"<summa[^>]+>([^<]+)</summa") thumbnail = plugintools.find_single_match(entry,"<media\:thumbnail url='([^']+)'") fanart = art+'youtube.png' video_id = plugintools.find_single_match(entry,"http\://www.youtube.com/watch\?v\=([0-9A-Za-z_-]{11})") url = "plugin://plugin.video.youtube/?path=/root/video&action=play_video&videoid="+video_id # Appends a new item to the xbmc item list plugintools.add_item( action="play" , title=title , plot=plot , url=url , thumbnail=thumbnail , fanart=fanart , isPlayable=True, folder=False ) def server_rtmp(params): plugintools.log("[JavierTV-0.3.0].server_rtmp " + repr(params)) url = params.get("url") plugintools.log("URL= "+url) if url.find("iguide.to") >= 0: params["server"] = 'iguide' return params if url.find("freetvcast.pw") >= 0: params["server"] = 'freetvcast' return params elif url.find("9stream") >= 0: params["server"] = '9stream' return params elif url.find("freebroadcast") >= 0: params["server"] = 'freebroadcast' return params elif url.find("goodgame.ru") >= 0: params["server"] = 'goodgame.ru' return params elif url.find("hdcast") >= 0: params["server"] = 'hdcast' return params elif url.find("sharecast") >= 0: params["server"] = 'sharecast' return params elif url.find("cast247") >= 0: params["server"] = 'cast247' return params elif url.find("castalba") >= 0: params["server"] = 'castalba' return params elif url.find("direct2watch") >= 0: params["server"] = 'direct2watch' return params elif url.find("vaughnlive") >= 0: params["server"] = 'vaughnlive' return params elif url.find("totalplay") >= 0: params["server"] = 'vaughnlive' return params elif url.find("shidurlive") >= 0: params["server"] = 'shidurlive' return params elif url.find("everyon") >= 0: params["server"] = 'everyon' return params elif url.find("iviplanet") >= 0: params["server"] = 'iviplanet' return params elif url.find("cxnlive") >= 0: params["server"] = 'cxnlive' return params elif url.find("ucaster") >= 0: params["server"] = 'ucaster' return params elif url.find("mediapro") >= 0: params["server"] = 'mediapro' return params elif url.find("veemi") >= 0: params["server"] = 'veemi' return params elif url.find("yukons.net") >= 0: params["server"] = 'yukons.net' return params elif url.find("janjua") >= 0: params["server"] = 'janjua' return params elif url.find("mips") >= 0: params["server"] = 'mips' return params elif url.find("zecast") >= 0: params["server"] = 'zecast' return params elif 
url.find("vertvdirecto") >= 0: params["server"] = 'vertvdirecto' return params elif url.find("9stream") >= 0: params["server"] = '9stream' return params elif url.find("filotv") >= 0: params["server"] = 'filotv' return params elif url.find("dinozap") >= 0: params["server"] = 'dinozap' return params elif url.find("ezcast") >= 0: params["server"] = 'ezcast' return params elif url.find("flashstreaming") >= 0: params["server"] = 'flashstreaming' return params elif url.find("shidurlive") >= 0: params["server"] = 'shidurlive' return params elif url.find("multistream") >= 0: params["server"] = 'multistream' return params elif url.find("playfooty") >= 0: params["server"] = 'playfooty' return params elif url.find("flashtv") >= 0: params["server"] = 'flashtv' return params elif url.find("04stream") >= 0: params["server"] = '04stream' return params elif url.find("vercosas") >= 0: params["server"] = 'vercosasgratis' return params elif url.find("dcast") >= 0: params["server"] = 'dcast' return params elif url.find("playfooty") >= 0: params["server"] = 'playfooty' return params elif url.find("pvtserverz") >= 0: params["server"] = 'pvtserverz' return params else: params["server"] = 'undefined' return params def launch_rtmp(params): plugintools.log("[JavierTV-0.3.0].launch_rtmp " + repr(params)) url = params.get("url") plugintools.log("URL= "+url) title = params.get("title") title = title.replace("[/COLOR]", "") title = title.strip() print title if title.endswith("[9stream]") == True: print '9stream' params["server"] = '9stream' ninestreams(params) elif title.endswith("[iguide]") == True: params["server"] = 'iguide' plugintools.play_resolved_url(url) elif title.endswith("[vercosasgratis]") == True: print 'vercosasgratis' params["server"] = 'vercosasgratis' vercosas(params) elif title.endswith("[freebroadcast]") == True: print 'freebroadcast' params["server"] = 'freebroadcast' freebroadcast(params) elif title.endswith("[ucaster]") == True: params["server"] = 'ucaster' plugintools.play_resolved_url(url) elif title.endswith("[direct2watch]") == True: params["server"] = 'direct2watch' directwatch(params) elif title.endswith("[shidurlive]") == True: params["server"] = 'shidurlive' shidurlive(params) elif title.endswith("[cast247]") == True: params["server"] = 'cast247' castdos(params) elif url.find("hdcast") >= 0: params["server"] = 'hdcast' plugintools.play_resolved_url(url) elif url.find("janjua") >= 0: params["server"] = 'janjua' plugintools.play_resolved_url(url) elif url.find("mips") >= 0: params["server"] = 'mips' plugintools.play_resolved_url(url) elif url.find("zecast") >= 0: params["server"] = 'zecast' plugintools.play_resolved_url(url) elif url.find("filotv") >= 0: params["server"] = 'filotv' print "filotv" plugintools.play_resolved_url(url) elif url.find("ezcast") >= 0: params["server"] = 'ezcast' plugintools.play_resolved_url(url) elif url.find("flashstreaming") >= 0: params["server"] = 'flashstreaming' plugintools.play_resolved_url(url) elif url.find("shidurlive") >= 0: params["server"] = 'shidurlive' plugintools.play_resolved_url(url) elif url.find("multistream") >= 0: params["server"] = 'multistream' print "multistream" plugintools.play_resolved_url(url) elif url.find("playfooty") >= 0: params["server"] = 'playfooty' plugintools.play_resolved_url(url) elif url.find("flashtv") >= 0: params["server"] = 'flashtv' print "flashtv" plugintools.play_resolved_url(url) elif url.find("freetvcast") >= 0: params["server"] = 'freetvcast' print "freetvcast" freetvcast(params) elif url.find("04stream") >= 0: 
params["server"] = '04stream' plugintools.play_resolved_url(url) elif url.find("sharecast") >= 0: params["server"] = 'sharecast' plugintools.play_resolved_url(url) elif url.find("vaughnlive") >= 0: params["server"] = 'vaughnlive' resolve_vaughnlive(params) elif url.find("goodcast") >= 0: params["server"] = 'goodcast' plugintools.play_resolved_url(url) elif url.find("dcast.tv") >= 0: params["server"] = 'dcast.tv' plugintools.play_resolved_url(url) elif url.find("castalba") >= 0: params["server"] = 'castalba' castalba(params) elif url.find("tutelehd.com") >= 0: params["server"] = 'tutelehd.com' plugintools.play_resolved_url(url) elif url.find("flexstream") >= 0: params["server"] = 'flexstream' plugintools.play_resolved_url(url) elif url.find("xxcast") >= 0: params["server"] = 'xxcast' plugintools.play_resolved_url(url) elif url.find("vipi.tv") >= 0: params["server"] = 'vipi.tv' plugintools.play_resolved_url(url) elif url.find("watchjsc") >= 0: params["server"] = 'watchjsc' plugintools.play_resolved_url(url) elif url.find("zenex.tv") >= 0: params["server"] = 'zenex.tv' plugintools.play_resolved_url(url) elif url.find("castto") >= 0: params["server"] = 'castto' plugintools.play_resolved_url(url) elif url.find("tvzune") >= 0: params["server"] = 'tvzune' plugintools.play_resolved_url(url) elif url.find("flashcast") >= 0: params["server"] = 'flashcast' plugintools.play_resolved_url(url) elif url.find("ilive.to") >= 0: params["server"] = 'ilive.to' print "iliveto" plugintools.play_resolved_url(url) elif url.find("Direct2Watch") >= 0: params["server"] = 'Direct2Watch' print "direct2watch" plugintools.play_resolved_url(url) else: params["server"] = 'undefined' print "ninguno" plugintools.play_resolved_url(url) def peliseries(params): plugintools.log("[JavierTV-0.3.0].peliseries " +repr(params)) # Abrimos archivo remoto url = params.get("url") filepelis = urllib2.urlopen(url) # Creamos archivo local para pegar las entradas plot = params.get("plot") plot = parser_title(plot) if plot == "": title = params.get("title") title = parser_title(title) filename = title + ".m3u" fh = open(playlists + filename, "wb") else: filename = params.get("plot") + ".m3u" fh = open(playlists + filename, "wb") plugintools.log("filename= "+filename) url = params.get("url") plugintools.log("url= "+url) #open the file for writing fw = open(playlists + filename, "wb") #open the file for writing fh = open(playlists + 'filepelis.m3u', "wb") fh.write(filepelis.read()) fh.close() fw = open(playlists + filename, "wb") fr = open(playlists + 'filepelis.m3u', "r") fr.seek(0) num_items = len(fr.readlines()) print num_items fw.seek(0) fr.seek(0) data = fr.readline() fanart = params.get("extra") thumbnail = params.get("thumbnail") fw.write('#EXTM3U:"background"='+fanart+',"thumbnail"='+thumbnail) fw.write("#EXTINF:-1,[COLOR lightyellow][I]playlists / " + filename + '[/I][/COLOR]' + '\n\n') i = 0 while i <= num_items: if data == "": data = fr.readline() data = data.strip() plugintools.log("data= " +data) i = i + 1 print i continue elif data.find("http") >= 0 : data = data.split("http") chapter = data[0] chapter = chapter.strip() url = "http" + data[1] url = url.strip() plugintools.log("url= "+url) fw.write("\n#EXTINF:-1," + chapter + '\n') fw.write(url + '\n\n') data = fr.readline() plugintools.log("data= " +data) i = i + 1 print i continue else: data = fr.readline() data = data.strip() plugintools.log("data= "+data) i = i + 1 print i continue fw.close() fr.close() params["ext"]='m3u' filename = filename.replace(".m3u", "") 
params["plot"]=filename params["title"]=filename # Capturamos de nuevo thumbnail y fanart os.remove(playlists + 'filepelis.m3u') simpletv_items(params) def tinyurl(params): plugintools.log("[JavierTV-0.3.0].tinyurl "+repr(params)) url = params.get("url") url_getlink = 'http://www.getlinkinfo.com/info?link=' +url plugintools.log("url_fixed= "+url_getlink) request_headers=[] request_headers.append(["User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.65 Safari/537.31"]) body,response_headers = plugintools.read_body_and_headers(url_getlink, headers=request_headers) plugintools.log("data= "+body) r = plugintools.find_multiple_matches(body, '<dt class="link-effective-url">Effective URL</dt>(.*?)</a></dd>') xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('JavierTV', "Redireccionando enlace...", 3 , art+'icon.png')) for entry in r: entry = entry.replace("<dd><a href=", "") entry = entry.replace('rel="nofollow">', "") entry = entry.split('"') entry = entry[1] entry = entry.strip() plugintools.log("vamos= "+entry) if entry.startswith("http"): plugintools.play_resolved_url(entry) # Conexión con el servicio longURL.org para obtener URL original def longurl(params): plugintools.log("[JavierTV-0.3.0].longURL "+repr(params)) url = params.get("url") url_getlink = 'http://api.longurl.org/v2/expand?url=' +url plugintools.log("url_fixed= "+url_getlink) try: request_headers=[] request_headers.append(["User-Agent","Application-Name/3.7"]) body,response_headers = plugintools.read_body_and_headers(url_getlink, headers=request_headers) plugintools.log("data= "+body) # <long-url><![CDATA[http://85.25.43.51:8080/DE_skycomedy?u=euorocard:p=besplatna]]></long-url> # xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('PalcoTV', "Redireccionando enlace...", 3 , art+'icon.png')) longurl = plugintools.find_single_match(body, '<long-url>(.*?)</long-url>') longurl = longurl.replace("<![CDATA[", "") longurl = longurl.replace("]]>", "") plugintools.log("longURL= "+longurl) if longurl.startswith("http"): plugintools.play_resolved_url(longurl) except: play(params) def opentxt(self): texto = xbmcgui.ControlTextBox (100, 250, 300, 300, textColor='0xFFFFFFFF') texto.setText('log.txt') texto.setVisible(window) def arenavision_parser(params): plugintools.log("[JavierTV-0.3.0].arenavision_parser "+repr(params)) url = params.get("url") thumbnail = params.get("thumbnail") title = params.get("title") plugintools.log("title= "+title) data = plugintools.read(url) plugintools.add_item(action="" , title=title, url=url, thumbnail=thumbnail , fanart='http://wallpaper-download.net/wallpapers/football-wallpapers-football-stadium-wallpaper-wallpaper-36537.jpg' , folder = False, isPlayable = False) params["fanart"]=fanart plugintools.log("fanart= "+fanart) matches = plugintools.find_multiple_matches(data, '<li><a href=(.*?)>(.*?)</a></li>') for url, title in matches: url = url.replace("'", "") if title.startswith("AV") == True: parse_av_channel(title, url, params) def parse_av_channel(title, url, params): plugintools.log("[JavierTV-0.3.0].parse_av_channel "+repr(params)) data = plugintools.read(url) fanart = params.get("fanart") plugintools.log("fanart= "+fanart) thumbnail = params.get("thumbnail") url = plugintools.find_single_match(data, 'sop://(.*?)>') url = url.replace('"', "") url = 'sop://' + url url = 'plugin://plugin.video.p2p-streams/?url=' + url + '&mode=2&name=' + title plugintools.add_item(action="play" , title=title, url=url, thumbnail=thumbnail , 
def opentxt(self):
    # NOTE: "window" is not defined in this scope; setVisible() will raise a NameError
    texto = xbmcgui.ControlTextBox(100, 250, 300, 300, textColor='0xFFFFFFFF')
    texto.setText('log.txt')
    texto.setVisible(window)


def arenavision_parser(params):
    plugintools.log("[JavierTV-0.3.0].arenavision_parser " + repr(params))
    url = params.get("url")
    thumbnail = params.get("thumbnail")
    title = params.get("title")
    plugintools.log("title= " + title)
    data = plugintools.read(url)
    plugintools.add_item(action="", title=title, url=url, thumbnail=thumbnail,
                         fanart='http://wallpaper-download.net/wallpapers/football-wallpapers-football-stadium-wallpaper-wallpaper-36537.jpg',
                         folder=False, isPlayable=False)
    # NOTE: relies on a module-level "fanart" being set elsewhere
    params["fanart"] = fanart
    plugintools.log("fanart= " + fanart)
    matches = plugintools.find_multiple_matches(data, '<li><a href=(.*?)>(.*?)</a></li>')
    for url, title in matches:
        url = url.replace("'", "")
        if title.startswith("AV"):
            parse_av_channel(title, url, params)


def parse_av_channel(title, url, params):
    plugintools.log("[JavierTV-0.3.0].parse_av_channel " + repr(params))
    data = plugintools.read(url)
    fanart = params.get("fanart")
    plugintools.log("fanart= " + fanart)
    thumbnail = params.get("thumbnail")
    url = plugintools.find_single_match(data, 'sop://(.*?)>').replace('"', "")
    url = 'plugin://plugin.video.p2p-streams/?url=sop://' + url + '&mode=2&name=' + title
    plugintools.add_item(action="play", title=title, url=url, thumbnail=thumbnail,
                         fanart='http://wallpaper-download.net/wallpapers/football-wallpapers-football-stadium-wallpaper-wallpaper-36537.jpg',
                         folder=False, isPlayable=True)


def encode_url(url):
    # NOTE: urlencode() expects a mapping or a sequence of pairs, not a bare
    # string; for a single URL, urllib.quote_plus(url) is the right call
    url_fixed = urlencode(url)
    print url_fixed


def seriecatcher(params):
    plugintools.log("[JavierTV-0.3.0].seriecatcher " + repr(params))
    url = params.get("url")
    fanart = params.get("extra")
    data = plugintools.read(url)
    temp = plugintools.find_multiple_matches(data, '<i class=\"glyphicon\"></i>(.*?)</a>')
    SelectTemp(params, temp)


def GetSerieChapters(params):
    plugintools.log("[JavierTV-0.3.0].GetSerieChapters " + repr(params))
    season = params.get("season")
    data = plugintools.read(params.get("url"))
    # the original looped "for entry in season:" over the characters of the
    # season string, re-listing every chapter once per character; once is enough
    season = plugintools.find_multiple_matches(data, season + '(.*?)</table>')[0]
    url_cap = plugintools.find_multiple_matches(season, '<a href=\"/capitulo(.*?)\" class=\"color4\"')
    title = plugintools.find_multiple_matches(season, 'class=\"color4\">(.*?)</a>')
    fanart = params.get("extra")
    for i in range(len(url_cap)):
        url_cap_fixed = 'http://seriesadicto.com/capitulo/' + url_cap[i]
        GetSerieLinks(fanart, url_cap_fixed, i + 1, title[i])


def GetSerieLinks(fanart, url_cap_fixed, i, title_fixed):
    plugintools.log("[JavierTV-0.3.0].GetSerieLinks")
    data = plugintools.read(url_cap_fixed)
    thumbnail = plugintools.find_single_match(data, 'src=\"/img/series/(.*?)"')
    thumbnail_fixed = 'http://seriesadicto.com/img/series/' + thumbnail
    # One regex per hoster repeated the same add_item; drive it from a table
    hosters = [('allmyvideos.net/', '[COLOR lightyellow] [Allmyvideos][/COLOR]'),
               ('streamcloud.eu/', '[COLOR lightskyblue] [Streamcloud][/COLOR]'),
               ('vidspot.net/', '[COLOR palegreen] [Vidspot][/COLOR]'),
               ('played.to/', '[COLOR lavender] [Played.to][/COLOR]')]
    for prefix, label in hosters:
        for entry in plugintools.find_multiple_matches(data, prefix + '(.*?)"'):
            plugintools.add_item(action="play", title=title_fixed + label,
                                 url='http://' + prefix + entry, thumbnail=thumbnail_fixed,
                                 fanart=fanart, folder=False, isPlayable=True)
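# xbmcgui.Dialog().select() takes a heading plus a plain list of labels, so
# the season picker below needs no per-length branches (hypothetical labels):
#
#   dialog = xbmcgui.Dialog()
#   idx = dialog.select('JavierTV', ['Temporada 1', 'Temporada 2'])
#   # idx is -1 if the user cancels, otherwise the 0-based choice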
[temp[0], temp[1],temp[2], temp[3], temp[4], temp[5], temp[6]]) if seasons == 8: selector = dialog.select('JavierTV', [temp[0], temp[1],temp[2], temp[3], temp[4], temp[5], temp[6], temp[7]]) if seasons == 9: selector = dialog.select('JavierTV', [temp[0], temp[1],temp[2], temp[3], temp[4], temp[5], temp[6], temp[7], temp[8]]) if seasons == 10: selector = dialog.select(Javier'TV', [temp[0], temp[1],temp[2], temp[3], temp[4], temp[5], temp[6], temp[7], temp[8], temp[9]]) i = 0 while i<= seasons : if selector == i: params["season"] = temp[i] GetSerieChapters(params) i = i + 1 def m3u_items(title): plugintools.log("[JavierTV-0.3.0].m3u_items= "+title) thumbnail = art + 'icon.png' fanart = art + 'fanart.jpg' only_title = title if title.find("tvg-logo") >= 0: thumbnail = re.compile('tvg-logo="(.*?)"').findall(title) num_items = len(thumbnail) print 'num_items',num_items if num_items == 0: thumbnail = 'm3u.png' else: thumbnail = thumbnail[0] #plugintools.log("thumbnail= "+thumbnail) only_title = only_title.replace('tvg-logo="', "") only_title = only_title.replace(thumbnail, "") if title.find("tvg-wall") >= 0: fanart = re.compile('tvg-wall="(.*?)"').findall(title) fanart = fanart[0] only_title = only_title.replace('tvg-wall="', "") only_title = only_title.replace(fanart, "") if title.find("group-title") >= 0: cat = re.compile('group-title="(.*?)"').findall(title) if len(cat) == 0: cat = "" else: cat = cat[0] plugintools.log("m3u_categoria= "+cat) only_title = only_title.replace('group-title=', "") only_title = only_title.replace(cat, "") else: cat = "" if title.find("tvg-id") >= 0: title = title.replace('”', '"') title = title.replace('“', '"') tvgid = re.compile('tvg-id="(.*?)"').findall(title) print 'tvgid',tvgid tvgid = tvgid[0] plugintools.log("m3u_categoria= "+tvgid) only_title = only_title.replace('tvg-id=', "") only_title = only_title.replace(tvgid, "") else: tvgid = "" if title.find("tvg-name") >= 0: tvgname = re.compile('tvg-name="(.*?)').findall(title) tvgname = tvgname[0] plugintools.log("m3u_categoria= "+tvgname) only_title = only_title.replace('tvg-name=', "") only_title = only_title.replace(tvgname, "") else: tvgname = "" only_title = only_title.replace('"', "") #plugintools.log("m3u_thumbnail= "+thumbnail) #plugintools.log("m3u_fanart= "+fanart) #plugintools.log("only_title= "+only_title) return thumbnail, fanart, cat, only_title, tvgid, tvgname def xml_skin(): plugintools.log("[JavierTV-0.3.0].xml_skin") mastermenu = plugintools.get_setting("mastermenu") xmlmaster = plugintools.get_setting("xmlmaster") SelectXMLmenu = plugintools.get_setting("SelectXMLmenu") # values="Juarrox|Reig|Simple|Sebas|Pastebin|Personalizado" if xmlmaster == 'true': if SelectXMLmenu == '0': mastermenu = 'http://pastebin.com/raw.php?i=5piRTXuq' plugintools.log("[Javier.xml_skin: "+SelectXMLmenu) # Control para ver la intro de JavierTV ver_intro = plugintools.get_setting("ver_intro") if ver_intro == "true": xbmc.Player(xbmc.PLAYER_CORE_AUTO).play(art + 'intro.mp4') elif SelectXMLmenu == '1': mastermenu = 'http://pastebin.com/raw.php?i=5piRTXuq' plugintools.log("[JavierTV.xml_skin: "+SelectXMLmenu) # Control para ver la intro de Reig ver_intro = plugintools.get_setting("ver_intro") if ver_intro == "true": xbmc.Player(xbmc.PLAYER_CORE_AUTO).play(art + 'Reig.mp4') elif SelectXMLmenu == '2': mastermenu = 'http://pastebin.com/raw.php?i=5piRTXuq' plugintools.log("[JavierTV.xml_skin: "+SelectXMLmenu) elif SelectXMLmenu == '3': mastermenu = 'http://pastebin.com/raw.php?i=5piRTXuq' plugintools.log("[JavierTV.xml_skin: 
"+SelectXMLmenu) # Control para ver la intro de Sebas ver_intro = plugintools.get_setting("ver_intro") if ver_intro == "true": xbmc.Player(xbmc.PLAYER_CORE_AUTO).play(art + 'Sebas.mp4') elif SelectXMLmenu == '4': # Pastebin id_pastebin = plugintools.get_setting("id_pastebin") if id_pastebin == "": plugintools.log("[JavierTV.xml_skin: No definido") mastermenu = 'http://pastebin.com/raw.php?i=5piRTXuq' else: mastermenu = 'http://pastebin.com/raw.php?i=5piRTXuq' +id_pastebin plugintools.log("[Javier.xml_skin: "+mastermenu) elif SelectXMLmenu == '5': # Skin personalizado if mastermenu == "": plugintools.log("[Javier.xml_skin: No definido") mastermenu = 'http://pastebin.com/raw.php?i=5piRTXuq' # Control para ver la intro de JavierTV ver_intro = plugintools.get_setting("ver_intro") if ver_intro == "true": xbmc.Player(xbmc.PLAYER_CORE_AUTO).play(art + 'intro.mp4') else: mastermenu = SelectXMLmenu else: # xmlmaster = False (no activado), menú por defecto mastermenu = 'http://pastebin.com/raw.php?i=5piRTXuq' # Control para ver la intro deJavier TV ver_intro = plugintools.get_setting("ver_intro") if ver_intro == "true": xbmc.Player(xbmc.PLAYER_CORE_AUTO).play(art + 'intro.mp4') return mastermenu run()
javier3407/Plugin.Video.Eljavitv.iptv
default.py
Python
gpl-3.0
200,215
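The m3u_items helper in the record above extracts each tvg-* attribute from an EXTINF line with a separate regex plus string-replace pass. As a minimal sketch of the same parsing done in one scan (parse_extinf_attrs is a hypothetical helper, not part of the plugin), every key="value" pair can be captured with a single pattern:

import re

ATTR_RE = re.compile(r'([\w-]+)="(.*?)"')

def parse_extinf_attrs(title):
    # Collect every key="value" attribute (tvg-logo, tvg-id, group-title, ...)
    # into a dict, then strip the matched attribute text out of the title,
    # mirroring what m3u_items does with one regex per attribute.
    attrs = dict(ATTR_RE.findall(title))
    only_title = ATTR_RE.sub('', title)
    return attrs, only_title

# attrs, rest = parse_extinf_attrs('#EXTINF:-1 tvg-logo="http://x/l.png" group-title="Sports", Canal 1')
# attrs == {'tvg-logo': 'http://x/l.png', 'group-title': 'Sports'}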
import readline

class completer:
    def __init__(self, cmds, jids):
        readline.set_completer(self.complete)
        readline.parse_and_bind("tab: complete")
        self.cmds = cmds
        self.jids = jids

    def set_jids(self, jids):
        self.jids = jids

    def complete(self, word, state, choices=None):
        line = readline.get_line_buffer()
        if choices is None:
            # initial call
            choices = self.cmds
        if line.strip() == '':
            choice = choices[state]
            return choice
        cmds = [s for s in choices if s.startswith(word)]
        if len(cmds) == 1 and state == 0:
            return cmds[0] + ' '
        elif line.endswith(' '):
            pass
        else:
            return cmds[state]
hanzz/spectrum
spectrumctl/spectrum/completer.py
Python
gpl-2.0
652
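A short usage sketch for the completer class above, wiring it into an interactive loop; the import path is inferred from the record's path field, and the command list and JIDs are made-up placeholders:

from spectrum.completer import completer  # assumed importable per the path above

cmds = ['help', 'list', 'register', 'unregister']  # placeholder commands
c = completer(cmds, [])
c.set_jids(['alice@example.com'])                  # placeholder JIDs

try:
    while True:
        line = raw_input('spectrumctl> ')  # Python 2, matching the module's era
        if line.strip() == 'quit':
            break
except EOFError:
    pass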
#!/usr/bin/env python3 import sys from makediary.DiaryInfo import DiaryInfo from makediary.DSC import preamble, postamble from makediary.PostscriptPage import PostscriptPage class ImageFilePage(PostscriptPage): """A page whose content is made up of an image file.""" def __init__(self, dinfo, imgfilename, imagetitle=None, left=0.0, right=1.0): PostscriptPage.__init__(self, dinfo) self.imgfilename = imgfilename self.imagetitle = imagetitle assert left >= 0.0 assert right > 0.0 assert left < 1.0 assert right <= 1.0 assert left < right assert (right - left) > 0.1 self.left = left self.right = right def body(self): imgfilepathname = None # If we are given a full or relative-to-pwd path to the file, use that. if self.imgfilename.startswith('/') or self.imgfilename.startswith('./') \ or self.imgfilename.startswith('../'): imgfilepathname = self.imgfilename else: # Otherwise, construct the full path to the file. If we are running from the # development directory, or otherwise not from a full path name, look at relative # locations first. if self.di.myname.startswith('.'): searchpath = ['.', '..', '../..'] for p in sys.path: searchpath.append(p) else: searchpath = sys.path #print >>sys.stderr, "searchpath is %s" % str(searchpath) for path in searchpath: imgfilepathname = self.searchfor(path, 'image', self.imgfilename) if imgfilepathname: break if imgfilepathname: inset = self.pWidth / 200.0 sclip = "newpath %5.3f %5.3f %5.3f %5.3f rectclip\n" % \ (self.pLeft, self.pBottom, self.pWidth, self.pHeight) # Calculate the bounds of the entire image. The image will be clipped to the page # layout boundaries. w = self.pWidth / (self.right - self.left) - 2*inset x = self.pLeft - w * self.left + inset y = self.pBottom + inset h = self.pHeight - 2*inset imgp = self.image(imgfilepathname, x, y, w, h) if self.imagetitle: return self.title(self.imagetitle) + sclip + imgp else: return sclip + imgp else: print("Can't find %s" % self.imgfilename, file=sys.stderr) return "%% -- Can't find %s\n" % self.imgfilename class TwoImageFilePages: """Two pages whose content is made up of two halves of one image file.""" def __init__(self, dinfo, imgfilename, imagetitle=None, coverage=0.5): self.dinfo = dinfo self.imgfilename = imgfilename self.imagetitle = imagetitle self.coverage = coverage def page(self): s = '' s = s + ImageFilePage(self.dinfo, self.imgfilename, self.imagetitle, 0.0, self.coverage).page() s = s + ImageFilePage(self.dinfo, self.imgfilename, self.imagetitle, 1.0-self.coverage, 1.0).page() return s if __name__ == '__main__': di = DiaryInfo(sys.argv[0], sys.argv[1:]) print(preamble(di)) print(ImageFilePage(di, './makediary-qrcode.png', './makediary-qrcode.png').page()) print(TwoImageFilePages(di, './makediary-qrcode.png', './makediary-qrcode.png').page()) print(postamble(di))
russells/makediary
makediary/ImageFilePages.py
Python
gpl-2.0
3,593
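The left/right fractions in ImageFilePage.body() above scale the image so each page shows only a slice of it: ignoring the small inset, the drawn width is pWidth / (right - left) and the x origin shifts left by w * left, after which rectclip trims to the page. A quick sketch of that arithmetic for the default TwoImageFilePages split (coverage=0.5), with made-up page metrics:

def image_bounds(p_left, p_bottom, p_width, p_height, left, right):
    # Mirror of the bounds arithmetic in ImageFilePage.body(), inset omitted.
    w = p_width / (right - left)   # full image width, wider than one page
    x = p_left - w * left          # shift so the wanted slice lands on the page
    return x, p_bottom, w, p_height

# Two half pages tiling one image (coverage = 0.5), page 100 wide at x=20:
print(image_bounds(20, 20, 100, 150, 0.0, 0.5))  # (20.0, 20, 200.0, 150): left half visible
print(image_bounds(20, 20, 100, 150, 0.5, 1.0))  # (-80.0, 20, 200.0, 150): right half visible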
#!/usr/bin/python
# -*- coding: utf8 -*-

#from qt import *
from PyQt4 import QtCore, QtGui

# Todo: handle the case where an item is selected, to change its color
#class MyQListBoxText(QListBoxText):
#    color = {'OK':QColor(0,170,127),'NOOK':QColor(170,2,86),'BADCOMPRESSION':QColor('lightgray'),'SELECTED':QColor('lightgray')}
#    state = 'NOOK'
#    def paint(self, arg2):
#        colorAppliquee = self.color[self.state]
#        arg2.setBackgroundColor(colorAppliquee)
#        arg2.setPen(colorAppliquee)
#        QListBoxText.paint(self, arg2)
#    def setState(self, state):
#        if state in self.color.keys():
#            self.state = state

class MyQListBoxText(QtGui.QListWidgetItem):
    color = {'OK': QtGui.QColor(0, 170, 127), 'NOOK': QtGui.QColor(170, 2, 86), 'BADCOMPRESSION': QtGui.QColor('lightgray')}
    state = 'NOOK'

    def paintEvent(self, event):
        print "PaintEvent"
        painter = QtGui.QPainter()
        colorAppliquee = self.color[self.state]
        painter.setBackground(QtGui.QBrush(colorAppliquee))
        painter.setPen(colorAppliquee)
        QtGui.QListWidget.paintEvent(self, event)

    def setState(self, state):
        if state in self.color.keys():
            self.state = state
            self.setTextColor(self.color[self.state])
naparuba/pyndsgest
myQListBoxText.py
Python
gpl-2.0
1,300
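The class above effectively colors items through setTextColor in setState; the paintEvent override is dead code, since a QListWidgetItem is not a widget and is painted by its view, not via paintEvent. A minimal PyQt4 sketch of the same state-to-color mapping using only item APIs (StatefulItem and STATE_COLORS are illustrative names, not part of the project):

from PyQt4 import QtGui

STATE_COLORS = {
    'OK': QtGui.QColor(0, 170, 127),
    'NOOK': QtGui.QColor(170, 2, 86),
    'BADCOMPRESSION': QtGui.QColor('lightgray'),
}

class StatefulItem(QtGui.QListWidgetItem):
    def __init__(self, text, state='NOOK'):
        QtGui.QListWidgetItem.__init__(self, text)
        self.setState(state)

    def setState(self, state):
        # Recolor the item whenever its state changes; the view repaints it.
        if state in STATE_COLORS:
            self.state = state
            self.setForeground(QtGui.QBrush(STATE_COLORS[state]))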
# -*- coding: utf-8 -*- # This file is part of beets. # Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. """Tests for the command-line interface. """ from __future__ import division, absolute_import, print_function import os import shutil import re import subprocess import platform from copy import deepcopy import six import unittest from mock import patch, Mock from test import _common from test.helper import capture_stdout, has_program, TestHelper, control_stdin from beets import library from beets import ui from beets.ui import commands from beets import autotag from beets.autotag.match import distance from beets.mediafile import MediaFile from beets import config from beets import plugins from beets.util.confit import ConfigError from beets import util from beets.util import syspath class ListTest(unittest.TestCase): def setUp(self): self.lib = library.Library(':memory:') self.item = _common.item() self.item.path = 'xxx/yyy' self.lib.add(self.item) self.lib.add_album([self.item]) def _run_list(self, query=u'', album=False, path=False, fmt=u''): with capture_stdout() as stdout: commands.list_items(self.lib, query, album, fmt) return stdout def test_list_outputs_item(self): stdout = self._run_list() self.assertIn(u'the title', stdout.getvalue()) def test_list_unicode_query(self): self.item.title = u'na\xefve' self.item.store() self.lib._connection().commit() stdout = self._run_list([u'na\xefve']) out = stdout.getvalue() if six.PY2: out = out.decode(stdout.encoding) self.assertTrue(u'na\xefve' in out) def test_list_item_path(self): stdout = self._run_list(fmt=u'$path') self.assertEqual(stdout.getvalue().strip(), u'xxx/yyy') def test_list_album_outputs_something(self): stdout = self._run_list(album=True) self.assertGreater(len(stdout.getvalue()), 0) def test_list_album_path(self): stdout = self._run_list(album=True, fmt=u'$path') self.assertEqual(stdout.getvalue().strip(), u'xxx') def test_list_album_omits_title(self): stdout = self._run_list(album=True) self.assertNotIn(u'the title', stdout.getvalue()) def test_list_uses_track_artist(self): stdout = self._run_list() self.assertIn(u'the artist', stdout.getvalue()) self.assertNotIn(u'the album artist', stdout.getvalue()) def test_list_album_uses_album_artist(self): stdout = self._run_list(album=True) self.assertNotIn(u'the artist', stdout.getvalue()) self.assertIn(u'the album artist', stdout.getvalue()) def test_list_item_format_artist(self): stdout = self._run_list(fmt=u'$artist') self.assertIn(u'the artist', stdout.getvalue()) def test_list_item_format_multiple(self): stdout = self._run_list(fmt=u'$artist - $album - $year') self.assertEqual(u'the artist - the album - 0001', stdout.getvalue().strip()) def test_list_album_format(self): stdout = self._run_list(album=True, fmt=u'$genre') self.assertIn(u'the genre', stdout.getvalue()) self.assertNotIn(u'the album', stdout.getvalue()) class RemoveTest(_common.TestCase): def setUp(self): super(RemoveTest, self).setUp() self.io.install() 
self.libdir = os.path.join(self.temp_dir, b'testlibdir') os.mkdir(self.libdir) # Copy a file into the library. self.lib = library.Library(':memory:', self.libdir) item_path = os.path.join(_common.RSRC, b'full.mp3') self.i = library.Item.from_path(item_path) self.lib.add(self.i) self.i.move(True) def test_remove_items_no_delete(self): self.io.addinput('y') commands.remove_items(self.lib, u'', False, False, False) items = self.lib.items() self.assertEqual(len(list(items)), 0) self.assertTrue(os.path.exists(self.i.path)) def test_remove_items_with_delete(self): self.io.addinput('y') commands.remove_items(self.lib, u'', False, True, False) items = self.lib.items() self.assertEqual(len(list(items)), 0) self.assertFalse(os.path.exists(self.i.path)) def test_remove_items_with_force_no_delete(self): commands.remove_items(self.lib, u'', False, False, True) items = self.lib.items() self.assertEqual(len(list(items)), 0) self.assertTrue(os.path.exists(self.i.path)) def test_remove_items_with_force_delete(self): commands.remove_items(self.lib, u'', False, True, True) items = self.lib.items() self.assertEqual(len(list(items)), 0) self.assertFalse(os.path.exists(self.i.path)) class ModifyTest(unittest.TestCase, TestHelper): def setUp(self): self.setup_beets() self.album = self.add_album_fixture() [self.item] = self.album.items() def tearDown(self): self.teardown_beets() def modify_inp(self, inp, *args): with control_stdin(inp): self.run_command('modify', *args) def modify(self, *args): self.modify_inp('y', *args) # Item tests def test_modify_item(self): self.modify(u"title=newTitle") item = self.lib.items().get() self.assertEqual(item.title, u'newTitle') def test_modify_item_abort(self): item = self.lib.items().get() title = item.title self.modify_inp('n', u"title=newTitle") item = self.lib.items().get() self.assertEqual(item.title, title) def test_modify_item_no_change(self): title = u"Tracktitle" item = self.add_item_fixture(title=title) self.modify_inp('y', u"title", u"title={0}".format(title)) item = self.lib.items(title).get() self.assertEqual(item.title, title) def test_modify_write_tags(self): self.modify(u"title=newTitle") item = self.lib.items().get() item.read() self.assertEqual(item.title, u'newTitle') def test_modify_dont_write_tags(self): self.modify(u"--nowrite", u"title=newTitle") item = self.lib.items().get() item.read() self.assertNotEqual(item.title, 'newTitle') def test_move(self): self.modify(u"title=newTitle") item = self.lib.items().get() self.assertIn(b'newTitle', item.path) def test_not_move(self): self.modify(u"--nomove", u"title=newTitle") item = self.lib.items().get() self.assertNotIn(b'newTitle', item.path) def test_no_write_no_move(self): self.modify(u"--nomove", u"--nowrite", u"title=newTitle") item = self.lib.items().get() item.read() self.assertNotIn(b'newTitle', item.path) self.assertNotEqual(item.title, u'newTitle') def test_update_mtime(self): item = self.item old_mtime = item.mtime self.modify(u"title=newTitle") item.load() self.assertNotEqual(old_mtime, item.mtime) self.assertEqual(item.current_mtime(), item.mtime) def test_reset_mtime_with_no_write(self): item = self.item self.modify(u"--nowrite", u"title=newTitle") item.load() self.assertEqual(0, item.mtime) def test_selective_modify(self): title = u"Tracktitle" album = u"album" original_artist = u"composer" new_artist = u"coverArtist" for i in range(0, 10): self.add_item_fixture(title=u"{0}{1}".format(title, i), artist=original_artist, album=album) self.modify_inp('s\ny\ny\ny\nn\nn\ny\ny\ny\ny\nn', title, 
u"artist={0}".format(new_artist)) original_items = self.lib.items(u"artist:{0}".format(original_artist)) new_items = self.lib.items(u"artist:{0}".format(new_artist)) self.assertEqual(len(list(original_items)), 3) self.assertEqual(len(list(new_items)), 7) # Album Tests def test_modify_album(self): self.modify(u"--album", u"album=newAlbum") album = self.lib.albums().get() self.assertEqual(album.album, u'newAlbum') def test_modify_album_write_tags(self): self.modify(u"--album", u"album=newAlbum") item = self.lib.items().get() item.read() self.assertEqual(item.album, u'newAlbum') def test_modify_album_dont_write_tags(self): self.modify(u"--album", u"--nowrite", u"album=newAlbum") item = self.lib.items().get() item.read() self.assertEqual(item.album, u'the album') def test_album_move(self): self.modify(u"--album", u"album=newAlbum") item = self.lib.items().get() item.read() self.assertIn(b'newAlbum', item.path) def test_album_not_move(self): self.modify(u"--nomove", u"--album", u"album=newAlbum") item = self.lib.items().get() item.read() self.assertNotIn(b'newAlbum', item.path) # Misc def test_write_initial_key_tag(self): self.modify(u"initial_key=C#m") item = self.lib.items().get() mediafile = MediaFile(syspath(item.path)) self.assertEqual(mediafile.initial_key, u'C#m') def test_set_flexattr(self): self.modify(u"flexattr=testAttr") item = self.lib.items().get() self.assertEqual(item.flexattr, u'testAttr') def test_remove_flexattr(self): item = self.lib.items().get() item.flexattr = u'testAttr' item.store() self.modify(u"flexattr!") item = self.lib.items().get() self.assertNotIn(u"flexattr", item) @unittest.skip(u'not yet implemented') def test_delete_initial_key_tag(self): item = self.lib.items().get() item.initial_key = u'C#m' item.write() item.store() mediafile = MediaFile(syspath(item.path)) self.assertEqual(mediafile.initial_key, u'C#m') self.modify(u"initial_key!") mediafile = MediaFile(syspath(item.path)) self.assertIsNone(mediafile.initial_key) def test_arg_parsing_colon_query(self): (query, mods, dels) = commands.modify_parse_args([u"title:oldTitle", u"title=newTitle"]) self.assertEqual(query, [u"title:oldTitle"]) self.assertEqual(mods, {"title": u"newTitle"}) def test_arg_parsing_delete(self): (query, mods, dels) = commands.modify_parse_args([u"title:oldTitle", u"title!"]) self.assertEqual(query, [u"title:oldTitle"]) self.assertEqual(dels, ["title"]) def test_arg_parsing_query_with_exclaimation(self): (query, mods, dels) = commands.modify_parse_args([u"title:oldTitle!", u"title=newTitle!"]) self.assertEqual(query, [u"title:oldTitle!"]) self.assertEqual(mods, {"title": u"newTitle!"}) def test_arg_parsing_equals_in_value(self): (query, mods, dels) = commands.modify_parse_args([u"title:foo=bar", u"title=newTitle"]) self.assertEqual(query, [u"title:foo=bar"]) self.assertEqual(mods, {"title": u"newTitle"}) class WriteTest(unittest.TestCase, TestHelper): def setUp(self): self.setup_beets() def tearDown(self): self.teardown_beets() def write_cmd(self, *args): return self.run_with_output('write', *args) def test_update_mtime(self): item = self.add_item_fixture() item['title'] = u'a new title' item.store() item = self.lib.items().get() self.assertEqual(item.mtime, 0) self.write_cmd() item = self.lib.items().get() self.assertEqual(item.mtime, item.current_mtime()) def test_non_metadata_field_unchanged(self): """Changing a non-"tag" field like `bitrate` and writing should have no effect. """ # An item that starts out "clean". item = self.add_item_fixture() item.read() # ... 
but with a mismatched bitrate. item.bitrate = 123 item.store() output = self.write_cmd() self.assertEqual(output, '') def test_write_metadata_field(self): item = self.add_item_fixture() item.read() old_title = item.title item.title = u'new title' item.store() output = self.write_cmd() self.assertTrue(u'{0} -> new title'.format(old_title) in output) class MoveTest(_common.TestCase): def setUp(self): super(MoveTest, self).setUp() self.io.install() self.libdir = os.path.join(self.temp_dir, b'testlibdir') os.mkdir(self.libdir) self.itempath = os.path.join(self.libdir, b'srcfile') shutil.copy(os.path.join(_common.RSRC, b'full.mp3'), self.itempath) # Add a file to the library but don't copy it in yet. self.lib = library.Library(':memory:', self.libdir) self.i = library.Item.from_path(self.itempath) self.lib.add(self.i) self.album = self.lib.add_album([self.i]) # Alternate destination directory. self.otherdir = os.path.join(self.temp_dir, b'testotherdir') def _move(self, query=(), dest=None, copy=False, album=False, pretend=False): commands.move_items(self.lib, dest, query, copy, album, pretend) def test_move_item(self): self._move() self.i.load() self.assertTrue(b'testlibdir' in self.i.path) self.assertExists(self.i.path) self.assertNotExists(self.itempath) def test_copy_item(self): self._move(copy=True) self.i.load() self.assertTrue(b'testlibdir' in self.i.path) self.assertExists(self.i.path) self.assertExists(self.itempath) def test_move_album(self): self._move(album=True) self.i.load() self.assertTrue(b'testlibdir' in self.i.path) self.assertExists(self.i.path) self.assertNotExists(self.itempath) def test_copy_album(self): self._move(copy=True, album=True) self.i.load() self.assertTrue(b'testlibdir' in self.i.path) self.assertExists(self.i.path) self.assertExists(self.itempath) def test_move_item_custom_dir(self): self._move(dest=self.otherdir) self.i.load() self.assertTrue(b'testotherdir' in self.i.path) self.assertExists(self.i.path) self.assertNotExists(self.itempath) def test_move_album_custom_dir(self): self._move(dest=self.otherdir, album=True) self.i.load() self.assertTrue(b'testotherdir' in self.i.path) self.assertExists(self.i.path) self.assertNotExists(self.itempath) def test_pretend_move_item(self): self._move(dest=self.otherdir, pretend=True) self.i.load() self.assertIn(b'srcfile', self.i.path) def test_pretend_move_album(self): self._move(album=True, pretend=True) self.i.load() self.assertIn(b'srcfile', self.i.path) class UpdateTest(_common.TestCase): def setUp(self): super(UpdateTest, self).setUp() self.io.install() self.libdir = os.path.join(self.temp_dir, b'testlibdir') # Copy a file into the library. self.lib = library.Library(':memory:', self.libdir) item_path = os.path.join(_common.RSRC, b'full.mp3') self.i = library.Item.from_path(item_path) self.lib.add(self.i) self.i.move(True) self.album = self.lib.add_album([self.i]) # Album art. 
artfile = os.path.join(self.temp_dir, b'testart.jpg') _common.touch(artfile) self.album.set_art(artfile) self.album.store() os.remove(artfile) def _update(self, query=(), album=False, move=False, reset_mtime=True, fields=None): self.io.addinput('y') if reset_mtime: self.i.mtime = 0 self.i.store() commands.update_items(self.lib, query, album, move, False, fields=fields) def test_delete_removes_item(self): self.assertTrue(list(self.lib.items())) os.remove(self.i.path) self._update() self.assertFalse(list(self.lib.items())) def test_delete_removes_album(self): self.assertTrue(self.lib.albums()) os.remove(self.i.path) self._update() self.assertFalse(self.lib.albums()) def test_delete_removes_album_art(self): artpath = self.album.artpath self.assertExists(artpath) os.remove(self.i.path) self._update() self.assertNotExists(artpath) def test_modified_metadata_detected(self): mf = MediaFile(syspath(self.i.path)) mf.title = u'differentTitle' mf.save() self._update() item = self.lib.items().get() self.assertEqual(item.title, u'differentTitle') def test_modified_metadata_moved(self): mf = MediaFile(syspath(self.i.path)) mf.title = u'differentTitle' mf.save() self._update(move=True) item = self.lib.items().get() self.assertTrue(b'differentTitle' in item.path) def test_modified_metadata_not_moved(self): mf = MediaFile(syspath(self.i.path)) mf.title = u'differentTitle' mf.save() self._update(move=False) item = self.lib.items().get() self.assertTrue(b'differentTitle' not in item.path) def test_selective_modified_metadata_moved(self): mf = MediaFile(syspath(self.i.path)) mf.title = u'differentTitle' mf.genre = u'differentGenre' mf.save() self._update(move=True, fields=['title']) item = self.lib.items().get() self.assertTrue(b'differentTitle' in item.path) self.assertNotEqual(item.genre, u'differentGenre') def test_selective_modified_metadata_not_moved(self): mf = MediaFile(syspath(self.i.path)) mf.title = u'differentTitle' mf.genre = u'differentGenre' mf.save() self._update(move=False, fields=['title']) item = self.lib.items().get() self.assertTrue(b'differentTitle' not in item.path) self.assertNotEqual(item.genre, u'differentGenre') def test_modified_album_metadata_moved(self): mf = MediaFile(syspath(self.i.path)) mf.album = u'differentAlbum' mf.save() self._update(move=True) item = self.lib.items().get() self.assertTrue(b'differentAlbum' in item.path) def test_modified_album_metadata_art_moved(self): artpath = self.album.artpath mf = MediaFile(syspath(self.i.path)) mf.album = u'differentAlbum' mf.save() self._update(move=True) album = self.lib.albums()[0] self.assertNotEqual(artpath, album.artpath) def test_selective_modified_album_metadata_moved(self): mf = MediaFile(syspath(self.i.path)) mf.album = u'differentAlbum' mf.genre = u'differentGenre' mf.save() self._update(move=True, fields=['album']) item = self.lib.items().get() self.assertTrue(b'differentAlbum' in item.path) self.assertNotEqual(item.genre, u'differentGenre') def test_selective_modified_album_metadata_not_moved(self): mf = MediaFile(syspath(self.i.path)) mf.album = u'differentAlbum' mf.genre = u'differentGenre' mf.save() self._update(move=True, fields=['genre']) item = self.lib.items().get() self.assertTrue(b'differentAlbum' not in item.path) self.assertEqual(item.genre, u'differentGenre') def test_mtime_match_skips_update(self): mf = MediaFile(syspath(self.i.path)) mf.title = u'differentTitle' mf.save() # Make in-memory mtime match on-disk mtime. 
self.i.mtime = os.path.getmtime(self.i.path) self.i.store() self._update(reset_mtime=False) item = self.lib.items().get() self.assertEqual(item.title, u'full') class PrintTest(_common.TestCase): def setUp(self): super(PrintTest, self).setUp() self.io.install() def test_print_without_locale(self): lang = os.environ.get('LANG') if lang: del os.environ['LANG'] try: ui.print_(u'something') except TypeError: self.fail(u'TypeError during print') finally: if lang: os.environ['LANG'] = lang def test_print_with_invalid_locale(self): old_lang = os.environ.get('LANG') os.environ['LANG'] = '' old_ctype = os.environ.get('LC_CTYPE') os.environ['LC_CTYPE'] = 'UTF-8' try: ui.print_(u'something') except ValueError: self.fail(u'ValueError during print') finally: if old_lang: os.environ['LANG'] = old_lang else: del os.environ['LANG'] if old_ctype: os.environ['LC_CTYPE'] = old_ctype else: del os.environ['LC_CTYPE'] class ImportTest(_common.TestCase): def test_quiet_timid_disallowed(self): config['import']['quiet'] = True config['import']['timid'] = True self.assertRaises(ui.UserError, commands.import_files, None, [], None) @_common.slow_test() class ConfigTest(unittest.TestCase, TestHelper, _common.Assertions): def setUp(self): self.setup_beets() # Don't use the BEETSDIR from `helper`. Instead, we point the home # directory there. Some tests will set `BEETSDIR` themselves. del os.environ['BEETSDIR'] self._old_home = os.environ.get('HOME') os.environ['HOME'] = util.py3_path(self.temp_dir) # Also set APPDATA, the Windows equivalent of setting $HOME. self._old_appdata = os.environ.get('APPDATA') os.environ['APPDATA'] = \ util.py3_path(os.path.join(self.temp_dir, b'AppData', b'Roaming')) self._orig_cwd = os.getcwd() self.test_cmd = self._make_test_cmd() commands.default_commands.append(self.test_cmd) # Default user configuration if platform.system() == 'Windows': self.user_config_dir = os.path.join( self.temp_dir, b'AppData', b'Roaming', b'beets' ) else: self.user_config_dir = os.path.join( self.temp_dir, b'.config', b'beets' ) os.makedirs(self.user_config_dir) self.user_config_path = os.path.join(self.user_config_dir, b'config.yaml') # Custom BEETSDIR self.beetsdir = os.path.join(self.temp_dir, b'beetsdir') os.makedirs(self.beetsdir) self._reset_config() def tearDown(self): commands.default_commands.pop() os.chdir(self._orig_cwd) if self._old_home is not None: os.environ['HOME'] = self._old_home if self._old_appdata is None: del os.environ['APPDATA'] else: os.environ['APPDATA'] = self._old_appdata self.teardown_beets() def _make_test_cmd(self): test_cmd = ui.Subcommand('test', help=u'test') def run(lib, options, args): test_cmd.lib = lib test_cmd.options = options test_cmd.args = args test_cmd.func = run return test_cmd def _reset_config(self): # Config should read files again on demand config.clear() config._materialized = False def write_config_file(self): return open(self.user_config_path, 'w') def test_paths_section_respected(self): with self.write_config_file() as config: config.write('paths: {x: y}') self.run_command('test', lib=None) key, template = self.test_cmd.lib.path_formats[0] self.assertEqual(key, 'x') self.assertEqual(template.original, 'y') def test_default_paths_preserved(self): default_formats = ui.get_path_formats() self._reset_config() with self.write_config_file() as config: config.write('paths: {x: y}') self.run_command('test', lib=None) key, template = self.test_cmd.lib.path_formats[0] self.assertEqual(key, 'x') self.assertEqual(template.original, 'y') 
self.assertEqual(self.test_cmd.lib.path_formats[1:], default_formats) def test_nonexistant_db(self): with self.write_config_file() as config: config.write('library: /xxx/yyy/not/a/real/path') with self.assertRaises(ui.UserError): self.run_command('test', lib=None) def test_user_config_file(self): with self.write_config_file() as file: file.write('anoption: value') self.run_command('test', lib=None) self.assertEqual(config['anoption'].get(), 'value') def test_replacements_parsed(self): with self.write_config_file() as config: config.write("replace: {'[xy]': z}") self.run_command('test', lib=None) replacements = self.test_cmd.lib.replacements self.assertEqual(replacements, [(re.compile(u'[xy]'), 'z')]) def test_multiple_replacements_parsed(self): with self.write_config_file() as config: config.write("replace: {'[xy]': z, foo: bar}") self.run_command('test', lib=None) replacements = self.test_cmd.lib.replacements self.assertEqual(replacements, [ (re.compile(u'[xy]'), u'z'), (re.compile(u'foo'), u'bar'), ]) def test_cli_config_option(self): config_path = os.path.join(self.temp_dir, b'config.yaml') with open(config_path, 'w') as file: file.write('anoption: value') self.run_command('--config', config_path, 'test', lib=None) self.assertEqual(config['anoption'].get(), 'value') def test_cli_config_file_overwrites_user_defaults(self): with open(self.user_config_path, 'w') as file: file.write('anoption: value') cli_config_path = os.path.join(self.temp_dir, b'config.yaml') with open(cli_config_path, 'w') as file: file.write('anoption: cli overwrite') self.run_command('--config', cli_config_path, 'test', lib=None) self.assertEqual(config['anoption'].get(), 'cli overwrite') def test_cli_config_file_overwrites_beetsdir_defaults(self): os.environ['BEETSDIR'] = util.py3_path(self.beetsdir) env_config_path = os.path.join(self.beetsdir, b'config.yaml') with open(env_config_path, 'w') as file: file.write('anoption: value') cli_config_path = os.path.join(self.temp_dir, b'config.yaml') with open(cli_config_path, 'w') as file: file.write('anoption: cli overwrite') self.run_command('--config', cli_config_path, 'test', lib=None) self.assertEqual(config['anoption'].get(), 'cli overwrite') # @unittest.skip('Difficult to implement with optparse') # def test_multiple_cli_config_files(self): # cli_config_path_1 = os.path.join(self.temp_dir, b'config.yaml') # cli_config_path_2 = os.path.join(self.temp_dir, b'config_2.yaml') # # with open(cli_config_path_1, 'w') as file: # file.write('first: value') # # with open(cli_config_path_2, 'w') as file: # file.write('second: value') # # self.run_command('--config', cli_config_path_1, # '--config', cli_config_path_2, 'test', lib=None) # self.assertEqual(config['first'].get(), 'value') # self.assertEqual(config['second'].get(), 'value') # # @unittest.skip('Difficult to implement with optparse') # def test_multiple_cli_config_overwrite(self): # cli_config_path = os.path.join(self.temp_dir, b'config.yaml') # cli_overwrite_config_path = os.path.join(self.temp_dir, # b'overwrite_config.yaml') # # with open(cli_config_path, 'w') as file: # file.write('anoption: value') # # with open(cli_overwrite_config_path, 'w') as file: # file.write('anoption: overwrite') # # self.run_command('--config', cli_config_path, # '--config', cli_overwrite_config_path, 'test') # self.assertEqual(config['anoption'].get(), 'cli overwrite') def test_cli_config_paths_resolve_relative_to_user_dir(self): cli_config_path = os.path.join(self.temp_dir, b'config.yaml') with open(cli_config_path, 'w') as file: 
file.write('library: beets.db\n') file.write('statefile: state') self.run_command('--config', cli_config_path, 'test', lib=None) self.assert_equal_path( util.bytestring_path(config['library'].as_filename()), os.path.join(self.user_config_dir, b'beets.db') ) self.assert_equal_path( util.bytestring_path(config['statefile'].as_filename()), os.path.join(self.user_config_dir, b'state') ) def test_cli_config_paths_resolve_relative_to_beetsdir(self): os.environ['BEETSDIR'] = util.py3_path(self.beetsdir) cli_config_path = os.path.join(self.temp_dir, b'config.yaml') with open(cli_config_path, 'w') as file: file.write('library: beets.db\n') file.write('statefile: state') self.run_command('--config', cli_config_path, 'test', lib=None) self.assert_equal_path( util.bytestring_path(config['library'].as_filename()), os.path.join(self.beetsdir, b'beets.db') ) self.assert_equal_path( util.bytestring_path(config['statefile'].as_filename()), os.path.join(self.beetsdir, b'state') ) def test_command_line_option_relative_to_working_dir(self): os.chdir(self.temp_dir) self.run_command('--library', 'foo.db', 'test', lib=None) self.assert_equal_path(config['library'].as_filename(), os.path.join(os.getcwd(), 'foo.db')) def test_cli_config_file_loads_plugin_commands(self): cli_config_path = os.path.join(self.temp_dir, b'config.yaml') with open(cli_config_path, 'w') as file: file.write('pluginpath: %s\n' % _common.PLUGINPATH) file.write('plugins: test') self.run_command('--config', cli_config_path, 'plugin', lib=None) self.assertTrue(plugins.find_plugins()[0].is_test_plugin) def test_beetsdir_config(self): os.environ['BEETSDIR'] = util.py3_path(self.beetsdir) env_config_path = os.path.join(self.beetsdir, b'config.yaml') with open(env_config_path, 'w') as file: file.write('anoption: overwrite') config.read() self.assertEqual(config['anoption'].get(), 'overwrite') def test_beetsdir_points_to_file_error(self): beetsdir = os.path.join(self.temp_dir, b'beetsfile') open(beetsdir, 'a').close() os.environ['BEETSDIR'] = util.py3_path(beetsdir) self.assertRaises(ConfigError, self.run_command, 'test') def test_beetsdir_config_does_not_load_default_user_config(self): os.environ['BEETSDIR'] = util.py3_path(self.beetsdir) with open(self.user_config_path, 'w') as file: file.write('anoption: value') config.read() self.assertFalse(config['anoption'].exists()) def test_default_config_paths_resolve_relative_to_beetsdir(self): os.environ['BEETSDIR'] = util.py3_path(self.beetsdir) config.read() self.assert_equal_path( util.bytestring_path(config['library'].as_filename()), os.path.join(self.beetsdir, b'library.db') ) self.assert_equal_path( util.bytestring_path(config['statefile'].as_filename()), os.path.join(self.beetsdir, b'state.pickle') ) def test_beetsdir_config_paths_resolve_relative_to_beetsdir(self): os.environ['BEETSDIR'] = util.py3_path(self.beetsdir) env_config_path = os.path.join(self.beetsdir, b'config.yaml') with open(env_config_path, 'w') as file: file.write('library: beets.db\n') file.write('statefile: state') config.read() self.assert_equal_path( util.bytestring_path(config['library'].as_filename()), os.path.join(self.beetsdir, b'beets.db') ) self.assert_equal_path( util.bytestring_path(config['statefile'].as_filename()), os.path.join(self.beetsdir, b'state') ) class ShowModelChangeTest(_common.TestCase): def setUp(self): super(ShowModelChangeTest, self).setUp() self.io.install() self.a = _common.item() self.b = _common.item() self.a.path = self.b.path def _show(self, **kwargs): change = ui.show_model_changes(self.a, 
self.b, **kwargs) out = self.io.getoutput() return change, out def test_identical(self): change, out = self._show() self.assertFalse(change) self.assertEqual(out, '') def test_string_fixed_field_change(self): self.b.title = 'x' change, out = self._show() self.assertTrue(change) self.assertTrue(u'title' in out) def test_int_fixed_field_change(self): self.b.track = 9 change, out = self._show() self.assertTrue(change) self.assertTrue(u'track' in out) def test_floats_close_to_identical(self): self.a.length = 1.00001 self.b.length = 1.00005 change, out = self._show() self.assertFalse(change) self.assertEqual(out, u'') def test_floats_different(self): self.a.length = 1.00001 self.b.length = 2.00001 change, out = self._show() self.assertTrue(change) self.assertTrue(u'length' in out) def test_both_values_shown(self): self.a.title = u'foo' self.b.title = u'bar' change, out = self._show() self.assertTrue(u'foo' in out) self.assertTrue(u'bar' in out) class ShowChangeTest(_common.TestCase): def setUp(self): super(ShowChangeTest, self).setUp() self.io.install() self.items = [_common.item()] self.items[0].track = 1 self.items[0].path = b'/path/to/file.mp3' self.info = autotag.AlbumInfo( u'the album', u'album id', u'the artist', u'artist id', [ autotag.TrackInfo(u'the title', u'track id', index=1) ] ) def _show_change(self, items=None, info=None, cur_artist=u'the artist', cur_album=u'the album', dist=0.1): """Return an unicode string representing the changes""" items = items or self.items info = info or self.info mapping = dict(zip(items, info.tracks)) config['ui']['color'] = False album_dist = distance(items, info, mapping) album_dist._penalties = {'album': [dist]} commands.show_change( cur_artist, cur_album, autotag.AlbumMatch(album_dist, info, mapping, set(), set()), ) # FIXME decoding shouldn't be done here return util.text_string(self.io.getoutput().lower()) def test_null_change(self): msg = self._show_change() self.assertTrue('similarity: 90' in msg) self.assertTrue('tagging:' in msg) def test_album_data_change(self): msg = self._show_change(cur_artist='another artist', cur_album='another album') self.assertTrue('correcting tags from:' in msg) def test_item_data_change(self): self.items[0].title = u'different' msg = self._show_change() self.assertTrue('different -> the title' in msg) def test_item_data_change_with_unicode(self): self.items[0].title = u'caf\xe9' msg = self._show_change() self.assertTrue(u'caf\xe9 -> the title' in msg) def test_album_data_change_with_unicode(self): msg = self._show_change(cur_artist=u'caf\xe9', cur_album=u'another album') self.assertTrue(u'correcting tags from:' in msg) def test_item_data_change_title_missing(self): self.items[0].title = u'' msg = re.sub(r' +', ' ', self._show_change()) self.assertTrue(u'file.mp3 -> the title' in msg) def test_item_data_change_title_missing_with_unicode_filename(self): self.items[0].title = u'' self.items[0].path = u'/path/to/caf\xe9.mp3'.encode('utf-8') msg = re.sub(r' +', ' ', self._show_change()) self.assertTrue(u'caf\xe9.mp3 -> the title' in msg or u'caf.mp3 ->' in msg) @patch('beets.library.Item.try_filesize', Mock(return_value=987)) class SummarizeItemsTest(_common.TestCase): def setUp(self): super(SummarizeItemsTest, self).setUp() item = library.Item() item.bitrate = 4321 item.length = 10 * 60 + 54 item.format = "F" self.item = item def test_summarize_item(self): summary = commands.summarize_items([], True) self.assertEqual(summary, u"") summary = commands.summarize_items([self.item], True) self.assertEqual(summary, u"F, 
4kbps, 10:54, 987.0 B") def test_summarize_items(self): summary = commands.summarize_items([], False) self.assertEqual(summary, u"0 items") summary = commands.summarize_items([self.item], False) self.assertEqual(summary, u"1 items, F, 4kbps, 10:54, 987.0 B") i2 = deepcopy(self.item) summary = commands.summarize_items([self.item, i2], False) self.assertEqual(summary, u"2 items, F, 4kbps, 21:48, 1.9 KiB") i2.format = "G" summary = commands.summarize_items([self.item, i2], False) self.assertEqual(summary, u"2 items, F 1, G 1, 4kbps, 21:48, 1.9 KiB") summary = commands.summarize_items([self.item, i2, i2], False) self.assertEqual(summary, u"3 items, G 2, F 1, 4kbps, 32:42, 2.9 KiB") class PathFormatTest(_common.TestCase): def test_custom_paths_prepend(self): default_formats = ui.get_path_formats() config['paths'] = {u'foo': u'bar'} pf = ui.get_path_formats() key, tmpl = pf[0] self.assertEqual(key, u'foo') self.assertEqual(tmpl.original, u'bar') self.assertEqual(pf[1:], default_formats) @_common.slow_test() class PluginTest(_common.TestCase, TestHelper): def test_plugin_command_from_pluginpath(self): config['pluginpath'] = [_common.PLUGINPATH] config['plugins'] = ['test'] self.run_command('test', lib=None) @_common.slow_test() class CompletionTest(_common.TestCase, TestHelper): def test_completion(self): # Load plugin commands config['pluginpath'] = [_common.PLUGINPATH] config['plugins'] = ['test'] # Do not load any other bash completion scripts on the system. env = dict(os.environ) env['BASH_COMPLETION_DIR'] = os.devnull env['BASH_COMPLETION_COMPAT_DIR'] = os.devnull # Open a `bash` process to run the tests in. We'll pipe in bash # commands via stdin. cmd = os.environ.get('BEETS_TEST_SHELL', '/bin/bash --norc').split() if not has_program(cmd[0]): self.skipTest(u'bash not available') tester = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, env=env) # Load bash_completion library. for path in commands.BASH_COMPLETION_PATHS: if os.path.exists(util.syspath(path)): bash_completion = path break else: self.skipTest(u'bash-completion script not found') try: with open(util.syspath(bash_completion), 'rb') as f: tester.stdin.writelines(f) except IOError: self.skipTest(u'could not read bash-completion script') # Load completion script. self.io.install() self.run_command('completion', lib=None) completion_script = self.io.getoutput().encode('utf-8') self.io.restore() tester.stdin.writelines(completion_script.splitlines(True)) # Load test suite. test_script_name = os.path.join(_common.RSRC, b'test_completion.sh') with open(test_script_name, 'rb') as test_script_file: tester.stdin.writelines(test_script_file) out, err = tester.communicate() if tester.returncode != 0 or out != b'completion tests passed\n': print(out.decode('utf-8')) self.fail(u'test/test_completion.sh did not execute properly') class CommonOptionsParserCliTest(unittest.TestCase, TestHelper): """Test CommonOptionsParser and formatting LibModel formatting on 'list' command. 
""" def setUp(self): self.setup_beets() self.lib = library.Library(':memory:') self.item = _common.item() self.item.path = b'xxx/yyy' self.lib.add(self.item) self.lib.add_album([self.item]) def tearDown(self): self.teardown_beets() def test_base(self): l = self.run_with_output(u'ls') self.assertEqual(l, u'the artist - the album - the title\n') l = self.run_with_output(u'ls', u'-a') self.assertEqual(l, u'the album artist - the album\n') def test_path_option(self): l = self.run_with_output(u'ls', u'-p') self.assertEqual(l, u'xxx/yyy\n') l = self.run_with_output(u'ls', u'-a', u'-p') self.assertEqual(l, u'xxx\n') def test_format_option(self): l = self.run_with_output(u'ls', u'-f', u'$artist') self.assertEqual(l, u'the artist\n') l = self.run_with_output(u'ls', u'-a', u'-f', u'$albumartist') self.assertEqual(l, u'the album artist\n') def test_format_option_unicode(self): l = self.run_with_output(b'ls', b'-f', u'caf\xe9'.encode(util.arg_encoding())) self.assertEqual(l, u'caf\xe9\n') def test_root_format_option(self): l = self.run_with_output(u'--format-item', u'$artist', u'--format-album', u'foo', u'ls') self.assertEqual(l, u'the artist\n') l = self.run_with_output(u'--format-item', u'foo', u'--format-album', u'$albumartist', u'ls', u'-a') self.assertEqual(l, u'the album artist\n') def test_help(self): l = self.run_with_output(u'help') self.assertIn(u'Usage:', l) l = self.run_with_output(u'help', u'list') self.assertIn(u'Usage:', l) with self.assertRaises(ui.UserError): self.run_command(u'help', u'this.is.not.a.real.command') def test_stats(self): l = self.run_with_output(u'stats') self.assertIn(u'Approximate total size:', l) # # Need to have more realistic library setup for this to work # l = self.run_with_output('stats', '-e') # self.assertIn('Total size:', l) def test_version(self): l = self.run_with_output(u'version') self.assertIn(u'Python version', l) self.assertIn(u'no plugins loaded', l) # # Need to have plugin loaded # l = self.run_with_output('version') # self.assertIn('plugins: ', l) class CommonOptionsParserTest(unittest.TestCase, TestHelper): def setUp(self): self.setup_beets() def tearDown(self): self.teardown_beets() def test_album_option(self): parser = ui.CommonOptionsParser() self.assertFalse(parser._album_flags) parser.add_album_option() self.assertTrue(bool(parser._album_flags)) self.assertEqual(parser.parse_args([]), ({'album': None}, [])) self.assertEqual(parser.parse_args([u'-a']), ({'album': True}, [])) self.assertEqual(parser.parse_args([u'--album']), ({'album': True}, [])) def test_path_option(self): parser = ui.CommonOptionsParser() parser.add_path_option() self.assertFalse(parser._album_flags) config['format_item'].set('$foo') self.assertEqual(parser.parse_args([]), ({'path': None}, [])) self.assertEqual(config['format_item'].as_str(), u'$foo') self.assertEqual(parser.parse_args([u'-p']), ({'path': True, 'format': u'$path'}, [])) self.assertEqual(parser.parse_args(['--path']), ({'path': True, 'format': u'$path'}, [])) self.assertEqual(config['format_item'].as_str(), u'$path') self.assertEqual(config['format_album'].as_str(), u'$path') def test_format_option(self): parser = ui.CommonOptionsParser() parser.add_format_option() self.assertFalse(parser._album_flags) config['format_item'].set('$foo') self.assertEqual(parser.parse_args([]), ({'format': None}, [])) self.assertEqual(config['format_item'].as_str(), u'$foo') self.assertEqual(parser.parse_args([u'-f', u'$bar']), ({'format': u'$bar'}, [])) self.assertEqual(parser.parse_args([u'--format', u'$baz']), ({'format': 
u'$baz'}, [])) self.assertEqual(config['format_item'].as_str(), u'$baz') self.assertEqual(config['format_album'].as_str(), u'$baz') def test_format_option_with_target(self): with self.assertRaises(KeyError): ui.CommonOptionsParser().add_format_option(target='thingy') parser = ui.CommonOptionsParser() parser.add_format_option(target='item') config['format_item'].set('$item') config['format_album'].set('$album') self.assertEqual(parser.parse_args([u'-f', u'$bar']), ({'format': u'$bar'}, [])) self.assertEqual(config['format_item'].as_str(), u'$bar') self.assertEqual(config['format_album'].as_str(), u'$album') def test_format_option_with_album(self): parser = ui.CommonOptionsParser() parser.add_album_option() parser.add_format_option() config['format_item'].set('$item') config['format_album'].set('$album') parser.parse_args([u'-f', u'$bar']) self.assertEqual(config['format_item'].as_str(), u'$bar') self.assertEqual(config['format_album'].as_str(), u'$album') parser.parse_args([u'-a', u'-f', u'$foo']) self.assertEqual(config['format_item'].as_str(), u'$bar') self.assertEqual(config['format_album'].as_str(), u'$foo') parser.parse_args([u'-f', u'$foo2', u'-a']) self.assertEqual(config['format_album'].as_str(), u'$foo2') def test_add_all_common_options(self): parser = ui.CommonOptionsParser() parser.add_all_common_options() self.assertEqual(parser.parse_args([]), ({'album': None, 'path': None, 'format': None}, [])) class EncodingTest(_common.TestCase): """Tests for the `terminal_encoding` config option and our `_in_encoding` and `_out_encoding` utility functions. """ def out_encoding_overridden(self): config['terminal_encoding'] = 'fake_encoding' self.assertEqual(ui._out_encoding(), 'fake_encoding') def in_encoding_overridden(self): config['terminal_encoding'] = 'fake_encoding' self.assertEqual(ui._in_encoding(), 'fake_encoding') def out_encoding_default_utf8(self): with patch('sys.stdout') as stdout: stdout.encoding = None self.assertEqual(ui._out_encoding(), 'utf-8') def in_encoding_default_utf8(self): with patch('sys.stdin') as stdin: stdin.encoding = None self.assertEqual(ui._in_encoding(), 'utf-8') def suite(): return unittest.TestLoader().loadTestsFromName(__name__) if __name__ == '__main__': unittest.main(defaultTest='suite')
Kraymer/beets
test/test_ui.py
Python
mit
49,238
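The ModifyTest arg-parsing cases in the record above pin down how `beet modify` splits its arguments: tokens containing `=` become field assignments, a trailing `!` with no `=` or `:` marks a deletion, and everything else (including `title:oldTitle`-style terms) is query text. A rough reimplementation of that contract, for illustration only and not beets' actual modify_parse_args:

def split_modify_args(args):
    # Returns (query_terms, {field: value} assignments, [field] deletions).
    query, mods, dels = [], {}, []
    for arg in args:
        if '=' in arg and ':' not in arg.split('=', 1)[0]:
            key, value = arg.split('=', 1)
            mods[key] = value
        elif arg.endswith('!') and '=' not in arg and ':' not in arg:
            dels.append(arg[:-1])
        else:
            query.append(arg)
    return query, mods, dels

# Matches the four test cases above, e.g.:
# split_modify_args([u"title:oldTitle!", u"title=newTitle!"])
# -> ([u"title:oldTitle!"], {u"title": u"newTitle!"}, [])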
# -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2016-12-06 09:04
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('cms', '0016_auto_20160608_1535'),
        ('maps', '0002_auto_20160926_1157'),
    ]

    operations = [
        migrations.AddField(
            model_name='googlemap',
            name='cms_page',
            field=models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, to='cms.Page'),
        ),
    ]
rouxcode/django-cms-plugins
cmsplugins/maps/migrations/0003_googlemap_cms_page.py
Python
mit
601
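The AddField migration above corresponds to a model field like the following on GoogleMap; this is a sketch under the assumption that cms.models exports Page (the real model in django-cms-plugins carries more fields than shown here):

from django.db import models
from cms.models import Page


class GoogleMap(models.Model):
    # ... the plugin's other fields ...
    cms_page = models.ForeignKey(
        Page,             # 'cms.Page' in the migration's string form
        null=True,        # existing rows get NULL when the column is added
        editable=False,   # maintained by the plugin, not via the admin form
        on_delete=models.CASCADE,
    )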
# This file is part of Buildbot. Buildbot is free software: you can # redistribute it and/or modify it under the terms of the GNU General Public # License as published by the Free Software Foundation, version 2. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., 51 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # Copyright Buildbot Team Members import copy import json from twisted.internet import defer from twisted.python import log from buildbot.data import base from buildbot.data import sourcestamps from buildbot.data import types from buildbot.process import metrics from buildbot.process.users import users from buildbot.util import datetime2epoch from buildbot.util import epoch2datetime class FixerMixin: @defer.inlineCallbacks def _fixChange(self, change, is_graphql): # TODO: make these mods in the DB API if change: change = change.copy() change['when_timestamp'] = datetime2epoch(change['when_timestamp']) if is_graphql: props = change['properties'] change['properties'] = [ {'name': k, 'source': v[1], 'value': json.dumps(v[0])} for k, v in props.items() ] else: sskey = ('sourcestamps', str(change['sourcestampid'])) change['sourcestamp'] = yield self.master.data.get(sskey) del change['sourcestampid'] return change fieldMapping = { 'changeid': 'changes.changeid', } class ChangeEndpoint(FixerMixin, base.Endpoint): isCollection = False pathPatterns = """ /changes/n:changeid """ def get(self, resultSpec, kwargs): d = self.master.db.changes.getChange(kwargs['changeid']) d.addCallback(self._fixChange, is_graphql='graphql' in kwargs) return d class ChangesEndpoint(FixerMixin, base.BuildNestingMixin, base.Endpoint): isCollection = True pathPatterns = """ /changes /builders/n:builderid/builds/n:build_number/changes /builds/n:buildid/changes /sourcestamps/n:ssid/changes """ rootLinkName = 'changes' @defer.inlineCallbacks def get(self, resultSpec, kwargs): buildid = kwargs.get('buildid') if 'build_number' in kwargs: buildid = yield self.getBuildid(kwargs) ssid = kwargs.get('ssid') if buildid is not None: changes = yield self.master.db.changes.getChangesForBuild(buildid) elif ssid is not None: change = yield self.master.db.changes.getChangeFromSSid(ssid) if change is not None: changes = [change] else: changes = [] else: if resultSpec is not None: resultSpec.fieldMapping = self.fieldMapping changes = yield self.master.db.changes.getChanges(resultSpec=resultSpec) results = [] for ch in changes: results.append((yield self._fixChange(ch, is_graphql='graphql' in kwargs))) return results class Change(base.ResourceType): name = "change" plural = "changes" endpoints = [ChangeEndpoint, ChangesEndpoint] eventPathPatterns = """ /changes/:changeid """ keyField = "changeid" subresources = ["Build", "Property"] class EntityType(types.Entity): changeid = types.Integer() parent_changeids = types.List(of=types.Integer()) author = types.String() committer = types.String() files = types.List(of=types.String()) comments = types.String() revision = types.NoneOk(types.String()) when_timestamp = types.Integer() branch = types.NoneOk(types.String()) category = types.NoneOk(types.String()) revlink = types.NoneOk(types.String()) properties = types.SourcedProperties() 
repository = types.String() project = types.String() codebase = types.String() sourcestamp = sourcestamps.SourceStamp.entityType entityType = EntityType(name, 'Change') @base.updateMethod @defer.inlineCallbacks def addChange(self, files=None, comments=None, author=None, committer=None, revision=None, when_timestamp=None, branch=None, category=None, revlink='', properties=None, repository='', codebase=None, project='', src=None): metrics.MetricCountEvent.log("added_changes", 1) if properties is None: properties = {} # add the source to the properties for k in properties: properties[k] = (properties[k], 'Change') # get a user id if src: # create user object, returning a corresponding uid uid = yield users.createUserObject(self.master, author, src) else: uid = None if not revlink and revision and repository and callable(self.master.config.revlink): # generate revlink from revision and repository using the configured callable revlink = self.master.config.revlink(revision, repository) or '' if callable(category): pre_change = self.master.config.preChangeGenerator(author=author, committer=committer, files=files, comments=comments, revision=revision, when_timestamp=when_timestamp, branch=branch, revlink=revlink, properties=properties, repository=repository, project=project) category = category(pre_change) # set the codebase, either the default, supplied, or generated if codebase is None \ and self.master.config.codebaseGenerator is not None: pre_change = self.master.config.preChangeGenerator(author=author, committer=committer, files=files, comments=comments, revision=revision, when_timestamp=when_timestamp, branch=branch, category=category, revlink=revlink, properties=properties, repository=repository, project=project) codebase = self.master.config.codebaseGenerator(pre_change) codebase = str(codebase) else: codebase = codebase or '' # add the Change to the database changeid = yield self.master.db.changes.addChange( author=author, committer=committer, files=files, comments=comments, revision=revision, when_timestamp=epoch2datetime(when_timestamp), branch=branch, category=category, revlink=revlink, properties=properties, repository=repository, codebase=codebase, project=project, uid=uid) # get the change and munge the result for the notification change = yield self.master.data.get(('changes', str(changeid))) change = copy.deepcopy(change) self.produceEvent(change, 'new') # log, being careful to handle funny characters msg = f"added change with revision {revision} to database" log.msg(msg.encode('utf-8', 'replace')) return changeid
pmisik/buildbot
master/buildbot/data/changes.py
Python
gpl-2.0
8,824
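FixerMixin._fixChange above reshapes a change's properties for GraphQL: the database hands back a {name: (value, source)} mapping, and the GraphQL layer wants a list of objects with the value JSON-encoded. That transform in isolation, as a small self-contained sketch (flatten_properties is an illustrative name, not a buildbot API):

import json

def flatten_properties(props):
    # {'owner': ('alice', 'Change')}
    #   -> [{'name': 'owner', 'source': 'Change', 'value': '"alice"'}]
    return [
        {'name': name, 'source': source, 'value': json.dumps(value)}
        for name, (value, source) in props.items()
    ]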
'''
Created on 22 Feb 2014

@author: mkieszek
'''
from openerp.osv import osv, fields

class jp_candidate_tag(osv.Model):
    def name_get(self, cr, uid, ids, context=None):
        """Return the categories' display name, including their direct parent by default.

        :param dict context: the ``partner_category_display`` key can be
            used to select the short version of the category name (without
            the direct parent), when set to ``'short'``. The default is
            the long version.
        """
        if context is None:
            context = {}
        if context.get('partner_category_display') == 'short':
            return super(jp_candidate_tag, self).name_get(cr, uid, ids, context=context)
        if isinstance(ids, (int, long)):
            ids = [ids]
        reads = self.read(cr, uid, ids, ['name', 'parent_id'], context=context)
        res = []
        for record in reads:
            name = record['name']
            if record['parent_id']:
                name = record['parent_id'][1] + ' / ' + name
            res.append((record['id'], name))
        return res

    def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
        if not args:
            args = []
        if not context:
            context = {}
        if name:
            # Be sure name_search is symmetric to name_get
            name = name.split(' / ')[-1]
            ids = self.search(cr, uid, [('name', operator, name)] + args, limit=limit, context=context)
        else:
            ids = self.search(cr, uid, args, limit=limit, context=context)
        return self.name_get(cr, uid, ids, context)

    def _name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
        res = self.name_get(cr, uid, ids, context=context)
        return dict(res)

    _name = 'jp.candidate.tag'
    _description = 'Candidate tag'
    _columns = {
        'name': fields.char('Category Name', required=True, size=64, translate=True),
        'parent_id': fields.many2one('jp.candidate.tag', 'Parent Category', select=True, ondelete='cascade'),
        'complete_name': fields.function(_name_get_fnc, type="char", string='Full Name'),
        'child_ids': fields.one2many('jp.candidate.tag', 'parent_id', 'Child Categories'),
        'active': fields.boolean('Active', help="The active field allows you to hide the category without removing it."),
        'parent_left': fields.integer('Left parent', select=True),
        'parent_right': fields.integer('Right parent', select=True),
        'partner_ids': fields.many2many('jp.candidate', id1='category_id', id2='partner_id', string='Partners'),
    }
    _constraints = [
        (osv.osv._check_recursion, 'Error ! You can not create recursive categories.', ['parent_id'])
    ]
    _defaults = {
        'active': 1,
    }
    _parent_store = True
    _parent_order = 'name'
    _order = 'parent_left'
mkieszek/jobsplus
jobsplus_marketing/jp_candidate_tag.py
Python
agpl-3.0
3,032
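The name_get/name_search pair above encodes tags as "Parent / Child" strings. A plain-Python illustration of that convention (not OpenERP ORM code):

# name_get() prefixes a tag with its direct parent, and name_search() stays
# symmetric by searching only on the last ' / '-separated segment.
def display_name(tag_name, parent_name=None):
    return parent_name + ' / ' + tag_name if parent_name else tag_name

def search_term(display):
    return display.split(' / ')[-1]

assert display_name('Python', 'Developer') == 'Developer / Python'
assert search_term('Developer / Python') == 'Python'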
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import re
from typing import Callable, List, Set

from . import constants


def _get_file_paths(
    root_dir: str,
    predicate: Callable[[str], bool]
) -> List[str]:
    """Recursively list the files in a given directory whose names match
    the provided predicate function.

    Args:
        root_dir: the root directory to search from
        predicate: the predicate function to filter filenames with

    Returns:
        A list of filepaths relative to root_dir that match the predicate
    """
    if os.path.basename(os.path.normpath(root_dir)).startswith('.'):
        # Ignore dot-directories
        return []

    paths = [os.path.join(root_dir, path) for path in os.listdir(root_dir)]
    folders = [path for path in paths if not os.path.isfile(path)]
    files = [path for path in paths if os.path.isfile(path) and predicate(path)]

    for folder in folders:
        files += _get_file_paths(folder, predicate)

    return files


def get_python_files(root_dir: str) -> List[str]:
    """Recursively lists the Python files in a directory.

    Args:
        root_dir: the root directory to search from

    Returns:
        A list of Python filepaths relative to root_dir
    """
    # Not language-agnostic, so keep it in this method
    gae_lib_regex = re.compile(r'/appengine/(.+/)*lib/')
    return _get_file_paths(
        root_dir,
        lambda path: (
            path.endswith('.py')
            and not gae_lib_regex.search(path)
        )
    )


def get_drift_yaml_files(root_dir: str) -> List[str]:
    """Recursively lists the DRIFT yaml metadata files in a directory.

    Args:
        root_dir: the root directory to search from

    Returns:
        A list of DRIFT yaml metadata filepaths relative to root_dir
    """
    return _get_file_paths(
        root_dir,
        lambda path: (
            os.path.basename(path) == '.drift-data.yml'
            or os.path.basename(path) == '.drift-data.yaml'
        )
    )


def get_region_tags(root_dir: str) -> List[str]:
    """Recursively find the region tags in a directory.

    Args:
        root_dir: the root directory to search from

    Returns:
        The list of region tags found in root_dir
    """
    file_paths = _get_file_paths(root_dir, constants.region_tag_predicate)

    region_tags: Set[str] = set()
    for path in file_paths:
        with open(path, 'r') as file:
            file_contents = file.read()
            file_region_tags = constants.START_VERB_REGEX.findall(file_contents)
            if file_region_tags:
                region_tags = region_tags.union(set(file_region_tags))

    return list(region_tags)
GoogleCloudPlatform/repo-automation-playground
xunit-autolabeler-v2/ast_parser/lib/file_utils.py
Python
apache-2.0
3,255
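A self-contained usage sketch of the recursive walk above. The real predicate and regex live in the sibling `constants` module (not shown), so a simplified "[START tag]" pattern is assumed here:

import os
import re
import tempfile

START_TAG = re.compile(r'\[START ([a-z0-9_]+)\]')  # assumed tag format

with tempfile.TemporaryDirectory() as root:
    sample = os.path.join(root, 'quickstart.py')
    with open(sample, 'w') as f:
        f.write('# [START storage_quickstart]\nprint("hi")\n# [END storage_quickstart]\n')

    # same shape as get_region_tags(root), with the assumed predicate/regex
    paths = [os.path.join(d, n) for d, _, names in os.walk(root)
             for n in names if n.endswith('.py')]
    tags = set()
    for path in paths:
        with open(path) as f:
            tags |= set(START_TAG.findall(f.read()))
    assert tags == {'storage_quickstart'}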
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
    openslides.system.forms
    ~~~~~~~~~~~~~~~~~~~~~~~

    Forms for the system app.

    :copyright: 2011 by the OpenSlides team, see AUTHORS.
    :license: GNU GPL, see LICENSE for more details.
"""

from django.forms import Form, CharField, TextInput, BooleanField, IntegerField, ChoiceField, Textarea, Select
from django.utils.translation import ugettext as _

from system.api import config_get


class SystemConfigForm(Form):
    error_css_class = 'error'
    required_css_class = 'required'

    #user_registration = BooleanField(label=_("User registration"), required=False)
    system_url = CharField(widget=TextInput(), required=False, label=_("System URL"))
    system_welcometext = CharField(widget=Textarea(), required=False,
                                   label=_("Welcome text (for password PDF)"))
    system_enable_anonymous = BooleanField(required=False,
                                           label=_("Access for anonymous / guest users"),
                                           help_text=_("Allow access for guest users"))


class EventConfigForm(Form):
    error_css_class = 'error'
    required_css_class = 'required'

    event_name = CharField(widget=TextInput(), label=_("Event name"), max_length=30)
    event_description = CharField(widget=TextInput(),
                                  label=_("Short description of event"),
                                  max_length=100, required=False)
    event_date = CharField(widget=TextInput(), required=False, label=_("Event date"))
    event_location = CharField(widget=TextInput(), required=False, label=_("Event location"))
    event_organizer = CharField(widget=TextInput(), required=False, label=_("Event organizer"))


class AgendaConfigForm(Form):
    error_css_class = 'error'
    required_css_class = 'required'

    agenda_countdown_time = IntegerField(widget=TextInput(attrs={'class': 'small-input'}),
                                         label=_("Countdown (in seconds)"),
                                         initial=60, min_value=0)


class ApplicationConfigForm(Form):
    error_css_class = 'error'
    required_css_class = 'required'

    application_min_supporters = IntegerField(
        widget=TextInput(attrs={'class': 'small-input'}),
        label=_("Number of (minimum) required supporters for an application"),
        initial=4, min_value=0, max_value=8)
    application_preamble = CharField(widget=TextInput(), required=False,
                                     label=_("Application preamble"))
    application_pdf_ballot_papers_selection = ChoiceField(
        widget=Select(), required=False,
        label=_("Number of ballot papers (selection)"),
        choices=[
            ("1", _("Number of all delegates")),
            ("2", _("Number of all participants")),
            ("0", _("Use the following custom number")),
        ])
    application_pdf_ballot_papers_number = IntegerField(
        widget=TextInput(attrs={'class': 'small-input'}),
        required=False, min_value=1,
        label=_("Custom number of ballot papers"))
    application_pdf_title = CharField(widget=TextInput(), required=False,
                                      label=_("Title for PDF document (all applications)"))
    application_pdf_preamble = CharField(widget=Textarea(), required=False,
                                         label=_("Preamble text for PDF document (all applications)"))


class AssignmentConfigForm(Form):
    error_css_class = 'error'
    required_css_class = 'required'

    assignment_pdf_title = CharField(widget=TextInput(), required=False,
                                     label=_("Title for PDF document (all elections)"))
    assignment_pdf_preamble = CharField(widget=Textarea(), required=False,
                                        label=_("Preamble text for PDF document (all elections)"))
    assignment_pdf_ballot_papers_selection = ChoiceField(
        widget=Select(), required=False,
        label=_("Number of ballot papers (selection)"),
        choices=[
            ("1", _("Number of all delegates")),
            ("2", _("Number of all participants")),
            ("0", _("Use the following custom number")),
        ])
    assignment_pdf_ballot_papers_number = IntegerField(
        widget=TextInput(attrs={'class': 'small-input'}),
        required=False, min_value=1,
        label=_("Custom number of ballot papers"))
svschannak/openslides-templates-pmv
openslides/system/forms.py
Python
gpl-2.0
3,753
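These forms are plain django.forms.Form subclasses, so a view wires them up in the usual request/response cycle. A minimal sketch (the template path and the config-persistence step are assumptions, not part of this module):

from django.shortcuts import render

def agenda_config_view(request):
    if request.method == 'POST':
        form = AgendaConfigForm(request.POST)
        if form.is_valid():
            countdown = form.cleaned_data['agenda_countdown_time']
            # persisting via the system app's config API would happen here
    else:
        form = AgendaConfigForm(initial={'agenda_countdown_time': 60})
    return render(request, 'system/agenda_config.html', {'form': form})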
""" This module implements AIMP-like headers for playlists, which help to distinguish songs from different directories. """ import gtk import pango import cairo import gobject import glib import os.path import re """ CellRenderer for ui.treeview. Columns for songs, which need a header, must have a gproperty "aimp_header", which contains header text. As gtk's TreeView does not support colspans, this cell renderer can be used only in single-column mode. Also, column sizing should be set to gtk.TREE_VIEW_COLUMN_AUTOSIZE and fixed_height_mode should be disabled. """ class CellRenderer(gtk.GenericCellRenderer): """ Properties """ __gproperties__ = { "markup" : (gobject.TYPE_STRING, "Marked up text to render", "Marked up text to render.", None, gobject.PARAM_READWRITE), "aimp_header" : (gobject.TYPE_STRING, "AIMP-like header, if needed", "AIMP-like header, if needed.", None, gobject.PARAM_READWRITE), } def do_set_property(self, key, value): self.__properties[key] = value def do_get_property(self, key): return self.__properties[key] def get_header(self): return self.get_property("aimp_header").replace("<b>", "").replace("</b>", "") """ gtk.GenericCellRenderer overrides """ def __init__(self): gtk.GenericCellRenderer.__init__(self) self.__properties = {} def on_get_size(self, widget, cell_area): if cell_area == None: if self.get_header(): return (0, 0, 0, 34) else: return (0, 0, 0, 16) x = cell_area.x y = cell_area.y w = cell_area.width h = cell_area.height return (x, y, w, h) def on_render(self, window, widget, background_area, cell_area, expose_area, flags): state = gtk.STATE_NORMAL header_color = '#999999' if flags & gtk.CELL_RENDERER_SELECTED: header_color = '#000000' if widget.has_focus(): state = gtk.STATE_SELECTED else: state = gtk.STATE_ACTIVE context = widget.get_pango_context() layout = pango.Layout(context) layout.set_markup(self.get_property("markup")) if self.get_header(): widget.style.paint_layout(window, state, True, cell_area, widget, "", cell_area.x + 2, cell_area.y + 20, layout) layout = pango.Layout(context) layout.set_alignment(pango.ALIGN_RIGHT) layout.set_markup("<span color=\"" + header_color + "\" underline=\"low\" weight=\"bold\">" + glib.markup_escape_text(self.get_header()) + "</span>") widget.style.paint_layout(window, state, True, cell_area, widget, '', cell_area.x + 2, cell_area.y + 2, layout) else: widget.style.paint_layout(window, state, True, cell_area, widget, '', cell_area.x + 2, cell_area.y + 2, layout) return def on_activate(self, event, widget, path, background_area, cell_area, flags): pass def on_start_editing(self, event, widget, path, background_area, cell_area, flags): pass gobject.type_register(CellRenderer) """ Get AIMP header by filename """ def by_filename(filename): bits = os.path.dirname(filename).split("/") try: # Cut CD1, CD2 if re.match(r"^CD", bits[-1]): bits.pop() # Year - Album m = re.match(r"^([0-9]{4})\s*-\s*(.+)$", bits[-1]) if m: album_title = m.group(2) + " (" + m.group(1) + ")" else: album_title = bits[-1] bits.pop() # Cut EPs/Singles/Albums if re.match(r"^(EP|Single|Album)", bits[-1]): bits.pop() return bits[-1] + " - " + album_title except: return os.path.join(bits) """ Get AIMP header by metadata """ def by_metadata(mpdh, track): return '%s - %s (%s)' % (mpdh.get(track, 'artist'), mpdh.get(track, 'album'), mpdh.get(track, 'date'))
themylogin/sonata
sonata/aimpheaders.py
Python
gpl-3.0
3,991
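Worked examples of the directory-name heuristics in by_filename() above (the paths are made up): a "YYYY - Album" folder yields "Artist - Album (YYYY)", and trailing "CD1"/"CD2" folders are skipped.

print(by_filename('/music/Some Artist/2003 - Some Album/CD2/01 - Track.flac'))
# -> Some Artist - Some Album (2003)
print(by_filename('/music/Other Artist/Plain Album/01 - Track.mp3'))
# -> Other Artist - Plain Album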
from datetime import datetime, timedelta, timezone as datetime_timezone import pytz try: import zoneinfo except ImportError: try: from backports import zoneinfo except ImportError: zoneinfo = None from django.conf import settings from django.db.models import ( DateField, DateTimeField, F, IntegerField, Max, OuterRef, Subquery, TimeField, ) from django.db.models.functions import ( Extract, ExtractDay, ExtractHour, ExtractIsoWeekDay, ExtractIsoYear, ExtractMinute, ExtractMonth, ExtractQuarter, ExtractSecond, ExtractWeek, ExtractWeekDay, ExtractYear, Trunc, TruncDate, TruncDay, TruncHour, TruncMinute, TruncMonth, TruncQuarter, TruncSecond, TruncTime, TruncWeek, TruncYear, ) from django.test import ( TestCase, override_settings, skipIfDBFeature, skipUnlessDBFeature, ) from django.utils import timezone from ..models import Author, DTModel, Fan ZONE_CONSTRUCTORS = (pytz.timezone,) if zoneinfo is not None: ZONE_CONSTRUCTORS += (zoneinfo.ZoneInfo,) def truncate_to(value, kind, tzinfo=None): # Convert to target timezone before truncation if tzinfo is not None: value = value.astimezone(tzinfo) def truncate(value, kind): if kind == 'second': return value.replace(microsecond=0) if kind == 'minute': return value.replace(second=0, microsecond=0) if kind == 'hour': return value.replace(minute=0, second=0, microsecond=0) if kind == 'day': if isinstance(value, datetime): return value.replace(hour=0, minute=0, second=0, microsecond=0) return value if kind == 'week': if isinstance(value, datetime): return (value - timedelta(days=value.weekday())).replace(hour=0, minute=0, second=0, microsecond=0) return value - timedelta(days=value.weekday()) if kind == 'month': if isinstance(value, datetime): return value.replace(day=1, hour=0, minute=0, second=0, microsecond=0) return value.replace(day=1) if kind == 'quarter': month_in_quarter = value.month - (value.month - 1) % 3 if isinstance(value, datetime): return value.replace(month=month_in_quarter, day=1, hour=0, minute=0, second=0, microsecond=0) return value.replace(month=month_in_quarter, day=1) # otherwise, truncate to year if isinstance(value, datetime): return value.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0) return value.replace(month=1, day=1) value = truncate(value, kind) if tzinfo is not None: # If there was a daylight saving transition, then reset the timezone. value = timezone.make_aware(value.replace(tzinfo=None), tzinfo) return value @override_settings(USE_TZ=False) class DateFunctionTests(TestCase): def create_model(self, start_datetime, end_datetime): return DTModel.objects.create( name=start_datetime.isoformat() if start_datetime else 'None', start_datetime=start_datetime, end_datetime=end_datetime, start_date=start_datetime.date() if start_datetime else None, end_date=end_datetime.date() if end_datetime else None, start_time=start_datetime.time() if start_datetime else None, end_time=end_datetime.time() if end_datetime else None, duration=(end_datetime - start_datetime) if start_datetime and end_datetime else None, ) def test_extract_year_exact_lookup(self): """ Extract year uses a BETWEEN filter to compare the year to allow indexes to be used. 
""" start_datetime = datetime(2015, 6, 15, 14, 10) end_datetime = datetime(2016, 6, 15, 14, 10) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) for lookup in ('year', 'iso_year'): with self.subTest(lookup): qs = DTModel.objects.filter(**{'start_datetime__%s__exact' % lookup: 2015}) self.assertEqual(qs.count(), 1) query_string = str(qs.query).lower() self.assertEqual(query_string.count(' between '), 1) self.assertEqual(query_string.count('extract'), 0) # exact is implied and should be the same qs = DTModel.objects.filter(**{'start_datetime__%s' % lookup: 2015}) self.assertEqual(qs.count(), 1) query_string = str(qs.query).lower() self.assertEqual(query_string.count(' between '), 1) self.assertEqual(query_string.count('extract'), 0) # date and datetime fields should behave the same qs = DTModel.objects.filter(**{'start_date__%s' % lookup: 2015}) self.assertEqual(qs.count(), 1) query_string = str(qs.query).lower() self.assertEqual(query_string.count(' between '), 1) self.assertEqual(query_string.count('extract'), 0) # an expression rhs cannot use the between optimization. qs = DTModel.objects.annotate( start_year=ExtractYear('start_datetime'), ).filter(end_datetime__year=F('start_year') + 1) self.assertEqual(qs.count(), 1) query_string = str(qs.query).lower() self.assertEqual(query_string.count(' between '), 0) self.assertEqual(query_string.count('extract'), 3) def test_extract_year_greaterthan_lookup(self): start_datetime = datetime(2015, 6, 15, 14, 10) end_datetime = datetime(2016, 6, 15, 14, 10) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) for lookup in ('year', 'iso_year'): with self.subTest(lookup): qs = DTModel.objects.filter(**{'start_datetime__%s__gt' % lookup: 2015}) self.assertEqual(qs.count(), 1) self.assertEqual(str(qs.query).lower().count('extract'), 0) qs = DTModel.objects.filter(**{'start_datetime__%s__gte' % lookup: 2015}) self.assertEqual(qs.count(), 2) self.assertEqual(str(qs.query).lower().count('extract'), 0) qs = DTModel.objects.annotate( start_year=ExtractYear('start_datetime'), ).filter(**{'end_datetime__%s__gte' % lookup: F('start_year')}) self.assertEqual(qs.count(), 1) self.assertGreaterEqual(str(qs.query).lower().count('extract'), 2) def test_extract_year_lessthan_lookup(self): start_datetime = datetime(2015, 6, 15, 14, 10) end_datetime = datetime(2016, 6, 15, 14, 10) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) for lookup in ('year', 'iso_year'): with self.subTest(lookup): qs = DTModel.objects.filter(**{'start_datetime__%s__lt' % lookup: 2016}) self.assertEqual(qs.count(), 1) self.assertEqual(str(qs.query).count('extract'), 0) qs = DTModel.objects.filter(**{'start_datetime__%s__lte' % lookup: 2016}) self.assertEqual(qs.count(), 2) self.assertEqual(str(qs.query).count('extract'), 0) qs = DTModel.objects.annotate( end_year=ExtractYear('end_datetime'), ).filter(**{'start_datetime__%s__lte' % lookup: F('end_year')}) self.assertEqual(qs.count(), 1) 
self.assertGreaterEqual(str(qs.query).lower().count('extract'), 2) def test_extract_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) with self.assertRaisesMessage(ValueError, 'lookup_name must be provided'): Extract('start_datetime') msg = 'Extract input expression must be DateField, DateTimeField, TimeField, or DurationField.' with self.assertRaisesMessage(ValueError, msg): list(DTModel.objects.annotate(extracted=Extract('name', 'hour'))) with self.assertRaisesMessage( ValueError, "Cannot extract time component 'second' from DateField 'start_date'."): list(DTModel.objects.annotate(extracted=Extract('start_date', 'second'))) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=Extract('start_datetime', 'year')).order_by('start_datetime'), [(start_datetime, start_datetime.year), (end_datetime, end_datetime.year)], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=Extract('start_datetime', 'quarter')).order_by('start_datetime'), [(start_datetime, 2), (end_datetime, 2)], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=Extract('start_datetime', 'month')).order_by('start_datetime'), [(start_datetime, start_datetime.month), (end_datetime, end_datetime.month)], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=Extract('start_datetime', 'day')).order_by('start_datetime'), [(start_datetime, start_datetime.day), (end_datetime, end_datetime.day)], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=Extract('start_datetime', 'week')).order_by('start_datetime'), [(start_datetime, 25), (end_datetime, 24)], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=Extract('start_datetime', 'week_day')).order_by('start_datetime'), [ (start_datetime, (start_datetime.isoweekday() % 7) + 1), (end_datetime, (end_datetime.isoweekday() % 7) + 1) ], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate( extracted=Extract('start_datetime', 'iso_week_day'), ).order_by('start_datetime'), [ (start_datetime, start_datetime.isoweekday()), (end_datetime, end_datetime.isoweekday()), ], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=Extract('start_datetime', 'hour')).order_by('start_datetime'), [(start_datetime, start_datetime.hour), (end_datetime, end_datetime.hour)], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=Extract('start_datetime', 'minute')).order_by('start_datetime'), [(start_datetime, start_datetime.minute), (end_datetime, end_datetime.minute)], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=Extract('start_datetime', 'second')).order_by('start_datetime'), [(start_datetime, start_datetime.second), (end_datetime, end_datetime.second)], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual(DTModel.objects.filter(start_datetime__year=Extract('start_datetime', 'year')).count(), 2) 
self.assertEqual(DTModel.objects.filter(start_datetime__hour=Extract('start_datetime', 'hour')).count(), 2) self.assertEqual(DTModel.objects.filter(start_date__month=Extract('start_date', 'month')).count(), 2) self.assertEqual(DTModel.objects.filter(start_time__hour=Extract('start_time', 'hour')).count(), 2) def test_extract_none(self): self.create_model(None, None) for t in (Extract('start_datetime', 'year'), Extract('start_date', 'year'), Extract('start_time', 'hour')): with self.subTest(t): self.assertIsNone(DTModel.objects.annotate(extracted=t).first().extracted) @skipUnlessDBFeature('has_native_duration_field') def test_extract_duration(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=Extract('duration', 'second')).order_by('start_datetime'), [ (start_datetime, (end_datetime - start_datetime).seconds % 60), (end_datetime, (start_datetime - end_datetime).seconds % 60) ], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual( DTModel.objects.annotate( duration_days=Extract('duration', 'day'), ).filter(duration_days__gt=200).count(), 1 ) @skipIfDBFeature('has_native_duration_field') def test_extract_duration_without_native_duration_field(self): msg = 'Extract requires native DurationField database support.' with self.assertRaisesMessage(ValueError, msg): list(DTModel.objects.annotate(extracted=Extract('duration', 'second'))) def test_extract_duration_unsupported_lookups(self): msg = "Cannot extract component '%s' from DurationField 'duration'." 
for lookup in ( 'year', 'iso_year', 'month', 'week', 'week_day', 'iso_week_day', 'quarter', ): with self.subTest(lookup): with self.assertRaisesMessage(ValueError, msg % lookup): DTModel.objects.annotate(extracted=Extract('duration', lookup)) def test_extract_year_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractYear('start_datetime')).order_by('start_datetime'), [(start_datetime, start_datetime.year), (end_datetime, end_datetime.year)], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractYear('start_date')).order_by('start_datetime'), [(start_datetime, start_datetime.year), (end_datetime, end_datetime.year)], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual(DTModel.objects.filter(start_datetime__year=ExtractYear('start_datetime')).count(), 2) def test_extract_iso_year_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractIsoYear('start_datetime')).order_by('start_datetime'), [(start_datetime, start_datetime.year), (end_datetime, end_datetime.year)], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractIsoYear('start_date')).order_by('start_datetime'), [(start_datetime, start_datetime.year), (end_datetime, end_datetime.year)], lambda m: (m.start_datetime, m.extracted) ) # Both dates are from the same week year. 
self.assertEqual(DTModel.objects.filter(start_datetime__iso_year=ExtractIsoYear('start_datetime')).count(), 2) def test_extract_iso_year_func_boundaries(self): end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: end_datetime = timezone.make_aware(end_datetime, is_dst=False) week_52_day_2014 = datetime(2014, 12, 27, 13, 0) # Sunday week_1_day_2014_2015 = datetime(2014, 12, 31, 13, 0) # Wednesday week_53_day_2015 = datetime(2015, 12, 31, 13, 0) # Thursday if settings.USE_TZ: week_1_day_2014_2015 = timezone.make_aware(week_1_day_2014_2015, is_dst=False) week_52_day_2014 = timezone.make_aware(week_52_day_2014, is_dst=False) week_53_day_2015 = timezone.make_aware(week_53_day_2015, is_dst=False) days = [week_52_day_2014, week_1_day_2014_2015, week_53_day_2015] self.create_model(week_53_day_2015, end_datetime) self.create_model(week_52_day_2014, end_datetime) self.create_model(week_1_day_2014_2015, end_datetime) qs = DTModel.objects.filter(start_datetime__in=days).annotate( extracted=ExtractIsoYear('start_datetime'), ).order_by('start_datetime') self.assertQuerysetEqual(qs, [ (week_52_day_2014, 2014), (week_1_day_2014_2015, 2015), (week_53_day_2015, 2015), ], lambda m: (m.start_datetime, m.extracted)) def test_extract_month_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractMonth('start_datetime')).order_by('start_datetime'), [(start_datetime, start_datetime.month), (end_datetime, end_datetime.month)], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractMonth('start_date')).order_by('start_datetime'), [(start_datetime, start_datetime.month), (end_datetime, end_datetime.month)], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual(DTModel.objects.filter(start_datetime__month=ExtractMonth('start_datetime')).count(), 2) def test_extract_day_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractDay('start_datetime')).order_by('start_datetime'), [(start_datetime, start_datetime.day), (end_datetime, end_datetime.day)], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractDay('start_date')).order_by('start_datetime'), [(start_datetime, start_datetime.day), (end_datetime, end_datetime.day)], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual(DTModel.objects.filter(start_datetime__day=ExtractDay('start_datetime')).count(), 2) def test_extract_week_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, 
start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractWeek('start_datetime')).order_by('start_datetime'), [(start_datetime, 25), (end_datetime, 24)], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractWeek('start_date')).order_by('start_datetime'), [(start_datetime, 25), (end_datetime, 24)], lambda m: (m.start_datetime, m.extracted) ) # both dates are from the same week. self.assertEqual(DTModel.objects.filter(start_datetime__week=ExtractWeek('start_datetime')).count(), 2) def test_extract_quarter_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 8, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractQuarter('start_datetime')).order_by('start_datetime'), [(start_datetime, 2), (end_datetime, 3)], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractQuarter('start_date')).order_by('start_datetime'), [(start_datetime, 2), (end_datetime, 3)], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual(DTModel.objects.filter(start_datetime__quarter=ExtractQuarter('start_datetime')).count(), 2) def test_extract_quarter_func_boundaries(self): end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: end_datetime = timezone.make_aware(end_datetime, is_dst=False) last_quarter_2014 = datetime(2014, 12, 31, 13, 0) first_quarter_2015 = datetime(2015, 1, 1, 13, 0) if settings.USE_TZ: last_quarter_2014 = timezone.make_aware(last_quarter_2014, is_dst=False) first_quarter_2015 = timezone.make_aware(first_quarter_2015, is_dst=False) dates = [last_quarter_2014, first_quarter_2015] self.create_model(last_quarter_2014, end_datetime) self.create_model(first_quarter_2015, end_datetime) qs = DTModel.objects.filter(start_datetime__in=dates).annotate( extracted=ExtractQuarter('start_datetime'), ).order_by('start_datetime') self.assertQuerysetEqual(qs, [ (last_quarter_2014, 4), (first_quarter_2015, 1), ], lambda m: (m.start_datetime, m.extracted)) def test_extract_week_func_boundaries(self): end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: end_datetime = timezone.make_aware(end_datetime, is_dst=False) week_52_day_2014 = datetime(2014, 12, 27, 13, 0) # Sunday week_1_day_2014_2015 = datetime(2014, 12, 31, 13, 0) # Wednesday week_53_day_2015 = datetime(2015, 12, 31, 13, 0) # Thursday if settings.USE_TZ: week_1_day_2014_2015 = timezone.make_aware(week_1_day_2014_2015, is_dst=False) week_52_day_2014 = timezone.make_aware(week_52_day_2014, is_dst=False) week_53_day_2015 = timezone.make_aware(week_53_day_2015, is_dst=False) days = [week_52_day_2014, week_1_day_2014_2015, week_53_day_2015] self.create_model(week_53_day_2015, end_datetime) self.create_model(week_52_day_2014, end_datetime) self.create_model(week_1_day_2014_2015, end_datetime) qs = DTModel.objects.filter(start_datetime__in=days).annotate( extracted=ExtractWeek('start_datetime'), ).order_by('start_datetime') self.assertQuerysetEqual(qs, [ (week_52_day_2014, 52), (week_1_day_2014_2015, 1), (week_53_day_2015, 53), ], lambda m: (m.start_datetime, m.extracted)) def test_extract_weekday_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 
321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractWeekDay('start_datetime')).order_by('start_datetime'), [ (start_datetime, (start_datetime.isoweekday() % 7) + 1), (end_datetime, (end_datetime.isoweekday() % 7) + 1), ], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractWeekDay('start_date')).order_by('start_datetime'), [ (start_datetime, (start_datetime.isoweekday() % 7) + 1), (end_datetime, (end_datetime.isoweekday() % 7) + 1), ], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual(DTModel.objects.filter(start_datetime__week_day=ExtractWeekDay('start_datetime')).count(), 2) def test_extract_iso_weekday_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate( extracted=ExtractIsoWeekDay('start_datetime'), ).order_by('start_datetime'), [ (start_datetime, start_datetime.isoweekday()), (end_datetime, end_datetime.isoweekday()), ], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate( extracted=ExtractIsoWeekDay('start_date'), ).order_by('start_datetime'), [ (start_datetime, start_datetime.isoweekday()), (end_datetime, end_datetime.isoweekday()), ], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual( DTModel.objects.filter( start_datetime__week_day=ExtractWeekDay('start_datetime'), ).count(), 2, ) def test_extract_hour_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractHour('start_datetime')).order_by('start_datetime'), [(start_datetime, start_datetime.hour), (end_datetime, end_datetime.hour)], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractHour('start_time')).order_by('start_datetime'), [(start_datetime, start_datetime.hour), (end_datetime, end_datetime.hour)], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual(DTModel.objects.filter(start_datetime__hour=ExtractHour('start_datetime')).count(), 2) def test_extract_minute_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractMinute('start_datetime')).order_by('start_datetime'), [(start_datetime, start_datetime.minute), 
(end_datetime, end_datetime.minute)], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractMinute('start_time')).order_by('start_datetime'), [(start_datetime, start_datetime.minute), (end_datetime, end_datetime.minute)], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual(DTModel.objects.filter(start_datetime__minute=ExtractMinute('start_datetime')).count(), 2) def test_extract_second_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractSecond('start_datetime')).order_by('start_datetime'), [(start_datetime, start_datetime.second), (end_datetime, end_datetime.second)], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractSecond('start_time')).order_by('start_datetime'), [(start_datetime, start_datetime.second), (end_datetime, end_datetime.second)], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual(DTModel.objects.filter(start_datetime__second=ExtractSecond('start_datetime')).count(), 2) def test_trunc_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) msg = 'output_field must be either DateField, TimeField, or DateTimeField' with self.assertRaisesMessage(ValueError, msg): list(DTModel.objects.annotate(truncated=Trunc('start_datetime', 'year', output_field=IntegerField()))) with self.assertRaisesMessage(AssertionError, "'name' isn't a DateField, TimeField, or DateTimeField."): list(DTModel.objects.annotate(truncated=Trunc('name', 'year', output_field=DateTimeField()))) with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"): list(DTModel.objects.annotate(truncated=Trunc('start_date', 'second'))) with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"): list(DTModel.objects.annotate(truncated=Trunc('start_time', 'month'))) with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"): list(DTModel.objects.annotate(truncated=Trunc('start_date', 'month', output_field=DateTimeField()))) with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"): list(DTModel.objects.annotate(truncated=Trunc('start_time', 'second', output_field=DateTimeField()))) def test_datetime_kind(kind): self.assertQuerysetEqual( DTModel.objects.annotate( truncated=Trunc('start_datetime', kind, output_field=DateTimeField()) ).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime, kind)), (end_datetime, truncate_to(end_datetime, kind)) ], lambda m: (m.start_datetime, m.truncated) ) def test_date_kind(kind): self.assertQuerysetEqual( DTModel.objects.annotate( truncated=Trunc('start_date', kind, output_field=DateField()) ).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime.date(), kind)), 
(end_datetime, truncate_to(end_datetime.date(), kind)) ], lambda m: (m.start_datetime, m.truncated) ) def test_time_kind(kind): self.assertQuerysetEqual( DTModel.objects.annotate( truncated=Trunc('start_time', kind, output_field=TimeField()) ).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime.time(), kind)), (end_datetime, truncate_to(end_datetime.time(), kind)) ], lambda m: (m.start_datetime, m.truncated) ) def test_datetime_to_time_kind(kind): self.assertQuerysetEqual( DTModel.objects.annotate( truncated=Trunc('start_datetime', kind, output_field=TimeField()), ).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime.time(), kind)), (end_datetime, truncate_to(end_datetime.time(), kind)), ], lambda m: (m.start_datetime, m.truncated), ) test_date_kind('year') test_date_kind('quarter') test_date_kind('month') test_date_kind('week') test_date_kind('day') test_time_kind('hour') test_time_kind('minute') test_time_kind('second') test_datetime_kind('year') test_datetime_kind('quarter') test_datetime_kind('month') test_datetime_kind('week') test_datetime_kind('day') test_datetime_kind('hour') test_datetime_kind('minute') test_datetime_kind('second') test_datetime_to_time_kind('hour') test_datetime_to_time_kind('minute') test_datetime_to_time_kind('second') qs = DTModel.objects.filter(start_datetime__date=Trunc('start_datetime', 'day', output_field=DateField())) self.assertEqual(qs.count(), 2) def test_trunc_none(self): self.create_model(None, None) for t in (Trunc('start_datetime', 'year'), Trunc('start_date', 'year'), Trunc('start_time', 'hour')): with self.subTest(t): self.assertIsNone(DTModel.objects.annotate(truncated=t).first().truncated) def test_trunc_year_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = truncate_to(datetime(2016, 6, 15, 14, 10, 50, 123), 'year') if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncYear('start_datetime')).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime, 'year')), (end_datetime, truncate_to(end_datetime, 'year')), ], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncYear('start_date')).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime.date(), 'year')), (end_datetime, truncate_to(end_datetime.date(), 'year')), ], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual(DTModel.objects.filter(start_datetime=TruncYear('start_datetime')).count(), 1) with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"): list(DTModel.objects.annotate(truncated=TruncYear('start_time'))) with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"): list(DTModel.objects.annotate(truncated=TruncYear('start_time', output_field=TimeField()))) def test_trunc_quarter_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = truncate_to(datetime(2016, 10, 15, 14, 10, 50, 123), 'quarter') last_quarter_2015 = truncate_to(datetime(2015, 12, 31, 14, 10, 50, 123), 'quarter') first_quarter_2016 = truncate_to(datetime(2016, 1, 1, 14, 10, 50, 123), 'quarter') if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) 
end_datetime = timezone.make_aware(end_datetime, is_dst=False) last_quarter_2015 = timezone.make_aware(last_quarter_2015, is_dst=False) first_quarter_2016 = timezone.make_aware(first_quarter_2016, is_dst=False) self.create_model(start_datetime=start_datetime, end_datetime=end_datetime) self.create_model(start_datetime=end_datetime, end_datetime=start_datetime) self.create_model(start_datetime=last_quarter_2015, end_datetime=end_datetime) self.create_model(start_datetime=first_quarter_2016, end_datetime=end_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncQuarter('start_date')).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime.date(), 'quarter')), (last_quarter_2015, truncate_to(last_quarter_2015.date(), 'quarter')), (first_quarter_2016, truncate_to(first_quarter_2016.date(), 'quarter')), (end_datetime, truncate_to(end_datetime.date(), 'quarter')), ], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncQuarter('start_datetime')).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime, 'quarter')), (last_quarter_2015, truncate_to(last_quarter_2015, 'quarter')), (first_quarter_2016, truncate_to(first_quarter_2016, 'quarter')), (end_datetime, truncate_to(end_datetime, 'quarter')), ], lambda m: (m.start_datetime, m.extracted) ) with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"): list(DTModel.objects.annotate(truncated=TruncQuarter('start_time'))) with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"): list(DTModel.objects.annotate(truncated=TruncQuarter('start_time', output_field=TimeField()))) def test_trunc_month_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = truncate_to(datetime(2016, 6, 15, 14, 10, 50, 123), 'month') if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncMonth('start_datetime')).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime, 'month')), (end_datetime, truncate_to(end_datetime, 'month')), ], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncMonth('start_date')).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime.date(), 'month')), (end_datetime, truncate_to(end_datetime.date(), 'month')), ], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual(DTModel.objects.filter(start_datetime=TruncMonth('start_datetime')).count(), 1) with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"): list(DTModel.objects.annotate(truncated=TruncMonth('start_time'))) with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"): list(DTModel.objects.annotate(truncated=TruncMonth('start_time', output_field=TimeField()))) def test_trunc_week_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = truncate_to(datetime(2016, 6, 15, 14, 10, 50, 123), 'week') if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) 
self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncWeek('start_datetime')).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime, 'week')), (end_datetime, truncate_to(end_datetime, 'week')), ], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual(DTModel.objects.filter(start_datetime=TruncWeek('start_datetime')).count(), 1) with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"): list(DTModel.objects.annotate(truncated=TruncWeek('start_time'))) with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"): list(DTModel.objects.annotate(truncated=TruncWeek('start_time', output_field=TimeField()))) def test_trunc_date_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncDate('start_datetime')).order_by('start_datetime'), [ (start_datetime, start_datetime.date()), (end_datetime, end_datetime.date()), ], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual(DTModel.objects.filter(start_datetime__date=TruncDate('start_datetime')).count(), 2) with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateField"): list(DTModel.objects.annotate(truncated=TruncDate('start_time'))) with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateField"): list(DTModel.objects.annotate(truncated=TruncDate('start_time', output_field=TimeField()))) def test_trunc_date_none(self): self.create_model(None, None) self.assertIsNone(DTModel.objects.annotate(truncated=TruncDate('start_datetime')).first().truncated) def test_trunc_time_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncTime('start_datetime')).order_by('start_datetime'), [ (start_datetime, start_datetime.time()), (end_datetime, end_datetime.time()), ], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual(DTModel.objects.filter(start_datetime__time=TruncTime('start_datetime')).count(), 2) with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to TimeField"): list(DTModel.objects.annotate(truncated=TruncTime('start_date'))) with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to TimeField"): list(DTModel.objects.annotate(truncated=TruncTime('start_date', output_field=DateField()))) def test_trunc_time_none(self): self.create_model(None, None) self.assertIsNone(DTModel.objects.annotate(truncated=TruncTime('start_datetime')).first().truncated) def test_trunc_day_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = truncate_to(datetime(2016, 6, 15, 14, 10, 50, 123), 'day') if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = 
timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncDay('start_datetime')).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime, 'day')), (end_datetime, truncate_to(end_datetime, 'day')), ], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual(DTModel.objects.filter(start_datetime=TruncDay('start_datetime')).count(), 1) with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"): list(DTModel.objects.annotate(truncated=TruncDay('start_time'))) with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"): list(DTModel.objects.annotate(truncated=TruncDay('start_time', output_field=TimeField()))) def test_trunc_hour_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = truncate_to(datetime(2016, 6, 15, 14, 10, 50, 123), 'hour') if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncHour('start_datetime')).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime, 'hour')), (end_datetime, truncate_to(end_datetime, 'hour')), ], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncHour('start_time')).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime.time(), 'hour')), (end_datetime, truncate_to(end_datetime.time(), 'hour')), ], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual(DTModel.objects.filter(start_datetime=TruncHour('start_datetime')).count(), 1) with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"): list(DTModel.objects.annotate(truncated=TruncHour('start_date'))) with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"): list(DTModel.objects.annotate(truncated=TruncHour('start_date', output_field=DateField()))) def test_trunc_minute_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = truncate_to(datetime(2016, 6, 15, 14, 10, 50, 123), 'minute') if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncMinute('start_datetime')).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime, 'minute')), (end_datetime, truncate_to(end_datetime, 'minute')), ], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncMinute('start_time')).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime.time(), 'minute')), (end_datetime, truncate_to(end_datetime.time(), 'minute')), ], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual(DTModel.objects.filter(start_datetime=TruncMinute('start_datetime')).count(), 1) with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"): list(DTModel.objects.annotate(truncated=TruncMinute('start_date'))) with 
self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"): list(DTModel.objects.annotate(truncated=TruncMinute('start_date', output_field=DateField()))) def test_trunc_second_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = truncate_to(datetime(2016, 6, 15, 14, 10, 50, 123), 'second') if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncSecond('start_datetime')).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime, 'second')), (end_datetime, truncate_to(end_datetime, 'second')) ], lambda m: (m.start_datetime, m.extracted) ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncSecond('start_time')).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime.time(), 'second')), (end_datetime, truncate_to(end_datetime.time(), 'second')) ], lambda m: (m.start_datetime, m.extracted) ) self.assertEqual(DTModel.objects.filter(start_datetime=TruncSecond('start_datetime')).count(), 1) with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"): list(DTModel.objects.annotate(truncated=TruncSecond('start_date'))) with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"): list(DTModel.objects.annotate(truncated=TruncSecond('start_date', output_field=DateField()))) def test_trunc_subquery_with_parameters(self): author_1 = Author.objects.create(name='J. R. R. Tolkien') author_2 = Author.objects.create(name='G. R. R. Martin') fan_since_1 = datetime(2016, 2, 3, 15, 0, 0) fan_since_2 = datetime(2015, 2, 3, 15, 0, 0) fan_since_3 = datetime(2017, 2, 3, 15, 0, 0) if settings.USE_TZ: fan_since_1 = timezone.make_aware(fan_since_1, is_dst=False) fan_since_2 = timezone.make_aware(fan_since_2, is_dst=False) fan_since_3 = timezone.make_aware(fan_since_3, is_dst=False) Fan.objects.create(author=author_1, name='Tom', fan_since=fan_since_1) Fan.objects.create(author=author_1, name='Emma', fan_since=fan_since_2) Fan.objects.create(author=author_2, name='Isabella', fan_since=fan_since_3) inner = Fan.objects.filter( author=OuterRef('pk'), name__in=('Emma', 'Isabella', 'Tom') ).values('author').annotate(newest_fan=Max('fan_since')).values('newest_fan') outer = Author.objects.annotate( newest_fan_year=TruncYear(Subquery(inner, output_field=DateTimeField())) ) tz = timezone.utc if settings.USE_TZ else None self.assertSequenceEqual( outer.order_by('name').values('name', 'newest_fan_year'), [ {'name': 'G. R. R. Martin', 'newest_fan_year': datetime(2017, 1, 1, 0, 0, tzinfo=tz)}, {'name': 'J. R. R. 
Tolkien', 'newest_fan_year': datetime(2016, 1, 1, 0, 0, tzinfo=tz)}, ] ) @override_settings(USE_TZ=True, TIME_ZONE='UTC') class DateFunctionWithTimeZoneTests(DateFunctionTests): def get_timezones(self, key): for constructor in ZONE_CONSTRUCTORS: yield constructor(key) def test_extract_func_with_timezone(self): start_datetime = datetime(2015, 6, 15, 23, 30, 1, 321) end_datetime = datetime(2015, 6, 16, 13, 11, 27, 123) start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) delta_tzinfo_pos = datetime_timezone(timedelta(hours=5)) delta_tzinfo_neg = datetime_timezone(timedelta(hours=-5, minutes=17)) for melb in self.get_timezones('Australia/Melbourne'): with self.subTest(repr(melb)): qs = DTModel.objects.annotate( day=Extract('start_datetime', 'day'), day_melb=Extract('start_datetime', 'day', tzinfo=melb), week=Extract('start_datetime', 'week', tzinfo=melb), isoyear=ExtractIsoYear('start_datetime', tzinfo=melb), weekday=ExtractWeekDay('start_datetime'), weekday_melb=ExtractWeekDay('start_datetime', tzinfo=melb), isoweekday=ExtractIsoWeekDay('start_datetime'), isoweekday_melb=ExtractIsoWeekDay('start_datetime', tzinfo=melb), quarter=ExtractQuarter('start_datetime', tzinfo=melb), hour=ExtractHour('start_datetime'), hour_melb=ExtractHour('start_datetime', tzinfo=melb), hour_with_delta_pos=ExtractHour('start_datetime', tzinfo=delta_tzinfo_pos), hour_with_delta_neg=ExtractHour('start_datetime', tzinfo=delta_tzinfo_neg), minute_with_delta_neg=ExtractMinute('start_datetime', tzinfo=delta_tzinfo_neg), ).order_by('start_datetime') utc_model = qs.get() self.assertEqual(utc_model.day, 15) self.assertEqual(utc_model.day_melb, 16) self.assertEqual(utc_model.week, 25) self.assertEqual(utc_model.isoyear, 2015) self.assertEqual(utc_model.weekday, 2) self.assertEqual(utc_model.weekday_melb, 3) self.assertEqual(utc_model.isoweekday, 1) self.assertEqual(utc_model.isoweekday_melb, 2) self.assertEqual(utc_model.quarter, 2) self.assertEqual(utc_model.hour, 23) self.assertEqual(utc_model.hour_melb, 9) self.assertEqual(utc_model.hour_with_delta_pos, 4) self.assertEqual(utc_model.hour_with_delta_neg, 18) self.assertEqual(utc_model.minute_with_delta_neg, 47) with timezone.override(melb): melb_model = qs.get() self.assertEqual(melb_model.day, 16) self.assertEqual(melb_model.day_melb, 16) self.assertEqual(melb_model.week, 25) self.assertEqual(melb_model.isoyear, 2015) self.assertEqual(melb_model.weekday, 3) self.assertEqual(melb_model.isoweekday, 2) self.assertEqual(melb_model.quarter, 2) self.assertEqual(melb_model.weekday_melb, 3) self.assertEqual(melb_model.isoweekday_melb, 2) self.assertEqual(melb_model.hour, 9) self.assertEqual(melb_model.hour_melb, 9) def test_extract_func_explicit_timezone_priority(self): start_datetime = datetime(2015, 6, 15, 23, 30, 1, 321) end_datetime = datetime(2015, 6, 16, 13, 11, 27, 123) start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) for melb in self.get_timezones('Australia/Melbourne'): with self.subTest(repr(melb)): with timezone.override(melb): model = DTModel.objects.annotate( day_melb=Extract('start_datetime', 'day'), day_utc=Extract('start_datetime', 'day', tzinfo=timezone.utc), ).order_by('start_datetime').get() self.assertEqual(model.day_melb, 16) self.assertEqual(model.day_utc, 15) def 
test_extract_invalid_field_with_timezone(self): for melb in self.get_timezones('Australia/Melbourne'): with self.subTest(repr(melb)): msg = 'tzinfo can only be used with DateTimeField.' with self.assertRaisesMessage(ValueError, msg): DTModel.objects.annotate( day_melb=Extract('start_date', 'day', tzinfo=melb), ).get() with self.assertRaisesMessage(ValueError, msg): DTModel.objects.annotate( hour_melb=Extract('start_time', 'hour', tzinfo=melb), ).get() def test_trunc_timezone_applied_before_truncation(self): start_datetime = datetime(2016, 1, 1, 1, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) for melb, pacific in zip( self.get_timezones('Australia/Melbourne'), self.get_timezones('America/Los_Angeles') ): with self.subTest((repr(melb), repr(pacific))): model = DTModel.objects.annotate( melb_year=TruncYear('start_datetime', tzinfo=melb), pacific_year=TruncYear('start_datetime', tzinfo=pacific), melb_date=TruncDate('start_datetime', tzinfo=melb), pacific_date=TruncDate('start_datetime', tzinfo=pacific), melb_time=TruncTime('start_datetime', tzinfo=melb), pacific_time=TruncTime('start_datetime', tzinfo=pacific), ).order_by('start_datetime').get() melb_start_datetime = start_datetime.astimezone(melb) pacific_start_datetime = start_datetime.astimezone(pacific) self.assertEqual(model.start_datetime, start_datetime) self.assertEqual(model.melb_year, truncate_to(start_datetime, 'year', melb)) self.assertEqual(model.pacific_year, truncate_to(start_datetime, 'year', pacific)) self.assertEqual(model.start_datetime.year, 2016) self.assertEqual(model.melb_year.year, 2016) self.assertEqual(model.pacific_year.year, 2015) self.assertEqual(model.melb_date, melb_start_datetime.date()) self.assertEqual(model.pacific_date, pacific_start_datetime.date()) self.assertEqual(model.melb_time, melb_start_datetime.time()) self.assertEqual(model.pacific_time, pacific_start_datetime.time()) def test_trunc_ambiguous_and_invalid_times(self): sao = pytz.timezone('America/Sao_Paulo') utc = timezone.utc start_datetime = datetime(2016, 10, 16, 13, tzinfo=utc) end_datetime = datetime(2016, 2, 21, 1, tzinfo=utc) self.create_model(start_datetime, end_datetime) with timezone.override(sao): with self.assertRaisesMessage(pytz.NonExistentTimeError, '2016-10-16 00:00:00'): model = DTModel.objects.annotate(truncated_start=TruncDay('start_datetime')).get() with self.assertRaisesMessage(pytz.AmbiguousTimeError, '2016-02-20 23:00:00'): model = DTModel.objects.annotate(truncated_end=TruncHour('end_datetime')).get() model = DTModel.objects.annotate( truncated_start=TruncDay('start_datetime', is_dst=False), truncated_end=TruncHour('end_datetime', is_dst=False), ).get() self.assertEqual(model.truncated_start.dst(), timedelta(0)) self.assertEqual(model.truncated_end.dst(), timedelta(0)) model = DTModel.objects.annotate( truncated_start=TruncDay('start_datetime', is_dst=True), truncated_end=TruncHour('end_datetime', is_dst=True), ).get() self.assertEqual(model.truncated_start.dst(), timedelta(0, 3600)) self.assertEqual(model.truncated_end.dst(), timedelta(0, 3600)) def test_trunc_func_with_timezone(self): """ If the truncated datetime transitions to a different offset (daylight saving) then the returned value will have that new timezone/offset. 
""" start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) start_datetime = timezone.make_aware(start_datetime, is_dst=False) end_datetime = timezone.make_aware(end_datetime, is_dst=False) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) for melb in self.get_timezones('Australia/Melbourne'): with self.subTest(repr(melb)): def test_datetime_kind(kind): self.assertQuerysetEqual( DTModel.objects.annotate( truncated=Trunc( 'start_datetime', kind, output_field=DateTimeField(), tzinfo=melb ) ).order_by('start_datetime'), [ (start_datetime, truncate_to(start_datetime.astimezone(melb), kind, melb)), (end_datetime, truncate_to(end_datetime.astimezone(melb), kind, melb)) ], lambda m: (m.start_datetime, m.truncated) ) def test_datetime_to_date_kind(kind): self.assertQuerysetEqual( DTModel.objects.annotate( truncated=Trunc( 'start_datetime', kind, output_field=DateField(), tzinfo=melb, ), ).order_by('start_datetime'), [ ( start_datetime, truncate_to(start_datetime.astimezone(melb).date(), kind), ), ( end_datetime, truncate_to(end_datetime.astimezone(melb).date(), kind), ), ], lambda m: (m.start_datetime, m.truncated), ) def test_datetime_to_time_kind(kind): self.assertQuerysetEqual( DTModel.objects.annotate( truncated=Trunc( 'start_datetime', kind, output_field=TimeField(), tzinfo=melb, ) ).order_by('start_datetime'), [ ( start_datetime, truncate_to(start_datetime.astimezone(melb).time(), kind), ), ( end_datetime, truncate_to(end_datetime.astimezone(melb).time(), kind), ), ], lambda m: (m.start_datetime, m.truncated), ) test_datetime_to_date_kind('year') test_datetime_to_date_kind('quarter') test_datetime_to_date_kind('month') test_datetime_to_date_kind('week') test_datetime_to_date_kind('day') test_datetime_to_time_kind('hour') test_datetime_to_time_kind('minute') test_datetime_to_time_kind('second') test_datetime_kind('year') test_datetime_kind('quarter') test_datetime_kind('month') test_datetime_kind('week') test_datetime_kind('day') test_datetime_kind('hour') test_datetime_kind('minute') test_datetime_kind('second') qs = DTModel.objects.filter( start_datetime__date=Trunc('start_datetime', 'day', output_field=DateField()) ) self.assertEqual(qs.count(), 2) def test_trunc_invalid_field_with_timezone(self): for melb in self.get_timezones('Australia/Melbourne'): with self.subTest(repr(melb)): msg = 'tzinfo can only be used with DateTimeField.' with self.assertRaisesMessage(ValueError, msg): DTModel.objects.annotate( day_melb=Trunc('start_date', 'day', tzinfo=melb), ).get() with self.assertRaisesMessage(ValueError, msg): DTModel.objects.annotate( hour_melb=Trunc('start_time', 'hour', tzinfo=melb), ).get()
elena/django
tests/db_functions/datetime/test_extract_trunc.py
Python
bsd-3-clause
69,302
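A standalone sketch (pytz assumed available, mirroring the zones used in the tests above) of why the same UTC instant truncates to year 2016 in Melbourne but 2015 in Los Angeles: Trunc converts to the target timezone before zeroing the smaller fields.

import pytz
from datetime import datetime

melb = pytz.timezone('Australia/Melbourne')
pacific = pytz.timezone('America/Los_Angeles')

# Same instant as test_trunc_timezone_applied_before_truncation.
utc_dt = pytz.utc.localize(datetime(2016, 1, 1, 1, 30, 50))

print(utc_dt.astimezone(melb).year)     # 2016 -- Melbourne is ahead of UTC
print(utc_dt.astimezone(pacific).year)  # 2015 -- Los Angeles is still in 2015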
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras estimator API."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.util.tf_export import tf_export

# Keras has undeclared dependency on tensorflow/estimator:estimator_py.
# As long as you depend //third_party/py/tensorflow:tensorflow target
# everything will work as normal.

try:
  import tensorflow.python.estimator.keras as keras_lib  # pylint: disable=g-import-not-at-top
  model_to_estimator = tf_export('keras.estimator.model_to_estimator')(
      keras_lib.model_to_estimator)
except Exception:  # pylint: disable=broad-except

  # pylint: disable=unused-argument
  def stub_model_to_estimator(keras_model=None,
                              keras_model_path=None,
                              custom_objects=None,
                              model_dir=None,
                              config=None):
    raise NotImplementedError(
        'tf.keras.estimator.model_to_estimator function not available in your '
        'installation.')
  # pylint: enable=unused-argument

  model_to_estimator = tf_export('keras.estimator.model_to_estimator')(
      stub_model_to_estimator)
lukeiwanski/tensorflow
tensorflow/python/keras/estimator/__init__.py
Python
apache-2.0
1,873
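A hedged usage sketch for the symbol exported above; the model architecture and model_dir are illustrative, and the call raises the stub's NotImplementedError when the estimator dependency is absent.

import tensorflow as tf

# Illustrative model; any compiled Keras model works.
model = tf.keras.models.Sequential([
    tf.keras.layers.Dense(16, activation='relu', input_shape=(4,)),
    tf.keras.layers.Dense(3, activation='softmax'),
])
model.compile(optimizer='adam', loss='categorical_crossentropy')

# Converts the compiled Keras model into a tf.estimator.Estimator.
estimator = tf.keras.estimator.model_to_estimator(
    keras_model=model, model_dir='/tmp/keras_estimator')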
import xbmcgui


#abstract
class Li(object):

    def __init__(self, title, icon, thumb, url, isFolder, isPlayable, videoInfoLabels, generalInfoLabels, contextMenus=None):
        li = xbmcgui.ListItem(title, iconImage=icon, thumbnailImage=thumb)

        if isPlayable:
            li.setProperty('IsPlayable', 'True')

        if generalInfoLabels is not None:
            li.setInfo('general', generalInfoLabels)

        if videoInfoLabels is not None:
            li.setInfo('video', videoInfoLabels)

        self.li = li
        self.di = (url, li, isFolder)
        self.url = url

        if contextMenus:
            li.addContextMenuItems(contextMenus)


def runPluginCm(label, url):
    return ('%s' % label, 'RunPlugin(%s)' % url)
SportySpice/Collections
src/li/Li.py
Python
gpl-2.0
913
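A sketch of how the wrapper above might be driven from a Kodi plugin; the import path mirrors the repository layout, and the URLs and labels are placeholders, not part of the original file.

from src.li.Li import Li, runPluginCm

cm = [runPluginCm('Download', 'plugin://plugin.video.example/?action=download')]
item = Li('My Video', icon='icon.png', thumb='thumb.png',
          url='plugin://plugin.video.example/?action=play',
          isFolder=False, isPlayable=True,
          videoInfoLabels={'title': 'My Video'}, generalInfoLabels=None,
          contextMenus=cm)
url, listitem, is_folder = item.di  # tuple shaped for xbmcplugin.addDirectoryItem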
from sqlalchemy.util import topological from test.lib.testing import assert_raises, eq_ from test.lib.util import conforms_partial_ordering from sqlalchemy import exc from test.lib import fixtures class DependencySortTest(fixtures.TestBase): def assert_sort(self, tuples, allitems=None): if allitems is None: allitems = self._nodes_from_tuples(tuples) else: allitems = self._nodes_from_tuples(tuples).union(allitems) result = list(topological.sort(tuples, allitems)) assert conforms_partial_ordering(tuples, result) def _nodes_from_tuples(self, tups): s = set() for tup in tups: s.update(tup) return s def test_sort_one(self): rootnode = 'root' node2 = 'node2' node3 = 'node3' node4 = 'node4' subnode1 = 'subnode1' subnode2 = 'subnode2' subnode3 = 'subnode3' subnode4 = 'subnode4' subsubnode1 = 'subsubnode1' tuples = [ (subnode3, subsubnode1), (node2, subnode1), (node2, subnode2), (rootnode, node2), (rootnode, node3), (rootnode, node4), (node4, subnode3), (node4, subnode4), ] self.assert_sort(tuples) def test_sort_two(self): node1 = 'node1' node2 = 'node2' node3 = 'node3' node4 = 'node4' node5 = 'node5' node6 = 'node6' node7 = 'node7' tuples = [(node1, node2), (node3, node4), (node4, node5), (node5, node6), (node6, node2)] self.assert_sort(tuples, [node7]) def test_sort_three(self): node1 = 'keywords' node2 = 'itemkeyowrds' node3 = 'items' node4 = 'hoho' tuples = [(node1, node2), (node4, node1), (node1, node3), (node3, node2)] self.assert_sort(tuples) def test_raise_on_cycle_one(self): node1 = 'node1' node2 = 'node2' node3 = 'node3' node4 = 'node4' node5 = 'node5' tuples = [ (node4, node5), (node5, node4), (node1, node2), (node2, node3), (node3, node1), (node4, node1), ] allitems = self._nodes_from_tuples(tuples) try: list(topological.sort(tuples, allitems)) assert False except exc.CircularDependencyError, err: eq_(err.cycles, set(['node1', 'node3', 'node2', 'node5', 'node4'])) eq_(err.edges, set([('node3', 'node1'), ('node4', 'node1'), ('node2', 'node3'), ('node1', 'node2'), ('node4','node5'), ('node5', 'node4')])) def test_raise_on_cycle_two(self): # this condition was arising from ticket:362 and was not treated # properly by topological sort node1 = 'node1' node2 = 'node2' node3 = 'node3' node4 = 'node4' tuples = [(node1, node2), (node3, node1), (node2, node4), (node3, node2), (node2, node3)] allitems = self._nodes_from_tuples(tuples) try: list(topological.sort(tuples, allitems)) assert False except exc.CircularDependencyError, err: eq_(err.cycles, set(['node1', 'node3', 'node2'])) eq_(err.edges, set([('node3', 'node1'), ('node2', 'node3'), ('node3', 'node2'), ('node1', 'node2'), ('node2','node4')])) def test_raise_on_cycle_three(self): question, issue, providerservice, answer, provider = \ 'Question', 'Issue', 'ProviderService', 'Answer', 'Provider' tuples = [ (question, issue), (providerservice, issue), (provider, question), (question, provider), (providerservice, question), (provider, providerservice), (question, answer), (issue, question), ] allitems = self._nodes_from_tuples(tuples) assert_raises(exc.CircularDependencyError, list, topological.sort(tuples, allitems)) # TODO: test find_cycles def test_large_sort(self): tuples = [(i, i + 1) for i in range(0, 1500, 2)] self.assert_sort(tuples) def test_ticket_1380(self): # ticket:1380 regression: would raise a KeyError tuples = [(id(i), i) for i in range(3)] self.assert_sort(tuples) def test_find_cycle_one(self): node1 = 'node1' node2 = 'node2' node3 = 'node3' node4 = 'node4' tuples = [(node1, node2), (node3, node1), (node2, node4), (node3, 
node2), (node2, node3)] eq_(topological.find_cycles(tuples, self._nodes_from_tuples(tuples)), set([node1, node2, node3])) def test_find_multiple_cycles_one(self): node1 = 'node1' node2 = 'node2' node3 = 'node3' node4 = 'node4' node5 = 'node5' node6 = 'node6' node7 = 'node7' node8 = 'node8' node9 = 'node9' tuples = [ # cycle 1 cycle 2 cycle 3 cycle 4, but only if cycle # 1 nodes are present (node1, node2), (node2, node4), (node4, node1), (node9, node9), (node7, node5), (node5, node7), (node1, node6), (node6, node8), (node8, node4), (node3, node1), (node3, node2), ] allnodes = set([ node1, node2, node3, node4, node5, node6, node7, node8, node9, ]) eq_(topological.find_cycles(tuples, allnodes), set([ 'node8', 'node1', 'node2', 'node5', 'node4', 'node7', 'node6', 'node9', ])) def test_find_multiple_cycles_two(self): node1 = 'node1' node2 = 'node2' node3 = 'node3' node4 = 'node4' node5 = 'node5' node6 = 'node6' tuples = [ # cycle 1 cycle 2 (node1, node2), (node2, node4), (node4, node1), (node1, node6), (node6, node2), (node2, node4), (node4, node1), ] allnodes = set([ node1, node2, node3, node4, node5, node6, ]) # node6 only became present here once [ticket:2282] was addressed. eq_( topological.find_cycles(tuples, allnodes), set(['node1','node2', 'node4', 'node6']) ) def test_find_multiple_cycles_three(self): node1 = 'node1' node2 = 'node2' node3 = 'node3' node4 = 'node4' node5 = 'node5' node6 = 'node6' tuples = [ # cycle 1 cycle 2 cycle3 cycle4 (node1, node2), (node2, node1), (node2, node3), (node3, node2), (node2, node4), (node4, node2), (node2, node5), (node5, node6), (node6, node2), ] allnodes = set([ node1, node2, node3, node4, node5, node6, ]) eq_(topological.find_cycles(tuples, allnodes), allnodes) def test_find_multiple_cycles_four(self): tuples = [ ('node6', 'node2'), ('node15', 'node19'), ('node19', 'node2'), ('node4', 'node10'), ('node15', 'node13'), ('node17', 'node11'), ('node1', 'node19'), ('node15', 'node8'), ('node6', 'node20'), ('node14', 'node11'), ('node6', 'node14'), ('node11', 'node2'), ('node10', 'node20'), ('node1', 'node11'), ('node20', 'node19'), ('node4', 'node20'), ('node15', 'node20'), ('node9', 'node19'), ('node11', 'node10'), ('node11', 'node19'), ('node13', 'node6'), ('node3', 'node15'), ('node9', 'node11'), ('node4', 'node17'), ('node2', 'node20'), ('node19', 'node10'), ('node8', 'node4'), ('node11', 'node3'), ('node6', 'node1') ] allnodes = ['node%d' % i for i in xrange(1, 21)] eq_( topological.find_cycles(tuples, allnodes), set(['node11', 'node10', 'node13', 'node15', 'node14', 'node17', 'node19', 'node20', 'node8', 'node1', 'node3', 'node2', 'node4', 'node6']) )
ioram7/keystone-federado-pgid2013
build/sqlalchemy/test/base/test_dependency.py
Python
apache-2.0
8,756
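A minimal sketch of the two internal entry points these tests exercise: topological.sort yields the items in an order consistent with the (parent, child) dependency tuples, and find_cycles reports the nodes caught in cycles.

from sqlalchemy.util import topological

tuples = [('a', 'b'), ('b', 'c'), ('a', 'c')]
print(list(topological.sort(tuples, ['a', 'b', 'c'])))  # a partial ordering, e.g. ['a', 'b', 'c']

cyclic = [('x', 'y'), ('y', 'x')]
print(topological.find_cycles(cyclic, ['x', 'y']))      # set(['x', 'y'])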
#! /usr/bin/env python
"""
based on https://github.com/tomchristie/django-rest-framework/blob/master/runtests.py
"""
from __future__ import print_function

import pytest
import sys
import os
import subprocess

PYTEST_ARGS = {
    'default': ['tests'],
    'fast': ['tests', '-q'],
}

FLAKE8_ARGS = ['rest_framework_friendly_errors', 'tests', '--ignore=E501']

sys.path.append(os.path.dirname(__file__))


def exit_on_failure(ret, message=None):
    if ret:
        sys.exit(ret)


def flake8_main(args):
    print('Running flake8 code linting')
    ret = subprocess.call(['flake8'] + args)
    print('flake8 failed' if ret else 'flake8 passed')
    return ret


def split_class_and_function(string):
    class_string, function_string = string.split('.', 1)
    return "%s and %s" % (class_string, function_string)


def is_function(string):
    # `True` if it looks like a test function is included in the string.
    return string.startswith('test_') or '.test_' in string


def is_class(string):
    # `True` if first character is uppercase - assume it's a class name.
    return string[0] == string[0].upper()


if __name__ == "__main__":
    try:
        sys.argv.remove('--nolint')
    except ValueError:
        run_flake8 = True
    else:
        run_flake8 = False

    try:
        sys.argv.remove('--lintonly')
    except ValueError:
        run_tests = True
    else:
        run_tests = False

    try:
        sys.argv.remove('--fast')
    except ValueError:
        style = 'default'
    else:
        style = 'fast'
        run_flake8 = False

    if len(sys.argv) > 1:
        pytest_args = sys.argv[1:]
        first_arg = pytest_args[0]
        if first_arg.startswith('-'):
            # `runtests.py [flags]`
            pytest_args = ['tests'] + pytest_args
        elif is_class(first_arg) and is_function(first_arg):
            # `runtests.py TestCase.test_function [flags]`
            expression = split_class_and_function(first_arg)
            pytest_args = ['tests', '-k', expression] + pytest_args[1:]
        elif is_class(first_arg) or is_function(first_arg):
            # `runtests.py TestCase [flags]`
            # `runtests.py test_function [flags]`
            pytest_args = ['tests', '-k', pytest_args[0]] + pytest_args[1:]
    else:
        pytest_args = PYTEST_ARGS[style]

    if run_tests:
        exit_on_failure(pytest.main(pytest_args))

    if run_flake8:
        exit_on_failure(flake8_main(FLAKE8_ARGS))
FutureMind/drf-friendly-errors
runtests.py
Python
mit
2,452
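A quick trace of how the argument routing above behaves, using only the helpers defined in the script:

split_class_and_function('MyTestCase.test_foo')  # -> 'MyTestCase and test_foo'
is_class('MyTestCase.test_foo')                  # -> True (starts uppercase)
is_function('MyTestCase.test_foo')               # -> True ('.test_' present)

# So `python runtests.py MyTestCase.test_foo` resolves to:
# pytest.main(['tests', '-k', 'MyTestCase and test_foo'])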
# (C) British Crown Copyright 2014, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Test function :func:`iris.fileformats.grib._load_convert.time_range_unit`.

"""

from __future__ import (absolute_import, division, print_function)

# import iris tests first so that some things can be initialised
# before importing anything else.
import iris.tests as tests

from iris.exceptions import TranslationError
from iris.fileformats.grib._load_convert import time_range_unit
from iris.unit import Unit


class Test(tests.IrisTest):
    def setUp(self):
        self.unit_by_indicator = {0: Unit('minutes'),
                                  1: Unit('hours'),
                                  2: Unit('days'),
                                  10: Unit('3 hours'),
                                  11: Unit('6 hours'),
                                  12: Unit('12 hours'),
                                  13: Unit('seconds')}

    def test_units(self):
        for indicator, unit in self.unit_by_indicator.items():
            result = time_range_unit(indicator)
            self.assertEqual(result, unit)

    def test_bad_indicator(self):
        emsg = 'unsupported time range'
        with self.assertRaisesRegexp(TranslationError, emsg):
            time_range_unit(-1)


if __name__ == '__main__':
    tests.main()
Jozhogg/iris
lib/iris/tests/unit/fileformats/grib/load_convert/test_time_range_unit.py
Python
lgpl-3.0
1,951
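The mapping under test, exercised directly (this assumes an Iris checkout with the GRIB loader importable):

from iris.fileformats.grib._load_convert import time_range_unit

print(time_range_unit(1))   # Unit('hours')
print(time_range_unit(11))  # Unit('6 hours')
time_range_unit(-1)         # raises TranslationError: unsupported time range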
# Dynamic wpa_supplicant interface # Copyright (c) 2013, Jouni Malinen <j@w1.fi> # # This software may be distributed under the terms of the BSD license. # See README for more details. import logging logger = logging.getLogger() import subprocess import time import hwsim_utils import hostapd from wpasupplicant import WpaSupplicant def test_sta_dynamic(dev, apdev): """Dynamically added wpa_supplicant interface""" params = hostapd.wpa2_params(ssid="sta-dynamic", passphrase="12345678") hostapd.add_ap(apdev[0]['ifname'], params) logger.info("Create a dynamic wpa_supplicant interface and connect") wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5') wpas.interface_add("wlan5") wpas.connect("sta-dynamic", psk="12345678", scan_freq="2412") def test_sta_ap_scan_0(dev, apdev): """Dynamically added wpa_supplicant interface with AP_SCAN 0 connection""" hostapd.add_ap(apdev[0]['ifname'], { "ssid": "test" }) bssid = apdev[0]['bssid'] logger.info("Create a dynamic wpa_supplicant interface and connect") wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5') wpas.interface_add("wlan5") if "OK" not in wpas.request("AP_SCAN 0"): raise Exception("Failed to set AP_SCAN 2") id = wpas.connect("", key_mgmt="NONE", bssid=bssid, only_add_network=True) wpas.request("ENABLE_NETWORK " + str(id) + " no-connect") wpas.request("SCAN") time.sleep(0.5) subprocess.call(['iw', wpas.ifname, 'connect', 'test', '2412']) wpas.wait_connected(timeout=10) wpas.request("SCAN") wpas.wait_connected(timeout=5) def test_sta_ap_scan_2(dev, apdev): """Dynamically added wpa_supplicant interface with AP_SCAN 2 connection""" hostapd.add_ap(apdev[0]['ifname'], { "ssid": "test" }) bssid = apdev[0]['bssid'] logger.info("Create a dynamic wpa_supplicant interface and connect") wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5') wpas.interface_add("wlan5") if "FAIL" not in wpas.request("AP_SCAN -1"): raise Exception("Invalid AP_SCAN -1 accepted") if "FAIL" not in wpas.request("AP_SCAN 3"): raise Exception("Invalid AP_SCAN 3 accepted") if "OK" not in wpas.request("AP_SCAN 2"): raise Exception("Failed to set AP_SCAN 2") id = wpas.connect("", key_mgmt="NONE", bssid=bssid, only_add_network=True) wpas.request("ENABLE_NETWORK " + str(id) + " no-connect") subprocess.call(['iw', wpas.ifname, 'scan', 'trigger', 'freq', '2412']) time.sleep(1) subprocess.call(['iw', wpas.ifname, 'connect', 'test', '2412']) wpas.wait_connected(timeout=10) wpas.request("SET disallow_aps bssid " + bssid) wpas.wait_disconnected(timeout=10) subprocess.call(['iw', wpas.ifname, 'connect', 'test', '2412']) ev = wpas.wait_event(["CTRL-EVENT-CONNECTED"], timeout=1) if ev is not None: raise Exception("Unexpected connection reported") def test_sta_ap_scan_2b(dev, apdev): """Dynamically added wpa_supplicant interface with AP_SCAN 2 operation""" hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "test" }) bssid = apdev[0]['bssid'] logger.info("Create a dynamic wpa_supplicant interface and connect") wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5') wpas.interface_add("wlan5", drv_params="force_connect_cmd=1") if "OK" not in wpas.request("AP_SCAN 2"): raise Exception("Failed to set AP_SCAN 2") id = wpas.connect("test", key_mgmt="NONE", bssid=bssid) wpas.request("DISCONNECT") wpas.set_network(id, "disabled", "1") id2 = wpas.add_network() wpas.set_network_quoted(id2, "ssid", "test2") wpas.set_network(id2, "key_mgmt", "NONE") wpas.set_network(id2, "disabled", "0") wpas.request("REASSOCIATE") ev = wpas.wait_event(["CTRL-EVENT-ASSOC-REJECT"], timeout=15) if ev is None: raise 
Exception("Association rejection not reported") hapd.disable() wpas.set_network(id, "disabled", "0") wpas.set_network(id2, "disabled", "1") for i in range(3): ev = wpas.wait_event(["CTRL-EVENT-ASSOC-REJECT"], timeout=15) if ev is None: raise Exception("Association rejection not reported") wpas.request("DISCONNECT") def test_sta_dynamic_down_up(dev, apdev): """Dynamically added wpa_supplicant interface down/up""" params = hostapd.wpa2_params(ssid="sta-dynamic", passphrase="12345678") hapd = hostapd.add_ap(apdev[0]['ifname'], params) logger.info("Create a dynamic wpa_supplicant interface and connect") wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5') wpas.interface_add("wlan5") wpas.connect("sta-dynamic", psk="12345678", scan_freq="2412") hwsim_utils.test_connectivity(wpas, hapd) subprocess.call(['ifconfig', wpas.ifname, 'down']) wpas.wait_disconnected(timeout=10) if wpas.get_status_field("wpa_state") != "INTERFACE_DISABLED": raise Exception("Unexpected wpa_state") subprocess.call(['ifconfig', wpas.ifname, 'up']) wpas.wait_connected(timeout=15, error="Reconnection not reported") hwsim_utils.test_connectivity(wpas, hapd) def test_sta_dynamic_ext_mac_addr_change(dev, apdev): """Dynamically added wpa_supplicant interface with external MAC address change""" params = hostapd.wpa2_params(ssid="sta-dynamic", passphrase="12345678") hapd = hostapd.add_ap(apdev[0]['ifname'], params) logger.info("Create a dynamic wpa_supplicant interface and connect") wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5') wpas.interface_add("wlan5") wpas.connect("sta-dynamic", psk="12345678", scan_freq="2412") hwsim_utils.test_connectivity(wpas, hapd) subprocess.call(['ifconfig', wpas.ifname, 'down']) wpas.wait_disconnected(timeout=10) if wpas.get_status_field("wpa_state") != "INTERFACE_DISABLED": raise Exception("Unexpected wpa_state") prev_addr = wpas.p2p_interface_addr() new_addr = '02:11:22:33:44:55' try: subprocess.call(['ip', 'link', 'set', 'dev', wpas.ifname, 'address', new_addr]) subprocess.call(['ifconfig', wpas.ifname, 'up']) wpas.wait_connected(timeout=15, error="Reconnection not reported") if wpas.get_driver_status_field('addr') != new_addr: raise Exception("Address change not reported") hwsim_utils.test_connectivity(wpas, hapd) sta = hapd.get_sta(new_addr) if sta['addr'] != new_addr: raise Exception("STA association with new address not found") finally: subprocess.call(['ifconfig', wpas.ifname, 'down']) subprocess.call(['ip', 'link', 'set', 'dev', wpas.ifname, 'address', prev_addr]) subprocess.call(['ifconfig', wpas.ifname, 'up']) def test_sta_dynamic_random_mac_addr(dev, apdev): """Dynamically added wpa_supplicant interface and random MAC address""" params = hostapd.wpa2_params(ssid="sta-dynamic", passphrase="12345678") hapd = hostapd.add_ap(apdev[0]['ifname'], params) wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5') wpas.interface_add("wlan5") addr0 = wpas.get_driver_status_field("addr") wpas.request("SET preassoc_mac_addr 1") wpas.request("SET rand_addr_lifetime 0") id = wpas.connect("sta-dynamic", psk="12345678", mac_addr="1", scan_freq="2412") addr1 = wpas.get_driver_status_field("addr") if addr0 == addr1: raise Exception("Random MAC address not used") sta = hapd.get_sta(addr0) if sta['addr'] != "FAIL": raise Exception("Unexpected STA association with permanent address") sta = hapd.get_sta(addr1) if sta['addr'] != addr1: raise Exception("STA association with random address not found") wpas.request("DISCONNECT") wpas.connect_network(id) addr2 = wpas.get_driver_status_field("addr") if addr1 != 
addr2: raise Exception("Random MAC address changed unexpectedly") wpas.remove_network(id) id = wpas.connect("sta-dynamic", psk="12345678", mac_addr="1", scan_freq="2412") addr2 = wpas.get_driver_status_field("addr") if addr1 == addr2: raise Exception("Random MAC address did not change") def test_sta_dynamic_random_mac_addr_keep_oui(dev, apdev): """Dynamically added wpa_supplicant interface and random MAC address (keep OUI)""" params = hostapd.wpa2_params(ssid="sta-dynamic", passphrase="12345678") hapd = hostapd.add_ap(apdev[0]['ifname'], params) wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5') wpas.interface_add("wlan5") addr0 = wpas.get_driver_status_field("addr") wpas.request("SET preassoc_mac_addr 2") wpas.request("SET rand_addr_lifetime 0") id = wpas.connect("sta-dynamic", psk="12345678", mac_addr="2", scan_freq="2412") addr1 = wpas.get_driver_status_field("addr") if addr0 == addr1: raise Exception("Random MAC address not used") if addr1[3:8] != addr0[3:8]: raise Exception("OUI was not kept") sta = hapd.get_sta(addr0) if sta['addr'] != "FAIL": raise Exception("Unexpected STA association with permanent address") sta = hapd.get_sta(addr1) if sta['addr'] != addr1: raise Exception("STA association with random address not found") wpas.request("DISCONNECT") wpas.connect_network(id) addr2 = wpas.get_driver_status_field("addr") if addr1 != addr2: raise Exception("Random MAC address changed unexpectedly") wpas.remove_network(id) id = wpas.connect("sta-dynamic", psk="12345678", mac_addr="2", scan_freq="2412") addr2 = wpas.get_driver_status_field("addr") if addr1 == addr2: raise Exception("Random MAC address did not change") if addr2[3:8] != addr0[3:8]: raise Exception("OUI was not kept") def test_sta_dynamic_random_mac_addr_scan(dev, apdev): """Dynamically added wpa_supplicant interface and random MAC address for scan""" params = hostapd.wpa2_params(ssid="sta-dynamic", passphrase="12345678") hapd = hostapd.add_ap(apdev[0]['ifname'], params) wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5') wpas.interface_add("wlan5") addr0 = wpas.get_driver_status_field("addr") wpas.request("SET preassoc_mac_addr 1") wpas.request("SET rand_addr_lifetime 0") id = wpas.connect("sta-dynamic", psk="12345678", scan_freq="2412") addr1 = wpas.get_driver_status_field("addr") if addr0 != addr1: raise Exception("Random MAC address used unexpectedly") def test_sta_dynamic_random_mac_addr_scan_keep_oui(dev, apdev): """Dynamically added wpa_supplicant interface and random MAC address for scan (keep OUI)""" params = hostapd.wpa2_params(ssid="sta-dynamic", passphrase="12345678") hapd = hostapd.add_ap(apdev[0]['ifname'], params) wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5') wpas.interface_add("wlan5") addr0 = wpas.get_driver_status_field("addr") wpas.request("SET preassoc_mac_addr 2") wpas.request("SET rand_addr_lifetime 0") id = wpas.connect("sta-dynamic", psk="12345678", scan_freq="2412") addr1 = wpas.get_driver_status_field("addr") if addr0 != addr1: raise Exception("Random MAC address used unexpectedly")
wangybgit/Chameleon
hostapd-OpenWrt/tests/hwsim/test_sta_dynamic.py
Python
apache-2.0
11,322
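The recurring setup in these tests, condensed into one hedged sketch; every call below appears verbatim in the cases above, and the ssid/passphrase are placeholders.

import hostapd
from wpasupplicant import WpaSupplicant

def connect_dynamic_sta(apdev):
    # Bring up a WPA2 AP, then attach a dynamically created
    # wpa_supplicant interface and connect it.
    params = hostapd.wpa2_params(ssid="demo", passphrase="12345678")
    hapd = hostapd.add_ap(apdev[0]['ifname'], params)
    wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
    wpas.interface_add("wlan5")
    wpas.connect("demo", psk="12345678", scan_freq="2412")
    return hapd, wpas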
import xbmcgui
import urllib
import time


def download(url, dest, dp=None):
    if not dp:
        dp = xbmcgui.DialogProgress()
        dp.create("YCBeast Wizard", "Downloading the Freshness", ' ', ' ')
    dp.update(0)
    start_time = time.time()
    urllib.urlretrieve(url, dest,
                       lambda nb, bs, fs: _pbhook(nb, bs, fs, dp, start_time))


def _pbhook(numblocks, blocksize, filesize, dp, start_time):
    try:
        percent = min(numblocks * blocksize * 100 / filesize, 100)
        currently_downloaded = float(numblocks) * blocksize / (1024 * 1024)
        kbps_speed = numblocks * blocksize / (time.time() - start_time)
        if kbps_speed > 0 and not percent == 100:
            eta = (filesize - numblocks * blocksize) / kbps_speed
        else:
            eta = 0
        kbps_speed = kbps_speed / 1024
        total = float(filesize) / (1024 * 1024)
        mbs = '%.02f MB of %.02f MB' % (currently_downloaded, total)
        e = 'Speed: %.02f Kb/s ' % kbps_speed
        e += 'ETA: %02d:%02d' % divmod(eta, 60)
        dp.update(percent, mbs, e)
    except:
        percent = 100
        dp.update(percent)
    if dp.iscanceled():
        raise Exception("Canceled")
        dp.close()
anyoneuno/ycrepo
plugin.program.ycbeast/resources/libs/downloader.py
Python
gpl-3.0
1,331
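An assumed call site for the helper above; the import path mirrors the add-on layout, and the URL and destination are placeholders.

import xbmcgui
from resources.libs import downloader

# With no dialog supplied, download() creates and drives its own DialogProgress.
downloader.download('http://example.com/build.zip', '/tmp/build.zip')

# A caller can also hand in a dialog it already owns.
dp = xbmcgui.DialogProgress()
dp.create("My Wizard", "Downloading", ' ', ' ')
downloader.download('http://example.com/build.zip', '/tmp/build.zip', dp)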
import glib
import gst

from common import TestCase

import ges


class GlobalFunctions(TestCase):
    def testGlobalFunctions(self):
        tl = ges.timeline_new_audio_video()
        tr = ges.timeline_standard_transition_new_for_nick("crossfade")
freesteph/gst-editing-services
bindings/python/testsuite/test_global_functions.py
Python
lgpl-2.1
249
import json
from functools import wraps

from flask import request

from utils.exceptions import HttpBadRequest
from utils.validators import ValidationError


class validate(object):
    def __init__(self, rules):
        self.rules = rules

    def get_params(self):
        if request.method in ['POST', 'PUT']:
            return json.loads(request.data)
        return request.args

    def __call__(self, f):
        @wraps(f)
        def decorated(*args, **kwargs):
            errors = []
            params = self.get_params()
            for param in self.rules:
                try:
                    kwargs[param] = self.rules[param](params.get(param, ''))
                except ValidationError as e:
                    errors.append(e.message)
            if errors:
                raise HttpBadRequest("\n".join(errors))
            return f(*args, **kwargs)
        return decorated
vtemian/kruncher
utils/decorators/validate.py
Python
apache-2.0
798
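A usage sketch wiring the decorator into a Flask view. required_string is a hypothetical validator; any callable that raises ValidationError on bad input satisfies the contract, assuming ValidationError exposes the message attribute the decorator reads.

from flask import Flask
from utils.decorators.validate import validate
from utils.validators import ValidationError

app = Flask(__name__)

def required_string(value):
    # Hypothetical rule: reject empty values, pass everything else through.
    if not value:
        raise ValidationError('name is required')
    return value

@app.route('/greet')
@validate({'name': required_string})
def greet(name):
    return 'Hello, %s' % name  # /greet?name=Ada -> "Hello, Ada"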
#!/usr/bin/python # -*- coding: utf-8 -*- #****************************************************************************** # # This file is part of the lizard_waterbalance Django app. # # The lizard_waterbalance app is free software: you can redistribute it and/or # modify it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # This library is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # the lizard_waterbalance app. If not, see <http://www.gnu.org/licenses/>. # # Copyright 2010 Nelen & Schuurmans # #****************************************************************************** # # Initial programmer: Pieter Swinkels # Initial date: 2010-11-24 # #****************************************************************************** import logging import itertools import timeseries from copy import deepcopy from datetime import datetime from datetime import timedelta from math import fabs from timeseries import daily_events from timeseries import TimeSeries logger = logging.getLogger(__name__) def _first_of_day(event): """Return the first moment of the day for an event. >>> _first_of_day((datetime(1999, 10, 2, 3, 4), 0.0)) datetime.datetime(1999, 10, 2, 0, 0) """ date, value = event return datetime(date.year, date.month, date.day) def _first_of_month(event): """Return the first day of the month for an event. >>> _first_of_month((datetime(1999, 10, 2, 3, 4), 0.0)) datetime.datetime(1999, 10, 1, 0, 0) """ date, value = event return datetime(date.year, date.month, 1) def _first_of_quarter(event): """Return the first day of the quarter for an event. The first day of a quarter is returned: >>> dt = datetime(1972, 12, 25) >>> _first_of_quarter((dt, 'reinout')) datetime.datetime(1972, 10, 1, 0, 0) >>> dt = datetime(1972, 10, 1) >>> _first_of_quarter((dt, 'bla')) datetime.datetime(1972, 10, 1, 0, 0) >>> dt = datetime(1976, 01, 27) >>> _first_of_quarter((dt, 'maurits')) datetime.datetime(1976, 1, 1, 0, 0) """ date, value = event month = 1 + ((date.month - 1) / 3 * 3) return datetime(date.year, month, 1) def _first_of_year(event): """Return the first day of the year for an event. >>> _first_of_year((datetime(1999, 10, 2, 3, 4), 0.0)) datetime.datetime(1999, 1, 1, 0, 0) """ date, value = event return datetime(date.year, 1, 1) def _first_of_hydro_year(event): """Return the first day of the year for an event. Hydrologic year starts in October! >>> _first_of_hydro_year((datetime(1999, 10, 2, 3, 4), 0.0)) datetime.datetime(1999, 10, 1, 0, 0) >>> _first_of_hydro_year((datetime(1999, 9, 2, 3, 4), 0.0)) datetime.datetime(1998, 10, 1, 0, 0) """ date, value = event if date < datetime(date.year, 10, 1): year = date.year - 1 else: year = date.year return datetime(year, 10, 1) def grouped_event_values(timeseries, period, average=False): """Return iterator with totals for days/months/years for timeseries. Aggregation function is sum. Optional: take average. 
>>> ts = TimeseriesStub() # empty timeseries >>> [i for i in grouped_event_values(ts, 'day')] [] >>> [i for i in grouped_event_values(ts, 'month')] [] >>> [i for i in grouped_event_values(ts, 'quarter')] [] >>> [i for i in grouped_event_values(ts, 'year')] [] >>> [i for i in grouped_event_values(ts, 'not_a_period')] Traceback (most recent call last): ... AssertionError >>> """ groupers = {'year': _first_of_year, 'month': _first_of_month, 'quarter': _first_of_quarter, 'day': _first_of_day} grouper = groupers.get(period) assert grouper is not None for date, events in itertools.groupby(timeseries.events(), grouper): if average: # To be able to count the events, we make a list of the # generated elements. There are ways to count them without # having to make the list explicit but this is the easy # way. events = list(events) result = (sum(value for (date, value) in events) / (1.0 * len(events))) else: result = sum(value for (date, value) in events) yield date, result def cumulative_event_values(timeseries, reset_period, period='month', multiply=1, time_shift=0): """Return iterator with major events and at least with interval. cumulative is reset on reset_period Aggregation function is sum. Optional: take average. """ if reset_period == 'hydro_year' and period == 'year': # This is a really strange combination for which the rest of this # function is not suited. We fix that as follows. period = 'hydro_year' # When the reset period is smaller than the group period, it is possible # that the grouper returns a date before the date of the resetter, for # example when the reset period is a month and the group period a # quarter. But to which cumulative time series should this lead? # # To "fix" this problem, we use the following rule: # # When the reset period is smaller than the group period, use the reset # period also for the group period. # # In this way, the user always sees the reset. keys = ['day', 'month', 'quarter', 'hydro_year', 'year'] if keys.index(reset_period) < keys.index(period): period = reset_period firsters = {'year': _first_of_year, 'hydro_year': _first_of_hydro_year, 'month': _first_of_month, 'quarter': _first_of_quarter, 'day': _first_of_day} reseter = firsters.get(reset_period) assert reseter is not None grouper = firsters.get(period) assert grouper is not None cumulative = 0 time_shift = timedelta(time_shift) for date, events in itertools.groupby(timeseries.events(), reseter): cumulative = 0 for cum_date, cum_events in itertools.groupby(events, grouper): cumulative += sum(value for (date, value) in cum_events) yield (cum_date + time_shift), cumulative * multiply def monthly_events(timeseries): """Return a generator to iterate over all monthly events. A TimeseriesStub stores daily events. This generator aggregates these daily events to monthly events. Each monthly events takes place on the first of the month and its value is the total value of the daily events for that month. """ return grouped_event_values(timeseries, 'month') def average_monthly_events(timeseries): """Return a generator to iterate over all average monthly events. A TimeseriesStub stores daily events. This generator aggregates these daily events to monthly events that is placed at the first of the month and whose value is the average value of the daily events for that month. """ return grouped_event_values(timeseries, 'month', average=True) def daily_sticky_events(events): """Return a generator to iterate over all daily events. The generator iterates over the events in the order they were added. 
If dates are missing in between two successive events, this function fills in the missing dates with the value on the latest known date. Parameters: *events* sequence of (date or datetime, value) pairs ordered by date or datetime """ # We initialize this variable to silence pyflakes. date_to_yield = None previous_value = 0 for date, value in events: if not date_to_yield is None: while date_to_yield < date: yield date_to_yield, previous_value date_to_yield = date_to_yield + timedelta(1) yield date, value previous_value = value date_to_yield = date + timedelta(1) class TimeseriesStub(timeseries.TimeSeries): """Implements a time series for testing. A time series is a sequence of values ordered by date and time. Instance variables: *initial_value* value on any date before the first date *events* list of (date and time, value) tuples ordered by date and time """ def __init__(self, *events): if len(events) == 0: events = [] self._events = events def get_start_date(self): """Return the initial date and time. The returned value must match the events data. """ try: return self._events[0][0] except: return datetime(1970, 1, 1) def get_end_date(self): """Return the final date and time. The returned value must match the events data. """ try: return self._events[-1][0] except: return datetime(1970, 1, 1) def sorted_event_items(self): """return all items, sorted by key """ return list(self.events()) def get_value(self, date_time): """Return the value on the given date and time. Note that this method assumes that the events are ordered earliest date and time first. """ result = 0.0 events = (event for event in self._events if event[0] >= date_time) event = next(events, None) if not event is None: if event[0] == date_time: result = event[1] return result def add_value(self, date_time, value): """Add the given value for the given date and time. Please note that events should be added earliest date and time first. """ self._events.append((date_time, value)) def raw_events(self): """Return a generator to iterate over all daily events. The generator iterates over the events in the order they were added. If dates are missing in between two successive events, this function does not fill in the missing dates with value. """ for date, value in self._events: yield date, value def raw_events_dict(self): return dict(self.raw_events()) def events(self, start_date=None, end_date=None): """Return a generator to iterate over the requested daily events. The generator iterates over the events in the order they were added. If dates are missing in between two successive events, this function fills in the missing dates with value 0. """ if start_date is not None and end_date is not None: for date, value in daily_events(self._events): if start_date is not None and date < start_date: continue if end_date is not None and date < end_date: yield date, value else: break else: for date, value in daily_events(self._events): yield date, value def get_events(self, start_date=None, end_date=None): return self.events(start_date, end_date) def monthly_events(self): """Return a generator to iterate over all monthly events. A TimeseriesStub stores daily events. This generator aggregates these daily events to monthly events. Each monthly events takes place on the first of the month and its value is the total value of the daily events for that month. 
""" return grouped_event_values(self, 'month') def __eq__(self, other): """Return True iff the two given time series represent the same events.""" my_events = list(self.events()) your_events = list(other.events()) equal = len(my_events) == len(your_events) if equal: for (my_event, your_event) in zip(my_events, your_events): equal = my_event[0] == your_event[0] if equal: equal = fabs(my_event[1] - your_event[1]) < 1e-6 if not equal: break return equal class SparseTimeseriesStub(timeseries.TimeSeries): """Represents a continuous time series. A continuous time series is a sequence of values ordered by date and time where each event is the day after the first event. Instance variables: *first_date* date of the first event *previous_date* date of the last event that has been added *values* list of values """ def __init__(self, first_date=None, values=None): self.first_date = first_date if values is None: self.values = [] else: self.values = values self.previous_date = self.first_date + timedelta(len(values) - 1) def get_start_date(self): """Return the initial date and time. The returned value must match the events data. """ if not self.first_date is None: return self.first_date else: return datetime(1970, 1, 1) def get_end_date(self): """Return the final date and time. The returned value must match the events data. """ if not self.first_date is None: return self.first_date + timedelta(len(self.values) - 1) else: return datetime(1970, 1, 1) def __len__(self): """behave as a container""" return len(list(self.events())) def sorted_event_items(self): """return all items, sorted by key """ return list(self.events()) def add_value(self, date_time, value): """Add the given value for the given date and time. Please note that events should be added earliest date and time first. """ if self.first_date is None: self.first_date = date_time else: assert self.previous_date is not None next_expected_date = self.previous_date + timedelta(1) assert next_expected_date.isocalendar() == date_time.isocalendar() self.previous_date = date_time self.values.append(value) def events(self, start_date=None, end_date=None): """Return a generator to iterate over the requested daily events. The generator iterates over the events in the order they were added. If dates are missing in between two successive events, this function fills in the missing dates with value 0. """ current_date = self.first_date if start_date is not None and end_date is not None: if current_date == start_date: for value in self.values: if current_date < end_date: yield current_date, value current_date = current_date + timedelta(1) else: break else: for value in self.values: if current_date < start_date: pass elif current_date < end_date: yield current_date, value else: break current_date = current_date + timedelta(1) else: for value in self.values: yield current_date, value current_date = current_date + timedelta(1) def get_events(self, start_date=None, end_date=None): return self.events(start_date, end_date) class TimeseriesWithMemoryStub(TimeseriesStub): def __init__(self, *args, **kwargs): TimeseriesStub.__init__(self, *args, **kwargs) def get_value(self, date_time): """Return the value on the given date and time. Note that this method assumes that the events are ordered earliest date and time first. 
""" result = 0.0 previous_event = None # note that we traverse the list of events in reverse for event in reversed(self._events): if event[0] < date_time: if previous_event is None: result = event[1] else: result = previous_event[1] break elif event[0] == date_time: result = event[1] previous_event = event return result def events(self, start_date=None, end_date=None): """Return a generator to iterate over all daily events. The generator iterates over the events in the order they were added. If dates are missing in between two successive events, this function fills in the missing dates with the value on the latest known date. """ if start_date is not None and end_date is not None: for date, value in daily_sticky_events(self._events): if start_date is not None and date < start_date: continue if end_date is not None and date < end_date: yield date, value else: break else: for date, value in daily_sticky_events(self._events): yield date, value class TimeseriesRestrictedStub(TimeseriesStub): """Represents a time series that lies between specific dates. A time series is a sequence of values ordered by date and time. Instance variables: *timeseries* object that supports an events method *start_date* date of the first day of the time series *end_date* date of the day *after* the last day of the time series """ def __init__(self, *args, **kwargs): self.timeseries = kwargs["timeseries"] del kwargs["timeseries"] self.start_date = kwargs["start_date"] del kwargs["start_date"] self.end_date = kwargs["end_date"] del kwargs["end_date"] TimeseriesStub.__init__(self, *args, **kwargs) def get_start_date(self): """Return the initial date and time. The returned value must match the events data. """ start_event = next(self.events(), (datetime(1970, 1, 1), 0)) return start_event[0] def get_end_date(self): """Return the final date and time. The returned value must match the events data. """ end_date = self.timeseries.get_end_date() if end_date > self.end_date: end_date = self.end_date return end_date def events(self, start_date=None, end_date=None): """Return a generator to iterate over the requested events. Parameters: *start_date* date of the earliest event to iterate over *end_data* date of the date after the latest event to iterate over """ events = self.timeseries.events(start_date=self.start_date, end_date=self.end_date) if start_date is None and end_date is None: for event in events: yield event[0], event[1] else: for event in events: if not start_date is None and event[0] < start_date: continue if not end_date is None and event[0] < end_date: yield event[0], event[1] else: break def enumerate_events(*timeseries_list): """Yield the events for all the days of the given time series. Parameters: *timeseries_list* list of time series Each of the given time series should specify values for possibly non-continous ranges of dates. For each day present in a time series, this method yields a tuple of events of all time series. If that day is present in a time series, the tuple contains the corresponding event. If that day is not present, the tuple contains an event with value 0 at that day. The description above only mentions dates. However, this method can handle events whose 'date' include a time component *as long as* the 'date' object supports an isocalendar() method as datetime.date and datetime.datetime do. 
""" next_start = datetime.max for timeseries in timeseries_list: start = next((event[0] for event in timeseries.events()), None) if not start is None: next_start = min(next_start, start) if next_start == datetime.max: # none of the time series contains an event and we stop immediately return # next_start is the first date for which an event is specified events_list = [timeseries.events() for timeseries in timeseries_list] earliest_event_list = [next(events, None) for events in events_list] timeseries_count = len(timeseries_list) no_events_are_present = False while not no_events_are_present: no_events_are_present = True to_yield = [(next_start, 0.0)] * timeseries_count for index, earliest_event in enumerate(earliest_event_list): if not earliest_event is None: no_events_are_present = False if earliest_event[0].isocalendar() == next_start.isocalendar(): to_yield[index] = earliest_event earliest_event_list[index] = next(events_list[index], None) next_start = next_start + timedelta(1) if not no_events_are_present: yield tuple(to_yield) def enumerate_dict_events(timeseries_dict): """Yield the events for all the days of the given time series. Parameter: *timeseries_dict* dictionary where a value is - a timeseries or - a dictionary where **each** value is a timeseries Each of the given time series should specify values for possibly non-continous ranges of dates. For each day present in a time series, this method yields a tuple of events of all time series. If that day is present in a time series, the tuple contains the corresponding event. If that day is not present, the tuple contains an event with value 0 at that day. The description above only mentions dates. However, this method can handle events whose 'date' include a time component *as long as* the 'date' object supports an isocalendar() method as datetime.date and datetime.datetime do. 
""" next_start = datetime.max #get earliest moment for timeseries in timeseries_dict.values(): if not type(timeseries) == type({}): start = next((event[0] for event in timeseries.events()), None) else: for ts_nested in timeseries.values(): start = next((event[0] for event in ts_nested.events()), None) if not start is None: next_start = min(next_start, start) if next_start == datetime.max: # none of the time series contains an event and we stop immediately return # next_start is the first date for which an event is specified events_list = [] keys_list = [] for key, timeseries in timeseries_dict.items(): if not type(timeseries) == type({}): events_list.append(timeseries.events()) keys_list.append([key]) else: #nested timeserie for key_nested, timeseries_nested in timeseries.items(): events_list.append(timeseries_nested.events()) keys_list.append([key, key_nested]) earliest_event_list = [next(events, None) for events in events_list] no_events_are_present = False while not no_events_are_present: no_events_are_present = True to_yield = {'date': next_start} for key in keys_list: if len(key) == 1: to_yield[key[0]] = (next_start, 0.0) else: if key[0] not in to_yield: to_yield[key[0]] = {} to_yield[key[0]][key[1]] = (next_start, 0.0) for index, earliest_event in enumerate(earliest_event_list): if not earliest_event is None: no_events_are_present = False if earliest_event[0].isocalendar() == next_start.isocalendar(): if len(keys_list[index]) == 1: to_yield[keys_list[index][0]] = earliest_event else: if keys_list[index][0] not in to_yield: to_yield[keys_list[index][0]] = {} to_yield[keys_list[index][0]][keys_list[index][1]] = \ earliest_event earliest_event_list[index] = next(events_list[index], None) next_start = next_start + timedelta(1) if not no_events_are_present: yield to_yield def enumerate_merged_events(timeseries_a, timeseries_b): """Yields all triples *(date, value_a, value_b)* for the given time series. In *(date, value_a, value_b)*, *value_a* is the value of the event at *date* in *timeseries_a* and *value_b* the value of the event at *date* in *timeseries_b*. Note that the given time series can have different date ranges. Therefore it is possible one time series specifies a value at a date outside the date range of the other time series. In that case, this method does return a triple for that date and it uses the value 0 for the missing value. Parameters: *timeseries_a* object that supports a method events() to yield events *timeseries_b* object that supports a method events() to yield events """ events_a = timeseries_a.events() events_b = timeseries_b.events() event_a = next(events_a, None) event_b = next(events_b, None) while not event_a is None and not event_b is None: if event_a[0].isocalendar() < event_b[0].isocalendar(): yield event_a[0], event_a[1], 0 event_a = next(events_a, None) elif event_a[0].isocalendar() > event_b[0].isocalendar(): yield event_b[0], 0, event_b[1] event_b = next(events_b, None) else: yield event_a[0], event_a[1], event_b[1] event_a = next(events_a, None) event_b = next(events_b, None) if event_a is None: if not event_b is None: yield event_b[0], 0, event_b[1] for event in events_b: yield event[0], 0, event[1] else: if not event_a is None: yield event_a[0], event_a[1], 0 for event in events_a: yield event[0], event[1], 0 def create_empty_timeseries(timeseries): """Return the empty TimeseriesStub that starts on the same day as the given time series. 
If the given time series is non-empty, this function returns a TimeseriesStub with a single event that starts on the day as the given time series and which has value 0.0. If the given time series is empty, this function returns an empty TimeseriesStub. """ empty_timeseries = TimeseriesStub() event = next(timeseries.events(), None) if not event is None: empty_timeseries.add_value(event[0], 0.0) return empty_timeseries def add_timeseries(*args): """Return the TimeseriesStub that is the sum of the given time series.""" result = SparseTimeseriesStub() for events in enumerate_events( *args): date = events[0][0] value = sum([value[1] for value in events]) result.add_value(date, value) return result def subtract_timeseries(timeseries_a, timeseries_b): """Return the TimeseriesStub that is the difference of the given time series.""" result = SparseTimeseriesStub() for date, value_a, value_b in enumerate_merged_events( timeseries_a, timeseries_b): result.add_value(date, value_a - value_b) return result def multiply_timeseries(timeseries, value): """Return the product of the given time series with the given value. """ product = SparseTimeseriesStub() for event in timeseries.events(): product.add_value(event[0], event[1] * value) return product def map_timeseries(timeseries, map_function): """Apply the given map function to each value of the given time series. This method returns a time series. """ product = SparseTimeseriesStub() for time, value in timeseries.events(): product.add_value(time, map_function(value)) return product def split_timeseries(timeseries): """Return the 2-tuple of non-positive and non-negative time series. Parameters: *timeseries* time series that contains the events for the new 2 -tuple This function creates a 2-tuple of TimeseriesStub, where the first element contains all non-positive events (of the given time series) and the second element contains all non-negative events. The 2 resulting time series have events for the same dates as the given time series, but with value zero if the value at that date does not have the right sign. """ non_pos_timeseries = SparseTimeseriesStub() non_neg_timeseries = SparseTimeseriesStub() for (date, value) in timeseries.events(): if value > 0: non_pos_timeseries.add_value(date, 0) non_neg_timeseries.add_value(date, value) elif value < 0: non_pos_timeseries.add_value(date, value) non_neg_timeseries.add_value(date, 0) else: non_pos_timeseries.add_value(date, 0) non_neg_timeseries.add_value(date, 0) return (non_pos_timeseries, non_neg_timeseries) def write_to_pi_file(*args, **kwargs): """Write the given timeseries in PI XML format. Parameters: *kwargs['filename']* name of PI XML file to create and write to *kwargs['timeseries']* single time series, or a dict of time series, where each time series has with a method 'events' to generate all date, value pairs """ multiple_series_stub = kwargs['timeseries'] if isinstance(multiple_series_stub, dict): multiple_series = [] for parameter_id, series_stub in multiple_series_stub.iteritems(): my_kwargs = deepcopy(kwargs) my_kwargs["parameter_id"] = parameter_id series = TimeSeries(*args, **my_kwargs) series.sorted_event_items = lambda s=series_stub: list(s.events()) multiple_series.append(series) multiple_series.sort(key=lambda series: series.parameter_id) else: series = TimeSeries(*args, **kwargs) series.sorted_event_items = lambda: list(multiple_series_stub.events()) multiple_series = [series] TimeSeries.write_to_pi_file(kwargs['filename'], multiple_series)
nens/timeseries
timeseries/timeseriesstub.py
Python
gpl-3.0
31,231
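# Usage sketch (not part of the original module; assumes the nens/timeseries
# package above is importable as `timeseries.timeseriesstub`). It shows the
# documented merge semantics: a date missing from one series contributes 0.
from datetime import datetime

from timeseries.timeseriesstub import (
    TimeseriesStub, add_timeseries, enumerate_merged_events)

a = TimeseriesStub()
a.add_value(datetime(2011, 1, 1), 10.0)
a.add_value(datetime(2011, 1, 2), 20.0)

b = TimeseriesStub()
b.add_value(datetime(2011, 1, 2), 5.0)
b.add_value(datetime(2011, 1, 3), 7.0)

# 2011-01-01 only exists in `a`, so `b` contributes 0 on that date.
for date, value_a, value_b in enumerate_merged_events(a, b):
    print(date, value_a, value_b)

# add_timeseries sums the event values date by date.
summed = add_timeseries(a, b)
print(list(summed.events()))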
from Object.PotionList import PotionList
from Object.Potion import Potion
from Object.PotionColor import PotionColor
from Object.PotionSign import PotionSign
from Object.Ingredient import Ingredient
from Object.IngredientProperties import IngredientProperties


class PotionCombinations:

    def distinct_potions_list(potionList):
        dist_potions = []
        dist_potion_hashs = set()
        potionList = potionList.get_potions()
        for potion in potionList:
            if potion.get_hash() not in dist_potion_hashs:
                dist_potions.append(potion)
                dist_potion_hashs.add(potion.get_hash())
        return dist_potions

    def distinct_hash_list(hashList):
        return None

    # Generates the potion chances from 2 ingredients.
    # Is dumb and does not know about PotionList creations.
    # Output: list[tuple(Potion, potion_count, total_count, %)]
    def generate_ingredient_potions(ip1, ip2):
        totalCount = 0
        possible_potions = []
        # Cross-test all triplets to see what they make!
        for ip1Triplet in ip1.get_alchemical_options():
            for ip2Triplet in ip2.get_alchemical_options():
                totalCount = totalCount + 1
                for ip1A in ip1Triplet.get_alchemicals():
                    for ip2A in ip2Triplet.get_alchemicals():
                        alchem_res = PotionCombinations.score_alchemicals(ip1A, ip2A)
                        if alchem_res is not None:
                            possible_potions.append(
                                Potion(ip1.get_name(), ip2.get_name(),
                                       PotionColor(alchem_res[0].value),
                                       PotionSign(alchem_res[1].value)))
        # Count all (PotionColor, PotionSign) combos
        point_counts = {}
        for potion in possible_potions:
            key = str(potion.get_color()) + str(potion.get_sign())
            if key in point_counts:
                point_counts[key] = point_counts[key] + 1
            else:
                point_counts[key] = 1
        # Generate the report tuples
        report_tuple_list = []
        report_tuple_list.append(
            ('PotionColorPotionSign', 'PotionCount', 'TotalCount', 'Potion %'))
        for pointKey in point_counts.keys():
            report_tuple_list.append(
                (pointKey, point_counts[pointKey], totalCount,
                 point_counts[pointKey] / totalCount))
        return report_tuple_list

    # Searches a potion list for existing results
    def postion_list_search(pl, i1, i2):
        for po in pl.get_potions():
            knownIngredients = po.get_ingredients()
            if i1 in knownIngredients and i2 in knownIngredients:
                print('###Known Potion###')
                print(po.to_string())
                return True
        return False

    # Takes 2 alchemicals and sees if they make a color together.
    # Returns the alchemical color and sign.
    def score_alchemicals(alchemical_one, alchemical_two):
        if alchemical_one.get_color() is alchemical_two.get_color():
            if alchemical_one.get_sign() is alchemical_two.get_sign():
                if alchemical_one.get_size() is not alchemical_two.get_size():
                    return (alchemical_one.get_color(), alchemical_two.get_sign())
        return None
josephxsxn/alchemists_notepad
Routine/PotionCombinations.py
Python
apache-2.0
2,880
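# Illustrative sketch (not part of the repository above) of the counting
# pattern used in generate_ingredient_potions, rewritten stand-alone with
# collections.Counter on plain (color, sign) strings; the values below are
# made up so the snippet runs without the Object package.
from collections import Counter

possible_potions = [("RED", "+"), ("RED", "+"), ("BLUE", "-")]
total_count = 4  # e.g. the number of triplet pairs that were cross-tested

point_counts = Counter(color + sign for color, sign in possible_potions)

report = [('PotionColorPotionSign', 'PotionCount', 'TotalCount', 'Potion %')]
for key, count in point_counts.items():
    report.append((key, count, total_count, count / total_count))
print(report)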
# Copyright (C) 2016 - Yevgen Muntyan # Copyright (C) 2016 - Ignacio Casal Quinteiro # Copyright (C) 2016 - Arnavion # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see <http://www.gnu.org/licenses/>. import glob import os from gvsbuild.utils.base_builders import MakeGir, Meson from gvsbuild.utils.base_expanders import Tarball from gvsbuild.utils.base_project import Project, project_add class Project_gtk_base(Tarball, Project, MakeGir): def make_all_mo(self): mo = "gtk20.mo" if self.name == "gtk" else "gtk30.mo" localedir = os.path.join(self.pkg_dir, "share", "locale") self.push_location(r".\po") for fp in glob.glob(os.path.join(self.build_dir, "po", "*.po")): f = os.path.basename(fp) lcmsgdir = os.path.join(localedir, f[:-3], "LC_MESSAGES") self.builder.make_dir(lcmsgdir) cmd = " ".join(["msgfmt", "-co", os.path.join(lcmsgdir, mo), f]) self.builder.exec_cmd(cmd, working_dir=self._get_working_dir()) self.pop_location() self.install(r".\COPYING share\doc\%s" % self.name) @project_add class Gtk(Project_gtk_base): def __init__(self): Project.__init__( self, "gtk", archive_url="http://ftp.acc.umu.se/pub/GNOME/sources/gtk+/2.24/gtk+-2.24.31.tar.xz", hash="68c1922732c7efc08df4656a5366dcc3afdc8791513400dac276009b40954658", dependencies=["atk", "gdk-pixbuf", "pango"], patches=[ "gtk-revert-scrolldc-commit.patch", "gtk-bgimg.patch", "gtk-accel.patch", # https://github.com/hexchat/hexchat/issues/1007 "gtk-multimonitor.patch", # These two will be in 2.24.33 "bfdac2f70e005b2504cc3f4ebbdab328974d005a.patch", "61162225f712df648f38fd12bc0817cfa9f79a64.patch", # https://github.com/hexchat/hexchat/issues/2077 "0001-GDK-W32-Remove-WS_EX_LAYERED-from-an-opaque-window.patch", ], ) if Project.opts.enable_gi: self.add_dependency("gobject-introspection") def build(self): self.builder.mod_env( "INCLUDE", "{}\\include\\harfbuzz".format(self.builder.gtk_dir) ) self.exec_msbuild_gen(r"build\win32", "gtk+.sln", add_pars="/p:UseEnv=True") self.make_all_mo() def post_install(self): if Project.opts.enable_gi: self.builder.mod_env( "INCLUDE", "{}\\include\\cairo".format(self.builder.gtk_dir) ) self.builder.mod_env( "INCLUDE", "{}\\include\\harfbuzz".format(self.builder.gtk_dir) ) self.make_single_gir("gtk", prj_dir="gtk") @project_add class Gtk320(Project_gtk_base): def __init__(self): if self.opts.gtk3_ver != "3.20": self.ignore() return Project.__init__( self, "gtk3", prj_dir="gtk3-20", archive_url="https://download.gnome.org/sources/gtk%2B/3.20/gtk%2B-3.20.10.tar.xz", hash="e81da1af1c5c1fee87ba439770e17272fa5c06e64572939814da406859e56b70", dependencies=["atk", "gdk-pixbuf", "pango", "libepoxy"], patches=["gtk3-clip-retry-if-opened-by-others.patch"], ) if Project.opts.enable_gi: self.add_dependency("gobject-introspection") def build(self): self.builder.mod_env( "INCLUDE", "{}\\include\\harfbuzz".format(self.builder.gtk_dir) ) self.exec_msbuild_gen( r"build\win32", "gtk+.sln", add_pars="/p:UseEnv=True /p:GtkPostInstall=rem" ) self.make_all_mo() def post_install(self): if 
Project.opts.enable_gi: self.builder.mod_env( "INCLUDE", "{}\\include\\cairo".format(self.builder.gtk_dir) ) self.make_single_gir("gtk", prj_dir="gtk3-20") self.exec_cmd( r"%(gtk_dir)s\bin\glib-compile-schemas.exe %(gtk_dir)s\share\glib-2.0\schemas" ) self.exec_cmd( r'%(gtk_dir)s\bin\gtk-update-icon-cache.exe --ignore-theme-index --force "%(gtk_dir)s\share\icons\hicolor"' ) @project_add class Gtk322(Project_gtk_base): def __init__(self): if self.opts.gtk3_ver != "3.22": self.ignore() return Project.__init__( self, "gtk3", prj_dir="gtk3-22", archive_url="https://download.gnome.org/sources/gtk%2B/3.22/gtk%2B-3.22.30.tar.xz", hash="a1a4a5c12703d4e1ccda28333b87ff462741dc365131fbc94c218ae81d9a6567", dependencies=["atk", "gdk-pixbuf", "pango", "libepoxy"], ) if Project.opts.enable_gi: self.add_dependency("gobject-introspection") def build(self): self.builder.mod_env( "INCLUDE", "{}\\include\\harfbuzz".format(self.builder.gtk_dir) ) self.exec_msbuild_gen( r"build\win32", "gtk+.sln", add_pars="/p:UseEnv=True /p:GtkPostInstall=rem" ) self.make_all_mo() def post_install(self): if Project.opts.enable_gi: self.builder.mod_env( "INCLUDE", "{}\\include\\cairo".format(self.builder.gtk_dir) ) self.make_single_gir("gtk", prj_dir="gtk3-22") self.exec_cmd( r"%(gtk_dir)s\bin\glib-compile-schemas.exe %(gtk_dir)s\share\glib-2.0\schemas" ) self.exec_cmd( r'%(gtk_dir)s\bin\gtk-update-icon-cache.exe --ignore-theme-index --force "%(gtk_dir)s\share\icons\hicolor"' ) @project_add class Gtk324(Tarball, Meson): def __init__(self): if self.opts.gtk3_ver != "3.24": self.ignore() return Project.__init__( self, "gtk3", prj_dir="gtk3-24", archive_url="https://download.gnome.org/sources/gtk%2B/3.24/gtk%2B-3.24.31.tar.xz", hash="423c3e7fdb4c459ee889e35fd4d71fd2623562541c1041b11c07e5ad1ff10bf9", dependencies=["atk", "gdk-pixbuf", "pango", "libepoxy"], patches=[ "gtk_update_icon_cache.patch", ], ) if self.opts.enable_gi: self.add_dependency("gobject-introspection") enable_gi = "true" else: enable_gi = "false" self.add_param("-Dintrospection={}".format(enable_gi)) def build(self): Meson.build(self, meson_params="-Dtests=false -Ddemos=false -Dexamples=false") self.install(r".\COPYING share\doc\gtk3") @project_add class Gtk4(Tarball, Meson): def __init__(self): Project.__init__( self, "gtk4", prj_dir="gtk4", archive_url="https://download.gnome.org/sources/gtk/4.6/gtk-4.6.1.tar.xz", hash="d85508d21cbbcd63d568a7862af5ecd63b978d7d5799cbe404c91d2389d0ec5f", dependencies=["gdk-pixbuf", "pango", "libepoxy", "graphene"], patches=[], ) if self.opts.enable_gi: self.add_dependency("gobject-introspection") enable_gi = "enabled" else: enable_gi = "disabled" self.add_param("-Dintrospection={}".format(enable_gi)) def build(self): Meson.build( self, meson_params="-Dbuild-tests=false -Ddemos=false -Dbuild-examples=false -Dmedia-gstreamer=disabled", ) self.install(r".\COPYING share\doc\gtk4")
wingtk/gvsbuild
gvsbuild/projects/gtk.py
Python
gpl-2.0
8,166
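# Illustrative sketch (not part of gvsbuild) of the minimal shape of a new
# project definition, following the Gtk4 recipe above. The project name,
# archive URL, hash, and dependency are placeholders, not a real project.
from gvsbuild.utils.base_builders import Meson
from gvsbuild.utils.base_expanders import Tarball
from gvsbuild.utils.base_project import Project, project_add


@project_add
class MyLib(Tarball, Meson):
    def __init__(self):
        Project.__init__(
            self,
            "mylib",  # hypothetical project name
            archive_url="https://example.org/mylib-1.0.tar.xz",  # placeholder
            hash="0" * 64,  # placeholder sha256
            dependencies=["glib"],  # assumed dependency
        )

    def build(self):
        Meson.build(self)
        self.install(r".\COPYING share\doc\mylib")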
#!/usr/bin/python
#======================================================================
#
# Project     : hpp_IOStressTest
# File        : IOST_WRun_SPI.py
# Date        : Oct 25, 2016
# Author      : HuuHoang Nguyen
# Contact     : hhnguyen@apm.com
#             : hoangnh.hpp@gmail.com
# License     : MIT License
# Copyright   : 2016
# Description : The hpp_IOStressTest is under the MIT License, a copy of
#               which may be found in LICENSE
#
#======================================================================

import io
import os
import sys
import time
from IOST_Prepare import IOST_Prepare
from IOST_Config import *
from IOST_Basic import *
import gtk
import gobject
import gtk.glade

#======================================================================
IOST_WRun_SPI_Debug_Enable = 1

#======================================================================
class IOST_WRun_SPI():
    """This class gets all SPI objects from the IOST_WRun_Skylark window and
    controls these components."""

    #----------------------------------------------------------------------
    def __init__(self, glade_filename, window_name, builder=None):
        "Create (or reuse) the builder that owns the SPI objects."
        self.IOST_WRun_SPI_window = window_name
        if not builder:
            self.WRun_SPI_Builder = gtk.Builder()
            self.WRun_SPI_Builder.add_from_file(glade_filename)
            self.WRun_SPI_Builder.connect_signals(self)
        else:
            self.WRun_SPI_Builder = builder

    #----------------------------------------------------------------------
    def WRun_GetSPI_Obj(self, window_name):
        "Get all SPI objects on the WRun window and store them in self.IOST_Objs."
        for i in range(0, self.IOST_Data["SPI_PortNum"]):
            for suffix in ("_Action_L", "_PassNum_L", "_FailNum_L",
                           "_Name_L", "_Pass_L", "_Fail_L"):
                key = "_Summary_SPI" + str(i) + suffix
                self.IOST_Objs[window_name][window_name + key] = \
                    self.WRun_SPI_Builder.get_object(
                        self.IOST_Objs[window_name][key])

    #----------------------------------------------------------------------
    def WRun_InitSPI_Obj(self, window_name):
        "Initialize all SPI objects when WRun starts."
        if self.IOST_Data["SPI"] == STATUS_ENABLE:
            self.WRun_SPI_Builder.get_object(window_name + "_Summary_SPI_F").set_sensitive(True)
            self.WRun_basic.FormatText(
                self.WRun_StationInfo_Builder.get_object("IOST_WRun_Summary_SPI_L"),
                color=WRUN_IP_COLOR_DEFAULT, bold=True)
            for i in range(0, self.IOST_Data["SPI_PortNum"]):
                if self.IOST_Data["SPI" + str(i)][0] == STATUS_DISABLE:
                    self.WRun_SetSensitive_SPI(window_name, i, False)
                else:
                    self.WRun_SetSensitive_SPI(window_name, i, True)
                    self.WRun_basic.FormatText(
                        self.IOST_Objs[window_name][window_name + "_Summary_SPI" + str(i) + "_Name_L"],
                        "blue", bold=True)
                    self.WRun_basic.FormatText(
                        self.IOST_Objs[window_name][window_name + "_Summary_SPI" + str(i) + "_Pass_L"],
                        "green")
                    self.WRun_basic.FormatText(
                        self.IOST_Objs[window_name][window_name + "_Summary_SPI" + str(i) + "_Fail_L"],
                        "red")
        else:
            for i in range(0, self.IOST_Data["SPI_PortNum"]):
                self.IOST_Objs[window_name][window_name + "_Summary_SPI" + str(i) + "_Action_L"].set_text(STATUS_DISABLE)
                self.IOST_Objs[window_name][window_name + "_Summary_SPI" + str(i) + "_PassNum_L"].set_text(STATUS_EMPTY)
                self.IOST_Objs[window_name][window_name + "_Summary_SPI" + str(i) + "_FailNum_L"].set_text(STATUS_EMPTY)
            self.WRun_SPI_Builder.get_object(window_name + "_Summary_SPI_F").set_sensitive(False)

    #----------------------------------------------------------------------
    def WRun_SetSensitive_SPI(self, window_name, element, is_sensitive):
        "Enable or disable the widgets of one SPI port row."
        if not is_sensitive:
            self.IOST_Objs[window_name][window_name + "_Summary_SPI" + str(element) + "_Action_L"].set_text(STATUS_DISABLE)
            self.IOST_Objs[window_name][window_name + "_Summary_SPI" + str(element) + "_PassNum_L"].set_text(STATUS_EMPTY)
            self.IOST_Objs[window_name][window_name + "_Summary_SPI" + str(element) + "_FailNum_L"].set_text(STATUS_EMPTY)
            self.WRun_SPI_Builder.get_object(window_name + "_Summary_SPI" + str(element) + "_HB").set_sensitive(is_sensitive)
        else:
            self.WRun_SPI_Builder.get_object(window_name + "_Summary_SPI" + str(element) + "_HB").set_sensitive(is_sensitive)
            self.WRun_SetAction_SPI(window_name, element, STATUS_INIT)
            self.IOST_Objs[window_name][window_name + "_Summary_SPI" + str(element) + "_Action_L"].set_text(self.IOST_Data["SPI" + str(element) + "_Status"])
            self.IOST_Objs[window_name][window_name + "_Summary_SPI" + str(element) + "_PassNum_L"].set_text(str(self.IOST_Data["SPI" + str(element) + "_PassNum"]))
            self.IOST_Objs[window_name][window_name + "_Summary_SPI" + str(element) + "_FailNum_L"].set_text(str(self.IOST_Data["SPI" + str(element) + "_FailNum"]))

    def WRun_SetAction_SPI(self, window_name, element, is_action):
        self.IOST_Data["SPI" + str(element) + "_Status"] = is_action
HPPTECH/hpp_IOSTressTest
Refer/IOST_OLD_SRC/IOST_0.17/Libs/IOST_WRun_SPI.py
Python
mit
6,038
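# Illustrative sketch (not part of the repository above) of the widget-naming
# convention IOST_WRun_SPI relies on: per-port label ids are built as
# <window>_Summary_SPI<i><suffix>. The window name and port count are made up.
window_name = "IOST_WRun_Skylark"  # assumed window id
for i in range(2):                 # e.g. SPI_PortNum == 2
    for suffix in ("_Action_L", "_PassNum_L", "_FailNum_L"):
        print(window_name + "_Summary_SPI" + str(i) + suffix)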
# -*- coding: utf-8 -*-
# Copyright 2018 OpenSynergy Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).

from openerp import models, fields
from openerp import tools


class DaftarKlienNonAuditUmum(models.Model):
    _name = "l10n_id.ppajp_daftar_klien_non_audit_umum"
    _description = "PPAJP - Daftar Klien Non Audit Umum"
    _auto = False

    name = fields.Char(
        string="Nomor Laporan",
    )
    company_id = fields.Many2one(
        string="Company",
        comodel_name="res.company",
    )
    partner_id = fields.Many2one(
        string="Nama",
        comodel_name="res.partner",
    )
    contact_address = fields.Char(
        string="Alamat",
        related="partner_id.contact_address",
        store=False,
    )
    npwp = fields.Char(
        string="NPWP",
    )
    date = fields.Date(
        string="Tanggal Laporan",
    )
    date_start = fields.Date(
        string="Awal Tahun Buku",
    )
    date_end = fields.Date(
        string="Akhir Tahun Buku",
    )
    signing_accountant_id = fields.Many2one(
        string="Penanggung Jawab",
        comodel_name="res.partner",
    )
    service_id = fields.Many2one(
        string="Jenis Jasa Yang Diberikan KAP",
        comodel_name="accountant.service",
    )
    sector_id = fields.Many2one(
        string="Bidang Usaha Klien",
        comodel_name="res.partner.sector",
    )

    def _select(self):
        select_str = """
        SELECT  a.id AS id,
                a.company_id AS company_id,
                a.name AS name,
                a.partner_id AS partner_id,
                CASE
                    WHEN b.vat IS NOT NULL THEN RIGHT(b.vat, -2)
                    ELSE '-'
                END AS npwp,
                a.date AS date,
                a.date_start AS date_start,
                a.date_end AS date_end,
                a.signing_accountant_id AS signing_accountant_id,
                a.service_id AS service_id,
                b.sector_id AS sector_id
        """
        return select_str

    def _from(self):
        from_str = """
        accountant_report AS a
        """
        return from_str

    def _where(self):
        where_str = """
        WHERE a.state = 'valid'
        """
        return where_str

    def _join(self):
        join_str = """
        JOIN res_partner AS b ON a.partner_id = b.id
        JOIN (
            SELECT  c1.company_id,
                    c1.service_id
            FROM rel_company_2_jasa_non_audit_umum AS c1
        ) AS c ON a.service_id = c.service_id AND
                  a.company_id = c.company_id
        """
        return join_str

    def init(self, cr):
        tools.drop_view_if_exists(cr, self._table)
        # pylint: disable=locally-disabled, sql-injection
        cr.execute("""CREATE or REPLACE VIEW %s as (
            %s
            FROM %s
            %s
            %s
        )""" % (
            self._table,
            self._select(),
            self._from(),
            self._join(),
            self._where()
        ))
open-synergy/opnsynid-l10n-indonesia
l10n_id_ppajp_daftar_klien_non_audit_umum/reports/daftar_klien_non_audit_umum.py
Python
agpl-3.0
3,129
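# Illustrative sketch (not part of the module above) of the OpenERP/Odoo 8
# SQL-view pattern it uses: an _auto = False model whose backing table is
# (re)created as a view in init(). The model and view body are hypothetical.
from openerp import models, fields
from openerp import tools


class SimpleReport(models.Model):
    _name = "x_simple.report"
    _auto = False  # no regular table; backed by the view created below

    name = fields.Char(string="Name")

    def init(self, cr):
        tools.drop_view_if_exists(cr, self._table)
        cr.execute("""CREATE OR REPLACE VIEW %s AS (
            SELECT p.id AS id, p.name AS name
            FROM res_partner AS p
        )""" % self._table)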
from flask.ext.wtf import Form
from wtforms import TextField, BooleanField, SelectField
from wtforms.validators import Required


class GetIdForm(Form):
    identifier = TextField('identifier', validators=[Required()])


class SearchForm(Form):
    text = TextField('text', validators=[Required()])


class SearchItemsForm(Form):
    kind_choices = [('mimetype', 'Mimetype'),
                    ('lib', 'Library'),
                    ('bin', 'Binary'),
                    ('python2', 'Python-2 module'),
                    ('python3', 'Python-3 module')]
    text = TextField('text', validators=[Required()])
    kind = SelectField('kind', choices=kind_choices, default='1')
pombreda/https-gitorious.org-appstream-figment
figment/forms.py
Python
gpl-3.0
627
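# Illustrative usage sketch (not part of figment) for SearchItemsForm above,
# in a minimal Flask view. The route, secret key, and template name are
# hypothetical; figment's real views may differ.
from flask import Flask, render_template

from figment.forms import SearchItemsForm

app = Flask(__name__)
app.config['SECRET_KEY'] = 'dev'  # Flask-WTF needs a secret key for CSRF


@app.route('/search', methods=['GET', 'POST'])
def search_items():
    form = SearchItemsForm()
    if form.validate_on_submit():
        # form.kind.data is one of the kind_choices keys, e.g. 'mimetype'
        return 'searching %s for %r' % (form.kind.data, form.text.data)
    return render_template('search.html', form=form)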
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, socket, tempfile, time

try:
    import http.client as httplib
    from urllib import request as url_request
except ImportError:
    # the imports above are available on py3+; below is py2.7
    import httplib as httplib
    import urllib as url_request


# Configure the app: download it to a temp dir first if given as a URL.
def configure_app(app):
    if app[:4].lower() == "http":
        tmpDir = tempfile.mkdtemp()
        randNum = str(time.time()).replace(".", "")
        tmpPath = os.path.join(tmpDir, randNum + ".apk")
        configure_downloaded_app(app, tmpPath)
        return tmpPath
    else:
        configure_local_app(app)
        return app


# Validate a local app path
def configure_local_app(app):
    ext = app[-4:].lower()
    if ext == ".apk":
        if not os.path.exists(app):
            msg = "App does not exist: %s" % app
            raise Exception(msg)
    else:
        msg = "Using local app, but it didn't end in .apk"
        raise Exception(msg)


# Download the app and sanity-check the result
def configure_downloaded_app(app, path):
    ext = app[-4:].lower()
    if ext == ".apk":
        download_app(app, path)
        if os.path.getsize(path) < 1024:
            msg = "Failed downloading app from app URL(%s)" % app
            raise Exception(msg)
    else:
        msg = "App URL(%s) didn't seem to point to a .apk file" % app
        raise Exception(msg)


# Download helper
def download_app(app, path):
    try:
        # set the urllib timeout
        socket.setdefaulttimeout(600)
        url_request.urlretrieve(app, path)
    except Exception:
        msg = "Failed downloading app from app URL(%s)" % app
        raise Exception(msg)
quentin-xia/Maticv
common/app.py
Python
mit
1,573
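# Illustrative usage sketch (not part of the module above) for configure_app:
# given an http(s) URL it downloads to a temp .apk and returns that path;
# given a local path it validates the .apk exists and returns it unchanged.
# The URL below is a placeholder and would need to resolve for this to run.
from common.app import configure_app  # import path per the repo layout above

apk = configure_app("http://example.org/demo.apk")  # downloads to a temp dir
print(apk)  # e.g. /tmp/tmpXXXX/1700000000000.apk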
from mas.multiagent import *


class TouchPointOracle(OracleSpace):
    def touch(self, c, d=0):
        blocks = []
        for obj in self.get_objs_at(c, d):
            l = ppdist_l2(obj.pos, c)
            x = obj.pos[0] - c[0]
            y = obj.pos[1] - c[1]
            if abs(l) > 0.001:
                x *= max(0, l - obj.radius) / float(l)
                y *= max(0, l - obj.radius) / float(l)
            blocks.append((x, y))
        for obt in self.get_obts_at(c, d):
            diff = pldiff(c, obt.start, obt.end)
            blocks.append((-diff[0], -diff[1]))
        return blocks


class ShowLabelObject(Object):
    def __init__(self, name, mass=1.0, radius=10.0):
        super(ShowLabelObject, self).__init__(name, mass, radius)
        self.__label = self.name

    @property
    def label(self):
        return self.__label

    @label.setter
    def label(self, value):
        self.__label = value

    def draw(self, screen):
        super(ShowLabelObject, self).draw(screen)
        if self.visible:
            font = pygame.font.Font(None, 16)
            (width, height) = screen.get_size()
            pos_draw = (int(width / 2.0 + self.pos[0] - 5.0),
                        int(height / 2.0 - self.pos[1] - self.radius - 10.0))
            screen.blit(font.render(self.label, 1, THECOLORS["black"]), pos_draw)
csningli/MultiAgent
mas/extension.py
Python
apache-2.0
1,352
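# Illustrative sketch (not part of the module above) for ShowLabelObject:
# the label property defaults to the object's name and can be reassigned
# before the next draw(). Assumes the mas package (and pygame) are installed.
from mas.extension import ShowLabelObject

obj = ShowLabelObject("agent-0", mass=1.0, radius=10.0)
print(obj.label)      # "agent-0"
obj.label = "leader"  # drawn above the object on the next draw() call
print(obj.label)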
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import abc
from typing import cast, TYPE_CHECKING

from .. import CredentialUnavailableError
from .._internal.managed_identity_client import ManagedIdentityClient
from .._internal.get_token_mixin import GetTokenMixin

if TYPE_CHECKING:
    from typing import Any, Optional
    from azure.core.credentials import AccessToken


class ManagedIdentityBase(GetTokenMixin):
    """Base class for internal credentials using ManagedIdentityClient"""

    def __init__(self, **kwargs):
        # type: (**Any) -> None
        super(ManagedIdentityBase, self).__init__()
        self._client = self.get_client(**kwargs)

    @abc.abstractmethod
    def get_client(self, **kwargs):
        # type: (**Any) -> Optional[ManagedIdentityClient]
        pass

    @abc.abstractmethod
    def get_unavailable_message(self):
        # type: () -> str
        pass

    def __enter__(self):
        if self._client:
            self._client.__enter__()
        return self

    def __exit__(self, *args):
        if self._client:
            self._client.__exit__(*args)

    def close(self):
        # type: () -> None
        self.__exit__()

    def get_token(self, *scopes, **kwargs):
        # type: (*str, **Any) -> AccessToken
        if not self._client:
            raise CredentialUnavailableError(message=self.get_unavailable_message())
        return super(ManagedIdentityBase, self).get_token(*scopes, **kwargs)

    def _acquire_token_silently(self, *scopes, **kwargs):
        # type: (*str, **Any) -> Optional[AccessToken]
        # casting because mypy can't determine that these methods are called
        # only by get_token, which raises when self._client is None
        return cast(ManagedIdentityClient, self._client).get_cached_token(*scopes)

    def _request_token(self, *scopes, **kwargs):
        # type: (*str, **Any) -> AccessToken
        return cast(ManagedIdentityClient, self._client).request_token(*scopes, **kwargs)
Azure/azure-sdk-for-python
sdk/identity/azure-identity/azure/identity/_internal/managed_identity_base.py
Python
mit
2,084
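# Illustrative sketch (not part of the SDK) of a ManagedIdentityBase subclass:
# both abstract hooks are implemented, and returning None from get_client
# makes get_token raise CredentialUnavailableError with the message below.
# Importing from a private module is for illustration only.
from azure.identity import CredentialUnavailableError
from azure.identity._internal.managed_identity_base import ManagedIdentityBase


class FakeManagedIdentityCredential(ManagedIdentityBase):
    def get_client(self, **kwargs):
        # no managed identity endpoint in this (hypothetical) environment
        return None

    def get_unavailable_message(self):
        return "managed identity endpoint not found in this environment"


cred = FakeManagedIdentityCredential()
try:
    cred.get_token("https://management.azure.com/.default")
except CredentialUnavailableError as exc:
    print(exc)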
# -*- coding: utf-8 -*- # vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright (C) 2014-2022 GEM Foundation # # OpenQuake is free software: you can redistribute it and/or modify it # under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # OpenQuake is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with OpenQuake. If not, see <http://www.gnu.org/licenses/>. import os import sys import abc import pdb import logging import operator import traceback from datetime import datetime from shapely import wkt import h5py import numpy import pandas from openquake.baselib import ( general, hdf5, __version__ as engine_version) from openquake.baselib import performance, parallel, python3compat from openquake.baselib.performance import Monitor from openquake.hazardlib import InvalidFile, site, stats from openquake.hazardlib.site_amplification import Amplifier from openquake.hazardlib.site_amplification import AmplFunction from openquake.hazardlib.calc.filters import SourceFilter, getdefault from openquake.hazardlib.source import rupture from openquake.hazardlib.shakemap.maps import get_sitecol_shakemap from openquake.hazardlib.shakemap.gmfs import to_gmfs from openquake.risklib import riskinput, riskmodels from openquake.commonlib import readinput, logictree, datastore, source_reader from openquake.calculators.export import export as exp from openquake.calculators import getters get_taxonomy = operator.attrgetter('taxonomy') get_weight = operator.attrgetter('weight') get_imt = operator.attrgetter('imt') calculators = general.CallableDict(operator.attrgetter('calculation_mode')) U8 = numpy.uint8 U16 = numpy.uint16 U32 = numpy.uint32 F32 = numpy.float32 TWO16 = 2 ** 16 TWO32 = 2 ** 32 stats_dt = numpy.dtype([('mean', F32), ('std', F32), ('min', F32), ('max', F32), ('len', U16)]) def check_imtls(this, parent): """ Fix the hazard_imtls of two calculations if possible """ for imt, imls in this.items(): if (imls != parent[imt]).any(): raise ValueError('The intensity measure levels %s are different ' 'from the parent levels %s for %s' % ( imls, parent[imt], imt)) # this is used for the minimum_intensity dictionaries def consistent(dic1, dic2): """ Check if two dictionaries with default are consistent: >>> consistent({'PGA': 0.05, 'SA(0.3)': 0.05}, {'default': 0.05}) True >>> consistent({'SA(0.3)': 0.1, 'SA(0.6)': 0.05}, ... 
{'default': 0.1, 'SA(0.3)': 0.1, 'SA(0.6)': 0.05}) True """ if dic1 == dic2: return True v1 = set(dic1.values()) v2 = set(dic2.values()) missing = set(dic2) - set(dic1) - {'default'} if len(v1) == 1 and len(v2) == 1 and v1 == v2: # {'PGA': 0.05, 'SA(0.3)': 0.05} is consistent with {'default': 0.05} return True return not missing def get_stats(seq): std = numpy.nan if len(seq) == 1 else numpy.std(seq, ddof=1) tup = (numpy.mean(seq), std, numpy.min(seq), numpy.max(seq), len(seq)) return numpy.array(tup, stats_dt) class InvalidCalculationID(Exception): """ Raised when running a post-calculation on top of an incompatible pre-calculation """ def build_weights(realizations): """ :returns: an array with the realization weights of shape R """ arr = numpy.array([rlz.weight['default'] for rlz in realizations]) return arr def set_array(longarray, shortarray): """ :param longarray: a numpy array of floats of length L >= l :param shortarray: a numpy array of floats of length l Fill `longarray` with the values of `shortarray`, starting from the left. If `shortarry` is shorter than `longarray`, then the remaining elements on the right are filled with `numpy.nan` values. """ longarray[:len(shortarray)] = shortarray longarray[len(shortarray):] = numpy.nan class BaseCalculator(metaclass=abc.ABCMeta): """ Abstract base class for all calculators. :param oqparam: OqParam object :param monitor: monitor object :param calc_id: numeric calculation ID """ precalc = None accept_precalc = [] from_engine = False # set by engine.run_calc is_stochastic = False # True for scenario and event based calculators def __init__(self, oqparam, calc_id): self.datastore = datastore.new(calc_id, oqparam) self._monitor = Monitor( '%s.run' % self.__class__.__name__, measuremem=True, h5=self.datastore) # NB: using h5=self.datastore.hdf5 would mean losing the performance # info about Calculator.run since the file will be closed later on self.oqparam = oqparam def pre_checks(self): """ Checks to run after the pre_execute but before the execute """ def monitor(self, operation='', **kw): """ :returns: a new Monitor instance """ mon = self._monitor(operation, h5=self.datastore.hdf5) self._monitor.calc_id = mon.calc_id = self.datastore.calc_id vars(mon).update(kw) return mon def save_params(self, **kw): """ Update the current calculation parameters and save engine_version """ if ('hazard_calculation_id' in kw and kw['hazard_calculation_id'] is None): del kw['hazard_calculation_id'] vars(self.oqparam).update(**kw) if isinstance(self.oqparam.risk_imtls, dict): # always except in case_shakemap self.datastore['oqparam'] = self.oqparam attrs = self.datastore['/'].attrs attrs['engine_version'] = engine_version attrs['date'] = datetime.now().isoformat()[:19] if 'checksum32' not in attrs: attrs['input_size'] = size = self.oqparam.get_input_size() attrs['checksum32'] = check = readinput.get_checksum32( self.oqparam, self.datastore.hdf5) logging.info(f'Checksum of the inputs: {check} ' f'(total size {general.humansize(size)})') self.datastore.flush() def check_precalc(self, precalc_mode): """ Defensive programming against users providing an incorrect pre-calculation ID (with ``--hazard-calculation-id``). 
:param precalc_mode: calculation_mode of the previous calculation """ calc_mode = self.oqparam.calculation_mode ok_mode = self.accept_precalc if calc_mode != precalc_mode and precalc_mode not in ok_mode: raise InvalidCalculationID( 'In order to run a calculation of kind %r, ' 'you need to provide a calculation of kind %r, ' 'but you provided a %r instead' % (calc_mode, ok_mode, precalc_mode)) def run(self, pre_execute=True, concurrent_tasks=None, remove=True, shutdown=False, **kw): """ Run the calculation and return the exported outputs. :param pre_execute: set it to False to avoid running pre_execute :param concurrent_tasks: set it to 0 to disable parallelization :param remove: set it to False to remove the hdf5cache file (if any) :param shutdown: set it to True to shutdown the ProcessPool """ with self._monitor: self._monitor.username = kw.get('username', '') if concurrent_tasks is None: # use the job.ini parameter ct = self.oqparam.concurrent_tasks else: # used the parameter passed in the command-line ct = concurrent_tasks if ct == 0: # disable distribution temporarily oq_distribute = os.environ.get('OQ_DISTRIBUTE') os.environ['OQ_DISTRIBUTE'] = 'no' if ct != self.oqparam.concurrent_tasks: # save the used concurrent_tasks self.oqparam.concurrent_tasks = ct self.save_params(**kw) try: if pre_execute: self.pre_execute() self.result = self.execute() if self.result is not None: self.post_execute(self.result) self.export(kw.get('exports', '')) except Exception: if kw.get('pdb'): # post-mortem debug tb = sys.exc_info()[2] traceback.print_tb(tb) pdb.post_mortem(tb) else: logging.critical('', exc_info=True) raise finally: if shutdown: parallel.Starmap.shutdown() # cleanup globals if ct == 0: # restore OQ_DISTRIBUTE if oq_distribute is None: # was not set del os.environ['OQ_DISTRIBUTE'] else: os.environ['OQ_DISTRIBUTE'] = oq_distribute readinput.pmap = None readinput.exposure = None readinput.gmfs = None readinput.eids = None readinput.gsim_lt_cache.clear() # remove temporary hdf5 file, if any if os.path.exists(self.datastore.tempname) and remove: os.remove(self.datastore.tempname) return getattr(self, 'exported', {}) def core_task(*args): """ Core routine running on the workers. """ raise NotImplementedError @abc.abstractmethod def pre_execute(self): """ Initialization phase. """ @abc.abstractmethod def execute(self): """ Execution phase. Usually will run in parallel the core function and return a dictionary with the results. """ @abc.abstractmethod def post_execute(self, result): """ Post-processing phase of the aggregated output. It must be overridden with the export code. It will return a dictionary of output files. """ def gzip_inputs(self): """ Gzipping the inputs and saving them in the datastore """ logging.info('gzipping the input files') fnames = readinput.get_input_files(self.oqparam) self.datastore.store_files(fnames) def export(self, exports=None): """ Export all the outputs in the datastore in the given export formats. Individual outputs are not exported if there are multiple realizations. 
""" self.exported = getattr(self, 'exported', {}) if isinstance(exports, tuple): fmts = exports elif exports: # is a string fmts = exports.split(',') elif isinstance(self.oqparam.exports, tuple): fmts = self.oqparam.exports else: # is a string fmts = self.oqparam.exports.split(',') keys = set(self.datastore) | {'fullreport'} has_hcurves = ('hcurves-stats' in self.datastore or 'hcurves-rlzs' in self.datastore) if has_hcurves: keys.add('hcurves') for fmt in fmts: if not fmt: continue for key in sorted(keys): # top level keys if 'rlzs' in key and self.R > 1: if (key[:-4] + 'stats') in self.datastore: continue # skip individual curves self._export((key, fmt)) if has_hcurves and self.oqparam.hazard_maps: self._export(('hmaps', fmt)) if has_hcurves and self.oqparam.uniform_hazard_spectra: self._export(('uhs', fmt)) def _export(self, ekey): if ekey not in exp or self.exported.get(ekey): # already exported return with self.monitor('export'): try: self.exported[ekey] = fnames = exp(ekey, self.datastore) except Exception as exc: fnames = [] logging.error('Could not export %s: %s', ekey, exc) if fnames: logging.info('exported %s: %s', ekey[0], fnames) def __repr__(self): return '<%s#%d>' % (self.__class__.__name__, self.datastore.calc_id) def check_time_event(oqparam, occupancy_periods): """ Check the `time_event` parameter in the datastore, by comparing with the periods found in the exposure. """ time_event = oqparam.time_event if time_event and time_event not in occupancy_periods: raise ValueError( 'time_event is %s in %s, but the exposure contains %s' % (time_event, oqparam.inputs['job_ini'], ', '.join(occupancy_periods))) def check_amplification(ampl_df, sitecol): """ Make sure the amplification codes in the site collection match the ones in the amplification table. :param ampl_df: the amplification table as a pandas DataFrame :param sitecol: the site collection """ codeset = set(ampl_df.index) if len(codeset) == 1: # there is a single amplification function, there is no need to # extend the sitecol with an ampcode field return codes = set(sitecol.ampcode) missing = codes - codeset if missing: raise ValueError('The site collection contains references to missing ' 'amplification functions: %s' % b' '.join(missing). 
decode('utf8')) class HazardCalculator(BaseCalculator): """ Base class for hazard calculators based on source models """ af = None amplifier = None def src_filter(self): """ :returns: a SourceFilter """ oq = self.oqparam if getattr(self, 'sitecol', None): sitecol = self.sitecol.complete else: # can happen to the ruptures-only calculator sitecol = None return SourceFilter(sitecol, oq.maximum_distance) @property def E(self): """ :returns: the number of stored events """ try: return len(self.datastore['events']) except KeyError: return 0 @property def N(self): """ :returns: the total number of sites """ if hasattr(self, 'sitecol'): return len(self.sitecol.complete) if self.sitecol else 0 if 'sitecol' not in self.datastore: return 0 return len(self.datastore['sitecol']) @property def few_sites(self): """ :returns: True if there are less than max_sites_disagg """ return len(self.sitecol.complete) <= self.oqparam.max_sites_disagg def check_overflow(self): """Overridden in event based""" def check_floating_spinning(self): oq = self.oqparam f, s = self.csm.get_floating_spinning_factors() if f != 1: logging.info('Rupture floating factor = %s', f) if s != 1: logging.info('Rupture spinning factor = %s', s) if (f * s >= 1.5 and oq.no_pointsource_distance and 'classical' in oq.calculation_mode): logging.info( 'You are not using the pointsource_distance approximation:\n' 'https://docs.openquake.org/oq-engine/advanced/common-mistakes.html#pointsource-distance') elif 'classical' in oq.calculation_mode: logging.info('Using pointsource_distance=%s', oq.pointsource_distance) def read_inputs(self): """ Read risk data and sources if any """ oq = self.oqparam self._read_risk_data() self.check_overflow() # check if self.sitecol is too large if ('amplification' in oq.inputs and oq.amplification_method == 'kernel'): logging.info('Reading %s', oq.inputs['amplification']) df = AmplFunction.read_df(oq.inputs['amplification']) check_amplification(df, self.sitecol) self.af = AmplFunction.from_dframe(df) if (oq.calculation_mode == 'disaggregation' and oq.max_sites_disagg < len(self.sitecol)): raise ValueError( 'Please set max_sites_disagg=%d in %s' % ( len(self.sitecol), oq.inputs['job_ini'])) if ('source_model_logic_tree' in oq.inputs and oq.hazard_calculation_id is None): with self.monitor('composite source model', measuremem=True): self.csm = csm = readinput.get_composite_source_model( oq, self.datastore.hdf5) oq.mags_by_trt = csm.get_mags_by_trt() for trt in oq.mags_by_trt: self.datastore['source_mags/' + trt] = numpy.array( oq.mags_by_trt[trt]) interp = oq.maximum_distance(trt) if len(interp.x) > 2: md = '%s->%d, ... %s->%d, %s->%d' % ( interp.x[0], interp.y[0], interp.x[-2], interp.y[-2], interp.x[-1], interp.y[-1]) logging.info('max_dist %s: %s', trt, md) self.full_lt = csm.full_lt self.init() # do this at the end of pre-execute self.pre_checks() if (not oq.hazard_calculation_id and oq.calculation_mode != 'preclassical' and not oq.save_disk_space): self.gzip_inputs() # check DEFINED_FOR_REFERENCE_VELOCITY if self.amplifier: gsim_lt = readinput.get_gsim_lt(oq) self.amplifier.check(self.sitecol.vs30, oq.vs30_tolerance, gsim_lt.values) def import_perils(self): """Defined in MultiRiskCalculator""" def pre_execute(self): """ Check if there is a previous calculation ID. If yes, read the inputs by retrieving the previous calculation; if not, read the inputs directly. 
""" oq = self.oqparam if 'gmfs' in oq.inputs or 'multi_peril' in oq.inputs: # read hazard from files assert not oq.hazard_calculation_id, ( 'You cannot use --hc together with gmfs_file') with self.monitor('importing inputs', measuremem=True): self.read_inputs() if 'gmfs' in oq.inputs: self.datastore['full_lt'] = logictree.FullLogicTree.fake() if oq.inputs['gmfs'].endswith('.csv'): eids = import_gmfs_csv(self.datastore, oq, self.sitecol.complete.sids) elif oq.inputs['gmfs'].endswith('.hdf5'): eids = import_gmfs_hdf5(self.datastore, oq) else: raise NotImplementedError( 'Importer for %s' % oq.inputs['gmfs']) E = len(eids) if hasattr(oq, 'number_of_ground_motion_fields'): if oq.number_of_ground_motion_fields != E: raise RuntimeError( 'Expected %d ground motion fields, found %d' % (oq.number_of_ground_motion_fields, E)) else: # set the number of GMFs from the file oq.number_of_ground_motion_fields = E else: self.import_perils() self.save_crmodel() elif 'hazard_curves' in oq.inputs: # read hazard from file assert not oq.hazard_calculation_id, ( 'You cannot use --hc together with hazard_curves') haz_sitecol = readinput.get_site_collection(oq) self.load_crmodel() # must be after get_site_collection self.read_exposure(haz_sitecol) # define .assets_by_site self.datastore.create_df('_poes', readinput.pmap.to_dframe()) self.datastore['assetcol'] = self.assetcol self.datastore['full_lt'] = fake = logictree.FullLogicTree.fake() self.datastore['rlzs_by_g'] = sum( fake.get_rlzs_by_grp().values(), []) with hdf5.File(self.datastore.tempname, 'a') as t: t['oqparam'] = oq self.realizations = fake.get_realizations() self.save_crmodel() self.datastore.swmr_on() elif oq.hazard_calculation_id: parent = datastore.read(oq.hazard_calculation_id) oqparent = parent['oqparam'] if 'weights' in parent: weights = numpy.unique(parent['weights'][:]) if oq.collect_rlzs and len(weights) > 1: raise ValueError( 'collect_rlzs=true can be specified only if ' 'the realizations have identical weights') if oqparent.imtls: check_imtls(self.oqparam.imtls, oqparent.imtls) self.check_precalc(oqparent.calculation_mode) self.datastore.parent = parent # copy missing parameters from the parent if 'concurrent_tasks' not in vars(self.oqparam): self.oqparam.concurrent_tasks = ( self.oqparam.__class__.concurrent_tasks.default) params = {name: value for name, value in vars(parent['oqparam']).items() if name not in vars(self.oqparam) and name != 'ground_motion_fields'} if params: self.save_params(**params) with self.monitor('importing inputs', measuremem=True): self.read_inputs() oqp = parent['oqparam'] if oqp.investigation_time != oq.investigation_time: raise ValueError( 'The parent calculation was using investigation_time=%s' ' != %s' % (oqp.investigation_time, oq.investigation_time)) hstats, rstats = list(oqp.hazard_stats()), list(oq.hazard_stats()) if hstats != rstats: raise ValueError( 'The parent calculation had stats %s != %s' % (hstats, rstats)) sec_imts = set(oq.get_sec_imts()) missing_imts = set(oq.risk_imtls) - sec_imts - set(oqp.imtls) if oqp.imtls and missing_imts: raise ValueError( 'The parent calculation is missing the IMT(s) %s' % ', '.join(missing_imts)) self.save_crmodel() elif self.__class__.precalc: calc = calculators[self.__class__.precalc]( self.oqparam, self.datastore.calc_id) calc.from_engine = self.from_engine calc.pre_checks = lambda: self.__class__.pre_checks(calc) calc.run(remove=False) calc.datastore.close() for name in ( 'csm param sitecol assetcol crmodel realizations max_weight ' 'amplifier policy_name policy_dict 
full_lt exported' ).split(): if hasattr(calc, name): setattr(self, name, getattr(calc, name)) else: with self.monitor('importing inputs', measuremem=True): self.read_inputs() self.save_crmodel() def init(self): """ To be overridden to initialize the datasets needed by the calculation """ oq = self.oqparam if not oq.risk_imtls: if self.datastore.parent: oq.risk_imtls = ( self.datastore.parent['oqparam'].risk_imtls) if 'full_lt' in self.datastore: full_lt = self.datastore['full_lt'] self.realizations = full_lt.get_realizations() if oq.hazard_calculation_id and 'gsim_logic_tree' in oq.inputs: # redefine the realizations by reading the weights from the # gsim_logic_tree_file that could be different from the parent full_lt.gsim_lt = logictree.GsimLogicTree( oq.inputs['gsim_logic_tree'], set(full_lt.trts)) elif hasattr(self, 'csm'): self.check_floating_spinning() self.realizations = self.csm.full_lt.get_realizations() else: # build a fake; used by risk-from-file calculators self.datastore['full_lt'] = fake = logictree.FullLogicTree.fake() self.realizations = fake.get_realizations() @general.cached_property def R(self): """ :returns: the number of realizations """ if self.oqparam.collect_rlzs: return 1 elif 'weights' in self.datastore: return len(self.datastore['weights']) try: return self.csm.full_lt.get_num_rlzs() except AttributeError: # no self.csm return self.datastore['full_lt'].get_num_rlzs() def read_exposure(self, haz_sitecol): # after load_risk_model """ Read the exposure, the risk models and update the attributes .sitecol, .assetcol """ oq = self.oqparam with self.monitor('reading exposure'): self.sitecol, self.assetcol, discarded = ( readinput.get_sitecol_assetcol( oq, haz_sitecol, self.crmodel.loss_types)) self.datastore['sitecol'] = self.sitecol if len(discarded): self.datastore['discarded'] = discarded if 'scenario' in oq.calculation_mode: # this is normal for the case of scenario from rupture logging.info('%d assets were discarded because too far ' 'from the rupture; use `oq show discarded` ' 'to show them and `oq plot_assets` to plot ' 'them' % len(discarded)) elif not oq.discard_assets: # raise an error self.datastore['assetcol'] = self.assetcol raise RuntimeError( '%d assets were discarded; use `oq show discarded` to' ' show them and `oq plot_assets` to plot them' % len(discarded)) if oq.inputs.get('insurance'): k, v = zip(*oq.inputs['insurance'].items()) self.load_insurance_data(k, v) return readinput.exposure def load_insurance_data(self, ins_types, ins_files): """ Read the insurance files and populate the policy_dict """ for loss_type, fname in zip(ins_types, ins_files): array = hdf5.read_csv( fname, {'insurance_limit': float, 'deductible': float, None: object}).array policy_name = array.dtype.names[0] policy_idx = getattr(self.assetcol.tagcol, policy_name + '_idx') insurance = numpy.zeros((len(policy_idx), 2)) for pol, ded, lim in array[ [policy_name, 'deductible', 'insurance_limit']]: insurance[policy_idx[pol]] = ded, lim self.policy_dict[loss_type] = insurance if self.policy_name and policy_name != self.policy_name: raise ValueError( 'The file %s contains %s as policy field, but we were ' 'expecting %s' % (fname, policy_name, self.policy_name)) else: self.policy_name = policy_name self.datastore['policy_dict'] = self.policy_dict def load_crmodel(self): # to be called before read_exposure # NB: this is called even if there is no risk model """ Read the risk models and set the attribute .crmodel. The crmodel can be empty for hazard calculations. 
Save the loss ratios (if any) in the datastore. """ oq = self.oqparam logging.info('Reading the risk model if present') self.crmodel = readinput.get_crmodel(oq) if not self.crmodel: parent = self.datastore.parent if 'crm' in parent: self.crmodel = riskmodels.CompositeRiskModel.read(parent, oq) return if oq.ground_motion_fields and not oq.imtls: raise InvalidFile('No intensity_measure_types specified in %s' % self.oqparam.inputs['job_ini']) self.save_params() # re-save oqparam def save_crmodel(self): """ Save the risk models in the datastore """ if len(self.crmodel): logging.info('Storing risk model') attrs = self.crmodel.get_attrs() self.datastore.create_df('crm', self.crmodel.to_dframe(), 'gzip', **attrs) def _read_risk_data(self): # read the risk model (if any), the exposure (if any) and then the # site collection, possibly extracted from the exposure. oq = self.oqparam self.load_crmodel() # must be called first if (oq.calculation_mode == 'preclassical' and 'exposure' not in oq.inputs): return elif (not oq.imtls and 'shakemap' not in oq.inputs and oq.ground_motion_fields): raise InvalidFile('There are no intensity measure types in %s' % oq.inputs['job_ini']) elif oq.hazard_calculation_id: with datastore.read(oq.hazard_calculation_id) as dstore: if 'sitecol' in dstore: haz_sitecol = dstore['sitecol'].complete else: haz_sitecol = readinput.get_site_collection( oq, self.datastore) if ('amplification' in oq.inputs and 'ampcode' not in haz_sitecol.array.dtype.names): haz_sitecol.add_col('ampcode', site.ampcode_dt) else: if 'gmfs' in oq.inputs and oq.inputs['gmfs'].endswith('.hdf5'): with hdf5.File(oq.inputs['gmfs']) as f: haz_sitecol = f['sitecol'] else: haz_sitecol = readinput.get_site_collection(oq, self.datastore) if hasattr(self, 'rup'): # for scenario we reduce the site collection to the sites # within the maximum distance from the rupture haz_sitecol, _dctx = self.cmaker.filter( haz_sitecol, self.rup) haz_sitecol.make_complete() if 'site_model' in oq.inputs: self.datastore['site_model'] = readinput.get_site_model(oq) oq_hazard = (self.datastore.parent['oqparam'] if self.datastore.parent else None) self.policy_name = '' self.policy_dict = {} if 'exposure' in oq.inputs: exposure = self.read_exposure(haz_sitecol) self.datastore['assetcol'] = self.assetcol self.datastore['cost_calculator'] = exposure.cost_calculator if hasattr(readinput.exposure, 'exposures'): self.datastore.getitem('assetcol')['exposures'] = numpy.array( exposure.exposures, hdf5.vstr) elif 'assetcol' in self.datastore.parent: assetcol = self.datastore.parent['assetcol'] if oq.region: region = wkt.loads(oq.region) self.sitecol = haz_sitecol.within(region) if oq.shakemap_id or 'shakemap' in oq.inputs or oq.shakemap_uri: self.sitecol, self.assetcol = read_shakemap( self, haz_sitecol, assetcol) self.datastore['sitecol'] = self.sitecol self.datastore['assetcol'] = self.assetcol elif hasattr(self, 'sitecol') and general.not_equal( self.sitecol.sids, haz_sitecol.sids): self.assetcol = assetcol.reduce(self.sitecol) self.datastore['assetcol'] = self.assetcol logging.info('Extracted %d/%d assets', len(self.assetcol), len(assetcol)) else: self.assetcol = assetcol if ('site_id' in oq.aggregate_by and 'site_id' not in assetcol.tagcol.tagnames): assetcol.tagcol.add_tagname('site_id') assetcol.tagcol.site_id.extend(range(self.N)) else: # no exposure if oq.hazard_calculation_id: # read the sitecol of the child self.sitecol = readinput.get_site_collection(oq) self.datastore['sitecol'] = self.sitecol else: self.sitecol = haz_sitecol if 
self.sitecol and oq.imtls: logging.info('Read N=%d hazard sites and L=%d hazard levels', len(self.sitecol), oq.imtls.size) if oq_hazard: parent = self.datastore.parent if 'assetcol' in parent: check_time_event(oq, parent['assetcol'].occupancy_periods) elif oq.job_type == 'risk' and 'exposure' not in oq.inputs: raise ValueError('Missing exposure both in hazard and risk!') if oq_hazard.time_event and oq_hazard.time_event != oq.time_event: raise ValueError( 'The risk configuration file has time_event=%s but the ' 'hazard was computed with time_event=%s' % ( oq.time_event, oq_hazard.time_event)) if oq.job_type == 'risk': taxs = python3compat.decode(self.assetcol.tagcol.taxonomy) tmap = readinput.taxonomy_mapping(self.oqparam, taxs) self.crmodel.tmap = tmap taxonomies = set() for ln in oq.loss_types: for items in self.crmodel.tmap[ln]: for taxo, weight in items: if taxo != '?': taxonomies.add(taxo) # check that we are covering all the taxonomy strings in the exposure missing = taxonomies - set(self.crmodel.taxonomies) if self.crmodel and missing: raise RuntimeError('The exposure contains the taxonomy strings %s ' 'which are not in the risk model' % missing) if len(self.crmodel.taxonomies) > len(taxonomies): logging.info('Reducing risk model from %d to %d taxonomy strings', len(self.crmodel.taxonomies), len(taxonomies)) self.crmodel = self.crmodel.reduce(taxonomies) self.crmodel.tmap = tmap if hasattr(self, 'sitecol') and self.sitecol: if 'site_model' in oq.inputs: assoc_dist = (oq.region_grid_spacing * 1.414 if oq.region_grid_spacing else 5) # Graeme's 5km sm = readinput.get_site_model(oq) self.sitecol.complete.assoc(sm, assoc_dist) self.datastore['sitecol'] = self.sitecol # store amplification functions if any if 'amplification' in oq.inputs: logging.info('Reading %s', oq.inputs['amplification']) df = AmplFunction.read_df(oq.inputs['amplification']) check_amplification(df, self.sitecol) if oq.amplification_method == 'kernel': # TODO: need to add additional checks on the main calculation # methodology since the kernel method is currently tested only # for classical PSHA self.af = AmplFunction.from_dframe(df) else: self.amplifier = Amplifier(oq.imtls, df, oq.soil_intensities) # manage secondary perils sec_perils = oq.get_sec_perils() for sp in sec_perils: sp.prepare(self.sitecol) # add columns as needed if sec_perils: self.datastore['sitecol'] = self.sitecol mal = {lt: getdefault(oq.minimum_asset_loss, lt) for lt in oq.loss_types} if mal: logging.info('minimum_asset_loss=%s', mal) oq._amplifier = self.amplifier oq._sec_perils = sec_perils # compute exposure stats if hasattr(self, 'assetcol'): save_agg_values( self.datastore, self.assetcol, oq.loss_types, oq.aggregate_by) def store_rlz_info(self, rel_ruptures): """ Save info about the composite source model inside the full_lt dataset :param rel_ruptures: dictionary TRT -> number of relevant ruptures """ if hasattr(self, 'full_lt'): # no scenario self.realizations = self.full_lt.get_realizations() if not self.realizations: raise RuntimeError('Empty logic tree: too much filtering?') self.datastore['full_lt'] = self.full_lt else: # scenario self.full_lt = self.datastore['full_lt'] R = self.R logging.info('There are %d realization(s)', R) self.datastore['weights'] = arr = build_weights(self.realizations) self.datastore.set_attrs('weights', nbytes=arr.nbytes) if rel_ruptures: self.check_discardable(rel_ruptures) def check_discardable(self, rel_ruptures): """ Check if logic tree reduction is possible """ n = len(self.full_lt.sm_rlzs) keep_trts = set() 
nrups = [] for grp_id, trt_smrs in enumerate(self.datastore['trt_smrs']): trti, smrs = numpy.divmod(trt_smrs, n) trt = self.full_lt.trts[trti[0]] nr = rel_ruptures.get(grp_id, 0) nrups.append(nr) if nr: keep_trts.add(trt) self.datastore['est_rups_by_grp'] = U32(nrups) discard_trts = set(self.full_lt.trts) - keep_trts if discard_trts: msg = ('No sources for some TRTs: you should set\n' 'discard_trts = %s\nin %s') % ( ', '.join(discard_trts), self.oqparam.inputs['job_ini']) logging.warning(msg) def store_source_info(self, source_data): """ Save (eff_ruptures, num_sites, calc_time) inside the source_info """ if 'source_info' not in self.datastore: source_reader.create_source_info( self.csm, source_data, self.datastore.hdf5) self.csm.update_source_info(source_data) recs = [tuple(row) for row in self.csm.source_info.values()] self.datastore['source_info'][:] = numpy.array( recs, source_reader.source_info_dt) if 'trt_smrs' not in self.datastore: self.datastore['trt_smrs'] = self.csm.get_trt_smrs() self.datastore['toms'] = numpy.array( [sg.tom_name for sg in self.csm.src_groups], hdf5.vstr) def post_process(self): """For compatibility with the engine""" class RiskCalculator(HazardCalculator): """ Base class for all risk calculators. A risk calculator must set the attributes .crmodel, .sitecol, .assetcol, .riskinputs in the pre_execute phase. """ def build_riskinputs(self): """ :returns: a list of RiskInputs objects, sorted by IMT. """ logging.info('Building risk inputs from %d realization(s)', self.R) imtset = set(self.oqparam.imtls) | set(self.oqparam.get_sec_imts()) if not set(self.oqparam.risk_imtls) & imtset: rsk = ', '.join(self.oqparam.risk_imtls) haz = ', '.join(imtset) raise ValueError('The IMTs in the risk models (%s) are disjoint ' "from the IMTs in the hazard (%s)" % (rsk, haz)) if not hasattr(self.crmodel, 'tmap'): self.crmodel.tmap = readinput.taxonomy_mapping( self.oqparam, self.assetcol.tagcol.taxonomy) with self.monitor('building riskinputs'): if self.oqparam.hazard_calculation_id: dstore = self.datastore.parent else: dstore = self.datastore riskinputs = self._gen_riskinputs(dstore) assert riskinputs logging.info('Built %d risk inputs', len(riskinputs)) self.acc = None return riskinputs # used only for classical_risk and classical_damage def _gen_riskinputs(self, dstore): out = [] asset_df = self.assetcol.to_dframe('site_id') slices = performance.get_slices(dstore['_poes/sid'][:]) for sid, assets in asset_df.groupby(asset_df.index): # hcurves, shape (R, N) ws = [rlz.weight for rlz in self.realizations] getter = getters.PmapGetter( dstore, ws, slices.get(sid, []), self.oqparam.imtls) for slc in general.split_in_slices( len(assets), self.oqparam.assets_per_site_limit): out.append(riskinput.RiskInput(getter, assets[slc])) if slc.stop - slc.start >= TWO16: logging.error('There are %d assets on site #%d!', slc.stop - slc.start, sid) return out def execute(self): """ Parallelize on the riskinputs and returns a dictionary of results. Require a `.core_task` to be defined with signature (riskinputs, crmodel, param, monitor). 
""" if not hasattr(self, 'riskinputs'): # in the reportwriter return ct = self.oqparam.concurrent_tasks or 1 maxw = sum(ri.weight for ri in self.riskinputs) / ct self.datastore.swmr_on() smap = parallel.Starmap( self.core_task.__func__, h5=self.datastore.hdf5) smap.monitor.save('crmodel', self.crmodel) for block in general.block_splitter( self.riskinputs, maxw, get_weight, sort=True): smap.submit((block, self.oqparam)) return smap.reduce(self.combine, self.acc) def combine(self, acc, res): """ Combine the outputs assuming acc and res are dictionaries """ if res is None: raise MemoryError('You ran out of memory!') return acc + res def import_gmfs_csv(dstore, oqparam, sids): """ Import in the datastore a ground motion field CSV file. :param dstore: the datastore :param oqparam: an OqParam instance :param sids: the complete site IDs :returns: event_ids """ fname = oqparam.inputs['gmfs'] array = hdf5.read_csv(fname, {'sid': U32, 'eid': U32, None: F32}, renamedict=dict(site_id='sid', event_id='eid', rlz_id='rlzi')).array names = array.dtype.names # rlz_id, sid, ... if names[0] == 'rlzi': # backward compatibility names = names[1:] # discard the field rlzi imts = [name.lstrip('gmv_') for name in names[2:]] oqparam.hazard_imtls = {imt: [0] for imt in imts} missing = set(oqparam.imtls) - set(imts) if missing: raise ValueError('The calculation needs %s which is missing from %s' % (', '.join(missing), fname)) imt2idx = {imt: i for i, imt in enumerate(oqparam.imtls)} arr = numpy.zeros(len(array), oqparam.gmf_data_dt()) for name in names: if name.startswith('gmv_'): try: m = imt2idx[name[4:]] except KeyError: # the file contains more than enough IMTs pass else: arr[f'gmv_{m}'][:] = array[name] else: arr[name] = array[name] n = len(numpy.unique(array[['sid', 'eid']])) if n != len(array): raise ValueError('Duplicated site_id, event_id in %s' % fname) # store the events eids = numpy.unique(array['eid']) eids.sort() if eids[0] != 0: raise ValueError('The event_id must start from zero in %s' % fname) E = len(eids) events = numpy.zeros(E, rupture.events_dt) events['id'] = eids logging.info('Storing %d events, all relevant', E) dstore['events'] = events # store the GMFs dic = general.group_array(arr, 'sid') offset = 0 gmvlst = [] for sid in sids: n = len(dic.get(sid, [])) if n: offset += n gmvs = dic[sid] gmvlst.append(gmvs) data = numpy.concatenate(gmvlst) data.sort(order='eid') create_gmf_data(dstore, oqparam.get_primary_imtls(), oqparam.get_sec_imts(), data=data) dstore['weights'] = numpy.ones(1) return eids def _getset_attrs(oq): # read effective_time, num_events and imts from oq.inputs['gmfs'] # if the format of the file is old (v3.11) also sets the attributes # investigation_time and ses_per_logic_tree_path on `oq` with hdf5.File(oq.inputs['gmfs'], 'r') as f: attrs = f['gmf_data'].attrs etime = attrs.get('effective_time') num_events = attrs.get('num_events') if etime is None: # engine == 3.11 R = len(f['weights']) num_events = len(f['events']) arr = f.getitem('oqparam') it = arr['par_name'] == b'investigation_time' it = float(arr[it]['par_value'][0]) oq.investigation_time = it ses = arr['par_name'] == b'ses_per_logic_tree_path' ses = int(arr[ses]['par_value'][0]) oq.ses_per_logic_tree_path = ses etime = it * ses * R imts = [] for name in arr['par_name']: if name.startswith(b'hazard_imtls.'): imts.append(name[13:].decode('utf8')) else: # engine >= 3.12 imts = attrs['imts'].split() return dict(effective_time=etime, num_events=num_events, imts=imts) def import_gmfs_hdf5(dstore, oqparam): """ Import in the 
datastore a ground motion field HDF5 file. :param dstore: the datastore :param oqparam: an OqParam instance :returns: event_ids """ dstore['gmf_data'] = h5py.ExternalLink(oqparam.inputs['gmfs'], "gmf_data") attrs = _getset_attrs(oqparam) oqparam.hazard_imtls = {imt: [0] for imt in attrs['imts']} # store the events E = attrs['num_events'] events = numpy.zeros(E, rupture.events_dt) events['id'] = numpy.arange(E) rel = numpy.unique(dstore['gmf_data/eid']) logging.info('Storing %d events, %d relevant', E, len(rel)) dstore['events'] = events dstore['weights'] = numpy.ones(1) return events['id'] def create_gmf_data(dstore, prim_imts, sec_imts=(), data=None): """ Create and possibly populate the datasets in the gmf_data group """ oq = dstore['oqparam'] R = dstore['full_lt'].get_num_rlzs() M = len(prim_imts) n = 0 if data is None else len(data['sid']) items = [('sid', U32 if n == 0 else data['sid']), ('eid', U32 if n == 0 else data['eid'])] for m in range(M): col = f'gmv_{m}' items.append((col, F32 if data is None else data[col])) for imt in sec_imts: items.append((str(imt), F32 if n == 0 else data[imt])) if oq.investigation_time: eff_time = oq.investigation_time * oq.ses_per_logic_tree_path * R else: eff_time = 0 dstore.create_df('gmf_data', items, 'gzip') dstore.set_attrs('gmf_data', num_events=len(dstore['events']), imts=' '.join(map(str, prim_imts)), effective_time=eff_time) if data is not None: df = pandas.DataFrame(dict(items)) avg_gmf = numpy.zeros((2, n, M + len(sec_imts)), F32) for sid, df in df.groupby(df.sid): df.pop('eid') df.pop('sid') avg_gmf[:, sid] = stats.avg_std(df.to_numpy()) dstore['avg_gmf'] = avg_gmf def save_agg_values(dstore, assetcol, lossnames, aggby): """ Store agg_keys, agg_values. :returns: the aggkey dictionary key -> tags """ lst = [] aggkey = assetcol.tagcol.get_aggkey(aggby) if aggby: logging.info('Storing %d aggregation keys', len(aggkey)) dt = [(name + '_', U16) for name in aggby] + [ (name, hdf5.vstr) for name in aggby] kvs = [] for key, val in aggkey.items(): val = tuple(python3compat.decode(val)) kvs.append(key + val) lst.append(' '.join(val)) dstore['agg_keys'] = numpy.array(kvs, dt) if aggby == ['id']: kids = assetcol['ordinal'] elif aggby == ['site_id']: kids = assetcol['site_id'] else: key2i = {key: i for i, key in enumerate(aggkey)} kids = [key2i[tuple(t)] for t in assetcol[aggby]] if 'assetcol' not in set(dstore): dstore['assetcol'] = assetcol grp = dstore.getitem('assetcol') if 'kids' not in grp: grp['kids'] = U16(kids) lst.append('*total*') if assetcol.get_value_fields(): dstore['agg_values'] = assetcol.get_agg_values(aggby) dstore.set_shape_descr('agg_values', aggregation=lst) return aggkey if aggby else {} def read_shakemap(calc, haz_sitecol, assetcol): """ Enabled only if there is a shakemap_id parameter in the job.ini. Download, unzip, parse USGS shakemap files and build a corresponding set of GMFs which are then filtered with the hazard site collection and stored in the datastore. 
""" oq = calc.oqparam E = oq.number_of_ground_motion_fields imtls = oq.imtls or calc.datastore.parent['oqparam'].imtls oq.risk_imtls = {imt: list(imls) for imt, imls in imtls.items()} logging.info('Getting/reducing shakemap') with calc.monitor('getting/reducing shakemap'): # for instance for the test case_shakemap the haz_sitecol # has sids in range(0, 26) while sitecol.sids is # [8, 9, 10, 11, 13, 15, 16, 17, 18]; # the total assetcol has 26 assets on the total sites # and the reduced assetcol has 9 assets on the reduced sites if oq.shakemap_id: uridict = {'kind': 'usgs_id', 'id': oq.shakemap_id} elif 'shakemap' in oq.inputs: uridict = {'kind': 'file_npy', 'fname': oq.inputs['shakemap']} else: uridict = oq.shakemap_uri sitecol, shakemap, discarded = get_sitecol_shakemap( uridict.pop('kind'), uridict, oq.imtls, haz_sitecol, oq.asset_hazard_distance['default']) if len(discarded): calc.datastore['discarded'] = discarded assetcol.reduce_also(sitecol) logging.info('Extracted %d assets', len(assetcol)) # assemble dictionary to decide on the calculation method for the gmfs if 'MMI' in oq.imtls: # calculations with MMI should be executed if len(oq.imtls) == 1: # only MMI intensities if oq.spatial_correlation != 'no' or oq.cross_correlation != 'no': logging.warning('Calculations with MMI intensities do not ' 'support correlation. No correlations ' 'are applied.') gmf_dict = {'kind': 'mmi'} else: # there are also other intensities than MMI raise RuntimeError( 'There are the following intensities in your model: %s ' 'Models mixing MMI and other intensities are not supported. ' % ', '.join(oq.imtls.keys())) else: # no MMI intensities, calculation with or without correlation if oq.spatial_correlation != 'no' or oq.cross_correlation != 'no': # cross correlation and/or spatial correlation after S&H gmf_dict = {'kind': 'Silva&Horspool', 'spatialcorr': oq.spatial_correlation, 'crosscorr': oq.cross_correlation, 'cholesky_limit': oq.cholesky_limit} else: # no correlation required, basic calculation is faster gmf_dict = {'kind': 'basic'} logging.info('Building GMFs') with calc.monitor('building/saving GMFs'): imts, gmfs = to_gmfs(shakemap, gmf_dict, oq.site_effects, oq.truncation_level, E, oq.random_seed, oq.imtls) N, E, M = gmfs.shape events = numpy.zeros(E, rupture.events_dt) events['id'] = numpy.arange(E, dtype=U32) calc.datastore['events'] = events # convert into an array of dtype gmv_data_dt lst = [(sitecol.sids[s], ei) + tuple(gmfs[s, ei]) for ei, event in enumerate(events) for s in numpy.arange(N, dtype=U32)] oq.hazard_imtls = {str(imt): [0] for imt in imts} data = numpy.array(lst, oq.gmf_data_dt()) create_gmf_data(calc.datastore, imts, data=data) return sitecol, assetcol def create_risk_by_event(calc): """ Created an empty risk_by_event with keys event_id, agg_id, loss_id and fields for damages, losses and consequences """ oq = calc.oqparam dstore = calc.datastore aggkey = getattr(calc, 'aggkey', {}) # empty if not aggregate_by crmodel = calc.crmodel if 'risk' in oq.calculation_mode: fields = [('loss', F32)] if calc.policy_dict: fields.append(('ins_loss', F32)) descr = [('event_id', U32), ('agg_id', U32), ('loss_id', U8), ('variance', F32)] + fields dstore.create_df('risk_by_event', descr, K=len(aggkey), L=len(oq.loss_types)) else: # damage + consequences dmgs = ' '.join(crmodel.damage_states[1:]) descr = ([('event_id', U32), ('agg_id', U32), ('loss_id', U8)] + [(dc, F32) for dc in crmodel.get_dmg_csq()]) dstore.create_df('risk_by_event', descr, K=len(aggkey), L=len(oq.loss_types), limit_states=dmgs)
gem/oq-engine
openquake/calculators/base.py
Python
agpl-3.0
54,542
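A self-contained sketch of the duplicate (sid, eid) check performed in import_gmfs_csv above; the toy array and its values are hypothetical, and only numpy is required:

import numpy

# Structured array mimicking the (sid, eid) columns read from a GMF CSV;
# the last record repeats (0, 1) on purpose.
arr = numpy.array([(0, 0), (0, 1), (1, 0), (0, 1)],
                  dtype=[('sid', numpy.uint32), ('eid', numpy.uint32)])

# numpy.unique deduplicates whole records, so a result shorter than the
# input means the file contained duplicated (site_id, event_id) pairs.
if len(numpy.unique(arr[['sid', 'eid']])) != len(arr):
    raise ValueError('Duplicated site_id, event_id')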
"""Config flow for the Abode Security System component.""" from __future__ import annotations from http import HTTPStatus from typing import Any, cast from abodepy import Abode from abodepy.exceptions import AbodeAuthenticationException, AbodeException from abodepy.helpers.errors import MFA_CODE_REQUIRED from requests.exceptions import ConnectTimeout, HTTPError import voluptuous as vol from homeassistant import config_entries from homeassistant.const import CONF_PASSWORD, CONF_USERNAME from homeassistant.data_entry_flow import FlowResult from .const import CONF_POLLING, DEFAULT_CACHEDB, DOMAIN, LOGGER CONF_MFA = "mfa_code" class AbodeFlowHandler(config_entries.ConfigFlow, domain=DOMAIN): """Config flow for Abode.""" VERSION = 1 def __init__(self) -> None: """Initialize.""" self.data_schema = { vol.Required(CONF_USERNAME): str, vol.Required(CONF_PASSWORD): str, } self.mfa_data_schema = { vol.Required(CONF_MFA): str, } self._cache: str | None = None self._mfa_code: str | None = None self._password: str | None = None self._polling: bool = False self._username: str | None = None async def _async_abode_login(self, step_id: str) -> FlowResult: """Handle login with Abode.""" self._cache = self.hass.config.path(DEFAULT_CACHEDB) errors = {} try: await self.hass.async_add_executor_job( Abode, self._username, self._password, True, False, False, self._cache ) except AbodeException as ex: if ex.errcode == MFA_CODE_REQUIRED[0]: return await self.async_step_mfa() LOGGER.error("Unable to connect to Abode: %s", ex) if ex.errcode == HTTPStatus.BAD_REQUEST: errors = {"base": "invalid_auth"} else: errors = {"base": "cannot_connect"} except (ConnectTimeout, HTTPError): errors = {"base": "cannot_connect"} if errors: return self.async_show_form( step_id=step_id, data_schema=vol.Schema(self.data_schema), errors=errors ) return await self._async_create_entry() async def _async_abode_mfa_login(self) -> FlowResult: """Handle multi-factor authentication (MFA) login with Abode.""" try: # Create instance to access login method for passing MFA code abode = Abode( auto_login=False, get_devices=False, get_automations=False, cache_path=self._cache, ) await self.hass.async_add_executor_job( abode.login, self._username, self._password, self._mfa_code ) except AbodeAuthenticationException: return self.async_show_form( step_id="mfa", data_schema=vol.Schema(self.mfa_data_schema), errors={"base": "invalid_mfa_code"}, ) return await self._async_create_entry() async def _async_create_entry(self) -> FlowResult: """Create the config entry.""" config_data = { CONF_USERNAME: self._username, CONF_PASSWORD: self._password, CONF_POLLING: self._polling, } existing_entry = await self.async_set_unique_id(self._username) if existing_entry: self.hass.config_entries.async_update_entry( existing_entry, data=config_data ) # Reload the Abode config entry otherwise devices will remain unavailable self.hass.async_create_task( self.hass.config_entries.async_reload(existing_entry.entry_id) ) return self.async_abort(reason="reauth_successful") return self.async_create_entry( title=cast(str, self._username), data=config_data ) async def async_step_user( self, user_input: dict[str, Any] | None = None ) -> FlowResult: """Handle a flow initialized by the user.""" if self._async_current_entries(): return self.async_abort(reason="single_instance_allowed") if user_input is None: return self.async_show_form( step_id="user", data_schema=vol.Schema(self.data_schema) ) self._username = user_input[CONF_USERNAME] self._password = user_input[CONF_PASSWORD] return 
await self._async_abode_login(step_id="user") async def async_step_mfa( self, user_input: dict[str, Any] | None = None ) -> FlowResult: """Handle a multi-factor authentication (MFA) flow.""" if user_input is None: return self.async_show_form( step_id="mfa", data_schema=vol.Schema(self.mfa_data_schema) ) self._mfa_code = user_input[CONF_MFA] return await self._async_abode_mfa_login() async def async_step_reauth(self, config: dict[str, Any]) -> FlowResult: """Handle reauthorization request from Abode.""" self._username = config[CONF_USERNAME] return await self.async_step_reauth_confirm() async def async_step_reauth_confirm( self, user_input: dict[str, Any] | None = None ) -> FlowResult: """Handle reauthorization flow.""" if user_input is None: return self.async_show_form( step_id="reauth_confirm", data_schema=vol.Schema( { vol.Required(CONF_USERNAME, default=self._username): str, vol.Required(CONF_PASSWORD): str, } ), ) self._username = user_input[CONF_USERNAME] self._password = user_input[CONF_PASSWORD] return await self._async_abode_login(step_id="reauth_confirm")
rohitranjan1991/home-assistant
homeassistant/components/abode/config_flow.py
Python
mit
5,964
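A quick illustration of how the voluptuous schemas declared in __init__ above behave when handed user input; the sample credentials are made up:

import voluptuous as vol

schema = vol.Schema({
    vol.Required("username"): str,
    vol.Required("password"): str,
})

# Valid input passes through unchanged.
assert schema({"username": "user@example.com", "password": "hunter2"})

# A missing required key raises vol.MultipleInvalid, which the config
# flow surfaces to the user as a form error.
try:
    schema({"username": "user@example.com"})
except vol.MultipleInvalid as err:
    print(err)  # required key not provided @ data['password']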
import random
import sys

n = int(sys.argv[1])

# Collect n random non-zero integers in [-10000, 10000].
tst = []
for ix in range(n):
    x = 0
    while x == 0:
        x = random.randrange(-10000, 10001)
    tst.append(x)

# Header line (n repeated twice), then the numbers on one line.
print n, n
print ' '.join(map(str, tst))
pbl64k/HackerRank-Contests
2014-10-10-FP/OrderExercises/gen_test.py
Python
bsd-2-clause
211
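A hypothetical invocation of the generator above (the integers are random; the values shown are only for shape):

# $ python gen_test.py 5
# 5 5
# -3 142 -9981 7 42

The first output line repeats n twice; the second holds n non-zero integers drawn from [-10000, 10000].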
from distutils.core import setup

import py2exe  # imported for its side effect: registers the py2exe command

setup(console=['daemon.py'])
IngenuityEngine/daemon
setup.py
Python
mit
77
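Usage sketch: py2exe hooks into distutils simply by being imported, so building the frozen executable is a single command (output lands in py2exe's default dist/ directory):

# $ python setup.py py2exe

The console=['daemon.py'] entry produces a console-mode dist/daemon.exe rather than a windowed one.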
from PyQt5.Qt import QObject

from BookManipulation.FolderKeeper import FolderKeeper

FIRST_CSS_NAME = "Style0001.css"
FIRST_SVG_NAME = "Image0001.svg"

PLACEHOLDER_TEXT = "PLACEHOLDER"

EMPTY_HTML_FILE = "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n" \
                  "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.1//EN\"\n" \
                  " \"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd\">\n\n" \
                  "<html xmlns=\"http://www.w3.org/1999/xhtml\">\n" \
                  "<head>\n" \
                  "<title></title>\n" \
                  "</head>\n" \
                  "<body>\n" \
                  "<p>&#160;</p>\n" \
                  "</body>\n" \
                  "</html>"

SGC_TOC_CSS_FILE = "div.sgc-toc-title {\n" \
                   "    font-size: 2em;\n" \
                   "    font-weight: bold;\n" \
                   "    margin-bottom: 1em;\n" \
                   "    text-align: center;\n" \
                   "}\n\n" \
                   "div.sgc-toc-level-1 {\n" \
                   "    margin-left: 0em;\n" \
                   "}\n\n" \
                   "div.sgc-toc-level-2 {\n" \
                   "    margin-left: 2em;\n" \
                   "}\n\n" \
                   "div.sgc-toc-level-3 {\n" \
                   "    margin-left: 2em;\n" \
                   "}\n\n" \
                   "div.sgc-toc-level-4 {\n" \
                   "    margin-left: 2em;\n" \
                   "}\n\n" \
                   "div.sgc-toc-level-5 {\n" \
                   "    margin-left: 2em;\n" \
                   "}\n\n" \
                   "div.sgc-toc-level-6 {\n" \
                   "    margin-left: 2em;\n" \
                   "}\n"

SGC_INDEX_CSS_FILE = "div.sgc-index-title {\n" \
                     "    font-size: 2em;\n" \
                     "    font-weight: bold;\n" \
                     "    margin-bottom: 1em;\n" \
                     "    text-align: center;\n" \
                     "}\n\n" \
                     "div.sgc-index-body {\n" \
                     "    margin-left: -2em;\n" \
                     "}\n\n" \
                     "div.sgc-index-entry {\n" \
                     "    margin-top: 0em;\n" \
                     "    margin-bottom: 0.5em;\n" \
                     "    margin-left: 3.5em;\n" \
                     "    text-indent: -1.5em;\n" \
                     "}\n\n" \
                     "div.sgc-index-new-letter {\n" \
                     "    margin-top: 1.5em;\n" \
                     "    margin-left: 1.3em;\n" \
                     "    margin-bottom: 0.5em;\n" \
                     "    font-size: 1.5em;\n" \
                     "    font-weight: bold;\n" \
                     "    border-bottom: solid black 4px;\n" \
                     "    width: 50%;\n" \
                     "}\n"

HTML_COVER_SOURCE = "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\" ?>\n" \
                    "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.1//EN\"\n" \
                    "\"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd\">\n" \
                    "<html xmlns=\"http://www.w3.org/1999/xhtml\">\n" \
                    "<head>\n" \
                    "    <title>Cover</title>\n" \
                    "</head>\n" \
                    "<body>\n" \
                    "    <div style=\"text-align: center; padding: 0pt; margin: 0pt;\">\n" \
                    "        <svg xmlns=\"http://www.w3.org/2000/svg\" height=\"100%\" preserveAspectRatio=\"xMidYMid meet\" version=\"1.1\" viewBox=\"0 0 SGC_IMAGE_WIDTH SGC_IMAGE_HEIGHT\" width=\"100%\" xmlns:xlink=\"http://www.w3.org/1999/xlink\">\n" \
                    "            <image width=\"SGC_IMAGE_WIDTH\" height=\"SGC_IMAGE_HEIGHT\" xlink:href=\"SGC_IMAGE_FILENAME\"/>\n" \
                    "        </svg>\n" \
                    "    </div>\n" \
                    "</body>\n" \
                    "</html>\n"


class Book(QObject):
    '''
    Represents the book loaded in the current MainWindow instance of tolo.
    The book's resources are accessed through the FolderKeeper instance.
    '''

    def __init__(self, parent=None):
        QObject.__init__(self, parent)

        # The FolderKeeper object that represents
        # this book's presence on the hard drive.
        self.m_Mainfolder = FolderKeeper(self)

        # Stores the modified state of the book.
        self.m_IsModified = False
tcp813/tolo
BookManipulation/Book.py
Python
mit
3,311
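The SGC_IMAGE_* tokens inside HTML_COVER_SOURCE are plain-text placeholders; presumably they are filled in with str.replace before the cover page is written out. A minimal sketch of that substitution (the dimensions and filename are made up):

cover = (HTML_COVER_SOURCE
         .replace("SGC_IMAGE_WIDTH", "600")
         .replace("SGC_IMAGE_HEIGHT", "800")
         .replace("SGC_IMAGE_FILENAME", "../Images/cover.jpg"))

The replacement order does not matter here because the three token names never overlap.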
# -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------

import unittest

import numpy as np

from systemds.context import SystemDSContext


class TestSource_DefaultValues(unittest.TestCase):

    sds: SystemDSContext = None
    src_path: str = "./tests/source/source_with_default_values.dml"

    @classmethod
    def setUpClass(cls):
        cls.sds = SystemDSContext()

    @classmethod
    def tearDownClass(cls):
        cls.sds.close()

    def test_01(self):
        s = self.sds.source(self.src_path, "test")
        c = s.d()
        res = c.compute()
        self.assertEqual(4.2, res)

    def test_02(self):
        s = self.sds.source(self.src_path, "test")
        c = s.d(a=self.sds.scalar(5))
        res = c.compute()
        self.assertEqual(5, res)

    def test_03(self):
        s = self.sds.source(self.src_path, "test")
        c = s.d(a=5)
        res = c.compute()
        self.assertEqual(5, res)

    def test_04(self):
        s = self.sds.source(self.src_path, "test")
        c = s.d(c=False)
        res = c.compute()
        self.assertEqual(10, res)

    def test_05(self):
        s = self.sds.source(self.src_path, "test")
        c = s.d(b=1, c=False)
        res = c.compute()
        self.assertEqual(1, res)


if __name__ == "__main__":
    unittest.main(exit=False)
apache/incubator-systemml
src/main/python/tests/source/test_source_with_default_values.py
Python
apache-2.0
2,157
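The DML file under test (source_with_default_values.dml) is not part of this record, but the assertions pin down its contract: d returns a when c is true and b otherwise, with defaults a=4.2, b=10, c=TRUE. A Python sketch of that inferred behaviour (names and defaults inferred from the tests, not read from the DML source):

def d(a=4.2, b=10, c=True):
    # a when c holds, else b -- matches test_01 (4.2), test_02/03 (5),
    # test_04 (10) and test_05 (1)
    return a if c else b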
#!/usr/bin/env python2.6

import sys

import boto
from boto.s3.key import Key

if len(sys.argv) < 3:
    print "Not enough arguments"
    print "%s <bucket> <prefix>" % sys.argv[0]
    sys.exit(1)

s3 = boto.connect_s3()
bucket = s3.get_bucket(sys.argv[1])

# Delete every key under the prefix, except the prefix key itself.
for k in bucket.list(sys.argv[2]):
    if k.name != sys.argv[2]:
        bucket.delete_key(k.name)
markusd/gpgpu
aws/delete-s3-keys.py
Python
gpl-3.0
361
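The script above uses the legacy boto 2 API. A boto3 equivalent, sketched for the same arguments (bucket name, key prefix), would look like:

import sys

import boto3

bucket = boto3.resource("s3").Bucket(sys.argv[1])
prefix = sys.argv[2]

# Delete every object under the prefix except the prefix key itself.
for obj in bucket.objects.filter(Prefix=prefix):
    if obj.key != prefix:
        obj.delete()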
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2017-02-21 17:04
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('index', '0012_cartridgeitemname_manufacturer'),
    ]

    operations = [
        migrations.AlterField(
            model_name='cartridgeitemname',
            name='manufacturer',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='common.Manufacturer'),
        ),
    ]
sfcl/ancon
index/migrations/0013_auto_20170221_2204.py
Python
gpl-2.0
578