| code (string, 3 to 1.05M chars) | repo_name (string, 5 to 104 chars) | path (string, 4 to 251 chars) | language (1 class) | license (15 classes) | size (int64, 3 to 1.05M) |
|---|---|---|---|---|---|
# lock.py
# DNF Locking Subsystem.
#
# Copyright (C) 2013-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.exceptions import ProcessLockError, ThreadLockError, LockError
from dnf.i18n import _
from dnf.yum import misc
import dnf.logging
import dnf.util
import errno
import hashlib
import logging
import os
import threading
import time
logger = logging.getLogger("dnf")
def _fit_lock_dir(dir_):
if not dnf.util.am_i_root():
# for regular users the best we currently do is not to clash with
# another DNF process of the same user. Since dir_ is quite definitely
# not writable for us, yet significant, use its hash:
hexdir = hashlib.md5(dir_.encode('utf-8')).hexdigest()
dir_ = os.path.join(misc.getCacheDir(), 'locks', hexdir)
return dir_
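# Illustrative note (not part of the original module): for a non-root user,
# _fit_lock_dir() redirects the lock directory into the user's cache dir,
# keyed by the md5 hex digest of the original path; for root it is unchanged.
# >>> _fit_lock_dir('/var/cache/dnf')  # doctest: +SKIP
# '<user cache dir>/locks/<md5 hex of "/var/cache/dnf">'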
def build_download_lock(cachedir, exit_on_lock):
return ProcessLock(os.path.join(_fit_lock_dir(cachedir), 'download_lock.pid'),
'cachedir', not exit_on_lock)
def build_metadata_lock(cachedir, exit_on_lock):
return ProcessLock(os.path.join(_fit_lock_dir(cachedir), 'metadata_lock.pid'),
'metadata', not exit_on_lock)
def build_rpmdb_lock(persistdir, exit_on_lock):
return ProcessLock(os.path.join(_fit_lock_dir(persistdir), 'rpmdb_lock.pid'),
'RPMDB', not exit_on_lock)
class ProcessLock(object):
def __init__(self, target, description, blocking=False):
self.blocking = blocking
self.count = 0
self.description = description
self.target = target
self.thread_lock = threading.RLock()
def _lock_thread(self):
if not self.thread_lock.acquire(blocking=False):
msg = '%s already locked by a different thread' % self.description
raise ThreadLockError(msg)
self.count += 1
def _try_lock(self):
pid = str(os.getpid()).encode('utf-8')
try:
fd = os.open(self.target, os.O_CREAT | os.O_WRONLY | os.O_EXCL, 0o644)
os.write(fd, pid)
os.close(fd)
return True
except OSError as e:
if e.errno == errno.EEXIST: # File exists
return False
raise
def _try_read_lock(self):
try:
with open(self.target, 'r') as f:
return int(f.readline())
except IOError:
return -1
except ValueError:
time.sleep(2)
try:
with open(self.target, 'r') as f:
return int(f.readline())
except IOError:
return -1
except ValueError:
msg = _('Malformed lock file found: %s.\n'
'Ensure no other dnf process is running and '
'remove the lock file manually or run '
'systemd-tmpfiles --remove dnf.conf.') % (self.target)
raise LockError(msg)
def _try_unlink(self):
try:
os.unlink(self.target)
return True
except OSError:
return False
def _unlock_thread(self):
self.count -= 1
self.thread_lock.release()
def __enter__(self):
dnf.util.ensure_dir(os.path.dirname(self.target))
self._lock_thread()
inform = True
prev_pid = 0
while not self._try_lock():
pid = self._try_read_lock()
if pid == -1:
# already removed by other process
continue
if pid == os.getpid():
# already locked by this process
return
if not os.access('/proc/%d/stat' % pid, os.F_OK):
# locked by a dead process
self._try_unlink()
continue
if not self.blocking:
self._unlock_thread()
msg = '%s already locked by %d' % (self.description, pid)
raise ProcessLockError(msg, pid)
if inform or prev_pid != pid:
msg = _('Waiting for process with pid %d to finish.') % (pid)
logger.info(msg)
inform = False
prev_pid = pid
time.sleep(2)
def __exit__(self, *exc_args):
if self.count == 1:
os.unlink(self.target)
self._unlock_thread()
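# A minimal usage sketch (not part of the original module), assuming a writable
# cache directory path. ProcessLock is a context manager; with exit_on_lock=True
# it raises ProcessLockError instead of waiting when another process holds it:
# >>> lock = build_metadata_lock('/var/cache/dnf', exit_on_lock=True)  # doctest: +SKIP
# >>> with lock:
# ...     pass  # refresh metadata while holding the lock; released on exit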
| j-mracek/dnf | dnf/lock.py | Python | gpl-2.0 | 5,332 |
DEFAULT_CONFIG = {
# Protector server address
'host': 'localhost',
'port': 8888,
# Connection to the time series database API
'backend_host': 'localhost',
'backend_port': 8086,
'rules': [
'prevent_delete',
'prevent_drop',
'series_endswith_dot',
'short_series_name',
'too_many_datapoints',
'negative_groupby_statement'
],
# Queries for series names matching one of
# the following patterns are always executed
# without any checking
'whitelist': [],
# Run in foreground?
'foreground': False,
# Default PID file location
'pidfile': '/var/run/protector.pid',
'logfile': '/var/log/protector.log',
# Smallest possible system date.
# This is required for the calculation of the max duration between datetime objects.
# We use the release day of InfluxDB 0.8 as the epoch by default
# because it is the first official version supported.
# You can overwrite it with this parameter, though:
'epoch': None,
'configfile': None,
'c': None,
'verbose': 0,
'v': 0,
}
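# A minimal sketch (not part of the original module) of how such defaults are
# typically combined with user-supplied options; the `user_config` name is
# hypothetical:
# >>> user_config = {'port': 9999, 'foreground': True}
# >>> effective = dict(DEFAULT_CONFIG, **user_config)
# >>> effective['port'], effective['host']
# (9999, 'localhost')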
| trivago/Protector | protector/config/default_config.py | Python | bsd-3-clause | 1,108 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('here', '0018_post_postid'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='postid',
),
]
| chenjunqian/here | HereServer-deprecated/here/migrations/0019_remove_post_postid.py | Python | gpl-2.0 | 340 |
from __future__ import division
import numpy as np
# Non-monotonic Sobol G Function (8 parameters)
# First-order indices:
# x1: 0.7165
# x2: 0.1791
# x3: 0.0237
# x4: 0.0072
# x5-x8: 0.0001
def evaluate(values, a=None):
if type(values) != np.ndarray:
raise TypeError("The argument `values` must be a numpy ndarray")
if a is None:
a = [0, 1, 4.5, 9, 99, 99, 99, 99]
ltz = values < 0
gto = values > 1
if ltz.any():
raise ValueError("Sobol G function called with values less than zero")
elif gto.any():
raise ValueError("Sobol G function called with values greater than one")
Y = np.ones([values.shape[0]])
len_a = len(a)
for i, row in enumerate(values):
for j in range(len_a):
x = row[j]
a_j = a[j]
Y[i] *= (np.abs(4 * x - 2) + a_j) / (1 + a_j)
return Y
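# A worked example (not part of the original module) using the default
# coefficients a = [0, 1, 4.5, 9, 99, 99, 99, 99]: at x_j = 0.25 every factor
# (|4*x - 2| + a_j) / (1 + a_j) equals 1, while at x_j = 0.5 the first factor
# is (0 + 0) / (1 + 0) = 0, so the products are 1 and 0 respectively:
# >>> evaluate(np.array([[0.25] * 8, [0.5] * 8]))  # doctest: +SKIP
# array([1., 0.])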
def partial_first_order_variance(a=None):
if a is None:
a = [0, 1, 4.5, 9, 99, 99, 99, 99]
a = np.array(a)
return np.divide(1, np.multiply(3, np.square(1 + a)))
def total_variance(a=None):
if a is None:
a = [0, 1, 4.5, 9, 99, 99, 99, 99]
a = np.array(a)
return np.add(-1, np.product(1 + partial_first_order_variance(a), axis=0))
def sensitivity_index(a):
a = np.array(a)
return np.divide(partial_first_order_variance(a), total_variance(a))
def total_sensitivity_index(a):
a = np.array(a)
pv = partial_first_order_variance(a)
tv = total_variance(a)
sum_pv = pv.sum(axis=0)
return np.subtract(1, np.divide(np.subtract(sum_pv, pv.T), tv))
| willu47/SALib | src/SALib/test_functions/Sobol_G.py | Python | mit | 1,685 |
###################################################################################
## MODULE : spell.lib.adapter.constants.__init__
## DATE : Mar 18, 2011
## PROJECT : SPELL
## DESCRIPTION: Module initialization
## --------------------------------------------------------------------------------
##
## Copyright (C) 2008, 2015 SES ENGINEERING, Luxembourg S.A.R.L.
##
## This file is part of SPELL.
##
## This component is free software: you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation, either
## version 3 of the License, or (at your option) any later version.
##
## This software is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License and GNU General Public License (to which the GNU Lesser
## General Public License refers) along with this library.
## If not, see <http://www.gnu.org/licenses/>.
##
###################################################################################
| Spacecraft-Code/SPELL | src/spell/spell/lib/adapter/constants/__init__.py | Python | lgpl-3.0 | 1,267 |
# -*- coding: utf-8 -*-
# Author: Jan Paricka
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'h9w5qat*mdxuwjrn+z3=$s3c_f1_Jan_Paricka_mcz+@#u%^z24jdkdc5b!=$'
ADMINS = (
('Jan Paricka', 'jparicka@gmail.com'),
('Ondrej Kopal', 'koon@email.cz'),
)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['*']
TEMPLATE_DIRS = (
os.path.abspath(os.path.join(BASE_DIR, '..', 'templates')),
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
#CACHES = {
# 'default': {
# 'BACKEND': 'redis_cache.RedisCache',
# 'LOCATION': '/var/run/redis/redis.sock',
# },
#}
#SESSION_ENGINE = 'redis_sessions.session'
#SESSION_REDIS_UNIX_DOMAIN_SOCKET_PATH = '/var/run/redis/redis.sock'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
# global templatetags
'cloudly',
# views
'amazon',
'dashboard',
'userprofile',
'vms',
'admin',
)
MIDDLEWARE_CLASSES = (
#'django.middleware.cache.UpdateCacheMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
#'django.middleware.cache.FetchFromCacheMiddleware',
)
ROOT_URLCONF = 'cloudly.urls'
WSGI_APPLICATION = 'cloudly.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'cloudly',
'USER': 'root',
'PASSWORD': 'cloudly',
'HOST': '',
'PORT': '',
}
}
TEMPLATE_DIRS = (
os.path.abspath(os.path.join(BASE_DIR, 'templates')),
)
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Prague'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_ROOT = os.path.abspath(os.path.join(BASE_DIR, '..', 'static'))
STATIC_URL = '/static/'
LOGIN_URL = '/login/'
LOGOUT_URL = '/logout/'
STATICFILES_DIRS = (
os.path.abspath(os.path.join(BASE_DIR, 'static')),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
MEDIA_ROOT = 'media'
EMAIL_HOST = ''
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| followyourheart/cloudly | cloudly/settings.py | Python | mit | 3,381 |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
TopoDelProp
A QGIS plugin
TopoDelProp
-------------------
begin : 2011-12-19
copyright : (C) 2011 by J. Gaspar Mora Navarro
email : topodelprop@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
"""
Formulario guardar datos de las fincas, incluida la geometría.
Hereda de ctrIntrodDatos_NGeom.
@author: J. Gaspar Mora Navarro.
@organization: Universidad Politécnica de Valencia. Dep Ing Cart. Geod. y Fotogrametria
@contact: topodelprop@gmail.com
@version: 0.1
@summary: Formulario guardar datos de las fincas, incluida la geometría.
Hereda de ctrIntrodDatos_NGeom.
"""
import sys
from ctrIntrodDatos_N import ctrIntrodDatos_N
from ctrIntrodDatos_NGeom import ctrIntrodDatos_NGeom
from PyQt4 import QtGui
"""
sys.path.append("C:\eclipse\plugins\org.python.pydev.debug_2.3.0.2011121518\pysrc")
from pydevd import *
"""
class ctrIntrodDatos_NFinca(ctrIntrodDatos_NGeom):
"""
Añade fincas a la base de datos. Este controlador añade dos propiedades nuevas
a ctrIntrodDatos_NGeom:
- self.tipoFinca: Puede ser Rustica o Urbana
- self.dlgTipoFinca: cuadro de dialogo para la tabla ref_cat_rus o ref_cat_urb,
según sea self.tipoFinca.
Dependiendo de si el usuario introduce en el cuadro de dialogo de los datos de la finca,
en el campo tipo_finca_Cat, Rustica o Urbana, se crea el cuadro adecuado para la
introducción en una u otra tabla.
"""
#constructor
def __init__(self, oUtiles,tabla,listaSubDirDescargas,mostrarBttNuevo=False,dicValoresAdd=None, geomWkt=None,esMulti=False):
"""
La documentación de este constructor es la misma que la del constructor de la clase
ctrIntrodDatos_NGeom.
"""
ctrIntrodDatos_NGeom.__init__(self, oUtiles,tabla,listaSubDirDescargas,mostrarBttNuevo,dicValoresAdd, geomWkt,esMulti)
self.dlgTipoFinca=None
self.tipoFinca=None
def guarda(self):
"""
Guarda los datos de la finca y crea el cuadro de diálogo para introducir los datos
catastrales de rústica o urbana, y lo almacena en self.dlgTipoFinca.
Si el usuario cambia la finca de rústica a urbana, o viceversa, hay que borrar los datos
anticuados en la tabla ref_cat_rus o ref_cat_urb. Esto debe hacerse desde un disparador
en la base de datos.
"""
if self.getModo()=="nuevo":
self.borra_overlaps()#delete before saving, because the gaps are drawn
#at save time, if there is no overlap
ctrIntrodDatos_NGeom.guarda(self)
if self.estadoGuardado!="guardado":
self.dibuja_overlaps()#checks whether the problem was an overlap
#and draws it
self.tipoFinca=self.sacaTipoFinca()
self.set_dlgTipoFinca()
else:
resp=self.cargaDicValoresCompleto_delFormulario(True)
if resp==False:
return
tipo_finca=self.sacaTipoFinca()
if self.tipoFinca==tipo_finca:
#the parcel type has not changed
ctrIntrodDatos_NGeom.guarda(self)
else:
#the parcel type has changed
mens=unicode("Ha cambiado el tipo de finca catastral de " + self.tipoFinca + " a " + tipo_finca, "utf-8")
mens1=unicode(". Esto implica borrar los datos catastrales anteriores.","utf-8")
mens2=unicode(" ¿Seguro que desea continuar?","utf-8")
mens=mens + mens1 + mens2
reply = QtGui.QMessageBox.question(self, "Advertencia", mens, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
#the previous data and the previous dialog must be discarded.
#the previous data are removed by a trigger in the database:
#when the parcel type changes, the old data in the table for the
#previous parcel type are deleted
self.dlgTipoFinca.bttBorrar(darMens=False)
ctrIntrodDatos_NGeom.guarda(self)
self.tipoFinca=tipo_finca
self.set_dlgTipoFinca()
else:
self.ui.lbEstado.setText("Establezca el tipo de finca catastral a " + self.tipoFinca)
return
def sacaTipoFinca(self):
tipo_finca=self.dicValoresCompleto.get("tipo_finca_catastral")
if tipo_finca==unicode("Rústica","utf-8"):
return "Rustica"
elif tipo_finca==unicode("Urbana","utf-8"):
return "Urbana"
else:
return False
def set_tipoFinca(self):
self.tipoFinca=self.sacaTipoFinca()
def set_dlgTipoFinca(self, cargarDeBda=False):
"""
Comprueba que no se haya cambiado el tipo de Finca. En tal caso, se eliminan
los datos anteriores de la base de datos y se muestra un cuadro de dialogo nuevo.
para que añada el gid del Finca que describe
"""
gid=self.dicValoresCompleto.get("gid")
dicValoresAdd={}
dicValoresAdd["gid_finca"]=gid
if self.tipoFinca=="Rustica":
nomTabla=self.oUtiles.get_nomTabla("ref_cat_rus")
elif self.tipoFinca=="Urbana":
nomTabla=self.oUtiles.get_nomTabla("ref_cat_urb")
else:
self.dlgTipoFinca=None
return False
if cargarDeBda==False:
self.dlgTipoFinca=ctrIntrodDatos_N(self.oUtiles,nomTabla,self.listaSubDirDescargas,mostrarBttNuevo=True, dicValoresAdd=dicValoresAdd)
else:
dlg=ctrIntrodDatos_N(self.oUtiles,nomTabla,self.listaSubDirDescargas,mostrarBttNuevo=True,dicValoresAdd=dicValoresAdd)
resp=dlg.setModoConsultar(mostrarBttNuevo=False, dicValoresCompleto=None, dicCondiciones=dicValoresAdd)
self.dlgTipoFinca=dlg
return resp
def get_dlgTipoFinca(self):
return self.dlgTipoFinca
def get_tipoFinca(self):
return self.tipoFinca
def dibuja_overlaps(self):
"""
Dibuja la parte de solape de la finca que se intenta insertar
en la capa ed_overlaps_fincas o overlaps_fincas.
Este programa no envia la geometria en el modo editar, por lo que no es
necesario programar nada en ese modo.
Antes de insertar el solape en la capa gaps, borra lo que había antes para el id_trabajo
actual, por si se habia intentado insertar antes una geometria con solape, en cuyo caso
habria un poligono antiguo del error del anterior intento de insercion
"""
mens=self.ui.lbEstado.text()
if "Error de superposicion." in mens:
#script.comprueba_overlaps_wkt(geom_wkt varchar, epsg varchar,
#nom_tabla_comprobar varchar,nom_tabla_overlaps varchar,
#nom_campo varchar,valor_campo integer, VariaDic gid_excluir integer[]) RETURNS boolean AS $$
nomTabla=self.nomTabla.split(".")[1]
nomTablaComprobar="src" + str(self.oUtiles.src_trabajo) + "." + "fincas"
dicCondWhere={}
dicCondWhere["id_trabajo"]=self.oUtiles.id_trabajo
if nomTabla == "ed_fincas":
nomTablaOverlaps="ed_src" + str(self.oUtiles.src_trabajo) + "." + "ed_overlaps_fincas"
self.oUtiles.oConectaPg.cursor.callproc("script.comprueba_overlaps_wkt",[self.geomWkt,str(self.oUtiles.src_trabajo),nomTablaComprobar,nomTablaOverlaps,"id_trabajo",self.oUtiles.id_trabajo,-1])
else:
nomTablaOverlaps="src" + str(self.oUtiles.src_trabajo) + "." + "overlaps_fincas"
self.oUtiles.oConectaPg.cursor.callproc("script.comprueba_overlaps_wkt",[self.geomWkt,str(self.oUtiles.src_trabajo),nomTablaComprobar,nomTablaOverlaps,"id_trabajo",self.oUtiles.id_trabajo,-1])
self.oUtiles.oConectaPg.conn.commit()
def borra_overlaps(self):
"""
borra la parte de solape de la finca y los gaps, si los habia
para ese trabajo.
Los overlaps se dibujan desde self.dibuja_overlaps. Pero los gaps,
los dibuja directamente el disparador que hay sobre la tabla fincas
geom_fincas_def, o geom_fincas_ed
"""
nomTabla=self.nomTabla.split(".")[1]
dicCondWhere={}
dicCondWhere["id_trabajo"]=self.oUtiles.id_trabajo
if nomTabla == "ed_fincas":
nomTablaOverlaps="ed_src" + str(self.oUtiles.src_trabajo) + "." + "ed_overlaps_fincas"
nomTablaGaps="ed_src" + str(self.oUtiles.src_trabajo) + "." + "ed_gaps_fincas"
else:
nomTablaOverlaps="src" + str(self.oUtiles.src_trabajo) + "." + "overlaps_fincas"
nomTablaGaps="src" + str(self.oUtiles.src_trabajo) + "." + "gaps_fincas"
self.oUtiles.oConsultasPg.deleteDatos(nombreTabla=nomTablaOverlaps,dicCondWhere=dicCondWhere)
self.oUtiles.oConsultasPg.deleteDatos(nombreTabla=nomTablaGaps,dicCondWhere=dicCondWhere)
| gasparmoranavarro/TopoDelProp | ctr/ctrIntrodDatos_NFinca.py | Python | gpl-2.0 | 10,273 |
#!/usr/bin/env python
"""
dirac-my-great-script
This script prints out how great is it, shows raw queries and sets the
number of pings.
Usage:
dirac-my-great-script [option|cfgfile] <Arguments>
Arguments:
<service1> [<service2> ...]
"""
from DIRAC import S_OK, S_ERROR, gLogger, exit as DIRACExit
from DIRAC.Core.Base import Script
__RCSID__ = '$Id$'
cliParams = None
switchDict = None
class Params:
'''
Class holding the parameters raw and pingsToDo, and callbacks for their
respective switches.
'''
def __init__( self ):
self.raw = False
self.pingsToDo = 1
def setRawResult( self, value ):
self.raw = True
return S_OK()
def setNumOfPingsToDo( self, value ):
try:
self.pingsToDo = max( 1, int( value ) )
except ValueError:
return S_ERROR( "Number of pings to do has to be a number" )
return S_OK()
def registerSwitches():
'''
Registers all switches that can be used while calling the script from the
command line interface.
'''
#Some of the switches have associated a callback, defined on Params class.
cliParams = Params()
switches = [
( '', 'text=', 'Text to be printed' ),
( 'u', 'upper', 'Print text on upper case' ),
( 'r', 'showRaw', 'Show raw result from the query', cliParams.setRawResult ),
( 'p:', 'numPings=', 'Number of pings to do (by default 1)', cliParams.setNumOfPingsToDo )
]
# Register switches
for switch in switches:
Script.registerSwitch( *switch )
#Define a help message
Script.setUsageMessage( __doc__ )
def parseSwitches():
'''
Parse switches and positional arguments given to the script
'''
#Parse the command line and initialize DIRAC
Script.parseCommandLine( ignoreErrors = False )
#Get the list of services
servicesList = Script.getPositionalArgs()
gLogger.info( 'This is the servicesList %s:' % servicesList )
# Gets the rest of the unprocessed switches
switches = dict( Script.getUnprocessedSwitches() )
gLogger.debug( "The switches used are:" )
map( gLogger.debug, switches.iteritems() )
switches[ 'servicesList' ] = servicesList
return switches
def main():
'''
This is the script main method, which will hold all the logic.
'''
# let's do something
if not len( switchDict[ 'servicesList' ] ):
gLogger.error( 'No services defined' )
DIRACExit( 1 )
gLogger.notice( 'We are done' )
if __name__ == "__main__":
# Script initialization
registerSwitches()
switchDict = parseSwitches()
#Import the required DIRAC modules
from DIRAC.Interfaces.API.Dirac import Dirac
# Run the script
main()
# Bye
DIRACExit( 0 )
| DIRACGrid/DIRACDocs | source/DeveloperGuide/AddingNewComponents/DevelopingCommands/dirac-my-great-script.py | Python | gpl-3.0 | 2,711 |
""" generate_uniform_precip.py
This component generates rainfall
events based on statistical distributions.
No particular units are required, but it was
written with the storm units in hours (hr)
and depth units in millimeters (mm)
Written by Jordan Adams, 2013.
"""
import os
import numpy as np
import random
from landlab import Component,ModelParameterDictionary
from landlab.core.model_parameter_dictionary import MissingKeyError
_DEFAULT_INPUT_FILE = os.path.join(os.path.dirname(__file__),
'preciptest.in')
class PrecipitationDistribution(Component):
"""Landlab component that generates precipitation events
using the rectangular Poisson pulse model described in
Eagleson (1978).
This component can generate a random storm duration, interstorm
duration, precipitation intensity or storm depth from a Poisson
distribution when given a mean value.
Default input file is named 'preciptest.in' and can be found in
the landlab.components.uniform_precip folder.
Inputs
------
input_file : Contains necessary inputs. If not given, default input file is used.
- MEAN_STORM: (type : float) the mean storm duration if not provided in initialization
- MEAN_DEPTH: (type : float) the mean storm depth if not provided in initialization
- MEAN_INTERSTORM: (type : float) the mean interstorm duration if not provided in initialization
- RUN_TIME: (type : float) total model run time if not provided in initialization
- DELTA_T: (type : int) external time step increment if not provided in initialization
(it is not obligatory to provide a DELTA_T)
So, without an input file (selecting the default), we can call this component like...
>>> from landlab.components.uniform_precip.generate_uniform_precip import PrecipitationDistribution
>>> precip = PrecipitationDistribution()
To use hard-coded values for mean storm, mean interstorm, mean depth, model run time and delta t...
Say we use 1.5 for mean storm, 15 for mean interstorm, 0.5 for mean depth, 100 for model run time and 1 for delta t...
>>> precip = PrecipitationDistribution(input_file=None,
... mean_storm=1.5, mean_interstorm=15.0, mean_storm_depth=0.5,
... total_t=100.0, delta_t=1)
"""
def __init__(self, input_file=None, mean_storm=None, mean_interstorm=None, mean_storm_depth=None, total_t=None, delta_t=None):
""" This reads in information from the input_file (either default or user
assigned), and creates an instantaneous storm event drawn from the Poisson distribution
"""
# First we create an instance of the Model Parameter Dictionary
MPD = ModelParameterDictionary()
# If no input_file is given,the default file is used
if input_file is None:
input_file = _DEFAULT_INPUT_FILE
# This reads in the file information
MPD.read_from_file(input_file)
# And now we set our different parameters
# using the model parameter dictionary...
if mean_storm == None:
self.mean_storm = MPD.read_float( 'MEAN_STORM')
else:
self.mean_storm = mean_storm
if mean_interstorm == None:
self.mean_interstorm = MPD.read_float( 'MEAN_INTERSTORM')
else:
self.mean_interstorm =mean_interstorm
if mean_storm_depth== None:
self.mean_storm_depth = MPD.read_float( 'MEAN_DEPTH')
else:
self.mean_storm_depth =mean_storm_depth
if total_t== None:
self.run_time = MPD.read_float( 'RUN_TIME')
else:
self.run_time =total_t
if delta_t== None:
if input_file != _DEFAULT_INPUT_FILE:
try:
self.delta_t = MPD.read_float( 'DELTA_T')
except MissingKeyError:
self.delta_t = None
else:
self.delta_t = None
else:
self.delta_t =delta_t
# Mean_intensity is not set by the MPD, but can be drawn from
# the mean storm depth and mean storm duration.
self.mean_intensity = self.mean_storm_depth / self.mean_storm
# If a time series is created later, this blank list will be used.
self.storm_time_series =[]
# Given the mean values assigned above using either the model
# parameter dictionary or the init function, we can call the
# different methods to assign values from the Poisson distribution.
self.storm_duration = self.get_precipitation_event_duration()
self.interstorm_duration = self.get_interstorm_event_duration()
self.storm_depth = self.get_storm_depth()
self.intensity = self.get_storm_intensity()
self._elapsed_time = 0.
def update(self):
"""If new values for storm duration, interstorm duration, storm depth
and intensity are needed, this method can be used to update those values
one time.
>>> from landlab.components.uniform_precip.generate_uniform_precip import PrecipitationDistribution
>>> PD = PrecipitationDistribution()
>>> PD.update()
Additionally, if we wanted to update several times, a loop could be
utilized to accomplish this. Say we want 5 storm_durations; this
pseudo-code represents a way to accomplish this...
>>> PD = PrecipitationDistribution()
>>> storm_duration_list=[]
>>> i = 0
>>> while i < 4:
... storm_duration_list.append(PD.storm_duration)
... PD.update()
... i+=1
"""
self.storm_duration = self.get_precipitation_event_duration()
self.interstorm_duration = self.get_interstorm_event_duration()
self.storm_depth = self.get_storm_depth()
self.intensity = self.get_storm_intensity()
def get_precipitation_event_duration(self):
"""This method is the storm generator.
This method has one argument: the mean_storm parameter.
(In Eagleson (1978), this parameter was called Tr.)
It finds a random storm_duration value
based on the poisson distribution about the mean.
This is accomplished using the expovariate function
from the "random" standard library.
Additionally, it is rounded to two decimal places,
for neatness.
The while loop is important here. Values of 0
can exist in the Poisson distribution, but it does not make
sense to have 0 duration storms, so to avoid that,
we keep redrawing until a storm duration greater than 0
is obtained.
:returns: storm_duration as a float
"""
storm = round(random.expovariate(1/self.mean_storm),2)
while storm == 0:
storm = round(random.expovariate(1/self.mean_storm),2)
self.storm_duration = storm
return self.storm_duration
def get_interstorm_event_duration(self):
""" This method is the interstorm duration generator
This method takes one argument, the mean_interstorm parameter.
(In Eagleson (1978), this parameter was called Tb.)
This method is modeled identically to get_precipitation_event_duration()
This method finds a random value for interstorm_duration
based on the poisson distribution about the mean.
This is accomplished using the expovariate function
from the "random" standard library.
Additionally, it is rounded to two decimal places, for neatness.
The while loop is important here. Values of 0
can exist in the Poisson distribution, but it does not make
sense to have 0 hour interstorm durations.
To avoid 0 hour interstorm durations, we keep redrawing
until an interstorm duration greater than 0 is obtained.
:returns: interstorm_duration as a float"""
interstorm = round(random.expovariate(1/self.mean_interstorm),2)
while interstorm == 0:
interstorm = round(random.expovariate(1/self.mean_interstorm),2)
self.interstorm_duration = interstorm
return self.interstorm_duration
def get_storm_depth(self):
""" This method is the storm depth generator.
Storm depth is used to generate a realistic
intensity for different storm events.
(In Eagleson (1978) this parameter was called "h")
This method requires storm_duration, mean_storm duration
and the mean_storm_depth. Storm_duration is generated through
the initialize() or update() method. mean_storm and mean_storm_depth
are read in using the ModelParameterDictionary.
Numpy has a random number generator to get values
from a given Gamma distribution. It takes two arguments:
alpha (the shape parameter), computed here as the storm duration
divided by the mean storm duration, and beta (the scale parameter),
which is the mean storm depth. The resulting draw is returned as the storm depth.
:returns: storm_depth as a float
"""
shape_parameter = (self.storm_duration/self.mean_storm)
scale_parameter = (self.mean_storm_depth)
self.storm_depth = np.random.gamma(shape_parameter, scale_parameter)
return self.storm_depth
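# Worked example of the parameters above (not part of the original component):
# with storm_duration = 3.0 hr, mean_storm = 1.5 hr and mean_storm_depth = 0.5 mm,
# the call is np.random.gamma(shape=3.0 / 1.5, scale=0.5), i.e. a draw from a
# Gamma(shape=2.0, scale=0.5) distribution whose mean depth is 1.0 mm.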
def get_storm_intensity(self):
""" This method draws storm intensity out of the storm depth
generated by get_storm_depth.
This method requires the storm_depth and storm_duration
and is the same as the parameter ("i") in Eagleson (1978), but instead of
being drawn from Poisson, this is drawn from the Gamma distribution
of ("h"), as h = i*Tr.
:returns: storm_intensity as a float
"""
self.intensity = self.storm_depth / self.storm_duration
return self.intensity
def get_storm_time_series(self):
"""
This method creates a time series of storms based on storm_duration, and
interstorm_duration. From these values it will calculate a complete
time series.
The storm_time_series returned by this method is made up of sublists, each comprising three
sub-parts (e.g. [[x,y,z], [a,b,c]]) where x and a are the beginning times of a precipitation
event, y and b are the ending times of the precipitation event, and z and c represent the
average intensity (mm/hr) of the storm lasting from x to y and a to b, respectively.
:returns: array containing several sub-arrays of events [start, finish, intensity]
There is a helper and an iterator. Open to suggestions on how to make this
sleeker. helper keeps track of storm events so that we can properly adjust the time for the
storm_time_series list.
Storm iterator makes sure that the run goes through to the specified time, either read in as "run_time" by
the ModelParameterDictionary or specified the first time this method is called."""
storm = self.get_precipitation_event_duration()
self.get_storm_depth()
intensity = self.get_storm_intensity()
self.storm_time_series.append([0, storm, intensity])
storm_helper = storm
storm_iterator = storm
while storm_iterator <= self.run_time:
next_storm_start = storm_helper + round(self.get_interstorm_event_duration(),2)
next_storm_end = next_storm_start + round(self.get_precipitation_event_duration(),2)
intensity = round(self.get_storm_intensity(),2)
self.get_storm_depth()
self.storm_time_series.append([next_storm_start, next_storm_end, intensity])
storm_iterator = storm_helper
storm_helper = next_storm_end
storm_iterator = storm_helper
return self.storm_time_series
def yield_storm_interstorm_duration_intensity(self, subdivide_interstorms=False):
"""
This method is intended to be equivalent to get_storm_time_series,
but instead offers a generator functionality. This will be useful in
cases where the whole sequence of storms and interstorms doesn't need
to be stored, saving memory.
The method keeps track of the DELTA_T such that if a storm needs to be
generated longer than this supplied model timestep, the generator will
return the storm in "chunks", until there is no more storm duration.
e.g.,
storm of intensity 1. is 4.5 long, the DELTA_T is 2., the generator
yields (2.,1.) -> (2.,1.) -> (0.5,1.) -> ...
If DELTA_T is None or not supplied, no subdivision occurs.
Once a storm has been generated, this method will follow it with the
next interstorm, yielded as (interstorm_duration, 0.). Note that the
interstorm will NOT be subdivided according to DELTA_T unless you set
the flag *subdivide_interstorms* to True.
The method will keep yielding until it reaches the RUN_TIME, where it
will terminate.
YIELDS:
- a tuple, (interval_duration, rainfall_rate_in_interval)
One recommended procedure is to instantiate the generator, then call
instance.next() repeatedly to get the sequence.
Added DEJH, Dec 2014
"""
delta_t = self.delta_t
if delta_t == None:
assert subdivide_interstorms == False, 'You specified you wanted storm subdivision, but did not provide a DELTA_T to allow this!'
self._elapsed_time = 0.
while self._elapsed_time<self.run_time:
storm_duration = self.get_precipitation_event_duration()
step_time=0.
self.get_storm_depth()
intensity = self.get_storm_intensity() #this is a VELOCITY, i.e., a rainfall rate
if self._elapsed_time+storm_duration>self.run_time:
storm_duration = self.run_time-self._elapsed_time
while delta_t!=None and storm_duration-step_time>delta_t:
yield (delta_t, intensity)
step_time+=delta_t
yield (storm_duration-step_time, intensity)
self._elapsed_time += storm_duration
interstorm_duration = self.get_interstorm_event_duration()
if self._elapsed_time+interstorm_duration>self.run_time:
interstorm_duration = self.run_time-self._elapsed_time
if subdivide_interstorms:
step_time=0.
while interstorm_duration-step_time>delta_t:
yield (delta_t, 0.)
step_time += delta_t
yield (interstorm_duration-step_time, 0.)
else:
yield (interstorm_duration, 0.)
self._elapsed_time += interstorm_duration
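# A minimal usage sketch (not part of the original component), mirroring the
# constructor example in the class docstring; parameter values are illustrative:
# >>> precip = PrecipitationDistribution(mean_storm=1.5, mean_interstorm=15.0,
# ...     mean_storm_depth=0.5, total_t=100.0, delta_t=2)  # doctest: +SKIP
# >>> for duration, rate in precip.yield_storm_interstorm_duration_intensity():
# ...     pass  # each tuple is (interval_duration, rainfall rate); rate is 0. between storms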
@property
def elapsed_time(self):
"""
Return the elapsed time recorded by the module.
This will be particularly useful in the midst of a yield loop.
"""
return self._elapsed_time
| decvalts/landlab | landlab/components/uniform_precip/generate_uniform_precip.py | Python | mit | 16,070 |
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015, 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
import os.path
import fixtures
from snapcraft.main import main
from snapcraft import tests
class PrimeCommandTestCase(tests.TestCase):
yaml_template = """name: prime-test
version: 1.0
summary: test prime
description: if the prime is successful the state file will be updated
confinement: strict
parts:
{parts}"""
yaml_part = """ prime{:d}:
plugin: nil"""
def make_snapcraft_yaml(self, n=1):
parts = '\n'.join([self.yaml_part.format(i) for i in range(n)])
super().make_snapcraft_yaml(self.yaml_template.format(parts=parts))
parts = []
for i in range(n):
part_dir = os.path.join(self.parts_dir, 'prime{}'.format(i))
state_dir = os.path.join(part_dir, 'state')
parts.append({
'part_dir': part_dir,
'state_dir': state_dir,
})
return parts
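# Illustrative only (not part of the original tests): make_snapcraft_yaml(n=2)
# renders the template above into a snapcraft.yaml roughly equivalent to:
#   name: prime-test
#   version: 1.0
#   summary: test prime
#   description: if the prime is successful the state file will be updated
#   confinement: strict
#   parts:
#     prime0:
#       plugin: nil
#     prime1:
#       plugin: nil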
def test_prime_invalid_part(self):
fake_logger = fixtures.FakeLogger(level=logging.ERROR)
self.useFixture(fake_logger)
self.make_snapcraft_yaml()
with self.assertRaises(SystemExit) as raised:
main(['prime', 'no-prime', ])
self.assertEqual(1, raised.exception.code)
self.assertEqual(
fake_logger.output,
"The part named 'no-prime' is not defined in 'snapcraft.yaml'\n")
def test_prime_defaults(self):
fake_logger = fixtures.FakeLogger(level=logging.ERROR)
self.useFixture(fake_logger)
parts = self.make_snapcraft_yaml()
main(['prime'])
self.assertTrue(os.path.exists(self.snap_dir),
'Expected a prime directory')
self.assertTrue(
os.path.exists(
os.path.join(self.snap_dir, 'meta', 'snap.yaml')),
'Expected a snap.yaml')
self.assertTrue(os.path.exists(self.stage_dir),
'Expected a stage directory')
self.assertTrue(os.path.exists(self.parts_dir),
'Expected a parts directory')
self.assertTrue(os.path.exists(parts[0]['part_dir']),
'Expected a part directory for the prime0 part')
self.verify_state('build0', parts[0]['state_dir'], 'prime')
def test_prime_one_part_only_from_3(self):
fake_logger = fixtures.FakeLogger(level=logging.ERROR)
self.useFixture(fake_logger)
parts = self.make_snapcraft_yaml(n=3)
main(['prime', 'prime1'])
self.assertFalse(
os.path.exists(
os.path.join(self.snap_dir, 'meta', 'snap.yaml')),
'There should not be a snap.yaml')
self.assertTrue(os.path.exists(self.snap_dir),
'Expected a prime directory')
self.assertTrue(os.path.exists(self.stage_dir),
'Expected a stage directory')
self.assertTrue(os.path.exists(self.parts_dir),
'Expected a parts directory')
self.assertTrue(os.path.exists(parts[1]['part_dir']),
'Expected a part directory for the prime1 part')
self.verify_state('prime1', parts[1]['state_dir'], 'prime')
for i in [0, 2]:
self.assertFalse(os.path.exists(parts[i]['part_dir']),
'Pulled wrong part')
self.assertFalse(os.path.exists(parts[i]['state_dir']),
'Expected a state file only for prime1')
def test_prime_ran_twice_is_a_noop(self):
fake_logger = fixtures.FakeLogger(level=logging.INFO)
self.useFixture(fake_logger)
parts = self.make_snapcraft_yaml()
main(['prime'])
self.assertEqual(
'Preparing to pull prime0 \n'
'Pulling prime0 \n'
'Preparing to build prime0 \n'
'Building prime0 \n'
'Staging prime0 \n'
'Priming prime0 \n',
fake_logger.output)
self.assertTrue(os.path.exists(self.stage_dir),
'Expected a stage directory')
self.assertTrue(os.path.exists(self.parts_dir),
'Expected a parts directory')
self.assertTrue(os.path.exists(parts[0]['part_dir']),
'Expected a part directory for the prime0 part')
self.verify_state('build0', parts[0]['state_dir'], 'prime')
fake_logger = fixtures.FakeLogger(level=logging.INFO)
self.useFixture(fake_logger)
main(['prime'])
self.assertEqual(
'Skipping pull prime0 (already ran)\n'
'Skipping build prime0 (already ran)\n'
'Skipping stage prime0 (already ran)\n'
'Skipping prime prime0 (already ran)\n',
fake_logger.output)
| jocave/snapcraft | snapcraft/tests/test_commands_prime.py | Python | gpl-3.0 | 5,460 |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import os
import sys
from gpu_tests import common_browser_args as cba
from gpu_tests import gpu_integration_test
from gpu_tests import path_util
from gpu_tests import pixel_test_pages
from telemetry.timeline import model as model_module
from telemetry.timeline import tracing_config
gpu_relative_path = "content/test/data/gpu/"
data_paths = [
os.path.join(path_util.GetChromiumSrcDir(), gpu_relative_path),
os.path.join(path_util.GetChromiumSrcDir(), 'media', 'test', 'data')
]
webgl_test_harness_script = r"""
var domAutomationController = {};
domAutomationController._finished = false;
domAutomationController.send = function(msg) {
// Issue a read pixel to synchronize the gpu process to ensure
// the asynchronous category enabling is finished.
var temp_canvas = document.createElement("canvas")
temp_canvas.width = 1;
temp_canvas.height = 1;
var temp_gl = temp_canvas.getContext("experimental-webgl") ||
temp_canvas.getContext("webgl");
if (temp_gl) {
temp_gl.clear(temp_gl.COLOR_BUFFER_BIT);
var id = new Uint8Array(4);
temp_gl.readPixels(0, 0, 1, 1, temp_gl.RGBA, temp_gl.UNSIGNED_BYTE, id);
} else {
console.log('Failed to get WebGL context.');
}
domAutomationController._finished = true;
}
window.domAutomationController = domAutomationController;
"""
basic_test_harness_script = r"""
var domAutomationController = {};
domAutomationController._proceed = false;
domAutomationController._readyForActions = false;
domAutomationController._succeeded = false;
domAutomationController._finished = false;
domAutomationController.send = function(msg) {
domAutomationController._proceed = true;
let lmsg = msg.toLowerCase();
if (lmsg == "ready") {
domAutomationController._readyForActions = true;
} else {
domAutomationController._finished = true;
if (lmsg == "success") {
domAutomationController._succeeded = true;
} else {
domAutomationController._succeeded = false;
}
}
}
window.domAutomationController = domAutomationController;
"""
# Presentation mode enums match DXGI_FRAME_PRESENTATION_MODE
_SWAP_CHAIN_PRESENTATION_MODE_COMPOSED = 0
_SWAP_CHAIN_PRESENTATION_MODE_OVERLAY = 1
_SWAP_CHAIN_PRESENTATION_MODE_NONE = 2
_SWAP_CHAIN_PRESENTATION_MODE_COMPOSITION_FAILURE = 3
# The following is defined for Chromium testing internal use.
_SWAP_CHAIN_GET_FRAME_STATISTICS_MEDIA_FAILED = -1
_GET_STATISTICS_EVENT_NAME = 'GetFrameStatisticsMedia'
_SWAP_CHAIN_PRESENT_EVENT_NAME = 'SwapChain::Present'
_PRESENT_TO_SWAP_CHAIN_EVENT_NAME = 'SwapChainPresenter::PresentToSwapChain'
_PRESENT_MAIN_SWAP_CHAIN_EVENT_NAME =\
'DirectCompositionChildSurfaceWin::PresentSwapChain'
_SUPPORTED_WIN_AMD_GPUS_WITH_NV12_ROTATED_OVERLAYS = [0x7340]
class _TraceTestArguments(object):
"""Struct-like object for passing trace test arguments instead of dicts."""
def __init__( # pylint: disable=too-many-arguments
self, browser_args, category, test_harness_script, finish_js_condition,
success_eval_func, other_args):
self.browser_args = browser_args
self.category = category
self.test_harness_script = test_harness_script
self.finish_js_condition = finish_js_condition
self.success_eval_func = success_eval_func
self.other_args = other_args
class TraceIntegrationTest(gpu_integration_test.GpuIntegrationTest):
"""Tests GPU traces are plumbed through properly.
Also tests that GPU Device traces show up on devices that support them."""
@classmethod
def Name(cls):
return 'trace_test'
@classmethod
def GenerateGpuTests(cls, options):
# Include the device level trace tests, even though they're
# currently skipped on all platforms, to give a hint that they
# should perhaps be enabled in the future.
namespace = pixel_test_pages.PixelTestPages
for p in namespace.DefaultPages('TraceTest'):
yield (p.name, gpu_relative_path + p.url,
_TraceTestArguments(
browser_args=p.browser_args,
category=cls._DisabledByDefaultTraceCategory('gpu.service'),
test_harness_script=webgl_test_harness_script,
finish_js_condition='domAutomationController._finished',
success_eval_func='CheckGLCategory',
other_args=p.other_args))
for p in namespace.DirectCompositionPages('VideoPathTraceTest'):
yield (p.name, gpu_relative_path + p.url,
_TraceTestArguments(
browser_args=p.browser_args,
category=cls._DisabledByDefaultTraceCategory('gpu.service'),
test_harness_script=basic_test_harness_script,
finish_js_condition='domAutomationController._finished',
success_eval_func='CheckVideoPath',
other_args=p.other_args))
for p in namespace.LowLatencyPages('SwapChainTraceTest'):
yield (p.name, gpu_relative_path + p.url,
_TraceTestArguments(
browser_args=p.browser_args,
category='gpu',
test_harness_script=basic_test_harness_script,
finish_js_condition='domAutomationController._finished',
success_eval_func='CheckSwapChainPath',
other_args=p.other_args))
for p in namespace.DirectCompositionPages('OverlayModeTraceTest'):
yield (p.name, gpu_relative_path + p.url,
_TraceTestArguments(
browser_args=p.browser_args,
category=cls._DisabledByDefaultTraceCategory('gpu.service'),
test_harness_script=basic_test_harness_script,
finish_js_condition='domAutomationController._finished',
success_eval_func='CheckOverlayMode',
other_args=p.other_args))
for p in namespace.ForceFullDamagePages('SwapChainTraceTest'):
yield (p.name, gpu_relative_path + p.url,
_TraceTestArguments(
browser_args=p.browser_args,
category='gpu',
test_harness_script=basic_test_harness_script,
finish_js_condition='domAutomationController._finished',
success_eval_func='CheckMainSwapChainPath',
other_args=p.other_args))
def RunActualGpuTest(self, test_path, *args):
test_params = args[0]
# The version of this test in the old GPU test harness restarted
# the browser after each test, so continue to do that to match its
# behavior.
self.RestartBrowserWithArgs(test_params.browser_args)
# Set up tracing.
config = tracing_config.TracingConfig()
config.chrome_trace_config.category_filter.AddExcludedCategory('*')
config.chrome_trace_config.category_filter.AddFilter(test_params.category)
config.enable_chrome_trace = True
tab = self.tab
tab.browser.platform.tracing_controller.StartTracing(config, 60)
# Perform page navigation.
url = self.UrlOfStaticFilePath(test_path)
tab.Navigate(url,
script_to_evaluate_on_commit=test_params.test_harness_script)
tab.action_runner.WaitForJavaScriptCondition(
test_params.finish_js_condition, timeout=30)
# Stop tracing.
timeline_data = tab.browser.platform.tracing_controller.StopTracing()
# Evaluate success.
if test_params.success_eval_func:
timeline_model = model_module.TimelineModel(timeline_data)
event_iter = timeline_model.IterAllEvents(
event_type_predicate=timeline_model.IsSliceOrAsyncSlice)
prefixed_func_name = '_EvaluateSuccess_' + test_params.success_eval_func
getattr(self, prefixed_func_name)(test_params.category, event_iter,
test_params.other_args)
@classmethod
def SetUpProcess(cls):
super(TraceIntegrationTest, cls).SetUpProcess()
path_util.SetupTelemetryPaths()
cls.CustomizeBrowserArgs([])
cls.StartBrowser()
cls.SetStaticServerDirs(data_paths)
@classmethod
def GenerateBrowserArgs(cls, additional_args):
"""Adds default arguments to |additional_args|.
See the parent class' method documentation for additional information.
"""
default_args = super(TraceIntegrationTest,
cls).GenerateBrowserArgs(additional_args)
default_args.extend([
cba.ENABLE_LOGGING,
cba.ENABLE_EXPERIMENTAL_WEB_PLATFORM_FEATURES,
])
return default_args
def _GetAndAssertOverlayBotConfig(self):
overlay_bot_config = self.GetOverlayBotConfig()
if overlay_bot_config is None:
self.fail('Overlay bot config can not be determined')
assert overlay_bot_config.get('direct_composition', False)
return overlay_bot_config
@staticmethod
def _SwapChainPresentationModeToStr(presentation_mode):
if presentation_mode == _SWAP_CHAIN_PRESENTATION_MODE_COMPOSED:
return 'COMPOSED'
if presentation_mode == _SWAP_CHAIN_PRESENTATION_MODE_OVERLAY:
return 'OVERLAY'
if presentation_mode == _SWAP_CHAIN_PRESENTATION_MODE_NONE:
return 'NONE'
if presentation_mode == _SWAP_CHAIN_PRESENTATION_MODE_COMPOSITION_FAILURE:
return 'COMPOSITION_FAILURE'
if presentation_mode == _SWAP_CHAIN_GET_FRAME_STATISTICS_MEDIA_FAILED:
return 'GET_STATISTICS_FAILED'
return str(presentation_mode)
@staticmethod
def _SwapChainPresentationModeListToStr(presentation_mode_list):
list_str = None
for mode in presentation_mode_list:
mode_str = TraceIntegrationTest._SwapChainPresentationModeToStr(mode)
if list_str is None:
list_str = mode_str
else:
list_str = '%s,%s' % (list_str, mode_str)
return '[%s]' % list_str
@staticmethod
def _DisabledByDefaultTraceCategory(category):
return 'disabled-by-default-%s' % category
#########################################
# The test success evaluation functions
def _EvaluateSuccess_CheckGLCategory(self, category, event_iterator,
other_args):
del other_args # Unused in this particular success evaluation.
for event in event_iterator:
if (event.category == category
and event.args.get('gl_category', None) == 'gpu_toplevel'):
break
else:
self.fail('Trace markers for GPU category %s were not found' % category)
def _GetVideoExpectations(self, other_args):
"""Helper for creating expectations for CheckVideoPath and CheckOverlayMode.
Args:
other_args: The |other_args| arg passed into the test.
Returns:
A _VideoExpectations instance with zero_copy, pixel_format, no_overlay,
and presentation_mode filled in.
"""
overlay_bot_config = self._GetAndAssertOverlayBotConfig()
expected = _VideoExpectations()
expected.zero_copy = other_args.get('zero_copy', None)
expected.pixel_format = other_args.get('pixel_format', None)
expected.no_overlay = other_args.get('no_overlay', False)
video_is_rotated = other_args.get('video_is_rotated', False)
video_is_not_scaled = other_args.get('full_size', False)
if overlay_bot_config.get('supports_overlays', False):
supports_hw_nv12_overlays = overlay_bot_config[
'nv12_overlay_support'] in ['DIRECT', 'SCALING']
supports_hw_yuy2_overlays = overlay_bot_config[
'yuy2_overlay_support'] in ['DIRECT', 'SCALING']
supports_sw_nv12_overlays = overlay_bot_config[
'nv12_overlay_support'] == 'SOFTWARE'
if expected.pixel_format is None:
if supports_hw_nv12_overlays:
expected.pixel_format = 'NV12'
elif supports_hw_yuy2_overlays:
expected.pixel_format = 'YUY2'
else:
assert supports_sw_nv12_overlays
expected.pixel_format = 'BGRA'
else:
if (not supports_hw_nv12_overlays and not supports_hw_yuy2_overlays):
expected.pixel_format = 'BGRA'
gpu = self.browser.GetSystemInfo().gpu.devices[0]
supports_rotated_video_overlays = (
gpu.vendor_id == 0x1002 and
gpu.device_id in _SUPPORTED_WIN_AMD_GPUS_WITH_NV12_ROTATED_OVERLAYS)
supports_downscaled_overlay_promotion = gpu.vendor_id != 0x8086
no_issue_with_downscaled_overlay_promotion = (
video_is_not_scaled or supports_downscaled_overlay_promotion)
if (((supports_hw_nv12_overlays and expected.pixel_format == 'NV12')
or supports_hw_yuy2_overlays)
and (not video_is_rotated or supports_rotated_video_overlays)):
expected.presentation_mode = 'OVERLAY'
else:
expected.presentation_mode = 'COMPOSED'
if expected.zero_copy is None:
# TODO(sunnyps): Check for overlay scaling support after making the same
# change in SwapChainPresenter.
expected.zero_copy = (expected.presentation_mode == 'OVERLAY'
and expected.pixel_format == 'NV12'
and supports_hw_nv12_overlays
and no_issue_with_downscaled_overlay_promotion
and not video_is_rotated)
return expected
def _EvaluateSuccess_CheckVideoPath(self, category, event_iterator,
other_args):
"""Verifies Chrome goes down the code path as expected.
Depending on whether hardware overlays are supported or not, which formats
are supported in overlays, whether video is downscaled or not, whether
video is rotated or not, Chrome's video presentation code path can be
different.
"""
os_name = self.browser.platform.GetOSName()
assert os_name and os_name.lower() == 'win'
other_args = other_args or {}
expected = self._GetVideoExpectations(other_args)
# Verify expectations through captured trace events.
for event in event_iterator:
if event.category != category:
continue
if event.name != _SWAP_CHAIN_PRESENT_EVENT_NAME:
continue
if expected.no_overlay:
self.fail('Expected no overlay got %s' % _SWAP_CHAIN_PRESENT_EVENT_NAME)
detected_pixel_format = event.args.get('PixelFormat', None)
if detected_pixel_format is None:
self.fail('PixelFormat is missing from event %s' %
_SWAP_CHAIN_PRESENT_EVENT_NAME)
if expected.pixel_format != detected_pixel_format:
self.fail('SwapChain pixel format mismatch, expected %s got %s' %
(expected.pixel_format, detected_pixel_format))
detected_zero_copy = event.args.get('ZeroCopy', None)
if detected_zero_copy is None:
self.fail('ZeroCopy is missing from event %s' %
_SWAP_CHAIN_PRESENT_EVENT_NAME)
if expected.zero_copy != detected_zero_copy:
self.fail('ZeroCopy mismatch, expected %s got %s' %
(expected.zero_copy, detected_zero_copy))
break
else:
if expected.no_overlay:
return
self.fail(
'Events with name %s were not found' % _SWAP_CHAIN_PRESENT_EVENT_NAME)
def _EvaluateSuccess_CheckOverlayMode(self, category, event_iterator,
other_args):
"""Verifies video frames are promoted to overlays when supported."""
os_name = self.browser.platform.GetOSName()
assert os_name and os_name.lower() == 'win'
other_args = other_args or {}
expected = self._GetVideoExpectations(other_args)
presentation_mode_history = []
for event in event_iterator:
if event.category != category:
continue
if event.name != _GET_STATISTICS_EVENT_NAME:
continue
if expected.no_overlay:
self.fail('Expected no overlay got %s' % _GET_STATISTICS_EVENT_NAME)
detected_presentation_mode = event.args.get('CompositionMode', None)
if detected_presentation_mode is None:
self.fail('PresentationMode is missing from event %s' %
_GET_STATISTICS_EVENT_NAME)
presentation_mode_history.append(detected_presentation_mode)
if expected.no_overlay:
return
valid_entry_found = False
for index in range(len(presentation_mode_history)):
mode = presentation_mode_history[index]
if (mode == _SWAP_CHAIN_PRESENTATION_MODE_NONE
or mode == _SWAP_CHAIN_GET_FRAME_STATISTICS_MEDIA_FAILED):
# Be more tolerant to avoid test flakiness
continue
if (TraceIntegrationTest._SwapChainPresentationModeToStr(mode) !=
expected.presentation_mode):
if index >= len(presentation_mode_history) // 2:
# Be more tolerant for the first half frames in non-overlay mode.
self.fail('SwapChain presentation mode mismatch, expected %s got %s' %
(expected.presentation_mode,
TraceIntegrationTest._SwapChainPresentationModeListToStr(
presentation_mode_history)))
valid_entry_found = True
if not valid_entry_found:
self.fail(
'No valid frame statistics being collected: %s' % TraceIntegrationTest
._SwapChainPresentationModeListToStr(presentation_mode_history))
def _EvaluateSuccess_CheckSwapChainPath(self, category, event_iterator,
other_args):
"""Verifies that swap chains are used as expected for low latency canvas."""
os_name = self.browser.platform.GetOSName()
assert os_name and os_name.lower() == 'win'
overlay_bot_config = self.GetOverlayBotConfig()
if overlay_bot_config is None:
self.fail('Overlay bot config can not be determined')
assert overlay_bot_config.get('direct_composition', False)
expect_no_overlay = other_args and other_args.get('no_overlay', False)
expect_overlay = not expect_no_overlay
found_overlay = False
# Verify expectations through captured trace events.
for event in event_iterator:
if event.category != category:
continue
if event.name != _PRESENT_TO_SWAP_CHAIN_EVENT_NAME:
continue
image_type = event.args.get('image_type', None)
if image_type == 'swap chain':
found_overlay = True
break
if expect_overlay and not found_overlay:
self.fail(
'Overlay expected but not found: matching %s events were not found' %
_PRESENT_TO_SWAP_CHAIN_EVENT_NAME)
elif expect_no_overlay and found_overlay:
self.fail(
'Overlay not expected but found: matching %s events were found' %
_PRESENT_TO_SWAP_CHAIN_EVENT_NAME)
def _EvaluateSuccess_CheckMainSwapChainPath(self, category, event_iterator,
other_args):
"""Verified that Chrome's main swap chain is presented with full damage."""
os_name = self.browser.platform.GetOSName()
assert os_name and os_name.lower() == 'win'
overlay_bot_config = self.GetOverlayBotConfig()
if overlay_bot_config is None:
self.fail('Overlay bot config can not be determined')
assert overlay_bot_config.get('direct_composition', False)
expect_full_damage = other_args and other_args.get('full_damage', False)
partial_damage_encountered = False
full_damage_encountered = False
# Verify expectations through captured trace events.
for event in event_iterator:
if event.category != category:
continue
if event.name != _PRESENT_MAIN_SWAP_CHAIN_EVENT_NAME:
continue
dirty_rect = event.args.get('dirty_rect', None)
if dirty_rect is None:
continue
if dirty_rect == 'full_damage':
full_damage_encountered = True
else:
partial_damage_encountered = True
# Today Chrome either runs with full damage or partial damage, but not both.
# This may change in the future.
if (expect_full_damage != full_damage_encountered
or expect_full_damage == partial_damage_encountered):
self.fail('Expected %s events with %s only, got others' %
(_PRESENT_MAIN_SWAP_CHAIN_EVENT_NAME,
'full damage' if expect_full_damage else 'partial damage'))
@classmethod
def ExpectationsFiles(cls):
return [
os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'test_expectations',
'trace_test_expectations.txt')
]
class _VideoExpectations(object):
"""Struct-like object for passing around video test expectations."""
def __init__(self):
self.pixel_format = None # str
self.zero_copy = None # bool
self.no_overlay = None # bool
self.presentation_mode = None # str
def load_tests(loader, tests, pattern):
del loader, tests, pattern # Unused.
return gpu_integration_test.LoadAllTestsInModule(sys.modules[__name__])
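
# A standalone sketch (not part of the test suite) of the tolerance rule applied to
# the presentation-mode history above: a mismatch in the first half of the recorded
# frames is ignored, a mismatch in the second half fails. It is simplified: it omits
# the skipping of NONE/failed samples and the valid-entry requirement, and the mode
# strings and histories below are made up.
def _check_mode_history(history, expected_mode):
    for index, mode in enumerate(history):
        if mode != expected_mode and index >= len(history) // 2:
            return False  # a late mismatch counts as a real failure
    return True

assert _check_mode_history(['COMPOSED', 'OVERLAY', 'OVERLAY', 'OVERLAY'], 'OVERLAY')
assert not _check_mode_history(['OVERLAY', 'OVERLAY', 'COMPOSED', 'OVERLAY'], 'OVERLAY')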
|
scheib/chromium
|
content/test/gpu/gpu_tests/trace_integration_test.py
|
Python
|
bsd-3-clause
| 20,948
|
"""The Unix filesystem
https://en.wikipedia.org/wiki/Unix_filesystem#Conventional_directory_layout
The filesystem appears as one rooted tree of directories
Instead of addressing separate volumes such as disk partitions,
removable media,
and network shares as separate trees (
as done in DOS and Windows:
each drive has a LETTER naming the root of its file system tree),
such volumes can be mounted on a directory,
causing the volume's tree to appear as that directory in the larger tree
The root of the entire tree is denoted.
"""
from pysyte.config.types import ModuleConfiguration
from pysyte.types.paths import DirectPath
class UnixDirectory(DirectPath):
def __init__(self, path_):
super().__init__(path_)
def upath(string):
"""For consistency with path()"""
return UnixDirectory(string)
root = upath("/")
config = ModuleConfiguration(__file__)
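
# A minimal illustration (hypothetical, not part of this module) of the docstring's
# point: mounted volumes are addressed by joining onto the single root "/" rather
# than by separate drive letters. Only os.path is used here; the paths are made up.
import os.path

removable_media = os.path.join("/", "mnt", "usb")
assert removable_media == "/mnt/usb"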
|
jalanb/dotsite
|
pysyte/unix/root.py
|
Python
|
mit
| 918
|
import time
import pyven.constants
from pyven.steps.step import Step
from pyven.checkers.checker import Checker
from pyven.logging.logger import Logger
from pyven.reporting.content.step import StepListing
class Postprocess(Step):
def __init__(self, verbose, nb_threads=1):
super(Postprocess, self).__init__(verbose)
self.name = 'postprocess'
self.checker = Checker('Postprocessing')
self.nb_threads = nb_threads
def process(self):
return self._process_parallel()
@Step.error_checks
def _process(self, project):
Logger.get().info('Starting ' + self.name)
ok = True
for tool in project.postprocessors:
tic = time.time()
if not tool.process(self.verbose):
ok = False
else:
toc = time.time()
Logger.get().info('Time for ' + tool.type + ':' + tool.name + ' : ' + str(round(toc - tic, 3)) + ' seconds')
if not ok:
project.status = pyven.constants.STATUS[1]
Logger.get().error(self.name + ' errors found')
else:
project.status = pyven.constants.STATUS[0]
Logger.get().info(self.name + ' completed')
return ok
def report_content(self):
listings = []
if self.status == pyven.constants.STATUS[1]:
for project in Step.PROJECTS:
for postprocessor in project.postprocessors:
listings.append(postprocessor.report_content())
if self.checker.enabled():
listings.append(self.checker.report_content())
return StepListing(title=self.title(), status=self.report_status(), listings=listings, enable_summary=True)
def report(self):
return self.status == pyven.constants.STATUS[1]
|
mgaborit/pyven
|
source/pyven/steps/postprocess.py
|
Python
|
mit
| 1,841
|
from exchanges import helpers
from exchanges import hitbtc
from exchanges import kraken
from exchanges import opportunity_kraken
import opportunity
from time import sleep
from datetime import datetime
import csv
# PREPARE OUTPUT FILE
# tell the computer where to put the CSV and what to name it
filename = datetime.now().strftime("%Y-%m-%d--%H-%M-%S")
outfile_path='csvoutput'+filename+'.csv'
# open it up; the 'w' means we will write to it
writer = csv.writer(open(outfile_path, 'w'))
#create a list with headings for our columns
headers = ['datetime','hitbtc_BCNBTC_bid','hitbtc_BCNBTC_ask',
'hitbtc_BTCEUR_bid','hitbtc_BTCEUR_ask',
'hitbtc_BTCUSD_bid','hitbtc_BTCUSD_ask',
'hitbtc_DOGEBTC_bid','hitbtc_DOGEBTC_ask',
'hitbtc_EURGBP_bid','hitbtc_EURGBP_ask',
'hitbtc_EURUSD_bid','hitbtc_EURUSD_ask',
'hitbtc_FCNBTC_bid','hitbtc_FCNBTC_ask',
'hitbtc_GBPUSD_bid','hitbtc_GBPUSD_ask',
'hitbtc_LTCBTC_bid','hitbtc_LTCBTC_ask',
'hitbtc_LTCEUR_bid','hitbtc_LTCEUR_ask',
'hitbtc_LTCUSD_bid','hitbtc_LTCUSD_ask',
'hitbtc_NXTBTC_bid','hitbtc_NXTBTC_ask',
'hitbtc_QCNBTC_bid','hitbtc_QCNBTC_ask',
'hitbtc_XDNBTC_bid','hitbtc_XDNBTC_ask',
'hitbtc_XMRBTC_bid','hitbtc_XMRBTC_ask',
'opportunity_1',
'opportunity_2',
'kraken_LTCEUR_bid','kraken_LTCEUR_ask',
'kraken_LTCUSD_bid','kraken_LTCUSD_ask',
'kraken_XBTEUR_bid','kraken_XBTEUR_ask',
'kraken_XBTLTC_bid','kraken_XBTLTC_ask',
'kraken_XBTUSD_bid','kraken_XBTUSD_ask',
'kraken_opportunity_1','kraken_opportunity_2'
]
#write the row of headings to our CSV file
writer.writerow(headers)
# GET DATA, PUT INTO FILE - LOOP FOR A CERTAIN TIME
# set a counter telling us how many times we've gone through the loop; this is the first time, so we'll set it to 1
i = 1
# loop through pages of JSON returned; 200 is an arbitrary cap
while i < 200:
#print out what number loop we are on, which will make it easier to track down problems when they appear
print i
#initialize the row
row = []
#add every 'cell' to the row list, identifying the item just like an index in a list
row.append(datetime.now())
# HITBTC
row.append(hitbtc.get_current_bid_BCNBTC())
row.append(hitbtc.get_current_ask_BCNBTC())
row.append(hitbtc.get_current_bid_BTCEUR())
row.append(hitbtc.get_current_ask_BTCEUR())
row.append(hitbtc.get_current_bid_BTCUSD())
row.append(hitbtc.get_current_ask_BTCUSD())
row.append(hitbtc.get_current_bid_DOGEBTC())
row.append(hitbtc.get_current_ask_DOGEBTC())
row.append(hitbtc.get_current_bid_EURGBP())
row.append(hitbtc.get_current_ask_EURGBP())
row.append(hitbtc.get_current_bid_EURUSD())
row.append(hitbtc.get_current_ask_EURUSD())
row.append(hitbtc.get_current_bid_FCNBTC())
row.append(hitbtc.get_current_ask_FCNBTC())
row.append(hitbtc.get_current_bid_GBPUSD())
row.append(hitbtc.get_current_ask_GBPUSD())
row.append(hitbtc.get_current_bid_LTCBTC())
row.append(hitbtc.get_current_ask_LTCBTC())
row.append(hitbtc.get_current_bid_LTCEUR())
row.append(hitbtc.get_current_ask_LTCEUR())
row.append(hitbtc.get_current_bid_LTCUSD())
row.append(hitbtc.get_current_ask_LTCUSD())
row.append(hitbtc.get_current_bid_NXTBTC())
row.append(hitbtc.get_current_ask_NXTBTC())
row.append(hitbtc.get_current_bid_QCNBTC())
row.append(hitbtc.get_current_ask_QCNBTC())
row.append(hitbtc.get_current_bid_XDNBTC())
row.append(hitbtc.get_current_ask_XDNBTC())
row.append(hitbtc.get_current_bid_XMRBTC())
row.append(hitbtc.get_current_ask_XMRBTC())
#check whether opportunity exists
row.append(opportunity.opportunity_1())
row.append(opportunity.opportunity_2())
#kraken watch
row.append(kraken.get_current_bid_LTCEUR())
row.append(kraken.get_current_ask_LTCEUR())
row.append(kraken.get_current_bid_LTCUSD())
row.append(kraken.get_current_ask_LTCUSD())
row.append(kraken.get_current_bid_XBTEUR())
row.append(kraken.get_current_ask_XBTEUR())
row.append(kraken.get_current_bid_XBTLTC())
row.append(kraken.get_current_ask_XBTLTC())
row.append(kraken.get_current_bid_XBTUSD())
row.append(kraken.get_current_ask_XBTUSD())
#kraken opportunity
row.append(opportunity_kraken.opportunity_1())
row.append(opportunity_kraken.opportunity_2())
#once you have all the cells in there, write the row to your csv
writer.writerow(row)
#increment our loop counter, now we're on the next time through the loop
i = i + 1
#tell Python to rest for 5 secs, so we don't exceed our rate limit
sleep(5)
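
# A compact alternative sketch (not used by the script above) for building the
# hitbtc cells of one row: iterate over pair names instead of writing one append
# per call. It assumes the hitbtc module exposes get_current_bid_<PAIR> and
# get_current_ask_<PAIR> functions exactly as they are called above.
def build_hitbtc_cells(pairs):
    cells = []
    for pair in pairs:
        cells.append(getattr(hitbtc, 'get_current_bid_' + pair)())
        cells.append(getattr(hitbtc, 'get_current_ask_' + pair)())
    return cells

# e.g. row.extend(build_hitbtc_cells(['BCNBTC', 'BTCEUR', 'BTCUSD']))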
|
Humantrashcan/prices
|
request.py
|
Python
|
mit
| 4,411
|
# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011 Harry Marr http://hmarr.com/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import re
import urllib
import base64
from boto.connection import AWSAuthConnection
from boto.exception import BotoServerError
from boto.regioninfo import RegionInfo
import boto
import boto.jsonresponse
from boto.ses import exceptions as ses_exceptions
class SESConnection(AWSAuthConnection):
ResponseError = BotoServerError
DefaultRegionName = 'us-east-1'
DefaultRegionEndpoint = 'email.us-east-1.amazonaws.com'
APIVersion = '2010-12-01'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/',
security_token=None, validate_certs=True, profile_name=None):
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
self.region = region
super(SESConnection, self).__init__(self.region.endpoint,
aws_access_key_id, aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass, debug,
https_connection_factory, path,
security_token=security_token,
validate_certs=validate_certs,
profile_name=profile_name)
def _required_auth_capability(self):
return ['ses']
def _build_list_params(self, params, items, label):
"""Add an AWS API-compatible parameter list to a dictionary.
:type params: dict
:param params: The parameter dictionary
:type items: list
:param items: Items to be included in the list
:type label: string
:param label: The parameter list's name
"""
if isinstance(items, basestring):
items = [items]
for i in range(1, len(items) + 1):
params['%s.%d' % (label, i)] = items[i - 1]
def _make_request(self, action, params=None):
"""Make a call to the SES API.
:type action: string
:param action: The API method to use (e.g. SendRawEmail)
:type params: dict
:param params: Parameters that will be sent as POST data with the API
call.
"""
ct = 'application/x-www-form-urlencoded; charset=UTF-8'
headers = {'Content-Type': ct}
params = params or {}
params['Action'] = action
for k, v in params.items():
if isinstance(v, unicode): # UTF-8 encode only if it's Unicode
params[k] = v.encode('utf-8')
response = super(SESConnection, self).make_request(
'POST',
'/',
headers=headers,
data=urllib.urlencode(params)
)
body = response.read()
if response.status == 200:
list_markers = ('VerifiedEmailAddresses', 'Identities',
'DkimTokens', 'VerificationAttributes',
'SendDataPoints')
item_markers = ('member', 'item', 'entry')
e = boto.jsonresponse.Element(list_marker=list_markers,
item_marker=item_markers)
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e
else:
# HTTP codes other than 200 are considered errors. Go through
# some error handling to determine which exception gets raised.
self._handle_error(response, body)
def _handle_error(self, response, body):
"""
Handle raising the correct exception, depending on the error. Many
errors share the same HTTP response code, meaning we have to get really
kludgey and do string searches to figure out what went wrong.
"""
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
if "Address blacklisted." in body:
# Delivery failures happened frequently enough with the recipient's
# email address for Amazon to blacklist it. After a day or three,
# they'll be automatically removed, and delivery can be attempted
# again (if you write the code to do so in your application).
ExceptionToRaise = ses_exceptions.SESAddressBlacklistedError
exc_reason = "Address blacklisted."
elif "Email address is not verified." in body:
# This error happens when the "Reply-To" value passed to
# send_email() hasn't been verified yet.
ExceptionToRaise = ses_exceptions.SESAddressNotVerifiedError
exc_reason = "Email address is not verified."
elif "Daily message quota exceeded." in body:
# Encountered when your account exceeds the maximum total number
# of emails per 24 hours.
ExceptionToRaise = ses_exceptions.SESDailyQuotaExceededError
exc_reason = "Daily message quota exceeded."
elif "Maximum sending rate exceeded." in body:
# Your account has sent above its allowed requests a second rate.
ExceptionToRaise = ses_exceptions.SESMaxSendingRateExceededError
exc_reason = "Maximum sending rate exceeded."
elif "Domain ends with dot." in body:
# Recipient address ends with a dot/period. This is invalid.
ExceptionToRaise = ses_exceptions.SESDomainEndsWithDotError
exc_reason = "Domain ends with dot."
elif "Local address contains control or whitespace" in body:
# I think this pertains to the recipient address.
ExceptionToRaise = ses_exceptions.SESLocalAddressCharacterError
exc_reason = "Local address contains control or whitespace."
elif "Illegal address" in body:
# A clearly mal-formed address.
ExceptionToRaise = ses_exceptions.SESIllegalAddressError
exc_reason = "Illegal address"
# The re.search is to distinguish from the
# SESAddressNotVerifiedError error above.
elif re.search('Identity.*is not verified', body):
ExceptionToRaise = ses_exceptions.SESIdentityNotVerifiedError
exc_reason = "Identity is not verified."
elif "ownership not confirmed" in body:
ExceptionToRaise = ses_exceptions.SESDomainNotConfirmedError
exc_reason = "Domain ownership is not confirmed."
else:
# This is either a common AWS error, or one that we don't devote
# its own exception to.
ExceptionToRaise = self.ResponseError
exc_reason = response.reason
raise ExceptionToRaise(response.status, exc_reason, body)
def send_email(self, source, subject, body, to_addresses,
cc_addresses=None, bcc_addresses=None,
format='text', reply_addresses=None,
return_path=None, text_body=None, html_body=None):
"""Composes an email message based on input data, and then immediately
queues the message for sending.
:type source: string
:param source: The sender's email address.
:type subject: string
:param subject: The subject of the message: A short summary of the
content, which will appear in the recipient's inbox.
:type body: string
:param body: The message body.
:type to_addresses: list of strings or string
:param to_addresses: The To: field(s) of the message.
:type cc_addresses: list of strings or string
:param cc_addresses: The CC: field(s) of the message.
:type bcc_addresses: list of strings or string
:param bcc_addresses: The BCC: field(s) of the message.
:type format: string
:param format: The format of the message's body, must be either "text"
or "html".
:type reply_addresses: list of strings or string
:param reply_addresses: The reply-to email address(es) for the
message. If the recipient replies to the
message, each reply-to address will
receive the reply.
:type return_path: string
:param return_path: The email address to which bounce notifications are
to be forwarded. If the message cannot be delivered
to the recipient, then an error message will be
returned from the recipient's ISP; this message
will then be forwarded to the email address
specified by the ReturnPath parameter.
:type text_body: string
:param text_body: The text body to send with this email.
:type html_body: string
:param html_body: The html body to send with this email.
"""
format = format.lower().strip()
if body is not None:
if format == "text":
if text_body is not None:
raise Warning("You've passed in both a body and a "
"text_body; please choose one or the other.")
text_body = body
else:
if html_body is not None:
raise Warning("You've passed in both a body and an "
"html_body; please choose one or the other.")
html_body = body
params = {
'Source': source,
'Message.Subject.Data': subject,
}
if return_path:
params['ReturnPath'] = return_path
if html_body is not None:
params['Message.Body.Html.Data'] = html_body
if text_body is not None:
params['Message.Body.Text.Data'] = text_body
if(format not in ("text", "html")):
raise ValueError("'format' argument must be 'text' or 'html'")
if(not (html_body or text_body)):
raise ValueError("No text or html body found for mail")
self._build_list_params(params, to_addresses,
'Destination.ToAddresses.member')
if cc_addresses:
self._build_list_params(params, cc_addresses,
'Destination.CcAddresses.member')
if bcc_addresses:
self._build_list_params(params, bcc_addresses,
'Destination.BccAddresses.member')
if reply_addresses:
self._build_list_params(params, reply_addresses,
'ReplyToAddresses.member')
return self._make_request('SendEmail', params)
def send_raw_email(self, raw_message, source=None, destinations=None):
"""Sends an email message, with header and content specified by the
client. The SendRawEmail action is useful for sending multipart MIME
emails, with attachments or inline content. The raw text of the message
must comply with Internet email standards; otherwise, the message
cannot be sent.
:type source: string
:param source: The sender's email address. Amazon's docs say:
If you specify the Source parameter, then bounce notifications and
complaints will be sent to this email address. This takes precedence
over any Return-Path header that you might include in the raw text of
the message.
:type raw_message: string
:param raw_message: The raw text of the message. The client is
responsible for ensuring the following:
- Message must contain a header and a body, separated by a blank line.
- All required header fields must be present.
- Each part of a multipart MIME message must be formatted properly.
- MIME content types must be among those supported by Amazon SES.
Refer to the Amazon SES Developer Guide for more details.
- Content must be base64-encoded, if MIME requires it.
:type destinations: list of strings or string
:param destinations: A list of destinations for the message.
"""
if isinstance(raw_message, unicode):
raw_message = raw_message.encode('utf-8')
params = {
'RawMessage.Data': base64.b64encode(raw_message),
}
if source:
params['Source'] = source
if destinations:
self._build_list_params(params, destinations,
'Destinations.member')
return self._make_request('SendRawEmail', params)
def list_verified_email_addresses(self):
"""Fetch a list of the email addresses that have been verified.
:rtype: dict
:returns: A ListVerifiedEmailAddressesResponse structure. Note that
keys must be unicode strings.
"""
return self._make_request('ListVerifiedEmailAddresses')
def get_send_quota(self):
"""Fetches the user's current activity limits.
:rtype: dict
:returns: A GetSendQuotaResponse structure. Note that keys must be
unicode strings.
"""
return self._make_request('GetSendQuota')
def get_send_statistics(self):
"""Fetches the user's sending statistics. The result is a list of data
points, representing the last two weeks of sending activity.
Each data point in the list contains statistics for a 15-minute
interval.
:rtype: dict
:returns: A GetSendStatisticsResponse structure. Note that keys must be
unicode strings.
"""
return self._make_request('GetSendStatistics')
def delete_verified_email_address(self, email_address):
"""Deletes the specified email address from the list of verified
addresses.
:type email_address: string
:param email_address: The email address to be removed from the list of
verified addresses.
:rtype: dict
:returns: A DeleteVerifiedEmailAddressResponse structure. Note that
keys must be unicode strings.
"""
return self._make_request('DeleteVerifiedEmailAddress', {
'EmailAddress': email_address,
})
def verify_email_address(self, email_address):
"""Verifies an email address. This action causes a confirmation email
message to be sent to the specified address.
:type email_address: string
:param email_address: The email address to be verified.
:rtype: dict
:returns: A VerifyEmailAddressResponse structure. Note that keys must
be unicode strings.
"""
return self._make_request('VerifyEmailAddress', {
'EmailAddress': email_address,
})
def verify_domain_dkim(self, domain):
"""
Returns a set of DNS records, or tokens, that must be published in the
domain name's DNS to complete the DKIM verification process. These
tokens are DNS ``CNAME`` records that point to DKIM public keys hosted
by Amazon SES. To complete the DKIM verification process, these tokens
must be published in the domain's DNS. The tokens must remain
published in order for Easy DKIM signing to function correctly.
After the tokens are added to the domain's DNS, Amazon SES will be able
to DKIM-sign email originating from that domain. To enable or disable
Easy DKIM signing for a domain, use the ``SetIdentityDkimEnabled``
action. For more information about Easy DKIM, go to the `Amazon SES
Developer Guide
<http://docs.amazonwebservices.com/ses/latest/DeveloperGuide>`_.
:type domain: string
:param domain: The domain name.
"""
return self._make_request('VerifyDomainDkim', {
'Domain': domain,
})
def set_identity_dkim_enabled(self, identity, dkim_enabled):
"""Enables or disables DKIM signing of email sent from an identity.
* If Easy DKIM signing is enabled for a domain name identity (e.g.,
``example.com``), then Amazon SES will DKIM-sign all email sent by
addresses under that domain name (e.g., ``user@example.com``).
* If Easy DKIM signing is enabled for an email address, then Amazon SES
will DKIM-sign all email sent by that email address.
For email addresses (e.g., ``user@example.com``), you can only enable
Easy DKIM signing if the corresponding domain (e.g., ``example.com``)
has been set up for Easy DKIM using the AWS Console or the
``VerifyDomainDkim`` action.
:type identity: string
:param identity: An email address or domain name.
:type dkim_enabled: bool
:param dkim_enabled: Specifies whether or not to enable DKIM signing.
"""
return self._make_request('SetIdentityDkimEnabled', {
'Identity': identity,
'DkimEnabled': 'true' if dkim_enabled else 'false'
})
def get_identity_dkim_attributes(self, identities):
"""Get attributes associated with a list of verified identities.
Given a list of verified identities (email addresses and/or domains),
returns a structure describing identity notification attributes.
:type identities: list
:param identities: A list of verified identities (email addresses
and/or domains).
"""
params = {}
self._build_list_params(params, identities, 'Identities.member')
return self._make_request('GetIdentityDkimAttributes', params)
def list_identities(self):
"""Returns a list containing all of the identities (email addresses
and domains) for a specific AWS Account, regardless of
verification status.
:rtype: dict
:returns: A ListIdentitiesResponse structure. Note that
keys must be unicode strings.
"""
return self._make_request('ListIdentities')
def get_identity_verification_attributes(self, identities):
"""Given a list of identities (email addresses and/or domains),
returns the verification status and (for domain identities)
the verification token for each identity.
:type identities: list of strings or string
:param identities: List of identities.
:rtype: dict
:returns: A GetIdentityVerificationAttributesResponse structure.
Note that keys must be unicode strings.
"""
params = {}
self._build_list_params(params, identities,
'Identities.member')
return self._make_request('GetIdentityVerificationAttributes', params)
def verify_domain_identity(self, domain):
"""Verifies a domain.
:type domain: string
:param domain: The domain to be verified.
:rtype: dict
:returns: A VerifyDomainIdentityResponse structure. Note that
keys must be unicode strings.
"""
return self._make_request('VerifyDomainIdentity', {
'Domain': domain,
})
def verify_email_identity(self, email_address):
"""Verifies an email address. This action causes a confirmation
email message to be sent to the specified address.
:type email_address: string
:param email_address: The email address to be verified.
:rtype: dict
:returns: A VerifyEmailIdentityResponse structure. Note that keys must
be unicode strings.
"""
return self._make_request('VerifyEmailIdentity', {
'EmailAddress': email_address,
})
def delete_identity(self, identity):
"""Deletes the specified identity (email address or domain) from
the list of verified identities.
:type identity: string
:param identity: The identity to be deleted.
:rtype: dict
:returns: A DeleteIdentityResponse structure. Note that keys must
be unicode strings.
"""
return self._make_request('DeleteIdentity', {
'Identity': identity,
})
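
# A minimal usage sketch (not part of boto): the credentials and addresses below
# are placeholders, and the call only succeeds against a real SES account with a
# verified sender address.
if __name__ == '__main__':
    conn = SESConnection(aws_access_key_id='AKIA-PLACEHOLDER',
                         aws_secret_access_key='SECRET-PLACEHOLDER')
    conn.send_email(source='sender@example.com',
                    subject='Test message',
                    body='Hello from Amazon SES',
                    to_addresses=['recipient@example.com'],
                    format='text')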
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/eggs/boto-2.27.0-py2.7.egg/boto/ses/connection.py
|
Python
|
gpl-3.0
| 21,708
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 25 10:21:20 2016
@author: suraj
"""
import pickle
import numpy as np
X = pickle.load(open('x_att.p'))
y = pickle.load(open('y_att.p'))
batchX = []
batchy = []
def convertPointsToBatch(day_of_week,data1,data2):
for i in range(5):
batchX.extend(data1[((i*672)+((day_of_week)*96)):((i*672)+((day_of_week)*96))+96])
batchy.extend(data2[((i*672)+((day_of_week)*96)):((i*672)+((day_of_week)*96))+96])
pass
for i in range(7):
convertPointsToBatch(i,X,y)
batchX = np.array(batchX)
batchy = np.array(batchy)
print batchX.shape
print batchy.shape
print batchX[0]
print batchy[0]
pickle.dump(batchX,open('batch_x_att.p','wb'))
pickle.dump(batchy,open('batch_y_att.p','wb'))
|
suraj-jayakumar/lstm-rnn-ad
|
src/testdata/random_data_15min_ts/point_to_batch_data_conversion.py
|
Python
|
apache-2.0
| 762
|
# -*- coding: utf-8 -*-
# Pluma External Tools plugin
# Copyright (C) 2005-2006 Steve Frécinaux <steve@istique.net>
# Copyright (C) 2012-2021 MATE Developers
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
from gi.repository import Gio, Gdk, Gtk, GtkSource, Pluma
from .outputpanel import OutputPanel
from .capture import *
def default(val, d):
if val is not None:
return val
else:
return d
def current_word(document):
piter = document.get_iter_at_mark(document.get_insert())
start = piter.copy()
if not piter.starts_word() and (piter.inside_word() or piter.ends_word()):
start.backward_word_start()
if not piter.ends_word() and piter.inside_word():
piter.forward_word_end()
return (start, piter)
# ==== Capture related functions ====
def run_external_tool(window, panel, node):
# Configure capture environment
try:
cwd = os.getcwd()
except OSError:
cwd = os.getenv('HOME');
capture = Capture(node.command, cwd)
capture.env = os.environ.copy()
capture.set_env(PLUMA_CWD = cwd)
view = window.get_active_view()
if view is not None:
# Environment vars relative to current document
document = view.get_buffer()
uri = document.get_uri()
# Current line number
piter = document.get_iter_at_mark(document.get_insert())
capture.set_env(PLUMA_CURRENT_LINE_NUMBER=str(piter.get_line() + 1))
# Current line text
piter.set_line_offset(0)
end = piter.copy()
if not end.ends_line():
end.forward_to_line_end()
capture.set_env(PLUMA_CURRENT_LINE=piter.get_text(end))
# Selected text (only if input is not selection)
if node.input != 'selection' and node.input != 'selection-document':
bounds = document.get_selection_bounds()
if bounds:
capture.set_env(PLUMA_SELECTED_TEXT=bounds[0].get_text(bounds[1]))
bounds = current_word(document)
capture.set_env(PLUMA_CURRENT_WORD=bounds[0].get_text(bounds[1]))
capture.set_env(PLUMA_CURRENT_DOCUMENT_TYPE=document.get_mime_type())
if uri is not None:
gfile = Gio.file_new_for_uri(uri)
scheme = gfile.get_uri_scheme()
name = os.path.basename(uri)
capture.set_env(PLUMA_CURRENT_DOCUMENT_URI = uri,
PLUMA_CURRENT_DOCUMENT_NAME = name,
PLUMA_CURRENT_DOCUMENT_SCHEME = scheme)
if Pluma.utils_uri_has_file_scheme(uri):
path = gfile.get_path()
cwd = os.path.dirname(path)
capture.set_cwd(cwd)
capture.set_env(PLUMA_CURRENT_DOCUMENT_PATH = path,
PLUMA_CURRENT_DOCUMENT_DIR = cwd)
documents_uri = [doc.get_uri()
for doc in window.get_documents()
if doc.get_uri() is not None]
documents_path = [Gio.file_new_for_uri(uri).get_path()
for uri in documents_uri
if Pluma.utils_uri_has_file_scheme(uri)]
capture.set_env(PLUMA_DOCUMENTS_URI = ' '.join(documents_uri),
PLUMA_DOCUMENTS_PATH = ' '.join(documents_path))
flags = capture.CAPTURE_BOTH
if not node.has_hash_bang():
flags |= capture.CAPTURE_NEEDS_SHELL
capture.set_flags(flags)
# Get input text
input_type = node.input
output_type = node.output
# Clear the panel
panel.clear()
if output_type == 'output-panel':
panel.show()
# Assign the error output to the output panel
panel.set_process(capture)
if input_type != 'nothing' and view is not None:
if input_type == 'document':
start, end = document.get_bounds()
elif input_type == 'selection' or input_type == 'selection-document':
try:
start, end = document.get_selection_bounds()
except ValueError:
if input_type == 'selection-document':
start, end = document.get_bounds()
if output_type == 'replace-selection':
document.select_range(start, end)
else:
start = document.get_iter_at_mark(document.get_insert())
end = start.copy()
elif input_type == 'line':
start = document.get_iter_at_mark(document.get_insert())
end = start.copy()
if not start.starts_line():
start.set_line_offset(0)
if not end.ends_line():
end.forward_to_line_end()
elif input_type == 'word':
start = document.get_iter_at_mark(document.get_insert())
end = start.copy()
if not start.inside_word():
panel.write(_('You must be inside a word to run this command'),
panel.command_tag)
return
if not start.starts_word():
start.backward_word_start()
if not end.ends_word():
end.forward_word_end()
input_text = document.get_text(start, end, False)
capture.set_input(input_text)
# Assign the standard output to the chosen "file"
if output_type == 'new-document':
tab = window.create_tab(True)
view = tab.get_view()
document = tab.get_document()
pos = document.get_start_iter()
capture.connect('stdout-line', capture_stdout_line_document, document, pos)
document.begin_user_action()
view.set_editable(False)
view.set_cursor_visible(False)
elif output_type != 'output-panel' and output_type != 'nothing' and view is not None:
document.begin_user_action()
view.set_editable(False)
view.set_cursor_visible(False)
if output_type == 'insert':
pos = document.get_iter_at_mark(document.get_mark('insert'))
elif output_type == 'replace-selection':
document.delete_selection(False, False)
pos = document.get_iter_at_mark(document.get_mark('insert'))
elif output_type == 'replace-document':
document.set_text('')
pos = document.get_end_iter()
else:
pos = document.get_end_iter()
capture.connect('stdout-line', capture_stdout_line_document, document, pos)
elif output_type != 'nothing':
capture.connect('stdout-line', capture_stdout_line_panel, panel)
document.begin_user_action()
capture.connect('stderr-line', capture_stderr_line_panel, panel)
capture.connect('begin-execute', capture_begin_execute_panel, panel, view, node.name)
capture.connect('end-execute', capture_end_execute_panel, panel, view, output_type)
# Run the command
capture.execute()
if output_type != 'nothing':
document.end_user_action()
class MultipleDocumentsSaver:
def __init__(self, window, panel, docs, node):
self._window = window
self._panel = panel
self._node = node
self._error = False
self._counter = len(docs)
self._signal_ids = {}
self._counter = 0
signals = {}
for doc in docs:
signals[doc] = doc.connect('saving', self.on_document_saving)
Pluma.commands_save_document(window, doc)
doc.disconnect(signals[doc])
def on_document_saving(self, doc, size, total_size):
self._counter += 1
self._signal_ids[doc] = doc.connect('saved', self.on_document_saved)
def on_document_saved(self, doc, error):
if error:
self._error = True
doc.disconnect(self._signal_ids[doc])
del self._signal_ids[doc]
self._counter -= 1
if self._counter == 0 and not self._error:
run_external_tool(self._window, self._panel, self._node)
def capture_menu_action(action, window, panel, node):
if node.save_files == 'document' and window.get_active_document():
MultipleDocumentsSaver(window, panel, [window.get_active_document()], node)
return
elif node.save_files == 'all':
MultipleDocumentsSaver(window, panel, window.get_documents(), node)
return
run_external_tool(window, panel, node)
def capture_stderr_line_panel(capture, line, panel):
if not panel.visible():
panel.show()
panel.write(line, panel.error_tag)
def capture_begin_execute_panel(capture, panel, view, label):
view.get_window(Gtk.TextWindowType.TEXT).set_cursor(Gdk.Cursor.new(Gdk.CursorType.WATCH))
panel['stop'].set_sensitive(True)
panel.clear()
panel.write(_("Running tool:"), panel.italic_tag);
panel.write(" %s\n\n" % label, panel.bold_tag);
def capture_end_execute_panel(capture, exit_code, panel, view, output_type):
panel['stop'].set_sensitive(False)
if output_type in ('new-document','replace-document'):
doc = view.get_buffer()
start = doc.get_start_iter()
end = start.copy()
end.forward_chars(300)
mtype, uncertain = Gio.content_type_guess(None, doc.get_text(start, end, False).encode('utf-8'))
lmanager = GtkSource.LanguageManager.get_default()
language = lmanager.guess_language(doc.get_uri(), mtype)
if language is not None:
doc.set_language(language)
view.get_window(Gtk.TextWindowType.TEXT).set_cursor(Gdk.Cursor.new(Gdk.CursorType.XTERM))
view.set_cursor_visible(True)
view.set_editable(True)
if exit_code == 0:
panel.write("\n" + _("Done.") + "\n", panel.italic_tag)
else:
panel.write("\n" + _("Exited") + ":", panel.italic_tag)
panel.write(" %d\n" % exit_code, panel.bold_tag)
def capture_stdout_line_panel(capture, line, panel):
panel.write(line)
def capture_stdout_line_document(capture, line, document, pos):
document.insert(pos, line)
# ex:ts=4:et:
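
# A minimal sketch of what an external tool launched by run_external_tool() might
# do with the environment prepared above. The tool itself is hypothetical; only the
# PLUMA_* variable names come from this module, and outside a tool invocation those
# variables are simply absent.
def _example_tool():
    doc_path = os.environ.get('PLUMA_CURRENT_DOCUMENT_PATH', '')
    selected = os.environ.get('PLUMA_SELECTED_TEXT', '')
    if doc_path:
        print("editing %s (%d characters selected)" % (doc_path, len(selected)))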
|
mate-desktop/pluma
|
plugins/externaltools/tools/functions.py
|
Python
|
gpl-2.0
| 10,724
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
from lxml import html
from datetime import datetime, timedelta
# get news in last X days
DAY = 3
# url of the news page
BASE_URL = "http://see.xidian.edu.cn/"
# get the page content
r = requests.get(BASE_URL)
# override the encoding
r.encoding = 'GBK'
doc = html.document_fromstring(r.text)
today = datetime.today()
def get_today_news():
trs = doc.cssselect('table[width="98%"] tr')
for tr in trs:
img_tag = tr.cssselect('img')
date_tag = tr.cssselect('font')
if img_tag and date_tag:
tr_date = date_tag[0].text_content().strip('[]')
tr_timedelta = today - datetime.strptime(tr_date, "%Y-%m-%d")
if tr_timedelta < timedelta(DAY):
link = tr.cssselect('a')[0].get('href')
print tr.text_content(), BASE_URL + link
if __name__ == '__main__':
get_today_news()
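
# Standalone sketch of the date-window filter used in get_today_news(), with a
# made-up date: an item is kept only if it is newer than DAY days.
example_date = datetime.strptime("2014-01-01", "%Y-%m-%d")
is_recent = (datetime.today() - example_date) < timedelta(DAY)
assert is_recent is False  # a 2014 item falls outside a 3-day window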
|
runningwolf666/xidian-scripts
|
get_xidian_news.py
|
Python
|
mit
| 931
|
'''
New Integration Test for Simple VM stop/start scheduler with repeatCount.
@author: quarkonics
'''
import os
import time
import zstackwoodpecker.operations.vm_operations as vm_ops
import zstackwoodpecker.operations.scheduler_operations as schd_ops
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
test_stub = test_lib.lib_get_test_stub()
vm = None
schd_job1 = None
schd_job2 = None
schd_trigger1 = None
schd_trigger2 = None
def test():
global vm
global schd_job1
global schd_job2
global schd_trigger1
global schd_trigger2
vm = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
start_date = int(time.time())
schd_job1 = schd_ops.create_scheduler_job('simple_stop_vm_scheduler', 'simple_stop_vm_scheduler', vm.get_vm().uuid, 'stopVm', None)
schd_trigger1 = schd_ops.create_scheduler_trigger('simple_stop_vm_scheduler', start_date+60, 3, 120, 'simple')
schd_ops.add_scheduler_job_to_trigger(schd_trigger1.uuid, schd_job1.uuid)
#schd1 = vm_ops.stop_vm_scheduler(vm.get_vm().uuid, 'simple', 'simple_stop_vm_scheduler', start_date+60, 120, 3)
# if schd1.stopTime != start_date + 60 + 120 * 3:
# test_util.test_fail('[scheduler:] %s is expected have stopTime as %s' % (schd1.uuid, start_date + 60 + 120 * 3))
schd_job2 = schd_ops.create_scheduler_job('simple_start_vm_scheduler', 'simple_start_vm_scheduler', vm.get_vm().uuid, 'startVm', None)
schd_trigger2 = schd_ops.create_scheduler_trigger('simple_start_vm_scheduler', start_date+120, 2, 120, 'simple')
schd_ops.add_scheduler_job_to_trigger(schd_trigger2.uuid, schd_job2.uuid)
#schd2 = vm_ops.start_vm_scheduler(vm.get_vm().uuid, 'simple', 'simple_start_vm_scheduler', start_date+120, 120, 2)
# if schd2.stopTime != start_date + 120 + 120 * 2:
# test_util.test_fail('[scheduler:] %s is expected have stopTime as %s' % (schd2.uuid, start_date + 120 + 120 * 2))
test_stub.sleep_util(start_date+58)
vm.update()
if not test_lib.lib_is_vm_running(vm.get_vm()):
test_util.test_fail('VM is expected to run until stop vm scheduler start_date')
for i in range(0, 5):
test_util.test_logger('round %s' % (i))
test_stub.sleep_util(start_date + 60 + 120*i + 5)
test_util.test_logger('check VM status at %s, VM is expected to stop' % (start_date + 60 + 120*i + 5))
vm.update()
if vm.get_vm().state != 'Stopping' and vm.get_vm().state != 'Stopped':
test_util.test_fail('VM is expected to stop')
test_stub.sleep_util(start_date + 60 + 120*i + 65)
if i >= 2:
test_util.test_logger('check VM status at %s, VM is expected to stay stopped' % (start_date + 60 + 120*i + 65))
vm.update()
if vm.get_vm().state != 'Stopped':
test_util.test_fail('VM is expected to stay stopped')
continue
test_util.test_logger('check VM status at %s, VM is expected to start' % (start_date + 60 + 120*i + 65))
vm.update()
if vm.get_vm().state != 'Starting' and vm.get_vm().state != 'Running':
test_util.test_fail('VM is expected to start')
schd_ops.del_scheduler_job(schd_job1.uuid)
schd_ops.del_scheduler_trigger(schd_trigger1.uuid)
schd_ops.del_scheduler_job(schd_job2.uuid)
schd_ops.del_scheduler_trigger(schd_trigger2.uuid)
vm.destroy()
test_util.test_pass('Create Simple VM Stop Start Scheduler Repeat Count Success')
#Will be called only if exception happens in test().
def error_cleanup():
global vm
global schd_job1
global schd_job2
global schd_trigger1
global schd_trigger2
if vm:
vm.destroy()
if schd_job1:
schd_ops.del_scheduler_job(schd_job1.uuid)
if schd_trigger1:
schd_ops.del_scheduler_trigger(schd_trigger1.uuid)
if schd_job2:
schd_ops.del_scheduler_job(schd_job2.uuid)
if schd_trigger2:
schd_ops.del_scheduler_trigger(schd_trigger2.uuid)
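
# Standalone sketch (not part of the test) of the scheduler timeline the checks
# above rely on, assuming create_scheduler_trigger() takes (name, start time,
# repeat count, interval, type): the stop job fires 3 times from start_date+60 and
# the start job 2 times from start_date+120, both on a 120-second interval.
# The start date below is an arbitrary example value.
_example_start_date = 1500000000
_stop_times = [_example_start_date + 60 + 120 * i for i in range(3)]
_start_times = [_example_start_date + 120 + 120 * i for i in range(2)]
assert _stop_times == [_example_start_date + 60,
                       _example_start_date + 180,
                       _example_start_date + 300]
assert _start_times == [_example_start_date + 120, _example_start_date + 240]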
|
zstackio/zstack-woodpecker
|
integrationtest/vm/virtualrouter/scheduler/test_create_start_stop_vm_simple_repeatcount_scheduler.py
|
Python
|
apache-2.0
| 4,057
|
#
# Copyright (c) 2008--2012 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import time
from spacewalk.common.rhnLog import log_debug
from spacewalk.server import rhnSQL
import jabber_lib
class Client(jabber_lib.JabberClient):
def __init__(self, *args, **kwargs):
apply(jabber_lib.JabberClient.__init__, (self, ) + args, kwargs)
self.username = None
self.resource = None
#self.DEBUG = jabber_lib.my_debug
def start(self, username, password, resource):
# XXX find a better name for this function
log_debug(2)
self.auth(username, password, resource)
log_debug(3, "Authenticated")
self.username = username
self.resource = resource
self.jid = "%s@%s/%s" % (self.username, self._host, self.resource)
self.username = username
self.resource = resource
def _add_jid_resource(self, jid, resource):
if not isinstance(jid, jabber_lib.jabber.JID) or jid.resource:
return jid
return jabber_lib.jabber.JID(str(jid) + '/' + resource)
def _fix_jid(self, jid):
return self._add_jid_resource(jid, 'osad')
def _check_signature(self, stanza, actions=None):
# Do we have this client in the table?
jid = stanza.getFrom()
if jid is None:
log_debug(3, 'no from')
return None
jid = str(self._fix_jid(jid))
# Look for a <x> child that has our namespace
xes = stanza.getTags('x')
for x in xes:
if x.getNamespace() != jabber_lib.NS_RHN_SIGNED:
continue
break
else: #for
log_debug(1, "No signature node found in stanza")
return None
# We now have our signature node
x_client_id = x.getAttr('client-id')
row = lookup_client_by_name(x_client_id)
if not row:
log_debug(3, 'no client found', x_client_id)
if self.debug_level > 5:
raise Exception(1)
return None
shared_key = row['shared_key']
timestamp = x.getAttr('timestamp')
serial = x.getAttr('serial')
action = x.getAttr('action')
if actions and action not in actions:
log_debug(1, "action %s not allowed" % action)
return None
attrs = {
'client-id' : x_client_id,
'timestamp' : x.getAttr('timestamp'),
'serial' : x.getAttr('serial'),
'action' : x.getAttr('action'),
'jid' : jid,
}
signing_comps = ['client-id', 'timestamp', 'serial', 'action', 'jid']
args = [shared_key, self.jid]
for sc in signing_comps:
args.append(attrs[sc])
log_debug(4, "Signature args", args)
signature = apply(jabber_lib.sign, args)
x_signature = x.getAttr('signature')
if signature != x_signature:
log_debug(1, "Signatures do not match", signature, x_signature)
if self.debug_level > 5:
raise Exception(1)
return None
# Happy joy
return x
def _create_signature(self, jid, action):
row = lookup_client_by_jid(jid)
if not row:
log_debug(3, 'no client found for jid', jid)
if self.debug_level > 5:
raise Exception(1)
return None
full_jid = row['jabber_id']
shared_key = row['shared_key']
attrs = {
'timestamp' : int(time.time()),
'serial' : self.get_unique_id(),
'action' : action,
'jid' : self.jid,
}
signing_comps = ['timestamp', 'serial', 'action', 'jid']
args = [shared_key, full_jid]
for sc in signing_comps:
args.append(attrs[sc])
log_debug(4, "Signature args", args)
attrs['signature'] = apply(jabber_lib.sign, args)
x = jabber_lib.jabber.xmlstream.Node('x')
x.setNamespace(jabber_lib.NS_RHN_SIGNED)
for k, v in attrs.items():
x.putAttr(k, v)
return x
def _message_callback(self, client, stanza):
log_debug(4)
assert stanza.getName() == 'message'
# Actions we know how to react to
actions = [
jabber_lib.NS_RHN_MESSAGE_RESPONSE_CHECKIN,
jabber_lib.NS_RHN_MESSAGE_RESPONSE_PING,
]
sig = self._check_signature_from_message(stanza, actions)
if not sig:
return
self.update_client_message_received(stanza.getFrom())
action = sig.getAttr('action')
if action == jabber_lib.NS_RHN_MESSAGE_RESPONSE_PING:
log_debug(1, 'Ping response')
# XXX
return
def ping_clients(self, clients):
for client in clients:
jid = client['jabber_id']
if jid is None:
continue
self.send_message(jid, jabber_lib.NS_RHN_MESSAGE_REQUEST_PING)
def set_jid_available(self, jid):
jabber_lib.JabberClient.set_jid_available(self, jid)
self._set_state(jid, self._get_push_state_id('online'))
def set_jid_unavailable(self, jid):
jabber_lib.JabberClient.set_jid_unavailable(self, jid)
self._set_state(jid, self._get_push_state_id('offline'))
_query_set_state = rhnSQL.Statement("""
update rhnPushClient
set state_id = :state_id,
last_ping_time = NULL,
next_action_time = NULL
where jabber_id = :jid
""")
def _set_state(self, jid, state_id):
h = rhnSQL.prepare(self._query_set_state)
h.execute(state_id=state_id, jid=str(jid))
rhnSQL.commit()
def _get_push_state_id(self, state):
t = rhnSQL.Table('rhnPushClientState', 'label')
row = t[state]
assert row is not None
return row['id']
_query_update_client_message_received = rhnSQL.Statement("""
update rhnPushClient
set state_id = :state_id,
last_message_time = current_timestamp,
last_ping_time = NULL,
next_action_time = NULL
where jabber_id = :jid
""")
def update_client_message_received(self, jid):
jid = str(jid)
state_id = self._get_push_state_id('online')
h = rhnSQL.prepare(self._query_update_client_message_received)
ret = h.execute(jid=jid, state_id=state_id)
rhnSQL.commit()
class InvalidClientError(Exception):
pass
def lookup_client_by_name(client_name):
client_name = str(client_name)
t = rhnSQL.Table('rhnPushClient', 'name')
row = t[client_name]
if row is None:
raise InvalidClientError(client_name)
return row
def lookup_client_by_jid(jid):
if not isinstance(jid, jabber_lib.jabber.JID):
jid = jabber_lib.jabber.JID(jid)
if not jid.getResource():
# add the resource so we can find the guy in our table
jid.setResource('osad')
jid = str(jid)
t = rhnSQL.Table('rhnPushClient', 'jabber_id')
row = t[jid]
if row is None:
raise InvalidClientError(jid)
return row
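
# Standalone sketch of how the signing arguments are assembled in
# _create_signature() above. _example_sign() below only illustrates the argument
# order; it is NOT jabber_lib.sign, and every value is made up.
import hashlib

def _example_sign(*parts):
    return hashlib.sha1('\0'.join(str(p) for p in parts)).hexdigest()

_attrs = {'timestamp': 1234567890, 'serial': 42, 'action': 'ping',
          'jid': 'dispatcher@example.org/superclient'}
_args = ['shared-key', 'client-1@example.org/osad']
for _comp in ['timestamp', 'serial', 'action', 'jid']:
    _args.append(_attrs[_comp])
_signature = _example_sign(*_args)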
|
xkollar/spacewalk
|
client/tools/osad/src/dispatcher_client.py
|
Python
|
gpl-2.0
| 7,736
|
"""empty message
Revision ID: 74af9cceeeaf
Revises: 6e7b88dc4544
Create Date: 2017-07-30 20:47:07.982489
"""
# revision identifiers, used by Alembic.
revision = '74af9cceeeaf'
down_revision = '6e7b88dc4544'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('customer', sa.Column('vat_number', sa.String(length=100), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('customer', 'vat_number')
# ### end Alembic commands ###
|
Tethik/faktura
|
migrations/versions/74af9cceeeaf_.py
|
Python
|
mit
| 634
|
"""Twitter authentication related utilities."""
import logging
import tweepy
log = logging.getLogger(__name__)
def get_authorized_api(access_token_key, access_token_secret, consumer_key, consumer_secret):
"""Return an API object initialized with the given token and secret.
:Parameters:
- `access_token_key`: Twitter OAuth access token key received during
OAuth authorization.
- `access_token_secret`: Twitter OAuth access token secret key received
during OAuth authorization.
- `consumer_key`: twitter app consumer key.
- `consumer_secret`: twitter app consumer secret.
:Return:
Instance of tweepy.API or None in case of invalid credentials.
"""
log.info('Initializing Twitter API.')
tweepy_auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
tweepy_auth.set_access_token(access_token_key, access_token_secret)
try:
api = tweepy.API(tweepy_auth)
api.me()
except Exception as error:
api = None
log.error('An error occurred while logging in to Twitter', exc_info=error)
if api is not None:
log.info('Twitter API is ready.')
return api
def get_access_token(consumer_key, consumer_secret):
"""Utility which allows to get Twitter access token. Should not be used in prod."""
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
redirect_url = auth.get_authorization_url()
print ('Please open this URL in browser and grant access. Then copy verification code and paste it here.\n%s\n'
% (redirect_url,))
code = raw_input('Verification code: ')
auth.get_access_token(code)
print
print 'Access token key: %s' % (auth.access_token.key,)
print 'Access token secret: %s' % (auth.access_token.secret,)
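
# A minimal usage sketch with placeholder credentials; it needs a real Twitter app
# and network access to actually return an API object.
if __name__ == '__main__':
    api = get_authorized_api(access_token_key='ACCESS-TOKEN-KEY',
                             access_token_secret='ACCESS-TOKEN-SECRET',
                             consumer_key='CONSUMER-KEY',
                             consumer_secret='CONSUMER-SECRET')
    if api is not None:
        print 'Authenticated as @%s' % (api.me().screen_name,)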
|
ysenko/python-from-zero-to-hero
|
twitter_explorer/twitter_backend/auth.py
|
Python
|
mit
| 1,799
|
#!/usr/bin/python3
from setuptools import setup, find_packages
setup(
name = "LaVa",
version = "0.2",
packages = find_packages(),
scripts = ['analyser.py', 'visualizer.py'],
install_requires = [
'nltk>=3.0.0'
],
package_data = {
'': ['*.txt']
},
author = "Scott R. Pledger",
author_email = "pledger@colorado.edu",
description = "Tool to visualize how authors use language.",
license = "GPL",
url = "https://github.com/scottpledger/lava"
)
|
scottpledger/lava
|
setup.py
|
Python
|
gpl-2.0
| 459
|
__author__ = 'Srikrishna'
import ConfigParser
import sys
import logging
logger = logging.getLogger(__name__)
sections = 'Connection','Configuration','Testing'
class MyConfiguration(object):
def __init__(self, *file_names):
parser = ConfigParser.ConfigParser()
parser.optionxform = str # make option names case sensitive
found = parser.read(file_names)
if not found:
raise ValueError('No config file found!')
for names in sections:
self.__dict__.update(parser.items(names))
#config = MyConfiguration('lora.conf') # define the configurations in this file
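
# A minimal usage sketch: the configuration file written below is hypothetical and
# only needs to define the three sections listed above so that their options end
# up as attributes on the MyConfiguration instance.
if __name__ == '__main__':
    import os
    import tempfile
    tmp = tempfile.NamedTemporaryFile(mode='w', suffix='.conf', delete=False)
    tmp.write('[Connection]\nhost = 127.0.0.1\n'
              '[Configuration]\nrate = 868\n'
              '[Testing]\nenabled = yes\n')
    tmp.close()
    config = MyConfiguration(tmp.name)
    print config.host  # every option from the listed sections becomes an attribute
    os.unlink(tmp.name)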
|
srikrishna3118/Lora_GW
|
PythonFiles/Config.py
|
Python
|
gpl-3.0
| 632
|
# Bob build tool
# Copyright (C) 2016 Jan Klötzke
#
# SPDX-License-Identifier: GPL-3.0-or-later
from unittest import TestCase
import yaml
from bob.errors import ParseError
from bob.input import ScmOverride
from bob.stringparser import Env
class TestScmOverride(TestCase):
def setUp(self):
self.scm = {
'scm' : "git",
'url' : "git@git.com:foo/bar.git",
'branch' : "develop"
}
def testDel(self):
"""Test to delete a key"""
o = ScmOverride({ 'del' : [ 'branch' ] })
match, scm = o.mangle(self.scm, Env())
self.assertEqual(scm, {
'scm' : "git",
'url' : "git@git.com:foo/bar.git",
})
o = ScmOverride({ 'del' : [ "${DEL}" ] })
e = Env({"DEL" : "branch"})
match, scm = o.mangle(self.scm, e)
self.assertEqual(scm, {
'scm' : "git",
'url' : "git@git.com:foo/bar.git",
})
def testAdd(self):
"""Test to add a new key"""
o = ScmOverride({ 'set' : { 'commit' : '1234' } })
match, scm = o.mangle(self.scm, Env())
self.assertEqual(scm, {
'scm' : "git",
'url' : "git@git.com:foo/bar.git",
'branch' : "develop",
'commit' : "1234"
}
)
o = ScmOverride({ 'set' : { 'commit' : "${COMMIT}" } })
e = Env({"COMMIT" : "4321"})
match, scm = o.mangle(self.scm, e)
self.assertEqual(scm, {
'scm' : "git",
'url' : "git@git.com:foo/bar.git",
'branch' : "develop",
'commit' : "4321"
})
def testOverwrite(self):
"""Test to overwrite existing key"""
o = ScmOverride({ 'set' : { 'branch' : "master" } })
match, scm = o.mangle(self.scm, Env())
self.assertEqual(scm, {
'scm' : "git",
'url' : "git@git.com:foo/bar.git",
'branch' : "master"
})
# test substitution
o = ScmOverride({'set' : { 'branch' : "${BRANCH}" } })
e = Env({"BRANCH" : "master"})
match, scm = o.mangle(self.scm, e)
self.assertEqual(scm, {
'scm' : "git",
'url' : "git@git.com:foo/bar.git",
'branch' : "master"
})
def testReplace(self):
"""Test replacement via regex"""
o = ScmOverride({
'replace' : {
'url' : {
'pattern' : "@.*:",
'replacement' : "@acme.test:"
}
}
})
match, scm = o.mangle(self.scm, Env())
self.assertEqual(scm, {
'scm' : "git",
'url' : "git@acme.test:foo/bar.git",
'branch' : "develop"
})
def testReplaceInvalid(self):
"""Test that invalid regexes are handled gracefully"""
with self.assertRaises(ParseError):
o = ScmOverride({
'replace' : {
'url' : {
'pattern' : "*",
'replacement' : "foo"
}
}
})
o.mangle(self.scm, Env())
def testMatch(self):
"""Test matching (multiple) keys"""
# match single key
o = ScmOverride({
'match' : { 'branch' : "develop" },
'set' : { 'branch' : "master" }
})
match, scm = o.mangle(self.scm, Env())
self.assertEqual(scm, {
'scm' : "git",
'url' : "git@git.com:foo/bar.git",
'branch' : "master"
})
# mismatch single key
o = ScmOverride({
'match' : { 'branch' : "upstream" },
'set' : { 'branch' : "master" }
})
match, scm = o.mangle(self.scm, Env())
self.assertEqual(scm, {
'scm' : "git",
'url' : "git@git.com:foo/bar.git",
'branch' : "develop"
})
# match multiple keys
o = ScmOverride({
'match' : {
'branch' : "develop",
'url' : "git@git.com:foo/bar.git",
},
'set' : { 'branch' : "master" }
})
match, scm = o.mangle(self.scm, Env())
self.assertEqual(scm, {
'scm' : "git",
'url' : "git@git.com:foo/bar.git",
'branch' : "master"
})
# mismatch one out of two keys
o = ScmOverride({
'match' : {
'branch' : "develop",
'url' : "asdfadgag",
},
'set' : { 'branch' : "master" }
})
match, scm = o.mangle(self.scm, Env())
self.assertEqual(scm, {
'scm' : "git",
'url' : "git@git.com:foo/bar.git",
'branch' : "develop"
})
# test substitution
o = ScmOverride({'match' : {
'url' : "git@${SERVER}:foo/${NAME}.git",
},
'set' : { 'branch' : "master" }
})
e = Env({"SERVER" : "git.com", "NAME" : "bar"})
match, scm = o.mangle(self.scm, e)
self.assertEqual(match, True)
self.assertEqual(scm, {
'scm' : "git",
'url' : "git@git.com:foo/bar.git",
'branch' : "master"
})
def testMatchGlob(self):
"""Test that matching uses globbing"""
o = ScmOverride({
'match' : { 'url' : "*git.com*" },
'set' : { 'url' : "mirror" }
})
match, scm = o.mangle(self.scm, Env())
self.assertEqual(scm, {
'scm' : "git",
'url' : "mirror",
'branch' : "develop"
})
def testDump(self):
"""Test that a scmOverride correctly converts back to yaml"""
spec = {
'match' : { 'url' : "*git.com*" },
'set' : { 'url' : "mirror", "branch" : "feature" },
'del' : [ 'tag', 'commit' ],
'replace' : {
"url" : {
"pattern" : "pattern",
"replacement" : "replacement",
}
}
}
o = ScmOverride(spec)
self.assertEqual(spec, yaml.load(str(o)))
|
stefanreuther/bob
|
test/test_input_scmoverride.py
|
Python
|
gpl-3.0
| 6,234
|
# -*- coding: utf-8 -*-
"""
Batch move files
https://developer.qiniu.com/kodo/api/1250/batch
"""
from qiniu import build_batch_move, Auth, BucketManager
access_key = ''
secret_key = ''
q = Auth(access_key, secret_key)
bucket = BucketManager(q)
src_bucket_name = ''
target_bucket_name = ''
# When force is 'true', same-named target files are forcibly overwritten; the dict keys are source files and the values are target files
ops = build_batch_move(src_bucket_name, {'src_key1': 'target_key1', 'src_key2': 'target_key2'}, target_bucket_name, force='true')
ret, info = bucket.batch(ops)
print(info)
|
jemygraw/python-sdk
|
examples/batch_move.py
|
Python
|
mit
| 557
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
for part in orm.PaymentPart.objects.all():
print('----------------\nfound part %s - $%s' % (part.id, part.price))
solution = orm.Solution.objects.get(issue__id=part.payment.offer.issue.id, programmer__id=part.programmer.id)
print('found corresponding solution %s - %s' % (solution.id, solution.programmer.username))
part.solution = solution
part.save()
print('saved PaymentPart')
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.issue': {
'Meta': {'object_name': 'Issue'},
'createdByUser': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_feedback': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_public_suggestion': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Project']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'trackerURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'updatedDate': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'core.issuecomment': {
'Meta': {'object_name': 'IssueComment'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'content': ('django.db.models.fields.TextField', [], {}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Issue']"})
},
'core.issuecommenthistevent': {
'Meta': {'object_name': 'IssueCommentHistEvent'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.IssueComment']"}),
'content': ('django.db.models.fields.TextField', [], {}),
'event': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'eventDate': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'core.issuewatch': {
'Meta': {'object_name': 'IssueWatch'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Issue']"}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'core.offer': {
'Meta': {'object_name': 'Offer'},
'acceptanceCriteria': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'expirationDate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Issue']"}),
'lastChangeDate': ('django.db.models.fields.DateTimeField', [], {}),
'no_forking': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'}),
'require_release': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'sponsor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'core.offercomment': {
'Meta': {'object_name': 'OfferComment'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'content': ('django.db.models.fields.TextField', [], {}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'offer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Offer']"})
},
'core.offercommenthistevent': {
'Meta': {'object_name': 'OfferCommentHistEvent'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.OfferComment']"}),
'content': ('django.db.models.fields.TextField', [], {}),
'event': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'eventDate': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'core.offerhistevent': {
'Meta': {'object_name': 'OfferHistEvent'},
'acceptanceCriteria': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'eventDate': ('django.db.models.fields.DateTimeField', [], {}),
'expirationDate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'no_forking': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'offer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Offer']"}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'}),
'require_release': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'core.offerwatch': {
'Meta': {'object_name': 'OfferWatch'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'offer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Offer']"}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'core.payment': {
'Meta': {'object_name': 'Payment'},
'confirm_key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'currency': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'fee': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lastChangeDate': ('django.db.models.fields.DateTimeField', [], {}),
'offer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Offer']"}),
'paykey': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'total': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'})
},
'core.paymenthistevent': {
'Meta': {'object_name': 'PaymentHistEvent'},
'event': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'eventDate': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'payment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Payment']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'core.paymentpart': {
'Meta': {'object_name': 'PaymentPart'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'payment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Payment']"}),
'paypalEmail': ('django.db.models.fields.EmailField', [], {'max_length': '256', 'null': 'True'}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'}),
'programmer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'realprice': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'}),
'solution': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Solution']", 'null': 'True'})
},
'core.project': {
'Meta': {'object_name': 'Project'},
'createdByUser': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'homeURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'trackerURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'core.solution': {
'Meta': {'object_name': 'Solution'},
'accepting_payments': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Issue']"}),
'lastChangeDate': ('django.db.models.fields.DateTimeField', [], {}),
'programmer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'core.solutionhistevent': {
'Meta': {'object_name': 'SolutionHistEvent'},
'event': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'eventDate': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'solution': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Solution']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'core.userinfo': {
'Meta': {'object_name': 'UserInfo'},
'about': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'brazilianPaypal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hide_from_userlist': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_paypal_email_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_primary_email_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'paypalEmail': ('django.db.models.fields.EmailField', [], {'max_length': '256'}),
'realName': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'receiveAllEmail': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'screenName': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['core']
symmetrical = True
|
bankonme/www.freedomsponsors.org
|
djangoproject/core/migrations/0021_set_paymentpart_solution.py
|
Python
|
agpl-3.0
| 16,118
|
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import sys, os, shutil, plistlib, subprocess, glob, zipfile, tempfile, \
py_compile, stat, operator
abspath, join, basename = os.path.abspath, os.path.join, os.path.basename
from setup import __version__ as VERSION, __appname__ as APPNAME, basenames, \
modules as main_modules, Command, SRC, functions as main_functions
LICENSE = open('LICENSE', 'rb').read()
MAGICK_HOME='@executable_path/../Frameworks/ImageMagick'
ENV = dict(
FC_CONFIG_DIR='@executable_path/../Resources/fonts',
FC_CONFIG_FILE='@executable_path/../Resources/fonts/fonts.conf',
MAGICK_CONFIGURE_PATH=MAGICK_HOME+'/config',
MAGICK_CODER_MODULE_PATH=MAGICK_HOME+'/modules-Q16/coders',
MAGICK_CODER_FILTER_PATH=MAGICK_HOME+'/modules-Q16/filter',
QT_PLUGIN_PATH='@executable_path/../MacOS',
PYTHONIOENCODING='UTF-8',
)
SW = os.environ.get('SW', '/sw')
info = warn = None
class OSX32_Freeze(Command):
description = 'Freeze OSX calibre installation'
def add_options(self, parser):
parser.add_option('--test-launchers', default=False,
action='store_true',
help='Only build launchers')
def run(self, opts):
global info, warn
info, warn = self.info, self.warn
main(opts.test_launchers)
def compile_launcher_lib(contents_dir, gcc, base):
info('\tCompiling calibre_launcher.dylib')
fd = join(contents_dir, 'Frameworks')
dest = join(fd, 'calibre-launcher.dylib')
src = join(base, 'util.c')
cmd = [gcc] + '-Wall -arch i386 -arch x86_64 -dynamiclib -std=gnu99'.split() + [src] + \
['-I'+base] + \
['-I/sw/python/Python.framework/Versions/Current/Headers'] + \
'-current_version 1.0 -compatibility_version 1.0'.split() + \
'-fvisibility=hidden -o'.split() + [dest] + \
['-install_name',
'@executable_path/../Frameworks/'+os.path.basename(dest)] + \
['-F/sw/python', '-framework', 'Python', '-framework', 'CoreFoundation', '-headerpad_max_install_names']
info('\t'+' '.join(cmd))
sys.stdout.flush()
subprocess.check_call(cmd)
return dest
def compile_launchers(contents_dir, xprograms, pyver):
gcc = os.environ.get('CC', 'gcc')
base = os.path.dirname(__file__)
lib = compile_launcher_lib(contents_dir, gcc, base)
src = open(join(base, 'launcher.c'), 'rb').read()
env, env_vals = [], []
for key, val in ENV.items():
env.append('"%s"'% key)
env_vals.append('"%s"'% val)
env = ', '.join(env)+', '
env_vals = ', '.join(env_vals)+', '
src = src.replace('/*ENV_VARS*/', env)
src = src.replace('/*ENV_VAR_VALS*/', env_vals)
programs = [lib]
for program, x in xprograms.items():
module, func = x
info('\tCompiling', program)
out = join(contents_dir, 'MacOS', program)
programs.append(out)
psrc = src.replace('**PROGRAM**', program)
psrc = psrc.replace('**MODULE**', module)
psrc = psrc.replace('**FUNCTION**', func)
psrc = psrc.replace('**PYVER**', pyver)
fsrc = '/tmp/%s.c'%program
with open(fsrc, 'wb') as f:
f.write(psrc)
cmd = [gcc, '-Wall', '-arch', 'x86_64', '-arch', 'i386',
'-I'+base, fsrc, lib, '-o', out,
'-headerpad_max_install_names']
info('\t'+' '.join(cmd))
sys.stdout.flush()
subprocess.check_call(cmd)
return programs
def flipwritable(fn, mode=None):
"""
Flip the writability of a file and return the old mode. Returns None
if the file is already writable.
"""
if os.access(fn, os.W_OK):
return None
old_mode = os.stat(fn).st_mode
os.chmod(fn, stat.S_IWRITE | old_mode)
return old_mode
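# Typical usage pattern elsewhere in this file (e.g. in Py2App.set_id): save the
# old mode, modify the file, then restore it:
#   old_mode = flipwritable(fn)
#   ...modify fn...
#   if old_mode is not None:
#       flipwritable(fn, old_mode)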
STRIPCMD = ['/usr/bin/strip', '-x', '-S', '-']
def strip_files(files, argv_max=(256 * 1024)):
"""
Strip a list of files
"""
tostrip = [(fn, flipwritable(fn)) for fn in files if os.path.exists(fn)]
while tostrip:
cmd = list(STRIPCMD)
flips = []
pathlen = reduce(operator.add, [len(s) + 1 for s in cmd])
while pathlen < argv_max:
if not tostrip:
break
added, flip = tostrip.pop()
pathlen += len(added) + 1
cmd.append(added)
flips.append((added, flip))
else:
cmd.pop()
tostrip.append(flips.pop())
os.spawnv(os.P_WAIT, cmd[0], cmd)
for args in flips:
flipwritable(*args)
def flush(func):
def ff(*args, **kwargs):
sys.stdout.flush()
sys.stderr.flush()
ret = func(*args, **kwargs)
sys.stdout.flush()
sys.stderr.flush()
return ret
return ff
class Py2App(object):
FID = '@executable_path/../Frameworks'
def __init__(self, build_dir, test_launchers=False):
self.build_dir = build_dir
self.contents_dir = join(self.build_dir, 'Contents')
self.resources_dir = join(self.contents_dir, 'Resources')
self.frameworks_dir = join(self.contents_dir, 'Frameworks')
self.version_info = '.'.join(map(str, sys.version_info[:2]))
self.site_packages = join(self.resources_dir, 'Python', 'site-packages')
self.to_strip = []
self.warnings = []
self.run(test_launchers)
def warn(self, *args):
warn(*args)
def run(self, test_launchers):
ret = 0
if not test_launchers:
if os.path.exists(self.build_dir):
shutil.rmtree(self.build_dir)
os.makedirs(self.build_dir)
self.create_skeleton()
self.create_plist()
self.add_python_framework()
self.add_site_packages()
self.add_stdlib()
self.add_qt_frameworks()
self.add_calibre_plugins()
self.add_podofo()
self.add_poppler()
self.add_libjpeg()
self.add_libpng()
self.add_fontconfig()
self.add_imagemagick()
self.add_misc_libraries()
self.add_resources()
self.compile_py_modules()
self.create_console_app()
self.copy_site()
self.create_exe()
if not test_launchers:
self.strip_files()
ret = self.makedmg(self.build_dir, APPNAME+'-'+VERSION)
return ret
@flush
def add_resources(self):
shutil.copytree('resources', os.path.join(self.resources_dir,
'resources'))
@flush
def strip_files(self):
info('\nStripping files...')
strip_files(self.to_strip)
@flush
def create_exe(self):
info('\nCreating launchers')
programs = {}
progs = []
for x in ('console', 'gui'):
progs += list(zip(basenames[x], main_modules[x], main_functions[x]))
for program, module, func in progs:
programs[program] = (module, func)
programs = compile_launchers(self.contents_dir, programs,
self.version_info)
for out in programs:
self.fix_dependencies_in_lib(out)
@flush
def set_id(self, path_to_lib, new_id):
old_mode = flipwritable(path_to_lib)
subprocess.check_call(['install_name_tool', '-id', new_id, path_to_lib])
if old_mode is not None:
flipwritable(path_to_lib, old_mode)
@flush
def get_dependencies(self, path_to_lib):
raw = subprocess.Popen(['otool', '-L', path_to_lib],
stdout=subprocess.PIPE).stdout.read()
for line in raw.splitlines():
if 'compatibility' not in line or line.strip().endswith(':'):
continue
idx = line.find('(')
path = line[:idx].strip()
yield path
@flush
def get_local_dependencies(self, path_to_lib):
for x in self.get_dependencies(path_to_lib):
for y in (SW+'/lib/', '/usr/local/lib/', SW+'/qt/lib/',
'/opt/local/lib/',
SW+'/python/Python.framework/', SW+'/freetype/lib/'):
if x.startswith(y):
if y == SW+'/python/Python.framework/':
y = SW+'/python/'
yield x, x[len(y):]
break
@flush
def change_dep(self, old_dep, new_dep, path_to_lib):
info('\tResolving dependency %s to'%old_dep, new_dep)
subprocess.check_call(['install_name_tool', '-change', old_dep, new_dep,
path_to_lib])
@flush
def fix_dependencies_in_lib(self, path_to_lib):
info('\nFixing dependencies in', path_to_lib)
self.to_strip.append(path_to_lib)
old_mode = flipwritable(path_to_lib)
for dep, bname in self.get_local_dependencies(path_to_lib):
ndep = self.FID+'/'+bname
self.change_dep(dep, ndep, path_to_lib)
if list(self.get_local_dependencies(path_to_lib)):
raise Exception('Failed to resolve deps in: '+path_to_lib)
if old_mode is not None:
flipwritable(path_to_lib, old_mode)
@flush
def add_python_framework(self):
info('\nAdding Python framework')
src = join('/sw/python', 'Python.framework')
x = join(self.frameworks_dir, 'Python.framework')
curr = os.path.realpath(join(src, 'Versions', 'Current'))
currd = join(x, 'Versions', basename(curr))
rd = join(currd, 'Resources')
os.makedirs(rd)
shutil.copy2(join(curr, 'Resources', 'Info.plist'), rd)
shutil.copy2(join(curr, 'Python'), currd)
self.set_id(join(currd, 'Python'),
self.FID+'/Python.framework/Versions/%s/Python'%basename(curr))
@flush
def add_qt_frameworks(self):
info('\nAdding Qt Framework')
for f in ('QtCore', 'QtGui', 'QtXml', 'QtNetwork', 'QtSvg', 'QtWebKit',
'QtXmlPatterns'):
self.add_qt_framework(f)
for d in glob.glob(join(SW, 'qt', 'plugins', '*')):
shutil.copytree(d, join(self.contents_dir, 'MacOS', basename(d)))
for l in glob.glob(join(self.contents_dir, 'MacOS', '*/*.dylib')):
self.fix_dependencies_in_lib(l)
x = os.path.relpath(l, join(self.contents_dir, 'MacOS'))
self.set_id(l, '@executable_path/'+x)
@flush
def add_qt_framework(self, f):
libname = f
f = f+'.framework'
src = join(SW, 'qt', 'lib', f)
ignore = shutil.ignore_patterns('Headers', '*.h', 'Headers/*')
dest = join(self.frameworks_dir, f)
shutil.copytree(src, dest, symlinks=True,
ignore=ignore)
lib = os.path.realpath(join(dest, libname))
rpath = os.path.relpath(lib, self.frameworks_dir)
self.set_id(lib, self.FID+'/'+rpath)
self.fix_dependencies_in_lib(lib)
@flush
def create_skeleton(self):
c = join(self.build_dir, 'Contents')
for x in ('Frameworks', 'MacOS', 'Resources'):
os.makedirs(join(c, x))
for x in ('library.icns', 'book.icns'):
shutil.copyfile(join('icons', x), join(self.resources_dir, x))
@flush
def add_calibre_plugins(self):
dest = join(self.frameworks_dir, 'plugins')
os.mkdir(dest)
for f in glob.glob('src/calibre/plugins/*.so'):
shutil.copy2(f, dest)
self.fix_dependencies_in_lib(join(dest, basename(f)))
@flush
def create_plist(self):
from calibre.ebooks import BOOK_EXTENSIONS
env = dict(**ENV)
env['CALIBRE_LAUNCHED_FROM_BUNDLE']='1';
docs = [{'CFBundleTypeName':'E-book',
'CFBundleTypeExtensions':list(BOOK_EXTENSIONS),
'CFBundleTypeRole':'Viewer',
}]
pl = dict(
CFBundleDevelopmentRegion='English',
CFBundleDisplayName=APPNAME,
CFBundleName=APPNAME,
CFBundleIdentifier='net.kovidgoyal.calibre',
CFBundleVersion=VERSION,
CFBundleShortVersionString=VERSION,
CFBundlePackageType='APPL',
CFBundleSignature='????',
CFBundleExecutable='calibre',
CFBundleDocumentTypes=docs,
LSMinimumSystemVersion='10.5.2',
LSRequiresNativeExecution=True,
NSAppleScriptEnabled=False,
NSHumanReadableCopyright='Copyright 2010, Kovid Goyal',
CFBundleGetInfoString=('calibre, an E-book management '
'application. Visit http://calibre-ebook.com for details.'),
CFBundleIconFile='library.icns',
LSMultipleInstancesProhibited=True,
NSHighResolutionCapable=True,
LSEnvironment=env
)
plistlib.writePlist(pl, join(self.contents_dir, 'Info.plist'))
@flush
def install_dylib(self, path, set_id=True):
shutil.copy2(path, self.frameworks_dir)
if set_id:
self.set_id(join(self.frameworks_dir, basename(path)),
self.FID+'/'+basename(path))
self.fix_dependencies_in_lib(join(self.frameworks_dir, basename(path)))
@flush
def add_podofo(self):
info('\nAdding PoDoFo')
pdf = join(SW, 'lib', 'libpodofo.0.9.1.dylib')
self.install_dylib(pdf)
@flush
def add_poppler(self):
info('\nAdding poppler')
for x in ('libpoppler.27.dylib',):
self.install_dylib(os.path.join(SW, 'lib', x))
for x in ('pdftohtml', 'pdftoppm', 'pdfinfo'):
self.install_dylib(os.path.join(SW, 'bin', x), False)
@flush
def add_libjpeg(self):
info('\nAdding libjpeg')
self.install_dylib(os.path.join(SW, 'lib', 'libjpeg.8.dylib'))
@flush
def add_libpng(self):
info('\nAdding libpng')
self.install_dylib(os.path.join(SW, 'lib', 'libpng12.0.dylib'))
self.install_dylib(os.path.join(SW, 'lib', 'libpng.3.dylib'))
@flush
def add_fontconfig(self):
info('\nAdding fontconfig')
for x in ('fontconfig.1', 'freetype.6', 'expat.1'):
src = os.path.join(SW, 'lib', 'lib'+x+'.dylib')
self.install_dylib(src)
dst = os.path.join(self.resources_dir, 'fonts')
if os.path.exists(dst):
shutil.rmtree(dst)
src = os.path.join(SW, 'etc', 'fonts')
shutil.copytree(src, dst, symlinks=False)
fc = os.path.join(dst, 'fonts.conf')
raw = open(fc, 'rb').read()
raw = raw.replace('<dir>/usr/share/fonts</dir>', '''\
<dir>/Library/Fonts</dir>
<dir>/Network/Library/Fonts</dir>
<dir>/System/Library/Fonts</dir>
<dir>/usr/X11R6/lib/X11/fonts</dir>
<dir>/usr/share/fonts</dir>
<dir>/var/root/Library/Fonts</dir>
<dir>/usr/share/fonts</dir>
''')
open(fc, 'wb').write(raw)
@flush
def add_imagemagick(self):
info('\nAdding ImageMagick')
for x in ('Wand', 'Core'):
self.install_dylib(os.path.join(SW, 'lib', 'libMagick%s.5.dylib'%x))
idir = glob.glob(os.path.join(SW, 'lib', 'ImageMagick-*'))[-1]
dest = os.path.join(self.frameworks_dir, 'ImageMagick')
if os.path.exists(dest):
shutil.rmtree(dest)
shutil.copytree(idir, dest, True)
for x in os.walk(dest):
for f in x[-1]:
if f.endswith('.so'):
f = join(x[0], f)
self.fix_dependencies_in_lib(f)
@flush
def add_misc_libraries(self):
for x in ('usb-1.0.0', 'mtp.9', 'unrar', 'readline.6.1',
'wmflite-0.2.7', 'chm.0', 'sqlite3.0'):
info('\nAdding', x)
x = 'lib%s.dylib'%x
shutil.copy2(join(SW, 'lib', x), self.frameworks_dir)
dest = join(self.frameworks_dir, x)
self.set_id(dest, self.FID+'/'+x)
if 'mtp' in x:
self.fix_dependencies_in_lib(dest)
@flush
def add_site_packages(self):
info('\nAdding site-packages')
os.makedirs(self.site_packages)
paths = reversed(map(abspath, [x for x in sys.path if x.startswith('/')]))
upaths = []
for x in paths:
if x not in upaths and (x.endswith('.egg') or
x.endswith('/site-packages')):
upaths.append(x)
upaths.append(os.path.expanduser('~/build/calibre/src'))
for x in upaths:
info('\t', x)
tdir = None
try:
if not os.path.isdir(x):
try:
zf = zipfile.ZipFile(x)
except:
self.warn(x, 'is neither a directory nor a zipfile')
continue
tdir = tempfile.mkdtemp()
zf.extractall(tdir)
x = tdir
self.add_modules_from_dir(x)
self.add_packages_from_dir(x)
finally:
if tdir is not None:
shutil.rmtree(tdir)
shutil.rmtree(os.path.join(self.site_packages, 'calibre', 'plugins'))
self.remove_bytecode(join(self.resources_dir, 'Python', 'site-packages'))
@flush
def add_modules_from_dir(self, src):
for x in glob.glob(join(src, '*.py'))+glob.glob(join(src, '*.so')):
shutil.copy2(x, self.site_packages)
if x.endswith('.so'):
self.fix_dependencies_in_lib(x)
@flush
def add_packages_from_dir(self, src):
for x in os.listdir(src):
x = join(src, x)
if os.path.isdir(x) and os.path.exists(join(x, '__init__.py')):
if self.filter_package(basename(x)):
continue
self.add_package_dir(x)
@flush
def add_package_dir(self, x, dest=None):
def ignore(root, files):
ans = []
for y in files:
ext = os.path.splitext(y)[1]
if ext not in ('', '.py', '.so') or \
(not ext and not os.path.isdir(join(root, y))):
ans.append(y)
return ans
if dest is None:
dest = self.site_packages
dest = join(dest, basename(x))
shutil.copytree(x, dest, symlinks=True, ignore=ignore)
self.postprocess_package(x, dest)
for x in os.walk(dest):
for f in x[-1]:
if f.endswith('.so'):
f = join(x[0], f)
self.fix_dependencies_in_lib(f)
@flush
def filter_package(self, name):
return name in ('Cython', 'modulegraph', 'macholib', 'py2app',
'bdist_mpkg', 'altgraph')
@flush
def postprocess_package(self, src_path, dest_path):
pass
@flush
def add_stdlib(self):
info('\nAdding python stdlib')
src = '/sw/python/Python.framework/Versions/Current/lib/python'
src += self.version_info
dest = join(self.resources_dir, 'Python', 'lib', 'python')
dest += self.version_info
os.makedirs(dest)
for x in os.listdir(src):
if x in ('site-packages', 'config', 'test', 'lib2to3', 'lib-tk',
'lib-old', 'idlelib', 'plat-mac', 'plat-darwin', 'site.py'):
continue
x = join(src, x)
if os.path.isdir(x):
self.add_package_dir(x, dest)
elif os.path.splitext(x)[1] in ('.so', '.py'):
shutil.copy2(x, dest)
dest2 = join(dest, basename(x))
if dest2.endswith('.so'):
self.fix_dependencies_in_lib(dest2)
self.remove_bytecode(join(self.resources_dir, 'Python', 'lib'))
confdir = join(self.resources_dir, 'Python',
'lib/python%s/config'%self.version_info)
os.makedirs(confdir)
shutil.copy2(join(src, 'config/Makefile'), confdir)
incdir = join(self.resources_dir, 'Python',
'include/python'+self.version_info)
os.makedirs(incdir)
shutil.copy2(join(src.replace('/lib/', '/include/'), 'pyconfig.h'),
incdir)
@flush
def remove_bytecode(self, dest):
for x in os.walk(dest):
root = x[0]
for f in x[-1]:
if os.path.splitext(f)[1] in ('.pyc', '.pyo'):
os.remove(join(root, f))
@flush
def compile_py_modules(self):
info( '\nCompiling Python modules')
base = join(self.resources_dir, 'Python')
for x in os.walk(base):
root = x[0]
for f in x[-1]:
if f.endswith('.py'):
y = join(root, f)
rel = os.path.relpath(y, base)
try:
py_compile.compile(y, dfile=rel, doraise=True)
os.remove(y)
except:
self.warn('WARNING: Failed to byte-compile', y)
@flush
def create_console_app(self):
info( '\nCreating console.app')
cc_dir = os.path.join(self.contents_dir, 'console.app', 'Contents')
os.makedirs(cc_dir)
for x in os.listdir(self.contents_dir):
if x == 'console.app':
continue
if x == 'Info.plist':
plist = plistlib.readPlist(join(self.contents_dir, x))
plist['LSUIElement'] = '1'
plist.pop('CFBundleDocumentTypes')
plistlib.writePlist(plist, join(cc_dir, x))
else:
os.symlink(join('../..', x),
join(cc_dir, x))
@flush
def copy_site(self):
base = os.path.dirname(__file__)
shutil.copy2(join(base, 'site.py'), join(self.resources_dir, 'Python',
'lib', 'python'+self.version_info))
@flush
def makedmg(self, d, volname,
destdir='dist',
internet_enable=True,
format='UDBZ'):
''' Copy a directory d into a dmg named volname '''
info('\nCreating dmg')
sys.stdout.flush()
if not os.path.exists(destdir):
os.makedirs(destdir)
dmg = os.path.join(destdir, volname+'.dmg')
if os.path.exists(dmg):
os.unlink(dmg)
tdir = tempfile.mkdtemp()
appdir = os.path.join(tdir, os.path.basename(d))
shutil.copytree(d, appdir, symlinks=True)
subprocess.check_call(['/Users/kovid/sign.sh', appdir])
os.symlink('/Applications', os.path.join(tdir, 'Applications'))
subprocess.check_call(['/usr/bin/hdiutil', 'create', '-srcfolder', tdir,
'-volname', volname, '-format', format, dmg])
shutil.rmtree(tdir)
if internet_enable:
subprocess.check_call(['/usr/bin/hdiutil', 'internet-enable', '-yes', dmg])
size = os.stat(dmg).st_size/(1024*1024.)
info('\nInstaller size: %.2fMB\n'%size)
return dmg
def test_exe():
build_dir = abspath(join('build', APPNAME+'.app'))
py2app = Py2App(build_dir)
py2app.create_exe()
return 0
def main(test=False):
if 'test_exe' in sys.argv:
return test_exe()
build_dir = abspath(join(os.path.dirname(SRC), 'build', APPNAME+'.app'))
Py2App(build_dir, test_launchers=test)
return 0
if __name__ == '__main__':
sys.exit(main())
|
yeyanchao/calibre
|
setup/installer/osx/app/main.py
|
Python
|
gpl-3.0
| 23,748
|
# This script animates an averaged variable in time, notably in the Tetlow case for the void fraction
#
# A folder ./voidFraction/averaged must exist!
# Author : Bruno Blais
# Last modified : 24-02-2014
#Python imports
#----------------
import os
import sys
import numpy
import math
import matplotlib.pyplot as plt
import matplotlib.animation as animation
#----------------
#********************************
# OPTIONS AND USER PARAMETERS
#********************************
#Initial time of simulation, final time and time increment must be specified by user
t0=2.0
tf=100.
dT=0.5
nt=int((tf-t0)/dT)
t=t0
#Number of r and z cells has to be specified
nz=1
nr = 10
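# With these settings the script reads one file per averaged time step, named
# ./voidFraction/averaged/radialVoidFraction_<t> for t = t0, t0+dT, ..., tf
# (e.g. radialVoidFraction_2.0), as built in fname below.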
#Load first file to acquire the axis
print "Acquiring time : ", t0
fname='./voidFraction/averaged/radialVoidFraction_' + str(t0)
r,phi = numpy.loadtxt(fname, unpack=True) # Load data from text file
fig = plt.figure("Void Fraction in time")
ax = fig.add_subplot(111, autoscale_on=False, xlim=(min(r)-0.001,max(r)+0.001), ylim=(0.1, 0.8))
ax.grid()
ax.set_ylabel("Fraction of solid")
ax.set_xlabel("Radius")
line, = ax.plot([], [], 'o-', lw=2)
time_template = 'time = %.1fs'
time_text = ax.text(0.05, 0.9, '', transform=ax.transAxes)
#plt.ylabel('Fraction of solid')
#plt.xlabel('Radius (r)')
#plt.legend(loc=9)
def init():
line.set_data([], [])
time_text.set_text('')
return line, time_text
def onClick(event):
global pause
pause ^= True
def animate(t):
print "Plotting time : ", t
fname='./voidFraction/averaged/radialVoidFraction_' + str(t)
r,phi = numpy.loadtxt(fname, unpack=True) # Load data from text file
line.set_data(r,1-phi) # Update the data
time_text.set_text(time_template%(t))
return line,time_text
ani = animation.FuncAnimation(fig, animate,numpy.arange(2,100,0.5), blit=True, init_func=init)
plt.show()
|
blaisb/cfdemUtilities
|
couette/movieAllTetlow.py
|
Python
|
lgpl-3.0
| 1,840
|
# -*- coding: utf-8 -*-
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-2013 Elanz (<http://www.openelanz.fr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__author__ = 'vchemiere'
from openerp.osv import osv, fields
class sale_order(osv.osv):
_inherit = 'sale.order'
def create(self, cr, uid, vals, context=None):
usr_obj = self.pool.get('res.users')
group_obj = self.pool.get('res.groups')
ir_model_data = self.pool.get('ir.model.data')
adv_group_id = ir_model_data.get_object_reference(cr, uid, 'sale', 'adv')[1]
adv_users = group_obj.browse(cr, uid, adv_group_id).users
if not vals.get('message_follower_ids', False):
vals['message_follower_ids'] = []
if adv_users:
for adv_user_id in adv_users:
adv_id = usr_obj.browse(cr, uid, adv_user_id.id).partner_id.id
vals['message_follower_ids'] += [(4, adv_id)]
mrp_group_id = ir_model_data.get_object_reference(cr, uid, 'mrp', 'team')[1]
mrp_users = group_obj.browse(cr, uid, mrp_group_id).users
if mrp_users:
for mrp_user_id in mrp_users:
mrp_id = usr_obj.browse(cr, uid, mrp_user_id.id).partner_id.id
vals['message_follower_ids'] += [(4, mrp_id)]
new_id = super(sale_order, self).create(cr, uid, vals, context)
follower_ids = self.pool.get('mail.followers').search(cr, uid, [('res_id', '=', new_id)])
for follower_id in follower_ids:
follower = self.pool.get('mail.followers').browse(cr, uid, follower_id)
return new_id
def action_button_confirm(self, cr, uid, ids, context=None):
if context is None:
context = {}
res = super(sale_order, self).action_button_confirm(cr, uid, ids, context)
self.pool.get('mail.proxy').send_mail(cr, uid, ids, 'sale.order', 'Sales Order - Send by Email', context)
return res
|
noemis-fr/old-custom
|
e3z_mail_ipbox/sale.py
|
Python
|
agpl-3.0
| 2,613
|
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, jmcnamara@cpan.org
#
import unittest
from io import StringIO
from ..helperfunctions import _xml_to_list
from ...table import Table
from ...worksheet import Worksheet
from ...workbook import WorksheetMeta
from ...sharedstrings import SharedStringTable
class TestAssembleTable(unittest.TestCase):
"""
Test assembling a complete Table file.
"""
def test_assemble_xml_file(self):
"""Test writing a table"""
self.maxDiff = None
worksheet = Worksheet()
worksheet.worksheet_meta = WorksheetMeta()
worksheet.str_table = SharedStringTable()
# Set the table properties.
worksheet.add_table('C3:F13', {'columns': [{'header': 'Foo'},
{'header': ''},
{},
{'header': 'Baz'}
]})
worksheet._prepare_tables(1, {})
fh = StringIO()
table = Table()
table._set_filehandle(fh)
table._set_properties(worksheet.tables[0])
table._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<table xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" id="1" name="Table1" displayName="Table1" ref="C3:F13" totalsRowShown="0">
<autoFilter ref="C3:F13"/>
<tableColumns count="4">
<tableColumn id="1" name="Foo"/>
<tableColumn id="2" name="Column2"/>
<tableColumn id="3" name="Column3"/>
<tableColumn id="4" name="Baz"/>
</tableColumns>
<tableStyleInfo name="TableStyleMedium9" showFirstColumn="0" showLastColumn="0" showRowStripes="1" showColumnStripes="0"/>
</table>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
|
jmcnamara/XlsxWriter
|
xlsxwriter/test/table/test_table06.py
|
Python
|
bsd-2-clause
| 2,225
|
#!/usr/bin/python
'''
annotate a fasta sequence for KMAD with structural information
requirements:
- SwissProt database:
- fasta and text files (split into separate entries)
- formatted for blast
- DSSP
- blastp
input:
- a regular fasta file
or
- a fasta.7c file (converted with IDP converter for KMAD) - then structural
annotations will be added to the IDP annotations
fasta headers:
1. if you want to use information from specific PDB files then you need to
include the PDB id in the header (where normally the uniprot ID is):
>xxx|PDB_ID|xxx xxx xxx
the important part is the placement of the PDB id between two pipe ('|')
characters - the length and content of the xxx do not matter
(unless there are no more pipes before the PDB id)
2. If you don't know (or don't care) which PDB files you want to use,
but you do know the Uniprot accession numbers of sequences then
place them like the PDB_ID
above (so just like in any fasta file from Uniprot)
- this will speed up the process of annotation because the script doesn't
have to run blast then. Remember to use the accession number, and not
the entry name (e.g. for crambin use P01542 and not CRAM_CRAAB)
3. If you didn't specify any of the aforementioned ids, then the program
will attempt to run blast against swissprot. If the best hit has
identity
higher than 90%, it aligns the query sequence with it, and 'copies'
the 2ndary structure annotation for the identical positions
'''
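# Illustrative headers for the three conventions described above (ids other
# than P01542/CRAM_CRAAB are placeholders, not taken from the original text):
#   >sp|1ABC|anything here        -> PDB id 1ABC is used directly
#   >sp|P01542|CRAM_CRAAB         -> Uniprot accession P01542, blast is skipped
#   >my_unlabelled_sequence       -> no id, blastp against SwissProt is run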
import argparse
import hjson
import logging
import modeller as m
import os
import re
import sys
import subprocess
import tempfile
from jsonlibconfig import encoder
SWISS_DAT = "/home/joanna/data/swissprot_dat/uniprot_dat/"
SWISS_FASTA = "/home/joanna/data/swiss_fasta/uniprot_fasta/"
SWISS_BLAST = "/home/joanna/data/uniprot_sprot"
PDB_BLAST = "/home/joanna/data/pdb_seqres.txt"
PDB_DIR = "/mnt/cmbi4/pdb/flat"
DSSP_DIR = "/home/joanna/data/dssp/"
PDBFIND = "/mnt/cmbi4/pdbfinder/PDBFIND.TXT"
SCRIPT_PATH = os.path.realpath(__file__)
KMAD = '/'.join(SCRIPT_PATH.split('/')[:-2] + ['kmad'])
log_file = "test.log"
if os.path.exists(log_file):
os.remove(log_file)
logging.basicConfig(filename=log_file, level=logging.DEBUG)
def parse_dssp(dssp, chain_id):
result = {"seq": "", "strct": {'G': [], 'E': [], 'B': [],
'I': [], 'T': [], 'H': [],
'S': [], 'C': []}}
in_strct_section = False
in_chain = False
counter = 0
for i in dssp:
if i.startswith(" # RESIDUE AA "):
in_strct_section = True
elif in_strct_section:
if i.split()[2] == chain_id:
in_chain = True
if in_chain:
if i.split()[3].islower():
result['seq'] += 'C'
result['strct']['C'].append(counter)
else:
result['seq'] += i.split()[3]
if i.split()[4] in result['strct'].keys():
result['strct'][i.split()[4]].append(counter)
counter += 1
return result
def get_strct_from_dssp(query_seq):
strct_elements = {'G': [], 'E': [], 'B': [], 'I': [], 'T': [], 'H': [],
'S': [], 'C': []}
blast_result = run_blast(query_seq, PDB_BLAST)
if blast_result:
chain_id = blast_result[0].split(',')[1].split('_')[1].upper()
pdb_id = blast_result[0].split(',')[1].split('_')[0]
dssp_path = os.path.join(DSSP_DIR, pdb_id + '.dssp')
if os.path.exists(dssp_path):
with open(dssp_path) as a:
dssp = a.read().splitlines()
dssp_data = parse_dssp(dssp, chain_id)
equivalent_positions = align(query_seq, dssp_data['seq'])
for i in dssp_data['strct']:
for j in dssp_data['strct'][i]:
if j in equivalent_positions.keys():
strct_elements[i].append(equivalent_positions[j])
return strct_elements
def run_blast(sequence, blastdb):
tmp_file = tempfile.NamedTemporaryFile(suffix=".fasta", delete=False)
with tmp_file as f:
f.write(sequence)
out_blast = tmp_file.name + '.blastp'
args = ["blastp", "-query", tmp_file.name, "-evalue", "1e-5",
"-num_threads", "15", "-db", blastdb,
"-out", out_blast, '-outfmt', '10',
"-max_target_seqs", '10']
try:
subprocess.call(args)
except subprocess.CalledProcessError as e:
print "Error: {}".format(e.output)
if os.path.exists(out_blast):
with open(out_blast) as a:
output = a.read().splitlines()
os.remove(out_blast)
else:
output = []
os.remove(tmp_file.name)
return output
# return the closest SwissProt hit as a dict with its fasta lines and .dat
# path (the %id >= 90% cutoff is currently disabled below); an empty dict is
# returned when nothing suitable is found
def find_closest_hit(sequence):
blast_result = run_blast(sequence, SWISS_BLAST)
result = {}
# if blast_result and float(blast_result[0].split(',')[2]) >= 70:
if blast_result:
seq_id = blast_result[0].split(',')[1].split('|')[2]
fasta_path = os.path.join(SWISS_FASTA, seq_id + '.fasta')
data_path = os.path.join(SWISS_DAT, seq_id + '.dat')
if os.path.exists(fasta_path) and os.path.exists(data_path):
with open(fasta_path) as a:
result['fasta'] = a.read().splitlines()
result['data_path'] = data_path
return result
# position - position in the aln_sequence
# returns position in the sequence without gaps
def get_real_position_al(aln_sequence, position):
cut = re.sub('-', '', aln_sequence[:position + 1])
return len(cut) - 1
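# Illustrative check (toy values): get_real_position_al('A-BC', 2) -> 1,
# i.e. alignment index 2 ('B') becomes residue index 1 once the gap is removed.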
# position - position in the sequence without gaps
# returns position in the aligned sequence
def get_real_position_seq(aln_sequence, position):
counter = 0
real_pos = -1
for i in range(len(aln_sequence)):
if aln_sequence[i] != '-':
counter += 1
if position == counter - 1:
real_pos = i
break
return real_pos
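# Illustrative check (toy values): get_real_position_seq('A-BC', 1) -> 2,
# i.e. ungapped residue index 1 ('B') sits at alignment index 2.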
def align(seq1, seq2):
tmp_file = tempfile.NamedTemporaryFile(suffix=".fasta", delete=False)
with tmp_file as f:
f.write('\n'.join(['>1', seq1, '>2', seq2]))
positions = {}
args = [KMAD, '-i', tmp_file.name,
'-o', tmp_file.name,
'-g', '-12', '-e', '-1.2', '-n', '-1.2', '-c', '1', '--gapped']
try:
subprocess.call(args)
if os.path.exists(tmp_file.name + '_al'):
with open(tmp_file.name + '_al') as a:
aln = unwrap(a.read().splitlines())
aln_seq1 = aln[1]
aln_seq2 = aln[3]
for i in range(len(aln_seq1)):
if aln_seq1[i] == aln_seq2[i]:
real_pos2 = get_real_position_al(aln_seq2, i)
real_pos1 = get_real_position_seq(seq1,
get_real_position_al(
aln_seq1, i))
positions[real_pos2] = real_pos1
os.remove(tmp_file.name + '_al')
except subprocess.CalledProcessError as e:
print "Error: {}".format(e.output)
os.remove(tmp_file.name)
return positions
# get 2ndary strct info based on the information from a very close homologue
# (transfer only data from identical positions)
def transfer_data_from_homologue(sequence, closest_sp):
seq2 = unwrap(closest_sp['fasta'])[1]
equivalent_positions = align(sequence, seq2)
homologue_strct_elems = get_strct_from_sp(closest_sp['data_path'])
query_strct_elems = {'G': [], 'E': [], 'B': [], 'I': [], 'T': [], 'H': [],
'S': [], 'C': []}
for i in homologue_strct_elems:
for j in homologue_strct_elems[i]:
if j in equivalent_positions.keys():
query_strct_elems[i].append(equivalent_positions[j])
return query_strct_elems
def change_char(mystring, position, new_char):
if position < len(mystring) - 1:
new_string = mystring[:position] + new_char + mystring[position + 1:]
else:
new_string = mystring[:position] + new_char
return new_string
# based on the dictionary strct_data (with lists of positions of certain 2ndary
# strct elements) encode a fasta file (only for a plain fasta file, not for 7c)
def encode(fasta, strct_data):
encoded = []
for i, lineI in enumerate(fasta):
if lineI.startswith('>'):
encoded.append(lineI)
else:
got_data = any(strct_data[i / 2].values())
if got_data:
newline = ""
for j in range(len(lineI)):
codon = lineI[j] + 'AAAAAA'
for k in strct_data[i / 2].keys():
if k != 'C' and j in strct_data[i / 2][k]:
codon = change_char(codon, 1, k)
if j in strct_data[i / 2]['C']:
codon = change_char(codon, 4, 's')
newline += codon
encoded.append(newline)
else:
encoded.append(''.join([j + 'AAAAAA' for j in lineI]))
return encoded
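# Example of the 7-character codons built above (derived from the code, not part
# of the original file): a helical residue 'M' is encoded as 'MHAAAAA' (2ndary
# structure letter at index 1) and a disulfide-bonded 'C' as 'CAAAsAA' ('s' flag
# at index 4); unannotated residues stay as 'XAAAAAA'.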
# get 2ndary strct annotations from swiss prot
def get_strct_from_sp(sp_path):
with open(sp_path) as a:
dat_file = a.read().splitlines()
strct_elements = {'G': [], 'E': [], 'B': [], 'I': [], 'T': [], 'H': [],
'S': [], 'C': []}
strct_dict = {'HELIX': 'H', 'TURN': 'T', 'STRAND': 'S',
'DISULFID': 'C'}
for i in dat_file:
if (i.startswith('FT') and len(i.split()) > 1
and i.split()[1] in strct_dict.keys()):
if i.split()[1] != 'DISULFID':
start = int(i.split()[2]) - 1
end = int(i.split()[3])
strct_code = strct_dict[i.split()[1]]
strct_elements[strct_code].extend(range(start, end))
else:
cys1 = int(i.split()[2]) - 1
cys2 = int(i.split()[3]) - 1
strct_elements['C'].extend([cys1, cys2])
return strct_elements
def unwrap(alignment):
new = []
for i in alignment:
if i.startswith('>'):
new.append(i)
new.append("")
else:
new[-1] += i
return new
def decode(fasta_7c):
fasta = []
for i in fasta_7c:
if i.startswith('>'):
fasta.append(i)
elif not i.startswith('#'):
fasta.append(''.join(i[::7]))
else:
break
return fasta
def get_seq_from_pdb(pdb_id, chain_id):
with open(PDBFIND) as a:
pdbfind = a.read().splitlines()
in_prot = False
in_chain = False
sequence = ""
for i in pdbfind:
if i.startswith('ID'):
if not in_prot and i.split()[-1] == pdb_id.upper():
in_prot = True
elif in_prot:
break
elif in_prot and i.startswith("Chain") and i.split()[-1] == chain_id:
in_chain = True
elif in_chain and i.startswith(" Sequence"):
sequence = i.split()[-1]
break
return sequence
def merge_dicts(dict1, dict2):
dict_tmp = dict1.copy()
dict_tmp.update(dict2)
return dict_tmp
def annotate_secondary_structure(fasta, output_name):
strct_data = []
for i in range(0, len(fasta), 2):
strct_elements = {'G': [], 'E': [], 'B': [], 'I': [], 'T': [], 'H': [],
'S': [], 'C': []}
closest_sp = find_closest_hit(fasta[i + 1])
if closest_sp:
strct_elements = transfer_data_from_homologue(fasta[i + 1],
closest_sp)
print strct_elements
strct_elements = merge_dicts(strct_elements,
get_strct_from_dssp(fasta[i + 1]))
print strct_elements
strct_data.append(strct_elements)
out_fasta = encode(fasta, strct_data)
out = open(output_name, 'w')
out.write('\n'.join(out_fasta))
out.close()
def get_structure_data(query_seq, pdb_fastas):
result = {'id': '',
'chain_id': '',
'pdb_path': '',
'pdb_seq': '',
'positions_map': {}
}
blast_result = run_blast(query_seq, PDB_BLAST)
if blast_result:
result['chain_id'] = blast_result[0].split(',')[1].split('_')[1]
result['id'] = blast_result[0].split(',')[1].split('_')[0]
result['pdb_seq'] = get_seq_from_pdb(result['id'], result['chain_id'])
result['pdb_path'] = os.path.join(PDB_DIR,
'pdb' + result['id'].lower() + '.ent')
result['positions_map'] = align(result['pdb_seq'], query_seq)
return result
# run structure alignment and return a dictionary of equivalent positions
def mk_strct_al_modeller(strct_data1, strct_data2):
_stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
tmp_file = tempfile.NamedTemporaryFile(suffix=".fasta", delete=False)
env = m.environ()
aln = m.alignment(env)
code1 = 'pdb' + strct_data1['id']
code2 = 'pdb' + strct_data2['id']
chain1 = strct_data1['chain_id']
chain2 = strct_data2['chain_id']
env.io.atom_files_directory = ['.', PDB_DIR]
result = {}
try:
for (code, chain) in ((code1, chain1), (code2, chain2)):
mdl = m.model(env, file=code, model_segment=('FIRST:'+chain,
'LAST:'+chain))
aln.append_model(mdl, atom_files=code, align_codes=code+chain)
for (weights, write_fit, whole) in (((1., 0., 0., 0., 1., 0.), False,
True),
((1., 0.5, 1., 1., 1., 0.), False,
True),
((1., 1., 1., 1., 1., 0.), True,
False)):
r = aln.salign(rms_cutoff=3.5, normalize_pp_scores=False,
rr_file='$(LIB)/as1.sim.mat', overhang=30,
gap_penalties_1d=(-450, -50),
gap_penalties_3d=(0, 3), gap_gap_score=0,
gap_residue_score=0,
alignment_type='tree', # If 'progressive', the tree is not
# computed and all structures will be
# aligned sequentially to the first
#ext_tree_file='1is3A_exmat.mtx', # Tree building can be avoided
# if the tree is input
feature_weights=weights, # For a multiple sequence alignment only
# the first feature needs to be non-zero
improve_alignment=True, fit=True, write_fit=False,
write_whole_pdb=whole, output='ALIGNMENT QUALITY')
if r.qscorepct > 70:
aln.write(file=tmp_file.name, alignment_format='FASTA')
with open(tmp_file.name) as a:
alignment = unwrap(a.read().splitlines())
for i in range(len(alignment[1])):
if alignment[1][i] != '-' and alignment[3][i] != '-':
pos1 = get_real_position_al(alignment[1], i)
pos2 = get_real_position_al(alignment[3], i)
result[pos1] = pos2
except:
print 'Modeller failed'
sys.stdout.close()
sys.stdout = _stdout
return result
def mk_strct_alignment(strct_data1, strct_data2):
args = ['TMalign', strct_data1['pdb_path'], strct_data2['pdb_path']]
result = {}
output = []
try:
output = subprocess.check_output(args).splitlines()
except subprocess.CalledProcessError as e:
print "Error: {}".format(e.output)
if output:
seq1 = output[-4]
seq2 = output[-2]
al = output[-3]
# if (al.find(':' * (len(al) / 4)) != -1
if len(seq1) == len(seq2) and len(seq1) == len(al):
for i in range(len(seq1)):
if al[i] == ':':
pos1 = get_real_position_al(seq1, i)
pos2 = get_real_position_al(seq2, i)
result[pos1] = pos2
else:
print "bad alignment {}".format(al), al.count(':'), len(al)
return result
def reverse_dict(some_dict):
new_dict = {}
for i in some_dict:
new_dict[some_dict[i]] = i
return new_dict
# return equivalent positions between seq and seq2
def find_equivalent_positions(seq1_str1, seq2_str2, str1_str2):
str1_seq1 = reverse_dict(seq1_str1)
str2_seq2 = reverse_dict(seq2_str2)
seq1_seq2 = {}
for i in str1_str2:
i_str1 = i
i_str2 = str1_str2[i]
i_seq1 = ''
i_seq2 = ''
if i_str1 in str1_seq1.keys():
i_seq1 = str1_seq1[i_str1]
if i_str2 in str2_seq2.keys():
i_seq2 = str2_seq2[i_str2]
if i_seq1 and i_seq2:
seq1_seq2[i_seq1] = i_seq2
return seq1_seq2
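# Worked toy example (assumed dicts): with seq1_str1 = {0: 5}, seq2_str2 = {3: 7}
# and str1_str2 = {5: 7}, structure position 5 maps to structure position 7, so
# query position 0 is paired with position 3 of the other sequence: {0: 3}.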
def make_conf_dict(query_seq, eq_positions, al_score):
settings_dict = {"feature_settings": {"usr_features": []}}
all_eqs = []
for i in eq_positions:
all_eqs.extend(eq_positions[i].keys())
for pos in range(len(query_seq)):
curr_feature_positions = []
if pos in all_eqs:
for seq_no in eq_positions:
if pos in eq_positions[seq_no].keys():
curr_feature_positions.append(
{"seq": seq_no + 1,
"pos": tuple([eq_positions[seq_no][pos] + 1])})
if curr_feature_positions:
curr_feature_positions.append({"seq": 1, "pos": tuple([pos + 1])})
single_feat = {"name": "pos{}".format(pos + 1),
"add_score": al_score,
"subtract_score": 0,
"add_features": tuple(["pos{}".format(pos + 1)]),
"add_tags": [],
"add_exceptions": [],
"subtract_features": [],
"subtract_tags": [],
"subtract_exceptions": [],
"subtract_features": [],
"pattern": '',
"positions": tuple(curr_feature_positions)}
settings_dict["feature_settings"]["usr_features"].append(
single_feat)
return settings_dict
def dict_to_cfg(data_dict):
data_h = hjson.dumps(data_dict)
text = re.sub('\}', '};', re.sub('\]', ');', re.sub('\[', '(', data_h)))
text = text[:-2].lstrip('{')
return text
def write_conf_file(query_seq, eq_positions, output_conf, al_score):
data_dict = make_conf_dict(query_seq, eq_positions, al_score)
indent = 2
outtxt = encoder.dumps(data_dict, indent=indent)
out = open(output_conf, 'w')
out.write(outtxt)
out.close()
def structure_alignment_conf(fasta, output_conf, al_score):
with open(PDB_BLAST) as a:
pdb_fastas = unwrap(a.read().splitlines())
query_sequence = fasta[1]
query_strct_data = get_structure_data(query_sequence, pdb_fastas)
eq_positions = {}
for i in range(2, len(fasta)):
if not fasta[i].startswith('>'):
strct_data = get_structure_data(fasta[i], pdb_fastas)
eq_seq1_seq2 = {}
if strct_data['id']:
strct_al = mk_strct_al_modeller(query_strct_data, strct_data)
eq_seq1_seq2 = find_equivalent_positions(
query_strct_data['positions_map'],
strct_data['positions_map'],
strct_al)
eq_positions[i / 2] = eq_seq1_seq2
write_conf_file(query_sequence, eq_positions, output_conf, al_score)
def annotate(fasta_in, output_name, output_conf, al_score):
with open(fasta_in) as a:
fasta = unwrap(a.read().splitlines())
annotate_secondary_structure(fasta, output_name)
structure_alignment_conf(fasta, output_conf, al_score)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Annotate fasta sequences with'
' structural information')
parser.add_argument('fasta_in')
parser.add_argument('output_filename')
parser.add_argument('output_conf')
parser.add_argument('--al_score', type=int)
args = parser.parse_args()
annotate(args.fasta_in, args.output_filename, args.output_conf,
args.al_score)
|
cmbi/kmad
|
scripts/annotate_strct_alignment.py
|
Python
|
gpl-3.0
| 20,890
|
import platform
from base64 import b64encode
import re
from ingenico.connect.sdk.data_object import DataObject
from ingenico.connect.sdk.defaultimpl.default_marshaller import \
DefaultMarshaller
from ingenico.connect.sdk.domain.metadata.shopping_cart_extension import ShoppingCartExtension
from request_header import RequestHeader
class IterProperty(object):
def __init__(self, func):
self.func = func
def __get__(self, instance, owner):
return self.func(owner)
class MetaDataProvider:
"""
Provides meta info about the server.
"""
__SDK_VERSION = "3.30.0"
__SERVER_META_INFO_HEADER = "X-GCS-ServerMetaInfo"
__prohibited_headers = [__SERVER_META_INFO_HEADER, "X-GCS-Idempotence-Key",
"Date", "Content-Type", "Authorization"]
__PROHIBITED_HEADERS = tuple(sorted(__prohibited_headers, key=str.lower))
__meta_data_headers = None
class ServerMetaInfo(DataObject):
platform_identifier = None
sdk_identifier = None
sdk_creator = None
integrator = None
shopping_cart_extension = None
def to_dictionary(self):
dictionary = super(MetaDataProvider.ServerMetaInfo, self).to_dictionary()
if self.platform_identifier is not None:
dictionary['platformIdentifier'] = self.platform_identifier
if self.sdk_identifier is not None:
dictionary['sdkIdentifier'] = self.sdk_identifier
if self.sdk_creator is not None:
dictionary['sdkCreator'] = self.sdk_creator
if self.integrator is not None:
dictionary['integrator'] = self.integrator
if self.shopping_cart_extension is not None:
dictionary['shoppingCartExtension'] = self.shopping_cart_extension.to_dictionary()
return dictionary
def from_dictionary(self, dictionary):
super(MetaDataProvider.ServerMetaInfo, self).from_dictionary(dictionary)
if 'platformIdentifier' in dictionary:
self.platform_identifier = dictionary['platformIdentifier']
if 'sdkIdentifier' in dictionary:
self.sdk_identifier = dictionary['sdkIdentifier']
if 'sdkCreator' in dictionary:
self.sdk_creator = dictionary['sdkCreator']
if 'integrator' in dictionary:
self.integrator = dictionary['integrator']
if 'shoppingCartExtension' in dictionary:
if not isinstance(dictionary['shoppingCartExtension'], dict):
raise TypeError('value \'{}\' is not a dictionary'.format(dictionary['shoppingCartExtension']))
self.shopping_cart_extension = ShoppingCartExtension.create_from_dictionary(dictionary['shoppingCartExtension'])
return self
def __init__(self, integrator, shopping_cart_extension=None,
additional_request_headers=()):
MetaDataProvider.__validate_additional_request_headers(
additional_request_headers)
for i in additional_request_headers:
i.name = re.sub(r'\r?\n(?:(?![\r\n])\s)*', " ", i.name)
i.name = i.name.strip()
i.value = re.sub(r'\r?\n(?:(?![\r\n])\s)*', " ", i.value)
i.value = i.value.strip()
server_meta_info = self.ServerMetaInfo()
server_meta_info.platform_identifier = self._platform_identifier
server_meta_info.sdk_identifier = self._sdk_identifier
server_meta_info.sdk_creator = "Ingenico"
server_meta_info.integrator = integrator
server_meta_info.shopping_cart_extension = shopping_cart_extension
server_meta_info_string = DefaultMarshaller.INSTANCE().marshal(
server_meta_info)
server_meta_info_header = RequestHeader(
self.__SERVER_META_INFO_HEADER, b64encode(
server_meta_info_string.encode('utf-8')))
if not additional_request_headers:
self.__meta_data_headers = tuple([server_meta_info_header])
else:
request_headers = [server_meta_info_header]
request_headers.extend(additional_request_headers)
self.__meta_data_headers = tuple(request_headers)
@staticmethod
def __validate_additional_request_headers(additional_request_headers):
if additional_request_headers is not None:
for additional_request_header in additional_request_headers:
MetaDataProvider.__validate_additional_request_header(
additional_request_header)
@staticmethod
def __validate_additional_request_header(additional_request_header):
try:
if additional_request_header.name in MetaDataProvider.__PROHIBITED_HEADERS:
raise ValueError("request header not allowed: ",
str(additional_request_header))
except AttributeError:
raise AttributeError("Each request header should have an attribute 'name' and an attribute 'value'")
@IterProperty
def prohibited_headers(self):
return self.__PROHIBITED_HEADERS
@property
def meta_data_headers(self):
"""
:return: The server related headers containing the META data to be
associated with the request (if any). This will always contain at least
an automatically generated header X-GCS-ServerMetaInfo.
"""
return self.__meta_data_headers
@property
def _platform_identifier(self):
return platform.system() + " " + platform.release() + "/" + \
platform.version() + " Python/" + platform.python_version() + \
" (" + platform.python_implementation() + "; " + \
str(platform.python_compiler()) + ")"
@property
def _sdk_identifier(self):
return "Python2ServerSDK/v" + self.__SDK_VERSION
|
Ingenico-ePayments/connect-sdk-python2
|
ingenico/connect/sdk/meta_data_provider.py
|
Python
|
mit
| 5,912
|
#!/usr/bin/env python
# A module for sleeping and testing return codes, so tests run on windows too.
import time
import signal
import sys
DROP_SIGTERM = 250
def handle_sigterm(*args):
# Should support the various OS's to return the correct value
global sigtermReturnCode
if sigtermReturnCode == DROP_SIGTERM:
time.sleep(float(sys.argv[1]))
return
signal.signal(signal.SIGTERM, signal.SIG_DFL)
signal.signal(signal.SIGINT, signal.SIG_DFL)
sys.exit(sigtermReturnCode)
# os.kill(os.getpid(), signal.SIGTERM)
return sigtermReturnCode
if __name__ == '__main__':
global sigtermReturnCode
try:
returnCode = int(sys.argv[2])
except (IndexError, ValueError):
returnCode = 0
try:
sigtermReturnCode = int(sys.argv[3])
except:
sigtermReturnCode = returnCode
signal.signal(signal.SIGTERM, handle_sigterm)
time.sleep(float(sys.argv[1]))
sys.exit(returnCode)
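# Illustrative usage sketch (assumed, not part of this module): argv[1] is the sleep
# duration in seconds, argv[2] the normal exit code, and argv[3] an optional exit code
# to use when SIGTERM is received (250 means "swallow SIGTERM and keep sleeping").
#
#     import subprocess, sys
#     proc = subprocess.Popen([sys.executable, 'sleeper.py', '1', '7'])
#     assert proc.wait() == 7   # sleeps about a second, then exits with code 7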
|
kata198/python-subprocess2
|
tests/subprocess2Tests/sleeper.py
|
Python
|
lgpl-3.0
| 959
|
# Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import os
# Find the best implementation available
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import caffe_pb2
import flask
import lmdb
import PIL.Image
from .forms import ImageClassificationDatasetForm
from .job import ImageClassificationDatasetJob
from digits import utils
from digits.dataset import tasks
from digits.utils.forms import fill_form_if_cloned, save_form_to_job
from digits.utils.routing import request_wants_json, job_from_request
from digits.webapp import scheduler
blueprint = flask.Blueprint(__name__, __name__)
def from_folders(job, form):
"""
Add tasks for creating a dataset by parsing folders of images
"""
job.labels_file = utils.constants.LABELS_FILE
### Add ParseFolderTask
percent_val = form.folder_pct_val.data
val_parents = []
if form.has_val_folder.data:
percent_val = 0
percent_test = form.folder_pct_test.data
test_parents = []
if form.has_test_folder.data:
percent_test = 0
min_per_class = form.folder_train_min_per_class.data
max_per_class = form.folder_train_max_per_class.data
parse_train_task = tasks.ParseFolderTask(
job_dir = job.dir(),
folder = form.folder_train.data,
percent_val = percent_val,
percent_test = percent_test,
min_per_category = min_per_class if min_per_class>0 else 1,
max_per_category = max_per_class if max_per_class>0 else None
)
job.tasks.append(parse_train_task)
# set parents
if not form.has_val_folder.data:
val_parents = [parse_train_task]
if not form.has_test_folder.data:
test_parents = [parse_train_task]
if form.has_val_folder.data:
min_per_class = form.folder_val_min_per_class.data
max_per_class = form.folder_val_max_per_class.data
parse_val_task = tasks.ParseFolderTask(
job_dir = job.dir(),
parents = parse_train_task,
folder = form.folder_val.data,
percent_val = 100,
percent_test = 0,
min_per_category = min_per_class if min_per_class>0 else 1,
max_per_category = max_per_class if max_per_class>0 else None
)
job.tasks.append(parse_val_task)
val_parents = [parse_val_task]
if form.has_test_folder.data:
min_per_class = form.folder_test_min_per_class.data
max_per_class = form.folder_test_max_per_class.data
parse_test_task = tasks.ParseFolderTask(
job_dir = job.dir(),
parents = parse_train_task,
folder = form.folder_test.data,
percent_val = 0,
percent_test = 100,
min_per_category = min_per_class if min_per_class>0 else 1,
max_per_category = max_per_class if max_per_class>0 else None
)
job.tasks.append(parse_test_task)
test_parents = [parse_test_task]
### Add CreateDbTasks
backend = form.backend.data
encoding = form.encoding.data
compression = form.compression.data
job.tasks.append(
tasks.CreateDbTask(
job_dir = job.dir(),
parents = parse_train_task,
input_file = utils.constants.TRAIN_FILE,
db_name = utils.constants.TRAIN_DB,
backend = backend,
image_dims = job.image_dims,
resize_mode = job.resize_mode,
encoding = encoding,
compression = compression,
mean_file = utils.constants.MEAN_FILE_CAFFE,
labels_file = job.labels_file,
)
)
if percent_val > 0 or form.has_val_folder.data:
job.tasks.append(
tasks.CreateDbTask(
job_dir = job.dir(),
parents = val_parents,
input_file = utils.constants.VAL_FILE,
db_name = utils.constants.VAL_DB,
backend = backend,
image_dims = job.image_dims,
resize_mode = job.resize_mode,
encoding = encoding,
compression = compression,
labels_file = job.labels_file,
)
)
if percent_test > 0 or form.has_test_folder.data:
job.tasks.append(
tasks.CreateDbTask(
job_dir = job.dir(),
parents = test_parents,
input_file = utils.constants.TEST_FILE,
db_name = utils.constants.TEST_DB,
backend = backend,
image_dims = job.image_dims,
resize_mode = job.resize_mode,
encoding = encoding,
compression = compression,
labels_file = job.labels_file,
)
)
def from_files(job, form):
"""
Add tasks for creating a dataset by reading textfiles
"""
### labels
if form.textfile_use_local_files.data:
job.labels_file = form.textfile_local_labels_file.data.strip()
else:
flask.request.files[form.textfile_labels_file.name].save(
os.path.join(job.dir(), utils.constants.LABELS_FILE)
)
job.labels_file = utils.constants.LABELS_FILE
shuffle = bool(form.textfile_shuffle.data)
backend = form.backend.data
encoding = form.encoding.data
compression = form.compression.data
### train
if form.textfile_use_local_files.data:
train_file = form.textfile_local_train_images.data.strip()
else:
flask.request.files[form.textfile_train_images.name].save(
os.path.join(job.dir(), utils.constants.TRAIN_FILE)
)
train_file = utils.constants.TRAIN_FILE
image_folder = form.textfile_train_folder.data.strip()
if not image_folder:
image_folder = None
job.tasks.append(
tasks.CreateDbTask(
job_dir = job.dir(),
input_file = train_file,
db_name = utils.constants.TRAIN_DB,
backend = backend,
image_dims = job.image_dims,
image_folder= image_folder,
resize_mode = job.resize_mode,
encoding = encoding,
compression = compression,
mean_file = utils.constants.MEAN_FILE_CAFFE,
labels_file = job.labels_file,
shuffle = shuffle,
)
)
### val
if form.textfile_use_val.data:
if form.textfile_use_local_files.data:
val_file = form.textfile_local_val_images.data.strip()
else:
flask.request.files[form.textfile_val_images.name].save(
os.path.join(job.dir(), utils.constants.VAL_FILE)
)
val_file = utils.constants.VAL_FILE
image_folder = form.textfile_val_folder.data.strip()
if not image_folder:
image_folder = None
job.tasks.append(
tasks.CreateDbTask(
job_dir = job.dir(),
input_file = val_file,
db_name = utils.constants.VAL_DB,
backend = backend,
image_dims = job.image_dims,
image_folder= image_folder,
resize_mode = job.resize_mode,
encoding = encoding,
compression = compression,
labels_file = job.labels_file,
shuffle = shuffle,
)
)
### test
if form.textfile_use_test.data:
if form.textfile_use_local_files.data:
test_file = form.textfile_local_test_images.data.strip()
else:
flask.request.files[form.textfile_test_images.name].save(
os.path.join(job.dir(), utils.constants.TEST_FILE)
)
test_file = utils.constants.TEST_FILE
image_folder = form.textfile_test_folder.data.strip()
if not image_folder:
image_folder = None
job.tasks.append(
tasks.CreateDbTask(
job_dir = job.dir(),
input_file = test_file,
db_name = utils.constants.TEST_DB,
backend = backend,
image_dims = job.image_dims,
image_folder= image_folder,
resize_mode = job.resize_mode,
encoding = encoding,
compression = compression,
labels_file = job.labels_file,
shuffle = shuffle,
)
)
@blueprint.route('/new', methods=['GET'])
@utils.auth.requires_login
def new():
"""
Returns a form for a new ImageClassificationDatasetJob
"""
form = ImageClassificationDatasetForm()
## Is there a request to clone a job with ?clone=<job_id>
fill_form_if_cloned(form)
return flask.render_template('datasets/images/classification/new.html', form=form)
@blueprint.route('.json', methods=['POST'])
@blueprint.route('', methods=['POST'], strict_slashes=False)
@utils.auth.requires_login(redirect=False)
def create():
"""
Creates a new ImageClassificationDatasetJob
Returns JSON when requested: {job_id,name,status} or {errors:[]}
"""
form = ImageClassificationDatasetForm()
## Is there a request to clone a job with ?clone=<job_id>
fill_form_if_cloned(form)
if not form.validate_on_submit():
if request_wants_json():
return flask.jsonify({'errors': form.errors}), 400
else:
return flask.render_template('datasets/images/classification/new.html', form=form), 400
job = None
try:
job = ImageClassificationDatasetJob(
username = utils.auth.get_username(),
name = form.dataset_name.data,
image_dims = (
int(form.resize_height.data),
int(form.resize_width.data),
int(form.resize_channels.data),
),
resize_mode = form.resize_mode.data
)
if form.method.data == 'folder':
from_folders(job, form)
elif form.method.data == 'textfile':
from_files(job, form)
else:
raise ValueError('method not supported')
## Save form data with the job so we can easily clone it later.
save_form_to_job(job, form)
scheduler.add_job(job)
if request_wants_json():
return flask.jsonify(job.json_dict())
else:
return flask.redirect(flask.url_for('digits.dataset.views.show', job_id=job.id()))
except:
if job:
scheduler.delete_job(job)
raise
def show(job, related_jobs=None):
"""
Called from digits.dataset.views.datasets_show()
"""
return flask.render_template('datasets/images/classification/show.html', job=job, related_jobs=related_jobs)
@blueprint.route('/summary', methods=['GET'])
def summary():
"""
Return a short HTML summary of a DatasetJob
"""
job = job_from_request()
return flask.render_template('datasets/images/classification/summary.html', dataset=job)
class DbReader(object):
"""
Reads a database
"""
def __init__(self, location):
"""
Arguments:
location -- where is the database
"""
self._db = lmdb.open(location,
map_size=1024**3, # 1GB
readonly=True, lock=False)
with self._db.begin() as txn:
self.total_entries = txn.stat()['entries']
def entries(self):
"""
Generator returning all entries in the DB
"""
with self._db.begin() as txn:
cursor = txn.cursor()
for item in cursor:
yield item
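# Illustrative sketch (assumed path): DbReader wraps a read-only LMDB environment and
# entries() yields (key, value) pairs where each value is a serialized caffe_pb2.Datum,
# which is exactly how explore() below consumes it:
#
#     reader = DbReader('/path/to/train_db')
#     for key, value in reader.entries():
#         datum = caffe_pb2.Datum()
#         datum.ParseFromString(value)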
@blueprint.route('/explore', methods=['GET'])
def explore():
"""
Returns a gallery consisting of the images of one of the dbs
"""
job = job_from_request()
# Get LMDB
db = flask.request.args.get('db', 'train')
if 'train' in db.lower():
task = job.train_db_task()
elif 'val' in db.lower():
task = job.val_db_task()
elif 'test' in db.lower():
task = job.test_db_task()
if task is None:
raise ValueError('No create_db task for {0}'.format(db))
if task.status != 'D':
raise ValueError("This create_db task's status should be 'D' but is '{0}'".format(task.status))
if task.backend != 'lmdb':
raise ValueError("Backend is {0} while expected backend is lmdb".format(task.backend))
db_path = job.path(task.db_name)
labels = task.get_labels()
page = int(flask.request.args.get('page', 0))
size = int(flask.request.args.get('size', 25))
label = flask.request.args.get('label', None)
if label is not None:
try:
label = int(label)
label_str = labels[label]
except ValueError:
label = None
reader = DbReader(db_path)
count = 0
imgs = []
min_page = max(0, page - 5)
if label is None:
total_entries = reader.total_entries
else:
total_entries = task.distribution[str(label)]
max_page = min((total_entries-1) / size, page + 5)
pages = range(min_page, max_page + 1)
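# Worked example of the paging window (illustrative): with page=7, size=25 and
# total_entries=1000, min_page = max(0, 2) = 2 and max_page = min((1000-1)/25, 12) = 12
# under Python 2 integer division, so pages = range(2, 13), i.e. links for pages 2..12.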
for key, value in reader.entries():
if count >= page*size:
datum = caffe_pb2.Datum()
datum.ParseFromString(value)
if label is None or datum.label == label:
if datum.encoded:
s = StringIO()
s.write(datum.data)
s.seek(0)
img = PIL.Image.open(s)
else:
import caffe.io
arr = caffe.io.datum_to_array(datum)
# CHW -> HWC
arr = arr.transpose((1,2,0))
if arr.shape[2] == 1:
# HWC -> HW
arr = arr[:,:,0]
elif arr.shape[2] == 3:
# BGR -> RGB
# XXX see issue #59
arr = arr[:,:,[2,1,0]]
img = PIL.Image.fromarray(arr)
imgs.append({"label":labels[datum.label], "b64": utils.image.embed_image_html(img)})
if label is None:
count += 1
else:
datum = caffe_pb2.Datum()
datum.ParseFromString(value)
if datum.label == int(label):
count += 1
if len(imgs) >= size:
break
return flask.render_template('datasets/images/classification/explore.html', page=page, size=size, job=job, imgs=imgs, labels=labels, pages=pages, label=label, total_entries=total_entries, db=db)
|
brainstorm-ai/DIGITS
|
digits/dataset/images/classification/views.py
|
Python
|
bsd-3-clause
| 15,470
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def reset_extensions_translations_locales(apps, schema_editor):
"""Reset the locale field for all translations on existing Extensions. This
is done to fix bug 1215094: some translations were created with the wrong
language - the one from the request, instead of the one from the
default_language field."""
Extension = apps.get_model('extensions', 'Extension')
Translation = apps.get_model('translations', 'Translation')
extensions = Extension.objects.all()
for extension in extensions:
translations_ids = filter(
None, [extension.name_id, extension.description_id])
lang = extension.default_language.lower()
Translation.objects.filter(id__in=translations_ids).update(locale=lang)
class Migration(migrations.Migration):
dependencies = [
('extensions', '0015_extension_author'),
]
operations = [
migrations.RunPython(reset_extensions_translations_locales),
]
|
washort/zamboni
|
mkt/extensions/migrations/0016_reset_extensions_translations_locale.py
|
Python
|
bsd-3-clause
| 1,062
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.policies import volumes as policy
class VolumeMigStatusAttributeController(wsgi.Controller):
def _add_volume_mig_status_attribute(self, req, resp_volume):
db_volume = req.get_db_volume(resp_volume['id'])
key = "%s:migstat" % Volume_mig_status_attribute.alias
resp_volume[key] = db_volume['migration_status']
key = "%s:name_id" % Volume_mig_status_attribute.alias
resp_volume[key] = db_volume['_name_id']
@wsgi.extends
def show(self, req, resp_obj, id):
context = req.environ['cinder.context']
if context.authorize(policy.MIG_ATTRIBUTE_POLICY, fatal=False):
self._add_volume_mig_status_attribute(req, resp_obj.obj['volume'])
@wsgi.extends
def detail(self, req, resp_obj):
context = req.environ['cinder.context']
if context.authorize(policy.MIG_ATTRIBUTE_POLICY, fatal=False):
for vol in list(resp_obj.obj['volumes']):
self._add_volume_mig_status_attribute(req, vol)
class Volume_mig_status_attribute(extensions.ExtensionDescriptor):
"""Expose migration_status as an attribute of a volume."""
name = "VolumeMigStatusAttribute"
alias = "os-vol-mig-status-attr"
updated = "2013-08-08T00:00:00+00:00"
def get_controller_extensions(self):
controller = VolumeMigStatusAttributeController()
extension = extensions.ControllerExtension(self, 'volumes', controller)
return [extension]
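# Illustrative sketch (values assumed): when the policy check passes, the controller
# above adds two keys to each volume dict in show/detail responses, derived from the
# extension alias:
#
#     volume['os-vol-mig-status-attr:migstat'] = db_volume['migration_status']
#     volume['os-vol-mig-status-attr:name_id'] = db_volume['_name_id']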
|
phenoxim/cinder
|
cinder/api/contrib/volume_mig_status_attribute.py
|
Python
|
apache-2.0
| 2,136
|
#
# Copyright (c) 2008--2014 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
import re
from types import ListType, IntType
from spacewalk.common import rhnFlags
from spacewalk.common.rhnLog import log_debug, log_error
from spacewalk.common.rhnException import rhnException
from spacewalk.server import rhnSQL
from spacewalk.server.rhnServer import server_kickstart
# the "exposed" functions
__rhnexport__ = ['remove',
'update',
'refresh_list',
'delta',
'runTransaction',
'verify']
class InvalidDep(Exception):
pass
_query_insert_attribute_verify_results = rhnSQL.Statement("""
insert into rhnServerActionVerifyResult (
server_id, action_id,
package_name_id,
package_evr_id,
package_arch_id,
package_capability_id,
attribute, size_differs, mode_differs, checksum_differs,
devnum_differs, readlink_differs, uid_differs,
gid_differs, mtime_differs
)
values (
:server_id, :action_id,
lookup_package_name(:package_name),
lookup_evr(:epoch || '', :version, :release),
lookup_package_arch(:arch),
lookup_package_capability(:filename),
:attrib, :test_S, :test_M, :test_5,
:test_D, :test_L, :test_U,
:test_G, :test_T
)
""")
_query_insert_missing_verify_results = rhnSQL.Statement("""
insert into rhnServerActionVerifyMissing (
server_id,
action_id,
package_name_id,
package_evr_id,
package_arch_id,
package_capability_id
)
values (
:server_id,
:action_id,
lookup_package_name(:package_name),
lookup_evr(:epoch || '', :version, :release),
lookup_package_arch(:arch),
lookup_package_capability(:filename)
)
""")
_query_delete_verify_results = rhnSQL.Statement("""
delete from rhnServerActionVerifyResult
where server_id = :server_id
and action_id = :action_id
""")
_query_delete_verify_missing = rhnSQL.Statement("""
delete from rhnServerActionVerifyMissing
where server_id = :server_id
and action_id = :action_id
""")
def verify(server_id, action_id, data={}):
log_debug(3, action_id)
if not data or not data.has_key('verify_info'):
# some data should have been passed back...
log_error("Insufficient package verify information returned",
server_id, action_id, data)
return
log_debug(4, "pkg verify data", data)
# Remove old results
h = rhnSQL.prepare(_query_delete_verify_results)
h.execute(server_id=server_id, action_id=action_id)
h = rhnSQL.prepare(_query_delete_verify_missing)
h.execute(server_id=server_id, action_id=action_id)
attrib_tests = ['S', 'M', '5', 'D', 'L', 'U', 'G', 'T']
# Store the values for executemany() for the attribute-failures
verify_attribs = {'server_id': [], 'action_id': [], 'package_name': [],
'epoch': [], 'version': [], 'release': [], 'arch': [],
'filename': [], 'attrib': [], }
for test in attrib_tests:
verify_attribs["test_" + test] = []
# Store the "missing xxxx" results for executemany()
missing_files = {'server_id': [], 'action_id': [], 'package_name': [],
'epoch': [], 'version': [], 'release': [], 'arch': [],
'filename': []}
# Uniquify the packages
uq_packages = {}
for package_spec, responses in data['verify_info']:
package_spec = list(package_spec)
# Fix the epoch
if package_spec[3] == '':
package_spec[3] = None
package_spec = tuple(package_spec)
if uq_packages.has_key(package_spec):
# Been here already
continue
# We need to uniquify the file names within a package too
hash = {}
for response in responses:
try:
dict = _parse_response_line(response, attrib_tests)
except InvalidResponseLine:
log_error("packages.verify: (%s, %s): invalid line %s"
% (server_id, action_id, response))
continue
hash[dict['filename']] = dict
# Add the rest of the variables to the dictionaries
for filename, dict in hash.items():
dict['server_id'] = server_id
dict['action_id'] = action_id
dict['package_name'] = package_spec[0]
dict['version'] = package_spec[1]
dict['release'] = package_spec[2]
dict['epoch'] = package_spec[3]
dict['arch'] = package_spec[4]
if not dict.has_key('missing'):
_hash_append(verify_attribs, dict)
else:
_hash_append(missing_files, dict)
# This package was visited, store it
uq_packages[package_spec] = None
if verify_attribs['action_id']:
h = rhnSQL.prepare(_query_insert_attribute_verify_results)
h.executemany(**verify_attribs)
if missing_files['action_id']:
h = rhnSQL.prepare(_query_insert_missing_verify_results)
h.executemany(**missing_files)
rhnSQL.commit()
# Exception raised when an invalid line is found
class InvalidResponseLine(Exception):
pass
def _parse_response_line(response, tests):
# Parses a single line of output from rpmverify
# Returns a dictionary of values that can be plugged into the SQL query
# response looks like:
# 'S.5....T c /usr/share/rhn/up2date_client/iutil.pyc'
# or
# '....L... /var/www/html'
# or
# 'missing /usr/include/curl/types.h'
# or
# 'missing c /var/www/html/index.html'
#
#
# or something like S.5....T. /usr/lib/anaconda-runtime/boot/boot.msg
# with the last line being a . or a C, depending on selinux context
# see #155952
#
res_re = re.compile("^(?P<ts>[\S]+)\s+(?P<attr>[cdglr]?)\s* (?P<filename>[\S]+)$")
m = res_re.match(response)
if not m:
raise InvalidResponseLine
ts, attr, filename = m.groups()
# clean up attr, as it can get slightly fudged in the
if ts == 'missing':
return {'filename': filename, 'missing': None}
# bug 155952: SELinux will return an extra flag
# FIXME: need to support the extra selinux context flag
# I think this is just being paranoid, but to avoid changing the schema for
# bug 155952 we're going to remove the 9th char if we get it
# i.e. ignore the last flag if we get 9 chars
if len(ts) < len(tests):
raise InvalidResponseLine
if not filename:
raise InvalidResponseLine
dict = {
'attrib': attr or None, # convert empty attribute to None
'filename': filename,
}
# Add the tests
for i in range(len(tests)):
val = ts[i]
t_name = tests[i]
if val == t_name:
val = 'Y'
elif val == '.':
val = 'N'
elif val != '?':
raise InvalidResponseLine
dict["test_" + t_name] = val
return dict
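# Illustrative parse (derived from the sample lines above): for the response
#     'S.5....T c /usr/share/rhn/up2date_client/iutil.pyc'
# with tests ['S', 'M', '5', 'D', 'L', 'U', 'G', 'T'], the returned dict is roughly:
#     {'attrib': 'c', 'filename': '/usr/share/rhn/up2date_client/iutil.pyc',
#      'test_S': 'Y', 'test_M': 'N', 'test_5': 'Y', 'test_D': 'N',
#      'test_L': 'N', 'test_U': 'N', 'test_G': 'N', 'test_T': 'Y'}
# and a line starting with 'missing' yields {'filename': ..., 'missing': None}.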
def _hash_append(dst, src):
# Append the values of src to dst
for k, list in dst.items():
list.append(src[k])
def update(server_id, action_id, data={}):
log_debug(3, server_id, action_id)
action_status = rhnFlags.get('action_status')
if action_status == 3:
# Action failed
kickstart_state = 'failed'
next_action_type = None
else:
kickstart_state = 'deployed'
# This is horrendous, but in order to fix it I would have to change almost all of the
# actions code, which we don't have time to do for the 500 beta. --wregglej
try:
ks_session_type = server_kickstart.get_kickstart_session_type(server_id, action_id)
except rhnException, e:
ks_session_type = None
if ks_session_type is None:
next_action_type = "None"
elif ks_session_type == 'para_guest':
next_action_type = 'kickstart_guest.initiate'
else:
next_action_type = 'kickstart.initiate'
log_debug(4, "next_action_type: %s" % next_action_type)
# More hideous hacked together code to get around our inflexible actions "framework".
# If next_action_type is "None", we're assuming that we're *not* in a kickstart session
# at this point, so we don't want to update a non-existent kickstart session.
# I feel so dirty. --wregglej
if next_action_type != "None":
server_kickstart.update_kickstart_session(server_id, action_id,
action_status, kickstart_state=kickstart_state,
next_action_type=next_action_type)
_mark_dep_failures(server_id, action_id, data)
def remove(server_id, action_id, data={}):
log_debug(3, action_id, data.get('name'))
_mark_dep_failures(server_id, action_id, data)
_query_delete_dep_failures = rhnSQL.Statement("""
delete from rhnActionPackageRemovalFailure
where server_id = :server_id and action_id = :action_id
""")
_query_insert_dep_failures = rhnSQL.Statement("""
insert into rhnActionPackageRemovalFailure (
server_id, action_id, name_id, evr_id, capability_id,
flags, suggested, sense)
values (
:server_id, :action_id, LOOKUP_PACKAGE_NAME(:name),
LOOKUP_EVR(:epoch, :version, :release),
LOOKUP_PACKAGE_CAPABILITY(:needs_name, :needs_version),
:flags, LOOKUP_PACKAGE_NAME(:suggested, :ignore_null), :sense)
""")
def _mark_dep_failures(server_id, action_id, data):
if not data:
log_debug(4, "Nothing to do")
return
failed_deps = data.get('failed_deps')
if not failed_deps:
log_debug(4, "No failed deps")
return
if not isinstance(failed_deps, ListType):
# Not the right format
log_error("action_extra_data.packages.remove: server %s, action %s: "
"wrong type %s" % (server_id, action_id, type(failed_deps)))
return
inserts = {}
for f in ('server_id', 'action_id',
'name', 'version', 'release', 'epoch',
'needs_name', 'needs_version', 'ignore_null',
'flags', 'suggested', 'sense'):
inserts[f] = []
for failed_dep in failed_deps:
try:
pkg, needs_pkg, flags, suggested, sense = _check_dep(server_id,
action_id, failed_dep)
except InvalidDep:
continue
inserts['server_id'].append(server_id)
inserts['action_id'].append(action_id)
inserts['name'] .append(pkg[0])
inserts['version'].append(pkg[1])
inserts['release'].append(pkg[2])
inserts['epoch'].append(None)
inserts['needs_name'].append(needs_pkg[0])
inserts['needs_version'].append(needs_pkg[1])
inserts['flags'].append(flags)
inserts['suggested'].append(suggested)
inserts['ignore_null'].append(1)
inserts['sense'].append(sense)
h = rhnSQL.prepare(_query_delete_dep_failures)
rowcount = h.execute(server_id=server_id, action_id=action_id)
log_debug(5, "Removed old rows", rowcount)
h = rhnSQL.prepare(_query_insert_dep_failures)
rowcount = h.execute_bulk(inserts)
log_debug(5, "Inserted rows", rowcount)
def _check_dep(server_id, action_id, failed_dep):
log_debug(5, failed_dep)
if not failed_dep:
return
if not isinstance(failed_dep, ListType):
# Not the right format
log_error("action_extra_data.packages.remove: server %s, action %s: "
"failed dep type error: %s" % (
server_id, action_id, type(failed_dep)))
raise InvalidDep
# This is boring, but somebody's got to do it
if len(failed_dep) < 5:
log_error("action_extra_data.packages.remove: server %s, action %s: "
"failed dep: not enough entries: %s" % (
server_id, action_id, len(failed_dep)))
raise InvalidDep
pkg, needs_pkg, flags, suggested, sense = failed_dep[:5]
if not isinstance(pkg, ListType) or len(pkg) < 3:
log_error("action_extra_data.packages.remove: server %s, action %s: "
"failed dep: bad package spec %s (type %s, len %s)" % (
server_id, action_id, pkg, type(pkg), len(pkg)))
raise InvalidDep
pkg = map(str, pkg[:3])
if not isinstance(needs_pkg, ListType) or len(needs_pkg) < 2:
log_error("action_extra_data.packages.remove: server %s, action %s: "
"failed dep: bad needs package spec %s (type %s, len %s)" % (
server_id, action_id, needs_pkg, type(needs_pkg),
len(needs_pkg)))
raise InvalidDep
needs_pkg = map(str, needs_pkg[:2])
if not isinstance(flags, IntType):
log_error("action_extra_data.packages.remove: server %s, action %s: "
"failed dep: bad flags type %s" % (server_id, action_id, type(flags)))
raise InvalidDep
if not isinstance(sense, IntType):
log_error("action_extra_data.packages.remove: server %s, action %s: "
"failed dep: bad sense type %s" % (server_id, action_id, type(sense)))
raise InvalidDep
return pkg, needs_pkg, flags, str(suggested), sense
def refresh_list(server_id, action_id, data={}):
if not data:
return
log_debug(2, "action_extra_data.packages.refresh_list: Should do something "
"useful with this data", server_id, action_id, data)
def delta(server_id, action_id, data={}):
if not data:
return
log_debug(2, "action_extra_data.packages.delta: Should do something "
"useful with this data", server_id, action_id, data)
def runTransaction(server_id, action_id, data={}):
log_debug(3, action_id)
# If it's a kickstart-related transaction, mark the kickstart session as
# completed
action_status = rhnFlags.get('action_status')
ks_session_id = _next_kickstart_step(server_id, action_id, action_status)
# Cleanup package profile
server_kickstart.cleanup_profile(server_id, action_id, ks_session_id,
action_status)
_mark_dep_failures(server_id, action_id, data)
# Determine the next step to be executed in the kickstart code
def _next_kickstart_step(server_id, action_id, action_status):
if action_status == 3: # Failed
# Nothing more to do here
return server_kickstart.update_kickstart_session(server_id,
action_id, action_status, kickstart_state='complete',
next_action_type=None)
# Fetch kickstart session id
ks_session_id = server_kickstart.get_kickstart_session_id(server_id,
action_id)
if ks_session_id is None:
return server_kickstart.update_kickstart_session(server_id,
action_id, action_status, kickstart_state='complete',
next_action_type=None)
# Get the current server profile
server_profile = server_kickstart.get_server_package_profile(server_id)
server_kickstart.schedule_config_deploy(server_id, action_id,
ks_session_id, server_profile=server_profile)
return ks_session_id
|
hustodemon/spacewalk
|
backend/server/action_extra_data/packages.py
|
Python
|
gpl-2.0
| 16,087
|
# -*- coding: utf-8 -*-
import random
import pygame
import classes.board
import classes.drw.fraction_hq
import classes.game_driver as gd
import classes.level_controller as lc
import classes.extras as ex
class Board(gd.BoardGame):
def __init__(self, mainloop, speaker, config, screen_w, screen_h):
self.level = lc.Level(self, mainloop, 15, 3)
gd.BoardGame.__init__(self, mainloop, speaker, config, screen_w, screen_h, 15, 10)
def create_game_objects(self, level=1):
self.max_size = 99
self.board.draw_grid = False
if self.mainloop.scheme is not None:
white = self.mainloop.scheme.u_color
line_color = self.mainloop.scheme.u_font_color
h1 = 170
h2 = 40
color1 = ex.hsv_to_rgb(h1, 255, 255)
color2 = ex.hsv_to_rgb(h2, 75, 255)
bd_color1 = ex.hsv_to_rgb(h1, 127, 155)
bd_color2 = ex.hsv_to_rgb(h2, 127, 155)
else:
white = (255, 255, 255)
line_color = (0, 0, 0)
h1 = random.randrange(0, 255)
h2 = (h1 + 128) % 255
color1 = ex.hsv_to_rgb(h1, 150, 255)
color2 = ex.hsv_to_rgb(h2, 40, 255)
bd_color1 = ex.hsv_to_rgb(h1, 187, 200)
bd_color2 = ex.hsv_to_rgb(h2, 100, 200)
transp = (0, 0, 0, 0)
data = [22, 14]
f_size = 10
self.data = data
self.vis_buttons = [0, 0, 0, 0, 1, 1, 1, 0, 1]
self.mainloop.info.hide_buttonsa(self.vis_buttons)
self.layout.update_layout(data[0], data[1])
scale = self.layout.scale
self.board.level_start(data[0], data[1], scale)
self.board.board_bg.update_me = True
self.board.board_bg.line_color = (20, 20, 20)
self.multiplier = 2
lst = self.get_multiple_factors(80, 3)
n = random.randint(0, len(lst)-1)
num1 = lst[n][0]
num2 = lst[n][1]
self.numbers = [num1, num2]
self.numbers2 = [num1, num2 * self.multiplier]
self.max_num = 119
# add first fraction
self.board.add_unit(0, 0, f_size, f_size, classes.board.Label, "", white, "", 0)
self.fraction_canvas = self.board.units[-1]
self.fraction = classes.drw.fraction_hq.Fraction(1, self.board.scale * f_size, color1, color2, bd_color1,
bd_color2, self.numbers, 2)
self.fraction.set_offset(0, 0)
self.fraction_canvas.painting = self.fraction.get_canvas().copy()
self.board.add_unit(2, f_size, 2, 2, classes.board.ImgCenteredShip, "", transp,
img_src='nav_l_mts.png', alpha=True)
self.board.ships[-1].set_tint_color(bd_color1)
self.board.add_unit(4, f_size, 2, 2, classes.board.Label, str(self.numbers[0]), white, "", 31)
self.nm1 = self.board.units[-1]
self.nm1.checkable = True
self.nm1.init_check_images()
self.nm1.set_fraction_lines(top=False, bottom=True, color=line_color)
self.nm1.font_color = bd_color1
self.board.add_unit(6, f_size, 2, 2, classes.board.ImgCenteredShip, "", transp,
img_src='nav_r_mts.png', alpha=True)
self.board.ships[-1].set_tint_color(bd_color1)
self.board.add_unit(2, f_size + 2, 2, 2, classes.board.ImgCenteredShip, "", transp,
img_src='nav_l_mts.png', alpha=True)
self.board.ships[-1].set_tint_color(bd_color2)
self.board.add_unit(4, f_size + 2, 2, 2, classes.board.Label, str(self.numbers[1]), white, "", 31)
self.nm2 = self.board.units[-1]
self.nm2.checkable = True
self.nm2.init_check_images()
self.nm2.font_color = bd_color2
self.board.add_unit(6, f_size + 2, 2, 2, classes.board.ImgCenteredShip, "", transp,
img_src='nav_r_mts.png', alpha=True)
self.board.ships[-1].set_tint_color(bd_color2)
# add second fraction
self.board.add_unit(f_size + 2, 0, f_size, f_size, classes.board.Label, "", white, "", 0)
self.fraction2_canvas = self.board.units[-1]
self.fraction2 = classes.drw.fraction_hq.Fraction(1, self.board.scale * f_size, color1, color2, bd_color1,
bd_color2, self.numbers2, 2)
self.fraction2.set_offset(0, 0)
self.fraction2_canvas.painting = self.fraction2.get_canvas().copy()
self.board.add_unit(f_size + 6, f_size, 2, 2, classes.board.Label, str(self.numbers2[0]), white, "", 31)
self.nm1a = self.board.units[-1]
self.nm1a.checkable = True
self.nm1a.init_check_images()
self.nm1a.set_fraction_lines(top=False, bottom=True, color=line_color)
self.nm1a.font_color = bd_color1
self.board.add_unit(f_size + 6, f_size + 2, 2, 2, classes.board.Label, str(self.numbers2[1]), white, "", 31)
self.nm2a = self.board.units[-1]
self.nm2a.checkable = True
self.nm2a.init_check_images()
self.nm2a.font_color = bd_color2
self.factor_list = []
for i in range(12):
self.board.add_unit(f_size, i, 2, 1, classes.board.Letter, "", white, "", 0)
self.board.ships[-1].font_color = bd_color1
self.factor_list.append(self.board.ships[-1])
self.update_factors()
self.active_factor = self.factor_list[0]
self.activate_factor(0)
for each in self.board.ships:
each.readable = False
each.immobilize()
def show_info_dialog(self):
self.mainloop.dialog.show_dialog(3, self.lang.d["To reduce a fraction..."])
def update_factors(self):
self.factors = self.get_factors(self.numbers)
ld = len(self.factors)
for i in range(12):
if i < ld:
val = str(self.factors[i])
else:
val = ""
self.factor_list[i].set_value(val)
self.activate_factor(0)
def get_multiple_factors(self, top, min_of_factors):
"""
Get a list of tuples of numbers that have more than min_of_factors common factors
:param top: top of the range, i.e. 1-120
:param min_of_factors: minimum number of common factors
:return: list of tuples of numbers that have more than min_of_factors common factors
"""
lst = []
for i in range(1, top + 1):
for j in range(1, top + 1):
if i < j:
if len(self.get_factors((i, j))) > min_of_factors:
lst.append((i, j))
return lst
def get_factors(self, n):
"""
Get a list of common factors
:param n: list/tuple of (numerator, denominator)
:return: a list of common factors for both numbers in n
"""
mn = int(min(n[0], n[1]))
mx = int(max(n[0], n[1]))
lst = [1]
if mn > 3:
for i in range(2, int(mn / 2 + 1)):
if mn % i == 0 and mx % i == 0:
lst.append(i)
if mx % mn == 0 and mn != 1:
lst.append(mn)
return lst
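# Worked example (illustrative): get_factors((12, 24)) walks 2..6 and keeps 2, 3, 4
# and 6, then appends 12 because 24 % 12 == 0, giving [1, 2, 3, 4, 6, 12]; a pair
# like (12, 24) therefore has more than 3 common factors and would be returned by
# get_multiple_factors(80, 3).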
def activate_factor(self, active):
if len(self.factor_list[active].value) > 0:
self.factor_list[active].update_font_size(31)
self.active_factor = self.factor_list[active]
for i in range(12):
if i != active:
self.factor_list[i].update_font_size(25)
self.factor_list[i].update_me = True
self.update_fractions()
self.mainloop.redraw_needed[0] = True
def handle(self, event):
gd.BoardGame.handle(self, event)
if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
active = self.board.active_ship
if active == 0:
self.change_fract_btn(-1, 0)
elif active == 1:
self.change_fract_btn(1, 0)
elif active == 2:
self.change_fract_btn(0, -1)
elif active == 3:
self.change_fract_btn(0, 1)
elif 3 < active < 16:
self.activate_factor(active-4)
self.auto_check_reset()
elif event.type == pygame.KEYDOWN and (event.key == pygame.K_RETURN or event.key == pygame.K_KP_ENTER):
self.check_result()
elif event.type == pygame.KEYDOWN:
self.auto_check_reset()
def auto_check_reset(self):
self.nm1.set_display_check(None)
self.nm2.set_display_check(None)
def change_fract_btn(self, n1, n2):
if n1 == -1:
if self.numbers[0] > 1:
self.numbers[0] -= 1
elif n1 == 1:
if self.numbers[0] < self.max_num:
self.numbers[0] += 1
if self.numbers[0] >= self.numbers[1]:
self.numbers[1] = self.numbers[0]+1
elif n2 == -1:
if self.numbers[1] > 2:
self.numbers[1] -= 1
if self.numbers[0] >= self.numbers[1]:
self.numbers[0] = self.numbers[1]-1
elif n2 == 1:
if self.numbers[1] <= self.max_num:
self.numbers[1] += 1
if self.numbers[0] == 1:
self.board.ships[0].change_image("nav_l_mtsd.png")
elif self.numbers[0] == self.max_num:
self.board.ships[1].change_image("nav_r_mtsd.png")
else:
self.board.ships[0].change_image("nav_l_mts.png")
self.board.ships[1].change_image("nav_r_mts.png")
if self.numbers[1] == 2:
self.board.ships[2].change_image("nav_l_mtsd.png")
elif self.numbers[1] == self.max_num + 1:
self.board.ships[3].change_image("nav_r_mtsd.png")
else:
self.board.ships[2].change_image("nav_l_mts.png")
self.board.ships[3].change_image("nav_r_mts.png")
self.update_factors()
self.update_fractions()
def update_fractions(self):
self.nm1.set_value(str(self.numbers[0]))
self.nm2.set_value(str(self.numbers[1]))
self.fraction.update_values(self.numbers)
self.fraction_canvas.painting = self.fraction.get_canvas().copy()
self.mainloop.redraw_needed[0] = True
self.fraction_canvas.update_me = True
self.nm1a.set_value(str(int(round(self.numbers[0] / float(self.active_factor.value)))))
self.nm2a.set_value(str(int(round(self.numbers[1] / float(self.active_factor.value)))))
self.fraction2.update_values((int(round(self.numbers[0] / float(self.active_factor.value))), int(round(self.numbers[1] / float(self.active_factor.value)))))
self.fraction2_canvas.painting = self.fraction2.get_canvas().copy()
self.fraction2_canvas.update_me = True
self.mainloop.redraw_needed[0] = True
def update(self, game):
game.fill((255, 255, 255))
gd.BoardGame.update(self, game)
def check_result(self):
pass
|
imiolek-ireneusz/eduActiv8
|
game_boards/game093.py
|
Python
|
gpl-3.0
| 11,038
|
# Author: echel0n <echel0n@sickrage.ca>
# URL: https://sickrage.ca
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from hashlib import md5
from CodernityDB.hash_index import HashIndex
class CacheLastUpdateIndex(HashIndex):
_version = 2
def __init__(self, *args, **kwargs):
kwargs['key_format'] = '32s'
super(CacheLastUpdateIndex, self).__init__(*args, **kwargs)
def make_key_value(self, data):
if data.get('_t') == 'lastUpdate' and data.get('provider'):
return md5(data.get('provider')).hexdigest(), None
def make_key(self, key):
return md5(key.encode('utf-8')).hexdigest()
class CacheLastSearchIndex(HashIndex):
_version = 2
def __init__(self, *args, **kwargs):
kwargs['key_format'] = '32s'
super(CacheLastSearchIndex, self).__init__(*args, **kwargs)
def make_key_value(self, data):
if data.get('_t') == 'lastSearch' and data.get('provider'):
return md5(data.get('provider')).hexdigest(), None
def make_key(self, key):
return md5(key.encode('utf-8')).hexdigest()
class CacheSceneExceptionsIndex(HashIndex):
_version = 1
def __init__(self, *args, **kwargs):
kwargs['key_format'] = 'I'
super(CacheSceneExceptionsIndex, self).__init__(*args, **kwargs)
def make_key_value(self, data):
if data.get('_t') == 'scene_exceptions' and data.get('indexer_id'):
return data.get('indexer_id'), None
def make_key(self, key):
return key
class CacheSceneNamesIndex(HashIndex):
_version = 2
def __init__(self, *args, **kwargs):
kwargs['key_format'] = '32s'
super(CacheSceneNamesIndex, self).__init__(*args, **kwargs)
def make_key_value(self, data):
if data.get('_t') == 'scene_names' and data.get('name'):
return md5(data.get('name')).hexdigest(), None
def make_key(self, key):
return md5(key.encode('utf-8')).hexdigest()
class CacheNetworkTimezonesIndex(HashIndex):
_version = 2
def __init__(self, *args, **kwargs):
kwargs['key_format'] = '32s'
super(CacheNetworkTimezonesIndex, self).__init__(*args, **kwargs)
def make_key_value(self, data):
if data.get('_t') == 'network_timezones' and data.get('network_name'):
return md5(data.get('network_name')).hexdigest(), None
def make_key(self, key):
return md5(key.encode('utf-8')).hexdigest()
class CacheSceneExceptionsRefreshIndex(HashIndex):
_version = 2
def __init__(self, *args, **kwargs):
kwargs['key_format'] = '32s'
super(CacheSceneExceptionsRefreshIndex, self).__init__(*args, **kwargs)
def make_key_value(self, data):
if data.get('_t') == 'scene_exceptions_refresh' and data.get('list'):
return md5(data.get('list')).hexdigest(), None
def make_key(self, key):
return md5(key.encode('utf-8')).hexdigest()
class CacheProvidersIndex(HashIndex):
_version = 3
def __init__(self, *args, **kwargs):
kwargs['key_format'] = '32s'
super(CacheProvidersIndex, self).__init__(*args, **kwargs)
def make_key_value(self, data):
if data.get('_t') == 'providers' and data.get('provider'):
return md5(data.get('provider')).hexdigest(), None
def make_key(self, key):
return md5(key.encode('utf-8')).hexdigest()
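# Illustrative sketch: each index keys documents of one '_t' type by the 32-character
# md5 hex digest (matching the '32s' key_format) of a distinguishing field, so a lookup
# key can be built as
#
#     from hashlib import md5
#     key = md5('some_provider_name'.encode('utf-8')).hexdigest()
#
# where 'some_provider_name' is a placeholder provider name, not a real one.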
|
gborri/SickRage
|
sickrage/core/databases/cache/index.py
|
Python
|
gpl-3.0
| 4,038
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import random
import geohash
import pandas as pd
from sqlalchemy import DateTime, Float, String
from superset import db
from superset.utils import core as utils
from .helpers import (
get_example_data,
get_slice_json,
merge_slice,
misc_dash_slices,
Slice,
TBL,
)
def load_long_lat_data(only_metadata=False, force=False):
"""Loading lat/long data from a csv file in the repo"""
tbl_name = "long_lat"
database = utils.get_example_database()
table_exists = database.has_table_by_name(tbl_name)
if not only_metadata and (not table_exists or force):
data = get_example_data("san_francisco.csv.gz", make_bytes=True)
pdf = pd.read_csv(data, encoding="utf-8")
start = datetime.datetime.now().replace(
hour=0, minute=0, second=0, microsecond=0
)
pdf["datetime"] = [
start + datetime.timedelta(hours=i * 24 / (len(pdf) - 1))
for i in range(len(pdf))
]
pdf["occupancy"] = [random.randint(1, 6) for _ in range(len(pdf))]
pdf["radius_miles"] = [random.uniform(1, 3) for _ in range(len(pdf))]
pdf["geohash"] = pdf[["LAT", "LON"]].apply(lambda x: geohash.encode(*x), axis=1)
pdf["delimited"] = pdf["LAT"].map(str).str.cat(pdf["LON"].map(str), sep=",")
pdf.to_sql( # pylint: disable=no-member
tbl_name,
database.get_sqla_engine(),
if_exists="replace",
chunksize=500,
dtype={
"longitude": Float(),
"latitude": Float(),
"number": Float(),
"street": String(100),
"unit": String(10),
"city": String(50),
"district": String(50),
"region": String(50),
"postcode": Float(),
"id": String(100),
"datetime": DateTime(),
"occupancy": Float(),
"radius_miles": Float(),
"geohash": String(12),
"delimited": String(60),
},
index=False,
)
print("Done loading table!")
print("-" * 80)
print("Creating table reference")
obj = db.session.query(TBL).filter_by(table_name=tbl_name).first()
if not obj:
obj = TBL(table_name=tbl_name)
obj.main_dttm_col = "datetime"
obj.database = database
db.session.merge(obj)
db.session.commit()
obj.fetch_metadata()
tbl = obj
slice_data = {
"granularity_sqla": "day",
"since": "2014-01-01",
"until": "now",
"viz_type": "mapbox",
"all_columns_x": "LON",
"all_columns_y": "LAT",
"mapbox_style": "mapbox://styles/mapbox/light-v9",
"all_columns": ["occupancy"],
"row_limit": 500000,
}
print("Creating a slice")
slc = Slice(
slice_name="Mapbox Long/Lat",
viz_type="mapbox",
datasource_type="table",
datasource_id=tbl.id,
params=get_slice_json(slice_data),
)
misc_dash_slices.add(slc.slice_name)
merge_slice(slc)
|
zhouyao1994/incubator-superset
|
superset/examples/long_lat.py
|
Python
|
apache-2.0
| 3,916
|
"""
SoftLayer.tests.CLI.core_tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:license: MIT, see LICENSE for more details.
"""
import logging
import SoftLayer
from SoftLayer.CLI import core
from SoftLayer.CLI import environment
from SoftLayer import testing
from SoftLayer import utils
import click
import mock
class CoreTests(testing.TestCase):
def test_load_all(self):
for path, cmd in recursive_subcommand_loader(core.cli, current_path='root'):
try:
cmd.main(args=['--help'])
except SystemExit as ex:
if ex.code != 0:
self.fail("Non-zero exit code for command: %s" % path)
def test_verbose_max(self):
with mock.patch('logging.getLogger') as log_mock:
result = self.run_command(['-vvv', 'vs', 'list'])
self.assert_no_fail(result)
log_mock().addHandler.assert_called_with(mock.ANY)
log_mock().setLevel.assert_called_with(logging.DEBUG)
def test_build_client(self):
env = environment.Environment()
result = self.run_command(['vs', 'list'], env=env)
self.assert_no_fail(result)
self.assertIsNotNone(env.client)
def test_diagnostics(self):
result = self.run_command(['-v', 'vs', 'list'])
self.assert_no_fail(result)
self.assertIn('SoftLayer_Account::getVirtualGuests', result.output)
self.assertIn('"execution_time"', result.output)
self.assertIn('"api_calls"', result.output)
self.assertIn('"version"', result.output)
self.assertIn('"python_version"', result.output)
self.assertIn('"library_location"', result.output)
class CoreMainTests(testing.TestCase):
@mock.patch('SoftLayer.CLI.core.cli.main')
@mock.patch('sys.stdout', new_callable=utils.StringIO)
def test_unexpected_error(self, stdoutmock, climock):
climock.side_effect = AttributeError('Attribute foo does not exist')
self.assertRaises(SystemExit, core.main)
self.assertIn("Feel free to report this error as it is likely a bug",
stdoutmock.getvalue())
self.assertIn("Traceback (most recent call last)",
stdoutmock.getvalue())
self.assertIn("AttributeError: Attribute foo does not exist",
stdoutmock.getvalue())
@mock.patch('SoftLayer.CLI.core.cli.main')
@mock.patch('sys.stdout', new_callable=utils.StringIO)
def test_sl_error(self, stdoutmock, climock):
ex = SoftLayer.SoftLayerAPIError('SoftLayer_Exception', 'Not found')
climock.side_effect = ex
self.assertRaises(SystemExit, core.main)
self.assertIn("SoftLayerAPIError(SoftLayer_Exception): Not found",
stdoutmock.getvalue())
@mock.patch('SoftLayer.CLI.core.cli.main')
@mock.patch('sys.stdout', new_callable=utils.StringIO)
def test_auth_error(self, stdoutmock, climock):
ex = SoftLayer.SoftLayerAPIError('SoftLayer_Exception',
'Invalid API token.')
climock.side_effect = ex
self.assertRaises(SystemExit, core.main)
self.assertIn("Authentication Failed:", stdoutmock.getvalue())
self.assertIn("use 'slcli config setup'", stdoutmock.getvalue())
def recursive_subcommand_loader(root, current_path=''):
"""Recursively load and list every command."""
if getattr(root, 'list_commands', None) is None:
return
ctx = click.Context(root)
for command in root.list_commands(ctx):
new_path = '%s:%s' % (current_path, command)
logging.info("loading %s", new_path)
new_root = root.get_command(ctx, command)
if new_root is None:
raise Exception('Could not load command: %s' % command)
for path, cmd in recursive_subcommand_loader(new_root,
current_path=new_path):
yield path, cmd
yield current_path, new_root
|
nanjj/softlayer-python
|
tests/CLI/core_tests.py
|
Python
|
mit
| 4,014
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: s3
short_description: manage objects in S3.
description:
- This module allows the user to manage S3 buckets and the objects within them. Includes support for creating and deleting both objects and buckets, retrieving objects as files or strings and generating download links. This module has a dependency on python-boto.
version_added: "1.1"
options:
aws_access_key:
description:
- AWS access key id. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: ['ec2_secret_key', 'secret_key']
bucket:
description: Bucket name.
required: true
default: null
aliases: []
dest:
description:
- The destination file path when downloading an object/key with a GET operation.
required: false
aliases: []
version_added: "1.3"
encrypt:
description:
- When set for PUT mode, asks for server-side encryption
required: false
default: no
expiration:
description:
- Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a mode=put or mode=geturl operation.
required: false
default: 600
aliases: []
marker:
description:
- Specifies the key to start with when using list mode. Object keys are returned in alphabetical order, starting with the key after the marker.
required: false
default: null
version_added: "2.0"
max_keys:
description:
- Max number of results to return in list mode, set this if you want to retrieve fewer than the default 1000 keys.
required: false
default: 1000
version_added: "2.0"
metadata:
description:
- Metadata for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
required: false
default: null
version_added: "1.6"
mode:
description:
- Switches the module behaviour between put (upload), get (download), geturl (return download url, Ansible 1.3+), getstr (download object as string, 1.3+), list (list keys, 2.0+), create (bucket), delete (bucket), and delobj (delete object).
required: true
default: null
aliases: []
object:
description:
- Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples.
required: false
default: null
prefix:
description:
- Limits the response to keys that begin with the specified prefix for list mode
required: false
default: null
version_added: "2.0"
version:
description:
- Version ID of the object inside the bucket. Can be used to get a specific version of a file if versioning is enabled in the target bucket.
required: false
default: null
aliases: []
version_added: "2.0"
overwrite:
description:
- Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations.
required: false
default: true
version_added: "1.2"
region:
description:
- "AWS region to create the bucket in. If not set then the value of the AWS_REGION and EC2_REGION environment variables are checked, followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the region defaults to the S3 Location: US Standard. Prior to ansible 1.8 this parameter could be specified but had no effect."
required: false
default: null
version_added: "1.8"
retries:
description:
- On recoverable failure, how many times to retry before actually failing.
required: false
default: 0
version_added: "2.0"
s3_url:
description: S3 URL endpoint for usage with Eucalyptus, fakes3, etc. Otherwise assumes AWS
default: null
aliases: [ S3_URL ]
src:
description: The source file path when performing a PUT operation.
required: false
default: null
aliases: []
version_added: "1.3"
requirements: [ "boto" ]
author:
- "Lester Wade (@lwade)"
- "Ralph Tice (@ralph-tice)"
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Simple PUT operation
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put
# Simple GET operation
- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get
# Get a specific version of an object.
- s3: bucket=mybucket object=/my/desired/key.txt version=48c9ee5131af7a716edc22df9772aa6f dest=/usr/local/myfile.txt mode=get
# PUT/upload with metadata
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip,Cache-Control=no-cache'
# List keys simple
- s3: bucket=mybucket mode=list
# List keys all options
- s3: bucket=mybucket mode=list prefix=/my/desired/ marker=/my/desired/0023.txt max_keys=472
# Create an empty bucket
- s3: bucket=mybucket mode=create
# Create a bucket with key as directory, in the EU region
- s3: bucket=mybucket object=/my/directory/path mode=create region=eu-west-1
# Delete a bucket and all contents
- s3: bucket=mybucket mode=delete
# GET an object but dont download if the file checksums match
- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get overwrite=different
# Delete an object from a bucket
- s3: bucket=mybucket object=/my/desired/key.txt mode=delobj
'''
import os
import urlparse
import hashlib
from ssl import SSLError
try:
import boto
import boto.ec2
from boto.s3.connection import Location
from boto.s3.connection import OrdinaryCallingFormat
from boto.s3.connection import S3Connection
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def key_check(module, s3, bucket, obj, version=None):
try:
bucket = s3.lookup(bucket)
key_check = bucket.get_key(obj, version_id=version)
except s3.provider.storage_response_error, e:
if version is not None and e.status == 400: # If a specified version doesn't exist a 400 is returned.
key_check = None
else:
module.fail_json(msg=str(e))
if key_check:
return True
else:
return False
def keysum(module, s3, bucket, obj, version=None):
bucket = s3.lookup(bucket)
key_check = bucket.get_key(obj, version_id=version)
if not key_check:
return None
md5_remote = key_check.etag[1:-1]
etag_multipart = '-' in md5_remote # Check for multipart, etag is not md5
if etag_multipart is True:
module.fail_json(msg="Files uploaded with multipart of s3 are not supported with checksum, unable to compute checksum.")
return md5_remote
def bucket_check(module, s3, bucket):
try:
result = s3.lookup(bucket)
except s3.provider.storage_response_error, e:
module.fail_json(msg= str(e))
if result:
return True
else:
return False
def create_bucket(module, s3, bucket, location=None):
if location is None:
location = Location.DEFAULT
try:
bucket = s3.create_bucket(bucket, location=location)
except s3.provider.storage_response_error, e:
module.fail_json(msg= str(e))
if bucket:
return True
def get_bucket(module, s3, bucket):
try:
return s3.lookup(bucket)
except s3.provider.storage_response_error, e:
module.fail_json(msg= str(e))
def list_keys(module, bucket_object, prefix, marker, max_keys):
all_keys = bucket_object.get_all_keys(prefix=prefix, marker=marker, max_keys=max_keys)
keys = [x.key for x in all_keys]
module.exit_json(msg="LIST operation complete", s3_keys=keys)
def delete_bucket(module, s3, bucket):
try:
bucket = s3.lookup(bucket)
bucket_contents = bucket.list()
bucket.delete_keys([key.name for key in bucket_contents])
bucket.delete()
return True
except s3.provider.storage_response_error, e:
module.fail_json(msg= str(e))
def delete_key(module, s3, bucket, obj):
try:
bucket = s3.lookup(bucket)
bucket.delete_key(obj)
module.exit_json(msg="Object deleted from bucket %s"%bucket, changed=True)
except s3.provider.storage_response_error, e:
module.fail_json(msg= str(e))
def create_dirkey(module, s3, bucket, obj):
try:
bucket = s3.lookup(bucket)
key = bucket.new_key(obj)
key.set_contents_from_string('')
module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket.name), changed=True)
except s3.provider.storage_response_error, e:
module.fail_json(msg= str(e))
def upload_file_check(src):
if os.path.exists(src):
file_exists = True
else:
file_exists = False
if os.path.isdir(src):
module.fail_json(msg="Specifying a directory is not a valid source for upload.", failed=True)
return file_exists
def path_check(path):
if os.path.exists(path):
return True
else:
return False
def upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt):
try:
bucket = s3.lookup(bucket)
key = bucket.new_key(obj)
if metadata:
for meta_key in metadata.keys():
key.set_metadata(meta_key, metadata[meta_key])
key.set_contents_from_filename(src, encrypt_key=encrypt)
url = key.generate_url(expiry)
module.exit_json(msg="PUT operation complete", url=url, changed=True)
except s3.provider.storage_copy_error, e:
module.fail_json(msg= str(e))
def download_s3file(module, s3, bucket, obj, dest, retries, version=None):
# retries is the number of loops; range/xrange needs to be one
# more to get that count of loops.
bucket = s3.lookup(bucket)
key = bucket.get_key(obj, version_id=version)
for x in range(0, retries + 1):
try:
key.get_contents_to_filename(dest)
module.exit_json(msg="GET operation complete", changed=True)
except s3.provider.storage_copy_error, e:
module.fail_json(msg= str(e))
except SSLError as e:
# actually fail on last pass through the loop.
if x >= retries:
module.fail_json(msg="s3 download failed; %s" % e)
# otherwise, try again, this may be a transient timeout.
pass
def download_s3str(module, s3, bucket, obj, version=None):
try:
bucket = s3.lookup(bucket)
key = bucket.get_key(obj, version_id=version)
contents = key.get_contents_as_string()
module.exit_json(msg="GET operation complete", contents=contents, changed=True)
except s3.provider.storage_copy_error, e:
module.fail_json(msg= str(e))
def get_download_url(module, s3, bucket, obj, expiry, changed=True):
try:
bucket = s3.lookup(bucket)
key = bucket.lookup(obj)
url = key.generate_url(expiry)
module.exit_json(msg="Download url:", url=url, expiry=expiry, changed=changed)
except s3.provider.storage_response_error, e:
module.fail_json(msg= str(e))
def is_fakes3(s3_url):
""" Return True if s3_url has scheme fakes3:// """
if s3_url is not None:
return urlparse.urlparse(s3_url).scheme in ('fakes3', 'fakes3s')
else:
return False
def is_walrus(s3_url):
""" Return True if it's Walrus endpoint, not S3
We assume anything other than *.amazonaws.com is Walrus"""
if s3_url is not None:
o = urlparse.urlparse(s3_url)
return not o.hostname.endswith('amazonaws.com')
else:
return False
def get_md5_digest(local_file):
md5 = hashlib.md5()
with open(local_file, 'rb') as f:
        # Read in 1 MiB chunks so arbitrarily large files never have to fit in memory.
        for data in iter(lambda: f.read(1024 ** 2), b''):
            md5.update(data)
return md5.hexdigest()
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
bucket = dict(required=True),
dest = dict(default=None),
encrypt = dict(default=True, type='bool'),
expiry = dict(default=600, aliases=['expiration']),
marker = dict(default=None),
max_keys = dict(default=1000),
metadata = dict(type='dict'),
mode = dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True),
object = dict(),
version = dict(default=None),
overwrite = dict(aliases=['force'], default='always'),
prefix = dict(default=None),
retries = dict(aliases=['retry'], type='int', default=0),
s3_url = dict(aliases=['S3_URL']),
src = dict(),
),
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
bucket = module.params.get('bucket')
encrypt = module.params.get('encrypt')
expiry = int(module.params['expiry'])
if module.params.get('dest'):
dest = os.path.expanduser(module.params.get('dest'))
marker = module.params.get('marker')
max_keys = module.params.get('max_keys')
metadata = module.params.get('metadata')
mode = module.params.get('mode')
obj = module.params.get('object')
version = module.params.get('version')
overwrite = module.params.get('overwrite')
prefix = module.params.get('prefix')
retries = module.params.get('retries')
s3_url = module.params.get('s3_url')
src = module.params.get('src')
if overwrite not in ['always', 'never', 'different']:
if module.boolean(overwrite):
overwrite = 'always'
else:
overwrite='never'
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
if region in ('us-east-1', '', None):
# S3ism for the US Standard region
location = Location.DEFAULT
else:
# Boto uses symbolic names for locations but region strings will
# actually work fine for everything except us-east-1 (US Standard)
location = region
if module.params.get('object'):
obj = os.path.expanduser(module.params['object'])
# allow eucarc environment variables to be used if ansible vars aren't set
if not s3_url and 'S3_URL' in os.environ:
s3_url = os.environ['S3_URL']
# Look at s3_url and tweak connection settings
# if connecting to Walrus or fakes3
try:
if is_fakes3(s3_url):
fakes3 = urlparse.urlparse(s3_url)
s3 = S3Connection(
is_secure=fakes3.scheme == 'fakes3s',
host=fakes3.hostname,
port=fakes3.port,
calling_format=OrdinaryCallingFormat(),
**aws_connect_kwargs
)
elif is_walrus(s3_url):
walrus = urlparse.urlparse(s3_url).hostname
s3 = boto.connect_walrus(walrus, **aws_connect_kwargs)
else:
s3 = boto.s3.connect_to_region(location, is_secure=True, calling_format=OrdinaryCallingFormat(), **aws_connect_kwargs)
# use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
if s3 is None:
s3 = boto.connect_s3(**aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg='No Authentication Handler found: %s ' % str(e))
except Exception, e:
module.fail_json(msg='Failed to connect to S3: %s' % str(e))
if s3 is None: # this should never happen
module.fail_json(msg ='Unknown error, failed to create s3 connection, no information from boto.')
# If our mode is a GET operation (download), go through the procedure as appropriate ...
if mode == 'get':
# First, we check to see if the bucket exists, we get "bucket" returned.
bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is False:
module.fail_json(msg="Target bucket cannot be found", failed=True)
# Next, we check to see if the key in the bucket exists. If it exists, it also returns key_matches md5sum check.
keyrtn = key_check(module, s3, bucket, obj, version=version)
if keyrtn is False:
if version is not None:
module.fail_json(msg="Key %s with version id %s does not exist."% (obj, version), failed=True)
else:
module.fail_json(msg="Key %s does not exist."%obj, failed=True)
        # If the destination path doesn't exist, there's no need for the md5sum/ETag check, so just download.
pathrtn = path_check(dest)
if pathrtn is False:
download_s3file(module, s3, bucket, obj, dest, retries, version=version)
# Compare the remote MD5 sum of the object with the local dest md5sum, if it already exists.
if pathrtn is True:
md5_remote = keysum(module, s3, bucket, obj, version=version)
md5_local = get_md5_digest(dest)
if md5_local == md5_remote:
sum_matches = True
if overwrite == 'always':
download_s3file(module, s3, bucket, obj, dest, retries, version=version)
else:
module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite=always parameter to force.", changed=False)
else:
sum_matches = False
if overwrite in ('always', 'different'):
download_s3file(module, s3, bucket, obj, dest, retries, version=version)
else:
module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.")
# Firstly, if key_matches is TRUE and overwrite is not enabled, we EXIT with a helpful message.
if sum_matches is True and overwrite == 'never':
module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite parameter to force.", changed=False)
# At this point explicitly define the overwrite condition.
if sum_matches is True and pathrtn is True and overwrite == 'always':
download_s3file(module, s3, bucket, obj, dest, retries, version=version)
# if our mode is a PUT operation (upload), go through the procedure as appropriate ...
if mode == 'put':
# Use this snippet to debug through conditionals:
# module.exit_json(msg="Bucket return %s"%bucketrtn)
# sys.exit(0)
# Lets check the src path.
pathrtn = path_check(src)
if pathrtn is False:
module.fail_json(msg="Local object for PUT does not exist", failed=True)
# Lets check to see if bucket exists to get ground truth.
bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is True:
keyrtn = key_check(module, s3, bucket, obj)
# Lets check key state. Does it exist and if it does, compute the etag md5sum.
if bucketrtn is True and keyrtn is True:
md5_remote = keysum(module, s3, bucket, obj)
md5_local = get_md5_digest(src)
if md5_local == md5_remote:
sum_matches = True
if overwrite == 'always':
upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt)
else:
get_download_url(module, s3, bucket, obj, expiry, changed=False)
else:
sum_matches = False
if overwrite in ('always', 'different'):
upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt)
else:
module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.")
# If neither exist (based on bucket existence), we can create both.
if bucketrtn is False and pathrtn is True:
create_bucket(module, s3, bucket, location)
upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt)
# If bucket exists but key doesn't, just upload.
if bucketrtn is True and pathrtn is True and keyrtn is False:
upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt)
# Delete an object from a bucket, not the entire bucket
if mode == 'delobj':
if obj is None:
            module.fail_json(msg="object parameter is required", failed=True)
if bucket:
bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is True:
deletertn = delete_key(module, s3, bucket, obj)
if deletertn is True:
module.exit_json(msg="Object %s deleted from bucket %s." % (obj, bucket), changed=True)
else:
module.fail_json(msg="Bucket does not exist.", changed=False)
else:
module.fail_json(msg="Bucket parameter is required.", failed=True)
# Delete an entire bucket, including all objects in the bucket
if mode == 'delete':
if bucket:
bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is True:
deletertn = delete_bucket(module, s3, bucket)
if deletertn is True:
module.exit_json(msg="Bucket %s and all keys have been deleted."%bucket, changed=True)
else:
module.fail_json(msg="Bucket does not exist.", changed=False)
else:
module.fail_json(msg="Bucket parameter is required.", failed=True)
# Support for listing a set of keys
if mode == 'list':
bucket_object = get_bucket(module, s3, bucket)
# If the bucket does not exist then bail out
if bucket_object is None:
module.fail_json(msg="Target bucket (%s) cannot be found"% bucket, failed=True)
list_keys(module, bucket_object, prefix, marker, max_keys)
# Need to research how to create directories without "populating" a key, so this should just do bucket creation for now.
# WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS.
if mode == 'create':
if bucket and not obj:
bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is True:
module.exit_json(msg="Bucket already exists.", changed=False)
else:
module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, s3, bucket, location))
if bucket and obj:
bucketrtn = bucket_check(module, s3, bucket)
if obj.endswith('/'):
dirobj = obj
else:
dirobj = obj + "/"
if bucketrtn is True:
keyrtn = key_check(module, s3, bucket, dirobj)
if keyrtn is True:
module.exit_json(msg="Bucket %s and key %s already exists."% (bucket, obj), changed=False)
else:
create_dirkey(module, s3, bucket, dirobj)
if bucketrtn is False:
created = create_bucket(module, s3, bucket, location)
create_dirkey(module, s3, bucket, dirobj)
# Support for grabbing the time-expired URL for an object in S3/Walrus.
if mode == 'geturl':
if bucket and obj:
bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is False:
module.fail_json(msg="Bucket %s does not exist."%bucket, failed=True)
else:
keyrtn = key_check(module, s3, bucket, obj)
if keyrtn is True:
get_download_url(module, s3, bucket, obj, expiry)
else:
module.fail_json(msg="Key %s does not exist."%obj, failed=True)
else:
module.fail_json(msg="Bucket and Object parameters must be set", failed=True)
if mode == 'getstr':
if bucket and obj:
bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is False:
module.fail_json(msg="Bucket %s does not exist."%bucket, failed=True)
else:
keyrtn = key_check(module, s3, bucket, obj, version=version)
if keyrtn is True:
download_s3str(module, s3, bucket, obj, version=version)
else:
if version is not None:
module.fail_json(msg="Key %s with version id %s does not exist."% (obj, version), failed=True)
else:
module.fail_json(msg="Key %s does not exist."%obj, failed=True)
module.exit_json(failed=False)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
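# Illustrative aside (not part of the original module): the overwrite logic above
# works by comparing get_md5_digest(<local file>) against the ETag that keysum()
# extracts from S3. For a single-part upload the ETag is simply the hex MD5 of the
# object, e.g. an empty file yields 'd41d8cd98f00b204e9800998ecf8427e' on both
# sides. Multipart ETags contain a '-' and are not plain MD5 sums, which is why
# keysum() refuses to do checksum comparisons for them.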
|
Tiger66639/ansible-modules-core
|
cloud/amazon/s3.py
|
Python
|
gpl-3.0
| 25,945
|
"""
n! means n * (n - 1) * ... * 3 * 2 * 1
For example, 10! = 10 * 9 * ... * 3 * 2 * 1 = 3628800,
and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
Find the sum of the digits in the number 100!
"""
from math import factorial
print sum(map(int,list(str(factorial(100)))))
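# Illustrative aside (not in the original solution file): the same expression
# reproduces the worked 10! example from the problem statement above.
assert sum(map(int, str(factorial(10)))) == 27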
|
hickeroar/project-euler
|
020/solution020.py
|
Python
|
mit
| 302
|
#!/usr/bin/env python
'''
Handles access to the WebDAV server's credentials.
'''
from collections import namedtuple
import secretstorage
APPLICATION_NAME = "davify"
FileStorage = namedtuple('FileStorage',
'username password protocol server port path')
def get_secret_storage():
bus = secretstorage.dbus_init()
return secretstorage.get_default_collection(bus)
def store_password(username, pwd, protocol, server, port, path):
'''
stores the given password in the gnome keyring
'''
secret_storage = get_secret_storage()
attrs = {'application': APPLICATION_NAME,
'username': username,
'server': server,
'protocol': protocol,
'port': str(port),
'path': path}
    description = f'davify WebDAV password for <{protocol}://{username}@' \
                  f'{server}:{port}/{path}>'
secret_storage.create_item(description, attrs, pwd.encode('utf-8'))
def get_passwords():
'''
retrieves the stored login data from the keyring
'''
secret_storage = get_secret_storage()
if secret_storage.is_locked():
secret_storage.unlock()
items = [_parse_item(item) for item in secret_storage.search_items(
{'application': APPLICATION_NAME})]
return items
def _parse_item(item):
item_attr = item.get_attributes()
return FileStorage(username=item_attr['username'],
password=item.get_secret().decode('utf-8'),
protocol=item_attr['protocol'],
server=item_attr['server'],
port=item_attr['port'],
path=item_attr['path'])
if __name__ == '__main__':
print(get_passwords())
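    # Hypothetical usage sketch (values are invented for illustration): store a
    # credential with the helper above, then re-list everything davify has saved.
    # Left commented out because it would write to the user's keyring:
    # store_password('alice', 's3cret', 'https', 'dav.example.org', 443, 'remote.php/webdav')
    # print(get_passwords())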
|
AlbertWeichselbraun/davify
|
src/davify/keyring.py
|
Python
|
gpl-3.0
| 1,784
|
# -*- coding: utf-8 -*-
"""
requests.api
~~~~~~~~~~~~
This module implements the Requests API.
:copyright: (c) 2012 by Kenneth Reitz.
:license: Apache2, see LICENSE for more details.
"""
from . import sessions
def request(method, url, **kwargs):
"""Constructs and sends a :class:`Request <Request>`.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
:param data: (optional) Dictionary or list of tuples ``[(key, value)]`` (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
:param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload.
``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')``
or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string
defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers
to add for the file.
:param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) How long to wait for the server to send data
before giving up, as a float, or a :ref:`(connect timeout, read
timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Boolean. Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``.
:type allow_redirects: bool
:param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
:param verify: (optional) Either a boolean, in which case it controls whether we verify
the server's TLS certificate, or a string, in which case it must be a path
to a CA bundle to use. Defaults to ``True``.
:param stream: (optional) if ``False``, the response content will be immediately downloaded.
:param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
:return: :class:`Response <Response>` object
:rtype: requests.Response
Usage::
>>> import requests
      >>> req = requests.request('GET', 'http://httpbin.org/get')
      >>> req
      <Response [200]>
"""
# By using the 'with' statement we are sure the session is closed, thus we
# avoid leaving sockets open which can trigger a ResourceWarning in some
# cases, and look like a memory leak in others.
with sessions.Session() as session:
return session.request(method=method, url=url, **kwargs)
def get(url, params=None, **kwargs):
r"""Sends a GET request.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', True)
return request('get', url, params=params, **kwargs)
def options(url, **kwargs):
    r"""Sends an OPTIONS request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', True)
return request('options', url, **kwargs)
def head(url, **kwargs):
r"""Sends a HEAD request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', False)
return request('head', url, **kwargs)
def post(url, data=None, json=None, **kwargs):
r"""Sends a POST request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('post', url, data=data, json=json, **kwargs)
def put(url, data=None, **kwargs):
r"""Sends a PUT request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('put', url, data=data, **kwargs)
def patch(url, data=None, **kwargs):
r"""Sends a PATCH request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('patch', url, data=data, **kwargs)
def delete(url, **kwargs):
r"""Sends a DELETE request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('delete', url, **kwargs)
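# Illustrative usage of the helpers defined above (kept as a comment because this
# module is normally imported as ``requests.api``; httpbin.org is just a public
# echo service used as a placeholder endpoint):
#
#   import requests
#   r = requests.get('http://httpbin.org/get', params={'q': 'demo'})
#   r.status_code            # e.g. 200
#   r = requests.post('http://httpbin.org/post', json={'key': 'value'})
#   r.json()                 # parsed response body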
|
lllucius/climacast
|
requests/api.py
|
Python
|
agpl-3.0
| 6,229
|
from tcpgecko import TCPGecko
from binascii import hexlify, unhexlify
import sys
try: import __builtin__
except: import builtins as __builtin__
def hex(value, fill):
return "0x" + __builtin__.hex(value).lstrip("0x").rstrip("L").zfill(fill).upper()
tcp = TCPGecko("192.168.0.10")
title_id = 0x0005000010144F00 #Smash USA
SYSCheckTitleExists = tcp.get_symbol("sysapp.rpl", "SYSCheckTitleExists", True)
doesExist = SYSCheckTitleExists(title_id >> 32, title_id & 0xFFFFFFFF)
if not doesExist: print("Title " + hex(title_id, 16) + " does not exist!")
else:
SYSLaunchTitle = tcp.get_symbol("sysapp.rpl", "SYSLaunchTitle", True)
SYSLaunchTitle(title_id >> 32, title_id & 0xFFFFFFFF)
print("Game switched!")
tcp.s.close()
|
XBigTK13X/wiiu-memshark
|
vendor/tcpgecko/syshax.py
|
Python
|
mit
| 732
|
# coding: utf-8
"""
MINDBODY Public API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.get_clients_response import GetClientsResponse # noqa: E501
from swagger_client.rest import ApiException
class TestGetClientsResponse(unittest.TestCase):
"""GetClientsResponse unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testGetClientsResponse(self):
"""Test GetClientsResponse"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.get_clients_response.GetClientsResponse() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
mindbody/API-Examples
|
SDKs/Python/test/test_get_clients_response.py
|
Python
|
bsd-2-clause
| 972
|
#!/usr/bin/env python
import os
import sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(BASE_DIR)
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_project.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
clearspark/django-monthfield
|
test_project/manage.py
|
Python
|
bsd-3-clause
| 354
|
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all packages in progress.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
# Initialize appropriate service.
pkg_service = client.GetService('PackageService', version='v201808')
# Create a statement to select packages.
statement = (ad_manager.StatementBuilder(version='v201808')
.Where('status = :status')
.WithBindVariable('status', 'IN_PROGRESS'))
# Retrieve a small amount of packages at a time, paging
# through until all packages have been retrieved.
while True:
response = pkg_service.getPackagesByStatement(statement.ToStatement())
if 'results' in response and len(response['results']):
for pkg in response['results']:
# Print out some information for each package.
print(
'Package with ID "%d", name "%s", and proposal ID "%d" was found.\n'
% (pkg['id'], pkg['name'], pkg['proposalId']))
statement.offset += statement.limit
else:
break
  print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
|
Aloomaio/googleads-python-lib
|
examples/ad_manager/v201808/package_service/get_in_progress_packages.py
|
Python
|
apache-2.0
| 1,890
|
## Copyright (c) 2012-2015 Aldebaran Robotics. All rights reserved.
## Use of this source code is governed by a BSD-style license that can be
## found in the COPYING file.
import os
import qisys.script
from qibuild.test.conftest import TestBuildWorkTree
import pytest
def test_pml_outside_worktree(tmpdir, monkeypatch):
foo = tmpdir.mkdir("foo")
pml_path = foo.join("foo.pml")
pml_path.write("""
<Package name="foo" format_version="4">
<Translations>
<Translation name="foo_fr_FR"
src="translations/foo_fr_FR.ts"
language="fr_FR" />
</Translations>
</Package>
""")
translations_dir = foo.mkdir("translations")
translations_dir.join("foo_fr_FR.ts").write("""
<TS language="fr_FR" version="2.1">
<context>
<name>QApplication</name>
<message>
<location filename="../main.cpp" line="24" />
<source>Hello world!</source>
<translation>Bonjour, monde</translation>
</message>
</context>
</TS>
""")
monkeypatch.chdir(foo)
qisys.script.run_action("qilinguist.actions.release", [pml_path.strpath])
qm_path = translations_dir.join("foo_fr_FR.qm")
assert qm_path.check(file=True)
def test_raise_when_no_project_given_outside_a_worktree(tmpdir, monkeypatch):
monkeypatch.chdir(tmpdir)
with pytest.raises(Exception) as e:
qisys.script.run_action("qilinguist.actions.release")
assert "outside a worktree" in e.value.message
def test_non_translated_messages_gettext(qilinguist_action, record_messages):
trad_project = qilinguist_action.trad
qilinguist_action.create_po(trad_project)
main_cpp = os.path.join(trad_project.path, "main.cpp")
with open(main_cpp, "a") as fp:
fp.write("""
char* foo() {
return _("Hello, world");
}
""")
qilinguist_action("update", "translate")
qilinguist_action("release", "translate", raises=True)
assert record_messages.find("untranslated")
def test_non_translated_messages_qt(qilinguist_action):
build_worktree = TestBuildWorkTree()
project = build_worktree.add_test_project("translateme/qt")
qilinguist_action("update", "helloqt")
qilinguist_action("release", "helloqt", raises=True)
def test_invalid_po_file(qilinguist_action):
trad_project = qilinguist_action.trad
qilinguist_action.create_po(trad_project)
fr_FR_po = os.path.join(trad_project.path, "po", "fr_FR.po")
with open(fr_FR_po, "a") as fp:
fp.write("""
#: broken
syntax-error
""")
error = qilinguist_action("release", "translate", raises=True)
assert "failed" in error
|
dmerejkowsky/qibuild
|
python/qilinguist/test/test_qilinguist_release.py
|
Python
|
bsd-3-clause
| 2,577
|
from __future__ import (absolute_import, division, print_function)
import unittest
from mantid.simpleapi import logger
import numpy as np
import json
import AbinsModules
def old_python():
    """Check if Python has proper version."""
is_python_old = AbinsModules.AbinsTestHelpers.old_python()
if is_python_old:
logger.warning("Skipping AbinsLoadCRYSTALTest because Python is too old.")
return is_python_old
def skip_if(skipping_criteria):
"""
Skip all tests if the supplied function returns true.
Python unittest.skipIf is not available in 2.6 (RHEL6) so we'll roll our own.
"""
def decorate(cls):
if skipping_criteria():
for attr in cls.__dict__.keys():
if callable(getattr(cls, attr)) and 'test' in attr:
delattr(cls, attr)
return cls
return decorate
@skip_if(old_python)
class AbinsLoadCRYSTALTest(unittest.TestCase):
# simple tests
def test_non_existing_file(self):
with self.assertRaises(IOError):
bad_crystal_reader = AbinsModules.LoadCRYSTAL(input_dft_filename="NonExistingFile.txt")
bad_crystal_reader.read_phonon_file()
with self.assertRaises(ValueError):
# noinspection PyUnusedLocal
poor_crystal_reader = AbinsModules.LoadCRYSTAL(input_dft_filename=1)
def tearDown(self):
AbinsModules.AbinsTestHelpers.remove_output_files(list_of_names=["LoadCRYSTAL"])
# *************************** USE CASES ********************************************
# ===================================================================================
# | Use case: Gamma point calculation for CRYSTAL |
# ===================================================================================
_gamma_crystal = "crystalB3LYP_LoadCRYSTAL"
_set_crystal = "crystal_set_key_LoadCRYSTAL"
_molecule = "toluene_molecule_LoadCRYSTAL"
_phonon_dispersion_v1 = "mgo-GX_LoadCRYSTAL"
_phonon_dispersion_v2 = "MgO-222-DISP_LoadCRYSTAL"
def test_gamma_crystal(self):
self._check(name=self._gamma_crystal)
self._check(name=self._set_crystal)
def test_molecule(self):
self._check(name=self._molecule)
def test_phonon_dispersion_crystal(self):
self._check(name=self._phonon_dispersion_v1)
self._check(name=self._phonon_dispersion_v2)
def _check(self, name=None):
# get calculated data
data = self._read_dft(filename=name)
# get correct data
correct_data = self._prepare_data(filename=name)
# check read data
self._check_reader_data(correct_data=correct_data, data=data, filename=name)
# check loaded data
self._check_loader_data(correct_data=correct_data, input_dft_filename=name)
def _read_dft(self, filename=None):
"""
Reads data from .phonon file.
:param filename: name of file with phonon data (name + phonon)
:return: phonon data
"""
# 1) Read data
crystal_reader = AbinsModules.LoadCRYSTAL(
input_dft_filename=AbinsModules.AbinsTestHelpers.find_file(filename=filename + ".out"))
data = self._get_reader_data(crystal_reader=crystal_reader)
# test validData method
self.assertEqual(True, crystal_reader._clerk._valid_hash())
return data
# noinspection PyMethodMayBeStatic
def _prepare_data(self, filename=None):
"""Reads a correct values from ASCII file."""
with open(AbinsModules.AbinsTestHelpers.find_file(filename + "_data.txt")) as data_file:
correct_data = json.loads(data_file.read().replace("\n", " "))
num_k = len(correct_data["datasets"]["k_points_data"]["weights"])
atoms = len(correct_data["datasets"]["atoms_data"])
array = {}
for k in range(num_k):
temp = np.loadtxt(
AbinsModules.AbinsTestHelpers.find_file(
filename + "_atomic_displacements_data_%s.txt" % k)).view(complex).reshape(-1)
total_size = temp.size
num_freq = int(total_size / (atoms * 3))
array[str(k)] = temp.reshape(atoms, num_freq, 3)
freq = correct_data["datasets"]["k_points_data"]["frequencies"][str(k)]
correct_data["datasets"]["k_points_data"]["frequencies"][str(k)] = np.asarray(freq)
correct_data["datasets"]["k_points_data"].update({"atomic_displacements": array})
return correct_data
def _check_reader_data(self, correct_data=None, data=None, filename=None):
# check data
correct_k_points = correct_data["datasets"]["k_points_data"]
items = data["datasets"]["k_points_data"]
for k in correct_k_points["frequencies"]:
self.assertEqual(True, np.allclose(correct_k_points["frequencies"][k], items["frequencies"][k]))
self.assertEqual(True, np.allclose(correct_k_points["atomic_displacements"][k],
items["atomic_displacements"][k]))
self.assertEqual(True, np.allclose(correct_k_points["k_vectors"][k], items["k_vectors"][k]))
self.assertEqual(correct_k_points["weights"][k], items["weights"][k])
correct_atoms = correct_data["datasets"]["atoms_data"]
atoms = data["datasets"]["atoms_data"]
for item in range(len(correct_atoms)):
self.assertEqual(correct_atoms["atom_%s" % item]["sort"], atoms["atom_%s" % item]["sort"])
self.assertAlmostEqual(correct_atoms["atom_%s" % item]["mass"], atoms["atom_%s" % item]["mass"],
delta=0.00001) # delta in amu units
self.assertEqual(correct_atoms["atom_%s" % item]["symbol"], atoms["atom_%s" % item]["symbol"])
self.assertEqual(True, np.allclose(np.array(correct_atoms["atom_%s" % item]["fract_coord"]),
atoms["atom_%s" % item]["fract_coord"]))
# check attributes
self.assertEqual(correct_data["attributes"]["advanced_parameters"], data["attributes"]["advanced_parameters"])
self.assertEqual(correct_data["attributes"]["hash"], data["attributes"]["hash"])
self.assertEqual(correct_data["attributes"]["DFT_program"], data["attributes"]["DFT_program"])
self.assertEqual(AbinsModules.AbinsTestHelpers.find_file(filename + ".out"), data["attributes"]["filename"])
# check datasets
self.assertEqual(True, np.allclose(correct_data["datasets"]["unit_cell"], data["datasets"]["unit_cell"]))
def _check_loader_data(self, correct_data=None, input_dft_filename=None):
loader = AbinsModules.LoadCRYSTAL(
input_dft_filename=AbinsModules.AbinsTestHelpers.find_file(input_dft_filename + ".out"))
loaded_data = loader.load_formatted_data().extract()
# k points
correct_items = correct_data["datasets"]["k_points_data"]
items = loaded_data["k_points_data"]
for k in correct_items["frequencies"]:
self.assertEqual(True, np.allclose(correct_items["frequencies"][k], items["frequencies"][k]))
self.assertEqual(True, np.allclose(correct_items["atomic_displacements"][k],
items["atomic_displacements"][k]))
self.assertEqual(True, np.allclose(correct_items["k_vectors"][k], items["k_vectors"][k]))
self.assertEqual(correct_items["weights"][k], items["weights"][k])
# atoms
correct_atoms = correct_data["datasets"]["atoms_data"]
atoms = loaded_data["atoms_data"]
for item in range(len(correct_atoms)):
self.assertEqual(correct_atoms["atom_%s" % item]["sort"], atoms["atom_%s" % item]["sort"])
self.assertAlmostEqual(correct_atoms["atom_%s" % item]["mass"], atoms["atom_%s" % item]["mass"],
delta=0.00001)
self.assertEqual(correct_atoms["atom_%s" % item]["symbol"], atoms["atom_%s" % item]["symbol"])
self.assertEqual(True, np.allclose(np.array(correct_atoms["atom_%s" % item]["fract_coord"]),
atoms["atom_%s" % item]["fract_coord"]))
# noinspection PyMethodMayBeStatic
def _get_reader_data(self, crystal_reader=None):
abins_type_data = crystal_reader.read_phonon_file()
data = {"datasets": abins_type_data.extract(),
"attributes": crystal_reader._clerk._attributes
}
data["datasets"].update({"unit_cell": crystal_reader._clerk._data["unit_cell"]})
return data
if __name__ == '__main__':
unittest.main()
|
wdzhou/mantid
|
scripts/test/AbinsLoadCRYSTALTest.py
|
Python
|
gpl-3.0
| 8,735
|
from rlkit.torch.core import PyTorchModule
class LinearTransform(PyTorchModule):
def __init__(self, m, b):
super().__init__()
self.m = m
self.b = b
def __call__(self, t):
return self.m * t + self.b
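# Minimal usage sketch (assumes torch and rlkit are importable; the numbers are
# arbitrary): the transform simply computes m * t + b elementwise.
if __name__ == '__main__':
    import torch
    f = LinearTransform(m=2.0, b=1.0)
    print(f(torch.tensor([0.0, 1.0, 2.0])))  # tensor([1., 3., 5.])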
|
vitchyr/rlkit
|
rlkit/torch/networks/linear_transform.py
|
Python
|
mit
| 241
|
# -*- coding: utf-8 -*-
"""Program to process a single station using information from files.
If you use the optimized birrp.exe and you get an error saying: didn't find all
the .bf files, something is amiss. Try running using the old version. Something
in the optimization compiler changes something in the variable usage in the
Fortran program.
At the end I've added a plot section, if you want to save the plots change
the n to y, and hopefully that will work, not sure about windows 7. If that
doesn't work you can leave it at n and save the plot when it comes up by
clicking on the save icon on the plot and save manually.
If the rotation angles are not correct or you want to change birrp parameters
change it in the processinginfofile and rerun the program.
--if you want to change parameters in plst[ii] from the command line use:
plst[ii][parameter]=newvalue
where parameter can be anything in plst[ii].keys() (type that in to the
command line to see what parameters are available)
then run flst=brp.runBIRRPpp(dirpath,plst[stationindex],stationinfofile,
birrploc,ffactor=1)
from the command line. If this makes it run or makes the parameters better
than change it in the processinginfofile for later use.
--if you want to look at the time frequency plot:
you should decimate the data down to something smaller than 50Hz
import TFtools as tf
bx=np.loadtxt(filename)
bx=tf.decimatef(bx,decimation factor)
psm,tsm,fsm,pst=tf.smethod(bx,L=11,nh=2**8,nfbins=2**9,tstep=2**6,
df=decimation frequency)
where decimation frequency is the frequency to which you decimated
so if you sampled at 500 Hz and you decimated to 50 Hz then df=50
to plot the spectrogram
tf.plottf(psm,tsm,fsm)
there are a whole lot of parameters that you can change for plotting,
type in tf.plottf? on the command line and it should give you all the
parameters to change.
Good luck
Jared Peacock 2011"""
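# A concrete version of the time-frequency sketch from the docstring above (the
# file name, decimation factor and sampling rate are placeholders; TFtools has to
# be importable for this to run):
#
#   import numpy as np
#   import TFtools as tf
#   bx = np.loadtxt('station_bx.txt')
#   bx = tf.decimatef(bx, 10)            # e.g. 500 Hz sampled data down to 50 Hz
#   psm, tsm, fsm, pst = tf.smethod(bx, L=11, nh=2**8, nfbins=2**9, tstep=2**6, df=50)
#   tf.plottf(psm, tsm, fsm)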
#=========================================================================
# Import necessary packages
#=========================================================================
import os
import os.path as op
import sys
import mtpy.processing.birrptools as brp
import mtpy.core.mttools as mt
import mtpy.imaging.mtplottools as mtplot
import mtpy.core.z as Z
def main():
#=========================================================================
# Input files
#=========================================================================
arglist = sys.argv[1:]
if len(arglist) < 4:
sys.exit('ERROR - provide 4 arguments:<station folder directory> <processingparameter file> <stationparameter file> <BIRRP executable>')
# directory where station folders are
dirpath = op.abspath(arglist[0])
# file where all the processing parameters are, ie day, start time, end time
# and birrp parameters like tbw, thetae, etc
processinginfofile = op.abspath(arglist[1])
# file where the station info is, ie lat, long, ex, ey, notes
stationinfofile = op.abspath(arglist[2])
# the location of birrp5.exe on your computer, can be the full path to the
# executable like r"c:\BIRRP\birrp5Optimized.exe"
# birrploc=r"c:\Peacock\PHD\BIRRP\birrp5_3pcs20E9ptsOptimized.exe"
birrploc = op.abspath(arglist[3])
# this is the index of which station to process which corresponds to the
# line number in Notepad++ minus 2 of the processinginfofile. So if you want
# to process the first station in processinginfofile which is line 2 in the
    # notepad file, the stationindex will be 0.
stationindex = 0
#=========================================================================
# #get information from the processing file and put into a list of dictionaries
#=========================================================================
plst = brp.readProDict(processinginfofile, dirpath)
#=========================================================================
# Combine files, make script file, run birrp
#=========================================================================
# if you find that your responses are not scaled correctly, change the parameter
# ffactor which multiplies the responses by that number. This might happen if the
# gains are not quite right or the dipole lengths are not quite right.
# flst=brp.runBIRRPpp(dirpath,plst[stationindex],stationinfofile,birrploc,
# ffactor=1)
#
# if you want to run multiple stations, one after the other uncomment the
# following loop. This will processes the station then plot the apparent
# resistivity and phase of all 4 components, then plot the phase tensor
# components. If you want to start plst from a different index, because you
# keep adding to the processinginfofile for each day, which I suggest doing so
# when you come back from the field all the info is one place, just change
    # the plst in enumerate(plst, 1) to plst[start:stop] or plst[start:] for all
# stations after start.
flstall = []
for ii, pdict in enumerate(plst, 1):
try:
flst = brp.runBIRRPpp(dirpath, pdict, stationinfofile, birrploc,
ffactor=1)
flstall.append(flst)
brp.plotBFfiles(flst['edifile'], cohfile=flst['cohfile'], save='y',
show='n')
# z1=Z.Z(flst['edifile'])
# z1.plotResPhase(fignum=ii,plottype=2)
# z1.plotPTAll(fignum=ii+len(plst))
except TypeError:
print 'Did not process ', pdict['station']
except IOError:
print 'Did not process ', pdict['station']
except IndexError:
print 'Did not process ', pdict['station']
except ValueError:
print 'Did not process ', pdict['station']
#=========================================================================
# Plot files
#=========================================================================
# change save='n' to save='y' if want to save the plots, will save in a folder
# called dirpath\plots
# if you don't want to use the save icon in the plots you can type in the
    # interpreter plt.savefig(FullPathSaveName, fmt='pdf')
# note that fmt can be jpg, eps, or svg
# brp.plotBFfiles(flst['edifile'],cohfile=flst['cohfile'],save='n',show='y')
# if this doesn't work try:
# mtplot.plotResPhase(flst['edifile'],plotnum=2,fignum=1)
# or
# z1=Z.Z(flst['edifile'])
# z1.plotResPhase(fignum=1,plottype=2)
    # z1.plotPTAll(fignum=2)
if __name__ == '__main__':
main()
|
MTgeophysics/mtpy
|
legacy/runbirrpsinglestation.py
|
Python
|
gpl-3.0
| 6,757
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
openslides.agenda.views
~~~~~~~~~~~~~~~~~~~~~~~
Views for the agenda app.
:copyright: 2011 by the OpenSlides team, see AUTHORS.
:license: GNU GPL, see LICENSE for more details.
"""
from datetime import datetime
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.utils.translation import ugettext as _
from openslides.agenda.models import Item
from openslides.agenda.api import get_active_item, is_summary, children_list, \
del_confirm_form_for_items
from openslides.agenda.forms import ElementOrderForm, MODELFORM
from openslides.application.models import Application
from openslides.assignment.models import Assignment
from openslides.poll.models import Poll
from openslides.system.api import config_set, config_get
from openslides.utils.template import render_block_to_string
from openslides.utils.utils import template, permission_required, \
del_confirm_form, ajax_request
from openslides.utils.pdf import print_agenda
from poll.models import Poll, Option
def view(request, item_id):
"""
Shows the Slide.
"""
item = Item.objects.get(id=item_id)
votes = assignment_votes(item)
polls = assignment_polls(item)
return render_to_response('beamer/%s.html' % item.type,
{
'item': item.cast(),
'ajax': 'off',
'votes': votes,
'polls': polls,
},
context_instance=RequestContext(request))
@permission_required('agenda.can_see_projector')
def beamer(request):
"""
    Shows an active Slide.
"""
data = {'ajax': 'on'}
template = ''
try:
item = get_active_item()
votes = assignment_votes(item)
polls = assignment_polls(item)
if is_summary():
items = item.children.filter(hidden=False)
data['items'] = items
data['title'] = item.title
template = 'beamer/overview.html'
else:
data['item'] = item.cast()
data['title'] = item.title
data['votes'] = votes
data['polls'] = polls
template = 'beamer/%s.html' % (item.type)
except Item.DoesNotExist:
items = Item.objects.filter(parent=None).filter(hidden=False) \
.order_by('weight')
data['items'] = items
data['title'] = _("Agenda")
template = 'beamer/overview.html'
if request.is_ajax():
content = render_block_to_string(template, 'content', data)
jsondata = {'content': content,
'title': data['title'],
'time': datetime.now().strftime('%H:%M'),
'bigger': config_get('bigger'),
'up': config_get('up'),
'countdown_visible': config_get('countdown_visible'),
'countdown_time': config_get('agenda_countdown_time'),
'countdown_control': config_get('countdown_control'),
}
return ajax_request(jsondata)
else:
return render_to_response(template,
data,
context_instance=RequestContext(request))
@permission_required('agenda.can_see_projector')
def beamerhome(request):
"""
    Shows an active Slide.
"""
data = {'ajax': 'on'}
template = ''
try:
item = get_active_item()
votes = assignment_votes(item)
polls = assignment_polls(item)
if is_summary():
items = item.children.filter(hidden=False)
data['items'] = items
data['title'] = item.title
template = 'beamer/overview.html'
else:
data['item'] = item.cast()
data['title'] = item.title
data['votes'] = votes
data['polls'] = polls
template = 'beamer/%shome.html' % (item.type)
except Item.DoesNotExist:
items = Item.objects.filter(parent=None).filter(hidden=False)\
.order_by('weight')
data['items'] = items
data['title'] = _("Agenda")
template = 'beamer/overview.html'
if request.is_ajax():
content = render_block_to_string(template, 'content', data)
jsondata = {'content': content,
'title': data['title'],
'time': datetime.now().strftime('%H:%M'),
'bigger': config_get('bigger'),
'up': config_get('up'),
'countdown_visible': config_get('countdown_visible'),
'countdown_time': config_get('agenda_countdown_time'),
'countdown_control': config_get('countdown_control'),
}
return ajax_request(jsondata)
else:
return render_to_response(template,
data,
context_instance=RequestContext(request))
@permission_required('agenda.can_manage_agenda')
def beamer_edit(request, direction):
if direction == 'bigger':
config_set('bigger', int(config_get('bigger', 100)) + 10)
elif direction == 'smaller':
config_set('bigger', int(config_get('bigger', 100)) - 10)
elif direction == 'up':
config_set('up', int(config_get('up', 0)) - 10)
elif direction == 'down':
config_set('up', int(config_get('up', 0)) + 10)
elif direction == 'clean':
config_set('up', 0)
config_set('bigger', 100)
if request.is_ajax():
return ajax_request({})
return redirect(reverse('item_overview'))
@permission_required('agenda.can_manage_agenda')
def beamer_countdown(request, command, time=60):
if command == 'show':
config_set('countdown_visible', True)
elif command == 'hide':
config_set('countdown_visible', False)
elif command == 'reset':
config_set('countdown_control', 'reset')
elif command == 'start':
config_set('countdown_control', 'start')
elif command == 'stop':
config_set('countdown_control', 'stop')
if request.is_ajax():
if command == "show":
link = reverse('countdown_close')
else:
link = reverse('countdown_open')
return ajax_request({'countdown_visible': config_get('countdown_visible'),
'link': link})
return redirect(reverse('item_overview'))
def assignment_votes(item):
votes = []
if item.type == "ItemAssignment":
assignment = item.cast().assignment
# list of votes
votes = []
for candidate in assignment.candidates:
tmplist = [[candidate, assignment.is_elected(candidate)], []]
for poll in assignment.poll_set.all():
if poll.published:
if candidate in poll.options_values:
if assignment.is_elected(candidate):
option = Option.objects.filter(poll=poll).filter(user=candidate)[0]
if poll.optiondecision:
tmplist[1].append([option.yes, option.no, option.undesided])
else:
tmplist[1].append(option.yes)
else:
tmplist[1].append("")
else:
tmplist[1].append("-")
votes.append(tmplist)
return votes
def assignment_polls(item):
polls = []
if item.type == "ItemAssignment":
for poll in item.cast().assignment.poll_set.filter(assignment=item.cast().assignment):
polls.append(poll)
return polls
@permission_required('agenda.can_see_agenda')
@template('agenda/overview.html')
def overview(request):
"""
Shows an overview of all items.
"""
if request.method == 'POST':
for item in Item.objects.all():
form = ElementOrderForm(request.POST, prefix="i%d" % item.id)
if form.is_valid():
try:
item.parent = Item.objects.get( \
id=form.cleaned_data['parent'])
except Item.DoesNotExist:
item.parent = None
item.weight = form.cleaned_data['weight']
item.save()
items = children_list(Item.objects.filter(parent=None).exclude(hidden=True).order_by('weight'))
items_hidden = children_list(Item.objects.filter(parent=None).exclude(hidden=False).order_by('weight'))
try:
overview = is_summary() and not get_active_item()
except Item.DoesNotExist:
overview = True
return {
'items': items,
'items_hidden': items_hidden,
'overview': overview,
'summary': is_summary(),
'countdown_visible': config_get('countdown_visible'),
'countdown_time': config_get('agenda_countdown_time'),
}
@permission_required('agenda.can_manage_agenda')
def set_active(request, item_id, summary=False):
"""
Set an Item as the active one.
"""
if item_id == "0":
config_set("presentation", "0")
else:
try:
item = Item.objects.get(id=item_id)
item.set_active(summary)
except Item.DoesNotExist:
messages.error(request, _('Item ID %d does not exist.') % int(item_id))
config_set("bigger", 100)
config_set("up", 0)
if request.is_ajax():
return ajax_request({'active': item_id})
return redirect(reverse('item_overview'))
@permission_required('agenda.can_manage_agenda')
def set_closed(request, item_id, closed=True):
"""
Close or open an Item.
"""
try:
item = Item.objects.get(id=item_id)
item.set_closed(closed)
except Item.DoesNotExist:
messages.error(request, _('Item ID %d does not exist.') % int(item_id))
if request.is_ajax():
if closed:
link = reverse('item_open', args=[item.id])
else:
link = reverse('item_close', args=[item.id])
return ajax_request({'closed': closed,
'link': link})
return redirect(reverse('item_overview'))
@permission_required('agenda.can_manage_agenda')
@template('agenda/edit.html')
def edit(request, item_id=None, form='ItemText', default=None):
"""
Show a form to edit an existing Item, or create a new one.
"""
if item_id is not None:
try:
item = Item.objects.get(id=item_id).cast()
except Item.DoesNotExist:
messages.error(request, _('Item ID %d does not exist.') % int(item_id))
return redirect(reverse('item_overview'))
else:
item = None
if request.method == 'POST':
if item_id is None:
form = MODELFORM[form](request.POST)
else:
form = item.edit_form(request.POST)
if form.is_valid():
item = form.save()
if item_id is None:
messages.success(request, _('New item was successfully created.'))
if "application" in request.POST:
item.application.writelog(_('Agenda item created'), request.user)
else:
messages.success(request, _('Item was successfully modified.'))
if "application" in request.POST:
item.application.writelog(_('Agenda item modified'), request.user)
if not 'apply' in request.POST:
return redirect(reverse('item_overview'))
if item_id is None:
return redirect(reverse('item_edit', args=[item.id]))
else:
messages.error(request, _('Please check the form for errors.'))
else:
initial = {}
if default:
if form == "ItemAssignment":
assignment = Assignment.objects.get(pk=default)
initial = {
'assignment': assignment,
'title': assignment.name,
}
elif form == "ItemApplication":
application = Application.objects.get(pk=default)
initial = {
'application': application,
'title': application.title,
}
if item_id is None:
form = MODELFORM[form](initial=initial)
else:
form = item.edit_form()
return { 'form': form,
'item': item }
@permission_required('agenda.can_manage_agenda')
def delete(request, item_id):
"""
Delete an Item.
"""
item = Item.objects.get(id=item_id).cast()
if request.method == 'POST':
if 'all' in request.POST:
item.delete()
            messages.success(request, _("Item <b>%s</b> and its children were successfully deleted.") % item)
else:
for child in item.children:
child.parent = item.parent
child.save()
item.delete()
messages.success(request, _("Item <b>%s</b> was successfully deleted.") % item)
else:
del_confirm_form_for_items(request, item)
return redirect(reverse('item_overview'))
|
svschannak/openslides-templates-pmv
|
openslides/agenda/views.py
|
Python
|
gpl-2.0
| 13,460
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Entry.author'
db.add_column(u'feeds_entry', 'author',
self.gf('django.db.models.fields.CharField')(default='', max_length=1023, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Entry.author'
db.delete_column(u'feeds_entry', 'author')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'entries_per_page': ('django.db.models.fields.IntegerField', [], {'default': '50'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'read_later': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'read_later_credentials': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'sharing_email': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sharing_gplus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sharing_twitter': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'timezone': ('django.db.models.fields.CharField', [], {'default': "'UTC'", 'max_length': '75'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'feeds.category': {
'Meta': {'ordering': "('order', 'name', 'id')", 'unique_together': "(('user', 'slug'), ('user', 'name'))", 'object_name': 'Category'},
'color': ('django.db.models.fields.CharField', [], {'default': "'blue'", 'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1023', 'db_index': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'categories'", 'to': u"orm['auth.User']"})
},
u'feeds.entry': {
'Meta': {'ordering': "('-date', 'title')", 'object_name': 'Entry'},
'author': ('django.db.models.fields.CharField', [], {'max_length': '1023', 'blank': 'True'}),
'broadcast': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'entries'", 'null': 'True', 'to': u"orm['feeds.Feed']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('feedhq.feeds.fields.URLField', [], {}),
'permalink': ('feedhq.feeds.fields.URLField', [], {'blank': 'True'}),
'read': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'read_later_url': ('feedhq.feeds.fields.URLField', [], {'blank': 'True'}),
'starred': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'subtitle': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entries'", 'to': u"orm['auth.User']"})
},
u'feeds.favicon': {
'Meta': {'object_name': 'Favicon'},
'favicon': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url': ('feedhq.feeds.fields.URLField', [], {'unique': 'True', 'db_index': 'True'})
},
u'feeds.feed': {
'Meta': {'ordering': "('name',)", 'object_name': 'Feed'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'feeds'", 'null': 'True', 'to': u"orm['feeds.Category']"}),
'favicon': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_safe': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'unread_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'url': ('feedhq.feeds.fields.URLField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'feeds'", 'to': u"orm['auth.User']"})
},
u'feeds.uniquefeed': {
'Meta': {'object_name': 'UniqueFeed'},
'backoff_factor': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'error': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'db_column': "'muted_reason'", 'blank': 'True'}),
'etag': ('django.db.models.fields.CharField', [], {'max_length': '1023', 'null': 'True', 'blank': 'True'}),
'hub': ('feedhq.feeds.fields.URLField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_loop': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'link': ('feedhq.feeds.fields.URLField', [], {'blank': 'True'}),
'modified': ('django.db.models.fields.CharField', [], {'max_length': '1023', 'null': 'True', 'blank': 'True'}),
'muted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'subscribers': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'url': ('feedhq.feeds.fields.URLField', [], {'unique': 'True'})
}
}
complete_apps = ['feeds']
|
vincentbernat/feedhq
|
feedhq/feeds/migrations/0008_auto__add_field_entry_author.py
|
Python
|
bsd-3-clause
| 9,489
|
import traceback
events = {}
def registerEvent(name,func,priority=5):
"""Registers an event handler"""
global events
# Max priority of 5
if priority > 5:
priority = 5
if priority < 0:
priority = 0
# No such event, add it
if name not in events:
events[name] = {}
# No such priority, add it
if priority not in events[name]:
events[name][priority] = []
# Append and return the reference function
events[name][priority].append(func)
return {'name':name,'func':func,'priority':priority}
def unregisterEvent(name,func):
"""Unregisters a registered event handler"""
global events
# Does the event name exist?
if name not in events:
# Failed to unregister
return False
if func != None:
for priority in events[name]:
i=0
for f in events[name][priority]:
if f == func:
events[name][priority].pop(i)
del i
# Unregistered event
return True
i += 1
# Welp, event not found. sucks.
return False
def force_unregisterEvent(funct):
"""Unregisters an event handler without knowing where it came from (slower)"""
global events
for name in events:
for priority in events[name]:
if funct in events[name][priority]:
events[name][priority].remove(funct)
def callEvent(name,*args):
"""Calls an event"""
if name not in events:
return True
x = None
    # Loop through the event's priorities
    for priority in events[name].keys():
        # Loop through the handlers registered at this priority
for func in events[name][priority]:
# Try to run the function.
try:
if x == None:
x = args
x = func(*x)
            except Exception as e:
print("An exception occurred!")
print("Helpful info: ")
print(x)
print(name)
print(e)
traceback.print_exc()
# To prevent bad things from happening,
## stop the event from continuing
return False
# If it's not True or None, stop the event.
if x == False:
return False
# Event was good, congrats!
return True
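# Example usage sketch: register a handler, fire the event, then unregister it.
# The 'greeting' event and the greet() handler are placeholders; a handler may
# return a new argument tuple for lower-priority handlers, and returning False
# stops the chain.
if __name__ == '__main__':
    def greet(who):
        print("Hello, %s" % who)
        return (who,)  # hand the same arguments on to any remaining handlers
    ref = registerEvent('greeting', greet, priority=5)
    callEvent('greeting', 'world')             # prints "Hello, world"
    unregisterEvent(ref['name'], ref['func'])  # remove the handler again
    callEvent('greeting', 'world')             # no handlers left, returns True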
|
andrewphorn/Ainsly
|
core/events.py
|
Python
|
mit
| 2,421
|
"""
Tests for Suite model.
"""
from django.core.exceptions import ValidationError
from tests import case
class SuiteTest(case.DBTestCase):
def test_unicode(self):
"""Unicode representation is name of Suite."""
self.assertEqual(unicode(self.F.SuiteFactory(name="Foo")), u"Foo")
def test_clone_cases(self):
"""Cloning a suite clones its member SuiteCases."""
sc = self.F.SuiteCaseFactory()
new = sc.suite.clone()
self.assertEqual(new.cases.get(), sc.case)
def test_clone_sets_draft_state(self):
"""Clone of active suite is still draft."""
s = self.F.SuiteFactory(status="active")
new = s.clone()
self.assertEqual(new.status, "draft")
def test_default_active(self):
"""New suite defaults to active state."""
s = self.F.SuiteFactory()
self.assertEqual(s.status, "active")
def test_unique_constraint(self):
"""Can't have two SuiteCases with same suite and case."""
sc = self.F.SuiteCaseFactory.create()
new = self.F.SuiteCaseFactory.build(
case=sc.case, suite=sc.suite)
with self.assertRaises(ValidationError):
new.full_clean()
def test_unique_constraint_with_unset_case_and_suite(self):
"""Uniqueness checking doesn't blow up if suite/case unset."""
new = self.model.SuiteCase()
with self.assertRaises(ValidationError):
new.full_clean()
def test_unique_constraint_doesnt_prevent_edit(self):
"""Unique constraint still allows saving an edited existing object."""
sc = self.F.SuiteCaseFactory.create()
sc.instruction = "new instruction"
sc.full_clean()
def test_unique_constraint_ignores_deleted(self):
"""Deleted suitecase doesn't prevent new with same suite and case."""
sc = self.F.SuiteCaseFactory.create()
sc.delete()
self.F.SuiteCaseFactory.create(
case=sc.case, suite=sc.suite)
|
peterbe/moztrap
|
tests/model/library/models/test_suite.py
|
Python
|
bsd-2-clause
| 2,013
|
"""XViewMiddleware has been moved to django.contrib.admindocs.middleware."""
import warnings
warnings.warn(__doc__, DeprecationWarning, stacklevel=2)
from django.contrib.admindocs.middleware import XViewMiddleware
|
denisenkom/django
|
django/middleware/doc.py
|
Python
|
bsd-3-clause
| 216
|
import unittest
import pinq
class queryable_difference_tests(unittest.TestCase):
def setUp(self):
self.queryable1 = pinq.as_queryable(range(1, 11))
self.queryable2 = pinq.as_queryable(range(2, 10, 2))
def test_difference_list(self):
self.assertEqual(list(self.queryable1.difference(
[1, 3, 5, 7, 9])), [2, 4, 6, 8, 10])
def test_difference_list_remove_all(self):
self.assertEqual(list(self.queryable1.difference(
[1, 3, 5, 7, 9, 2, 4, 6, 8, 10])), [])
def test_difference_list_remove_none(self):
self.assertEqual(list(self.queryable1.difference(
[15])), [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
def test_difference_queryable(self):
self.assertEqual(list(self.queryable1.difference(
self.queryable2)), [1, 3, 5, 7, 9, 10])
def test_difference_queryable_more_removed_values(self):
self.assertEqual(list(self.queryable2.difference(
self.queryable1)), [])
def test_difference_list_with_key_selector(self):
self.assertEqual(list(self.queryable1.difference(
[1, 3, 5, 7, 9], lambda x: x % 2)), [2, 4, 6, 8, 10])
def test_difference_list_remove_all_with_key_selector(self):
self.assertEqual(list(self.queryable1.difference(
[1, 3, 5, 7, 9, 2, 4, 6, 8, 10], lambda x: x % 2)), [])
def test_difference_list_remove_none_with_key_selector(self):
self.assertEqual(list(self.queryable1.difference(
[15], lambda x: x + 12)), [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
def test_difference_other_type_error(self):
self.assertRaises(TypeError, self.queryable1.difference, 100)
def test_difference_key_selector_type_error(self):
self.assertRaises(
TypeError, self.queryable1.difference, [1, 3, 6], 15)
|
dlshriver/pinq
|
tests/test_queryable_difference.py
|
Python
|
mit
| 1,827
|
import sys
import time
import unittest
from neupy.helpers import progressbar
from base import BaseTestCase
from utils import catch_stdout
class ProgressbarTestCase(BaseTestCase):
@unittest.skip("Broken test")
def test_simple_progressbar(self):
with catch_stdout() as out:
iterator = progressbar(
range(10),
mininterval=0.,
file=sys.stdout
)
for i in iterator:
time.sleep(0.1)
terminal_output = out.getvalue()
self.assertRegexpMatches(
terminal_output,
'\|{}{}\|\s{}/10\s+{}\%.+'.format(
'#' * i, '-' * (10 - i), i, i * 10
)
)
|
stczhc/neupy
|
tests/helpers/test_progressbar.py
|
Python
|
mit
| 780
|
import xml.etree.ElementTree as ET
import checkmod
DEBUG = 0
if DEBUG:
XMLPath = '../../Package.xml'
EmbedFile = '../../DreamInstaller/Embed.inc'
CodeFile = '../../DreamInstaller/Files.inc'
else:
XMLPath = '../Package.xml'
EmbedFile = 'Embed.inc'
CodeFile = 'Files.inc'
def Init():
# XML Data
    root = ET.parse(XMLPath).getroot()
f = open(EmbedFile, 'w')
fc = open(CodeFile, 'w')
i = 0
for file in root:
i = i + 1
name = file.get('name')
path = file.text
f.write('%d\tRCDATA\t"%s"\n' % (i, path))
        fc.write('{L"%s", %d},\n' % (name, i))
fc.close()
f.close()
def Main():
Init()
if __name__ == '__main__':
if (not checkmod.check(XMLPath) and not DEBUG):
print('PackFiles: No change on xml, skip.')
else:
Main()
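# Illustration of the generated lines (assuming Package.xml holds an entry such
# as <file name="core.dll">bin/core.dll</file>; for i == 1 the loop writes):
#     Embed.inc: 1\tRCDATA\t"bin/core.dll"
#     Files.inc: {L"core.dll", 1},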
|
DreamHacks/dreamdota
|
DreamWarcraft/Build Tools/PackFiles.py
|
Python
|
mit
| 860
|
import colorsys
from itertools import cycle
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.style.use('ggplot')
import numpy as np
def get_separate_colors(count):
num_div = np.linspace(0.0,1.0,num=count,endpoint=False)
return [[a,b,c] for (a,b,c) in [colorsys.hsv_to_rgb(d,1.0,1.0) \
for d in num_div]]
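# For example, get_separate_colors(3) yields (approximately) pure red, green and
# blue: [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]].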
class Plotter(object):
def __init__(self):
self.fig, self.axis = plt.subplots(1,1)
self.linesplotted_count = 0
def plot(self, xmeans, ymeans, label, ymedians, yerr=None,
xmedians=None, xerr=None):
line, _, _ = self.axis.errorbar(xmeans, ymeans, xerr=xerr, yerr=yerr,
label=label,
linewidth=3)
# plot dot at median
if xmedians is not None:
# if error bars extend on both axes
self.axis.errorbar(xmedians, ymedians, fmt='o',
color=line.get_color())
else:
self.axis.errorbar(xmeans, ymedians, fmt='o',
color=line.get_color())
def savefig(self, name):
# put legend centered under plot
lgd = self.axis.legend(loc='upper center', bbox_to_anchor=(0.5, -0.1))
        self.fig.savefig(name, bbox_extra_artists=(lgd,), bbox_inches='tight')
def set_title(self, title):
self.axis.set_title(title)
def set_xlabel(self, label):
self.axis.set_xlabel(label)
def set_ylabel(self, label):
self.axis.set_ylabel(label)
def set_ylim(self, lims):
self.axis.set_ylim(lims)
if __name__ == '__main__':
# example of how to use Plotter
line_count = 4
plotter = Plotter()
xmeans = np.linspace(0.0,1.0,10)
for i in range(line_count):
# generate some data
ymeans = (xmeans * xmeans / 2) - i
# calculate errorbars
yerr = np.vstack((np.array([0.1]*len(xmeans)),
np.array([0.1]*len(xmeans))))
plotter.plot(xmeans, ymeans, 'line {:d}'.format(i), ymeans,
yerr=yerr)
plotter.set_title('Example')
plotter.set_xlabel('x label')
plotter.set_ylabel('y label')
plotter.savefig('example.pdf')
|
nOkuda/classtm
|
classtm/plot.py
|
Python
|
gpl-3.0
| 2,160
|
'''
Tests properties.
Copyright (c) 2009, 2013 Peter Parente
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
import unittest
import test_setup
import pyttsx
class TestProperties(unittest.TestCase):
def setUp(self):
self.engine = pyttsx.init(debug=False)
def tearDown(self):
del self.engine
def testDefaults(self):
voices = self.engine.getProperty('voices')
drate = 200
dvolume = 1.0
rate = self.engine.getProperty('rate')
self.assert_(drate == rate)
volume = self.engine.getProperty('volume')
self.assert_(dvolume == volume)
voice = self.engine.getProperty('voice')
self.assert_(voice in [v.id for v in voices])
def testSetRate(self):
for rate in xrange(100, 400, 10):
self.engine.setProperty('rate', rate)
self.engine.runAndWait()
grate = self.engine.getProperty('rate')
self.assert_(rate == grate)
def testSetVoice(self):
voices = self.engine.getProperty('voices')
for voice in voices:
self.engine.setProperty('voice', voice.id)
self.engine.runAndWait()
gvoice = self.engine.getProperty('voice')
self.assert_(voice.id == gvoice)
def testSetVolume(self):
for volume in xrange(0, 100, 1):
volume /= 100.0
self.engine.setProperty('volume', volume)
self.engine.runAndWait()
gvolume = self.engine.getProperty('volume')
self.assertAlmostEqual(volume, gvolume, 4)
def testSetMultiple(self):
voices = self.engine.getProperty('voices')
self.engine.setProperty('volume', 0.5)
self.engine.setProperty('rate', 300)
self.engine.setProperty('voice', voices[0].id)
self.engine.runAndWait()
gvoice = self.engine.getProperty('voice')
self.assert_(gvoice == voices[0].id)
gvolume = self.engine.getProperty('volume')
self.assertAlmostEqual(gvolume, 0.5, 4)
grate = self.engine.getProperty('rate')
self.assert_(grate == 300)
def testBadVolume(self):
errors = []
def errback(exception, name):
errors.append(exception)
tok = self.engine.connect('error', errback)
self.engine.setProperty('volume', 'foobar')
self.engine.setProperty('volume', None)
self.engine.setProperty('volume', object())
self.engine.runAndWait()
self.engine.disconnect(tok)
for error in errors:
self.assert_(isinstance(error, ValueError))
def testBadRate(self):
errors = []
def errback(exception, name):
errors.append(exception)
tok = self.engine.connect('error', errback)
self.engine.setProperty('rate', 'foobar')
self.engine.setProperty('rate', None)
self.engine.setProperty('rate', object())
self.engine.runAndWait()
self.engine.disconnect(tok)
for error in errors:
self.assert_(isinstance(error, ValueError))
def testBadVoice(self):
errors = []
def errback(exception, name):
errors.append(exception)
tok = self.engine.connect('error', errback)
self.engine.setProperty('voice', 'foobar')
self.engine.setProperty('voice', 100)
self.engine.setProperty('voice', 1.0)
self.engine.setProperty('voice', None)
self.engine.setProperty('voice', object())
self.engine.runAndWait()
self.engine.disconnect(tok)
for error in errors:
self.assert_(isinstance(error, ValueError))
def suite():
suite = unittest.TestLoader().loadTestsFromTestCase(TestProperties)
#suite = unittest.TestLoader().loadTestsFromName('testBadVoice', TestProperties)
return suite
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
|
parente/pyttsx
|
tests/unit/test_prop.py
|
Python
|
isc
| 4,557
|
import re,xbmcaddon,time,requests
from ..scraper import Scraper
from ..common import random_agent,send_log,error_log
requests.packages.urllib3.disable_warnings()
s = requests.session()
dev_log = xbmcaddon.Addon('script.module.universalscrapers').getSetting("dev_log")
from universalscrapers.modules import cfscrape
class watchstream(Scraper):
domains = ['https://putlockers.movie']
name = "Watchstream"
sources = []
def __init__(self):
self.base_link = 'https://putlockers.movie/embed/'
self.scraper = cfscrape.create_scraper()
if dev_log=='true':
self.start_time = time.time()
def scrape_movie(self, title, year, imdb, debrid = False):
try:
get_link = self.base_link + '%s/' %(imdb)
headers={'User-Agent':random_agent(),'referrer':get_link}
data = {'tmp_chk':'1'}
html = self.scraper.post(get_link,headers=headers,data=data,verify=False,timeout=5).content
#print html
link = re.compile('<iframe src="(.+?)"',re.DOTALL).findall(html)[0]
#print link
count = 0
try:
chk = requests.get(link).content
rez = re.compile('"description" content="(.+?)"',re.DOTALL).findall(chk)[0]
if '1080' in rez:
res='1080p'
elif '720' in rez:
res='720p'
else:
res ='DVD'
except: res = 'DVD'
count +=1
self.sources.append({'source': 'Openload', 'quality': res, 'scraper': self.name, 'url': link,'direct': False})
if dev_log=='true':
end_time = time.time() - self.start_time
send_log(self.name,end_time,count)
return self.sources
        except Exception as argument:
if dev_log == 'true':
error_log(self.name,'Check Search')
return self.sources
|
repotvsupertuga/tvsupertuga.repository
|
script.module.streamtvsupertuga/lib/resources/lib/sources/it/watchstream.py
|
Python
|
gpl-2.0
| 2,011
|
import struct
import textwrap
import typing
from .abstractentry import AbstractEntry
from .sound import Sound
if typing.TYPE_CHECKING:
from ..rom import Rom
class InstrumentList(AbstractEntry):
"""The list of instruments used by the game to play music.
Instrument information was found through some guess work and through spc dumps using snes9x.
Note that this information also appears at 0xfd7de for the Instrument test screen, but this information is
misleading and not used for song playback. In fact, the Pitch Scale and Velocity Scale values on that screen do
not seem to do anything at all, at least in an emulator.
"""
def __init__(self, offset, name):
super().__init__(offset, name)
self.instruments = None
def load(self, rom):
# Only load once.
if self.instruments is not None:
return
rom.seek(self.offset)
# Load the instrument numbers and info offsets.
self.instruments = {}
has_another = True
while has_another:
instrument_number = rom.read_ubyte()
b1 = rom.read_ubyte()
assert b1 == 0
b1 = rom.read_ubyte()
if b1 == 0xf0: # Relative offset
info_offset = rom.read_ubyte() + 1
info_offset += rom.tell()
elif b1 == 0xd0: # Absolute offset
# No clue what this is.
unknown = rom.read(2)
assert unknown == b'\x03\x4c'
info_offset = rom.read_ushort() + 1
else:
raise Exception(f'Unexpected value: {b1:#04x}')
self.instruments[instrument_number] = Instrument(instrument_number, info_offset)
has_another = (rom.read_ubyte() == 0xc9)
# Load the instrument sound info.
for info in self.instruments:
self.instruments[info].load(rom)
def save(self, path, filename=None, filetype=None):
filename = self._get_filename(path, filename, self.name + '.txt')
with open(filename, 'w') as f:
f.write(str(self))
def __getitem__(self, key):
"""Gets an instrument by MIDI instrument number."""
return self.instruments[key]
def __str__(self):
return f'{len(self.instruments)} instruments:\n\n' + \
'\n\n'.join([str(self.instruments[i]) for i in sorted(self.instruments)])
class Instrument:
"""An instrument based on a sound from the rom."""
def __init__(self, instrument_number, info_offset):
self.instrument_number = instrument_number
self._info_offset = info_offset
self.sound_number = None
self.data = None
self.loop_offset = None
self.is_percussion = None
self.pitch = None
self.velocity = None
def load(self, rom: "Rom"):
sound_number = 0
rom.seek(self._info_offset)
b1 = rom.read_ubyte()
b2 = rom.read_ubyte()
        if b2 == 0: # In this case, b1 is the sound number.
sound_number = b1
# No clue what this is.
unknown = rom.read(2)
assert unknown == b'\x85\x01'
b2 = rom.read_ubyte()
if b2 == 0xa9: # Percussion
# Next value is the pitch value to use no matter what midi note is given.
self.is_percussion = True
self.pitch = rom.read_ushort()
elif b2 == 0xa5: # Melodic
self.is_percussion = False
# No clue what this is.
unknown = rom.read(4)
assert unknown == b'\x0c\x0a\xaa\xbf'
self.pitch = Instrument.read_ushort_list_at(rom, Instrument.read_3_byte_address(rom), 0x80)
else:
raise Exception('Unexpected value while loading instrument {}. '
'Tell: 0x{:x}. Value: 0x{:x}'.format(self.instrument_number, rom.tell(), b2))
# No clue what this is.
unknown = rom.read(5)
assert unknown == b'\x85\x03\xa6\x0e\xbf'
self.velocity = Instrument.read_ubyte_list_at(rom, Instrument.read_3_byte_address(rom), 0x80)
# Load the sound data from the rom.
sound_info = rom.get_entry(rom.get_entries_of_class(Sound)[sound_number]).get_wav_info()
self.sound_number = sound_number
self.data = sound_info['data']
self.loop_offset = sound_info['loop_offset']
@staticmethod
def read_3_byte_address(rom: "Rom"):
"""Reads a 3 byte rom address (really 22-bit, not 24)."""
return struct.unpack('<I', rom.read(3) + b'\00')[0] & 0x3fffff
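    # read_3_byte_address example: the bytes b'\x34\x12\x05' decode to 0x051234.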
@staticmethod
def read_ubyte_list_at(rom: "Rom", offset: int, length: int):
"""Reads a list of unsigned bytes from the rom and sets the rom position back to where it was."""
previous_offset = rom.tell()
rom.seek(offset)
values = struct.unpack('<' + length * 'B', rom.read(length))
rom.seek(previous_offset)
return values
@staticmethod
def read_ushort_list_at(rom: "Rom", offset: int, length: int):
"""Reads a list of unsigned shorts from the rom and sets the rom position back to where it was."""
previous_offset = rom.tell()
rom.seek(offset)
values = struct.unpack('<' + length * 'H', rom.read(length * 2))
rom.seek(previous_offset)
return values
def __str__(self):
text = f'{self.instrument_number} ({"percussion" if self.is_percussion else "melodic"}): ' \
f'Sound {self.sound_number}, length: {len(self.data)}, loops at {self.loop_offset}\n'
text += textwrap.fill(f'Pitch: {self.pitch}') + '\n'
text += textwrap.fill(f'Velocity: {self.velocity}')
return text
|
adambiser/snes-wolf3d-extractor
|
extractor/entrytype/instrument_list.py
|
Python
|
mit
| 5,727
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Copyright 2013 Rémi Palancher
#
# This file is part of Cloubed.
#
# Cloubed is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# Cloubed is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with Cloubed. If not, see
# <http://www.gnu.org/licenses/>.
""" HTTPServer class of Cloubed """
import http.server
import socketserver
import threading
import logging
import socket
class HTTPServer():
""" HTTPServer class """
def __init__(self):
self._port = 5432
self._handler = http.server.SimpleHTTPRequestHandler
self._address = None
self._httpd = None
self._thread = None
def launched(self):
"""
Returns True if the HTTPServer is already launched
"""
# check both the thread and the httpd server since httpd server could
# have failed when launched
return self._thread is not None and self._httpd is not None
def launch(self, address):
"""
launch: Creates the daemon thread that will start the HTTP server
"""
self._address = address
self._thread = threading.Thread(target=self.threaded_server,
name="ClouBedHTTPServer")
self._thread.setDaemon(True)
self._thread.start()
def terminate(self):
"""
shutdown the http server in thread
"""
logging.debug("shutting down http server")
self._httpd.shutdown()
def threaded_server(self):
"""
threaded_server: Thread routine that actually starts the HTTP server
"""
try:
self._httpd = socketserver.TCPServer((self._address, self._port),
self._handler)
self._httpd.serve_forever()
except socket.error as e:
logging.warn("error while launching TCP Server: {err}".format(err=e))
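# Example usage sketch (the bind address and the sleep are illustrative only;
# terminate() requires the daemon thread to have created the TCPServer first):
if __name__ == '__main__':
    import time
    server = HTTPServer()
    if not server.launched():
        server.launch("127.0.0.1")  # serves the current directory on port 5432
    time.sleep(1)                   # give the thread time to bind the socket
    server.terminate()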
|
rezib/cloubed
|
cloubed/HTTPServer.py
|
Python
|
lgpl-3.0
| 2,423
|
import os
from .authenticated_client import AuthenticatedClient
from .errors import ControllerConnectionError
LOCAL_DEFAULT_USERNAME = "paradrop"
LOCAL_DEFAULT_PASSWORD = ""
PARADROP_API_TOKEN = os.environ.get("PARADROP_API_TOKEN", None)
PARADROP_CHUTE_NAME = os.environ.get("PARADROP_CHUTE_NAME", None)
PDSERVER_URL = os.environ.get('PDSERVER_URL', 'https://paradrop.org')
class ControllerClient(AuthenticatedClient):
"""
Client for interacting with a cloud controller.
"""
connection_error_type = ControllerConnectionError
def __init__(self, host=PDSERVER_URL, auth_methods=None):
super(ControllerClient, self).__init__("cloud", PDSERVER_URL,
auth_methods=auth_methods)
self.host = host
self.base_url = host + "/api"
#self.base_url = "http://{}/api".format(host)
def add_ssh_key(self, key_text, name="default"):
"""
Add an authorized key for SSH access.
"""
user = self.get_current_user()
url = "{}/users/{}/sshKeys".format(self.base_url, user['_id'])
data = {
'key': key_text,
'name': name
}
return self.request("POST", url, json=data)
def claim_node(self, token, name=None):
"""
Claim ownership of a node using a claim token.
"""
url = self.base_url + "/routers/claim"
data = {
"claim_token": token
}
if name is not None and len(name) > 0:
data['name'] = name
return self.request("POST", url, json=data)
def create_chute(self, name, description, public=False):
"""
Create a new chute in the store.
"""
url = self.base_url + "/chutes"
data = {
"name": name,
"description": description,
"public": public
}
return self.request("POST", url, json=data)
def create_node(self, name, orphaned=False, claim=None):
"""
Create a new node tracked by the controller.
"""
url = self.base_url + "/routers"
data = {
"name": name,
"orphaned": orphaned
}
if claim is not None:
data['claim'] = claim
return self.request("POST", url, json=data)
def create_user(self, name, email, password, password2):
"""
Create a new user account on the controller.
"""
url = self.base_url + "/users"
data = {
"name": name,
"email": email,
"password": password,
"confirmPassword": password2
}
return self.request("POST", url, json=data)
def create_version(self, name, config):
"""
Create a new chute version.
"""
chute = self.find_chute(name)
if chute is None:
return None
url = "{}/chutes/{}/versions".format(self.base_url, chute['_id'])
data = {
"chute_id": chute['_id'],
"config": config
}
return self.request("POST", url, json=data)
def delete_node(self, name):
"""
Delete a node tracked by the controller.
"""
node = self.find_node(name)
if node is not None:
url = "{}/routers/{}".format(self.base_url, node['_id'])
return self.request("DELETE", url)
else:
return None
def find_chute(self, name):
"""
Find a chute by name or id.
"""
        # If this client object is ever used for multiple requests during its
        # lifetime, we could consider caching the chute list locally for a
        # better response time. Then we need to add cache invalidation to all
        # of the methods that might affect the chute list.
chutes = self.list_chutes()
for chute in chutes:
if chute['_id'] == name or chute['name'] == name:
return chute
return None
def find_group(self, name):
"""
Find a group by name or id.
"""
# If this client object is ever used for multiple requests during its
# lifetime, we could consider caching the group list locally for a
# better response time. Then we need to add cache invalidation to all
# of the methods that might affect the group list.
groups = self.list_groups()
for group in groups:
if group['_id'] == name or group['name'] == name:
return group
return None
def find_node(self, name):
"""
Find a node by name or id.
"""
# If this client object is ever used for multiple requests during its
# lifetime, we could consider caching the node list locally for a
# better response time. Then we need to add cache invalidation to all
# of the methods that might affect the node list.
nodes = self.list_nodes()
for node in nodes:
if node['_id'] == name or node['name'] == name:
return node
return None
def find_update(self, node_id, update_id):
"""
Find a node update.
"""
url = "{}/routers/{}/updates/{}".format(self.base_url, node_id, update_id)
return self.request("GET", url)
def follow_chute(self, chute_name, node_name):
"""
Follow updates to a chute.
The node will automatically update when new versions of the chute are
created.
"""
chute = self.find_chute(chute_name)
if chute is None:
raise Exception("Chute was not found")
node = self.find_node(node_name)
if node is None:
raise Exception("Node was not found")
data = {
"node_id": node['_id']
}
url = "{}/chutes/{}/watchers".format(self.base_url, chute['_id'])
return self.request("POST", url, json=data)
def get_current_user(self):
"""
Get the logged in user.
"""
url = "{}/users/me".format(self.base_url)
return self.request("GET", url)
def group_add_node(self, group_name, node_name):
"""
Add a node to a group.
"""
group = self.find_group(group_name)
if group is None:
raise Exception("Group was not found")
node = self.find_node(node_name)
if node is None:
raise Exception("Node was not found")
url = "{}/groups/{}/addRouter".format(self.base_url, group['_id'])
data = {
'router_id': node['_id']
}
return self.request("POST", url, json=data)
def install_chute(self, chute_name, node_name, select_version=None):
"""
Install a chute from the store.
"""
chute = self.find_chute(chute_name)
if chute is None:
raise Exception("Chute was not found")
node = self.find_node(node_name)
if node is None:
raise Exception("Node was not found")
versions = self.list_versions(chute_name)
if versions is None:
raise Exception("No version to install")
version = None
if select_version is None:
version = versions[-1]
else:
for ver in versions:
if str(ver['version']) == str(select_version):
version = ver
break
if version is None:
raise Exception("Version not found")
data = {
"updateClass": "CHUTE",
"updateType": "update",
"chute_id": chute['_id'],
"router_id": node['_id'],
"version_id": version['_id'],
"config": version['config']
}
# Important: the server will reject the update if the name field is
# missing. Version is also not automatically filled in.
data['config']['name'] = chute['name']
data['config']['version'] = version['version']
url = "{}/routers/{}/updates".format(self.base_url, node['_id'])
return self.request("POST", url, json=data)
def list_chutes(self):
"""
List chutes that the user owns or has access to.
"""
url = self.base_url + "/chutes"
return self.request("GET", url)
def list_groups(self):
"""
List groups that the user belongs to.
"""
url = self.base_url + "/groups"
return self.request("GET", url)
def list_nodes(self):
"""
List nodes that the user owns or has access to.
"""
url = self.base_url + "/routers"
return self.request("GET", url)
def list_update_messages(self, node_id, update_id):
"""
List messages from an update.
"""
url = "{}/routers/{}/updates/{}/messages".format(self.base_url,
node_id, update_id)
return self.request("GET", url)
def list_versions(self, name):
"""
        List versions of a chute.
"""
chute = self.find_chute(name)
if chute is None:
return []
url = "{}/chutes/{}/versions".format(self.base_url, chute['_id'])
return self.request("GET", url)
def save_node(self, node):
"""
Save changes to a node object.
"""
url = "{}/routers/{}".format(self.base_url, node['_id'])
return self.request("PUT", url, json=node)
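# Example usage sketch (names are placeholders; credentials are resolved through
# the configured auth_methods):
#
#     client = ControllerClient()
#     node = client.find_node("my-node")            # look up by name or id
#     chutes = client.list_chutes()                 # chutes we own or can access
#     client.install_chute("my-chute", "my-node")   # install the latest version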
|
ParadropLabs/Paradrop
|
tools/pdtools/pdtools/controller_client.py
|
Python
|
apache-2.0
| 9,487
|
#!/usr/bin/env python3
import xml.etree.ElementTree as ET
import sys
# Read command line arguments and check there is only one
arguments = sys.argv
if len(arguments) != 2:
print("Usage: amfprint [FILE]")
sys.exit(2)
amf = arguments[1]
# Read AMF file
try:
tree = ET.parse(amf)
root = tree.getroot()
except ET.ParseError:
print(arguments[0], ": ", amf, ": File not in valid XML format", sep='')
sys.exit(2)
except FileNotFoundError:
print(arguments[0], ": ", amf, ": No such file or directory", sep='')
sys.exit(2)
except:
print("Unexpected error:", sys.exc_info()[0])
sys.exit(2)
# Make list of lists from vertices
vertices = []
for group in root.findall(".//coordinates"):
vertices.append([])
    for x in group:
vertices[-1].append(float(x.text))
# Make list of lists from volumes
volumes = []
for group in root.findall(".//triangle"):
volumes.append([])
    for x in group:
volumes[-1].append(int(x.text))
# Test data:
#vertices = [[-54.3898, 38.8428, 6.0], [-54.9202, 38.7432, 6.0], [-54.9202, 34.7432, 6.0],
# [-54.9202, 34.7432, 2.0], [-54.9202, -8.7432, 1.0], [-54.3877, 38.8293, 6.0],
# [-54.9225, 38.7567, 6.0], [-55.1983, 39.1807, 6.0]]
#volumes = [[0, 1, 2], [2, 1, 3], [0, 4, 1], [1, 4, 3],
# [2, 4, 1], [1, 5, 6], [2, 4, 6], [3, 5, 7]]
# Get order of vertices:
# - Enumerate will make (index; value) pairs.
# - Sorted with given key will sort based on values
# - b[0] will return indexes
order = [b[0] for b in sorted(enumerate(vertices), key=lambda i:i[1])]
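# Example: for vertices [[2.0], [0.5], [1.0]] this yields order == [1, 2, 0],
# i.e. the indexes of the values in ascending order.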
# Create sorted list of vertices:
sorted_vertices = [ vertices[order[i]] for i in range(len(vertices)) ]
# Creation of sorted volumes as list of lists:
# TODO after volumes are sorted, slic3r returns errors.
#sorted_volumes = sorted([[ order.index(x) for x in triangle ] for triangle in volumes ])
sorted_volumes = [[ order.index(x) for x in triangle ] for triangle in volumes ]
# Change vertices order in AMF file:
i = 0
for group in root.findall(".//coordinates"):
j = 0
    for x in group:
x.text = str(sorted_vertices[i][j])
j += 1
i += 1
# Change volumes order in AMF file:
i = 0
for group in root.findall(".//triangle"):
j = 0
    for x in group:
x.text = str(sorted_volumes[i][j])
j += 1
i += 1
# Write back to AMF file
tree.write(amf, encoding="UTF-8", xml_declaration=True)
|
UniversalScientificTechnologies/MLAB-3Dprinter
|
hw/cad/amfsort.py
|
Python
|
gpl-3.0
| 2,472
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import time
import numpy
from nupic.bindings.math import GetNTAReal
from nupic.research.monitor_mixin.monitor_mixin_base import MonitorMixinBase
from nupic.research.monitor_mixin.temporal_memory_monitor_mixin import (
TemporalMemoryMonitorMixin)
from sensorimotor.fast_general_temporal_memory import (
FastGeneralTemporalMemory)
from sensorimotor.temporal_pooler_monitor_mixin import (
TemporalPoolerMonitorMixin)
from union_pooling.union_pooler import UnionPooler
class MonitoredFastGeneralTemporalMemory(TemporalMemoryMonitorMixin,
FastGeneralTemporalMemory):
pass
# Implement a UnionPoolerMonitorMixin if needed...
class MonitoredUnionPooler(TemporalPoolerMonitorMixin, UnionPooler):
pass
realDType = GetNTAReal()
class UnionPoolerExperiment(object):
"""
This class defines a Temporal Memory-Union Pooler network and provides methods
to run the network on data sequences.
"""
DEFAULT_TEMPORAL_MEMORY_PARAMS = {"columnDimensions": (1024,),
"cellsPerColumn": 8,
"activationThreshold": 20,
"initialPermanence": 0.5,
"connectedPermanence": 0.6,
"minThreshold": 20,
"maxNewSynapseCount": 30,
"permanenceIncrement": 0.10,
"permanenceDecrement": 0.02,
"seed": 42,
"learnOnOneCell": False}
DEFAULT_UNION_POOLER_PARAMS = {# Spatial Pooler Params
# inputDimensions set to TM cell count
# potentialRadius set to TM cell count
"columnDimensions": [1024],
"numActiveColumnsPerInhArea": 20,
"stimulusThreshold": 0,
"synPermInactiveDec": 0.01,
"synPermActiveInc": 0.1,
"synPermConnected": 0.1,
"potentialPct": 0.5,
"globalInhibition": True,
"localAreaDensity": -1,
"minPctOverlapDutyCycle": 0.001,
"minPctActiveDutyCycle": 0.001,
"dutyCyclePeriod": 1000,
"maxBoost": 10.0,
"seed": 42,
"spVerbosity": 0,
"wrapAround": True,
# Union Pooler Params
"activeOverlapWeight": 1.0,
"predictedActiveOverlapWeight": 0.0,
"fixedPoolingActivationBurst": False,
"exciteFunction": None,
"decayFunction": None,
"maxUnionActivity": 0.20}
DEFAULT_CLASSIFIER_PARAMS = {
'distThreshold': 0.000001,
'maxCategoryCount': 10,
#'distanceMethod': 'rawOverlap', # Default is Euclidean distance
}
def __init__(self, tmOverrides=None, upOverrides=None, seed=42):
print "Initializing Temporal Memory..."
params = dict(self.DEFAULT_TEMPORAL_MEMORY_PARAMS)
params.update(tmOverrides or {})
params["seed"] = seed
self.tm = MonitoredFastGeneralTemporalMemory(mmName="TM", **params)
print "Initializing Union Pooler..."
params = dict(self.DEFAULT_UNION_POOLER_PARAMS)
params.update(upOverrides or {})
params["inputDimensions"] = [self.tm.numberOfCells()]
params["potentialRadius"] = self.tm.numberOfCells()
params["seed"] = seed
self.up = MonitoredUnionPooler(mmName="UP", **params)
# TODO KNN classifer
def runNetworkOnSequence(self, sensorSequences, sequencesLabels, tmLearn=True,
upLearn=None, verbosity=0, progressInterval=None):
"""
Runs Union Pooler network on specified sequence.
@param sensorSequences A sequence of sensor sequences. Each
sequence is terminated by None.
    @param sequencesLabels A sequence of string representations of the
current sequence. Each sequence is terminated
by None.
@param tmLearn: (bool) Either False, or True
@param upLearn: (None,bool) Either None, False, or True. If None,
union pooler will be skipped.
@param progressInterval: (int) Prints progress every N iterations,
where N is the value of this param
"""
currentTime = time.time()
for i in xrange(len(sensorSequences)):
sensorPattern = sensorSequences[i]
sequenceLabel = sequencesLabels[i]
self.runNetworkOnPattern(sensorPattern,
tmLearn=tmLearn,
upLearn=upLearn,
sequenceLabel=sequenceLabel)
if progressInterval is not None and i > 0 and i % progressInterval == 0:
elapsed = (time.time() - currentTime) / 60.0
print ("Ran {0} / {1} elements of sequence in "
"{2:0.2f} minutes.".format(i, len(sensorSequences), elapsed))
currentTime = time.time()
print MonitorMixinBase.mmPrettyPrintMetrics(
self.tm.mmGetDefaultMetrics())
if verbosity >= 2:
traces = self.tm.mmGetDefaultTraces(verbosity=verbosity)
print MonitorMixinBase.mmPrettyPrintTraces(traces,
breakOnResets=
self.tm.mmGetTraceResets())
if upLearn is not None:
traces = self.up.mmGetDefaultTraces(verbosity=verbosity)
print MonitorMixinBase.mmPrettyPrintTraces(traces,
breakOnResets=
self.up.mmGetTraceResets())
print
def runNetworkOnPattern(self, sensorPattern, tmLearn=True, upLearn=None,
sequenceLabel=None):
if sensorPattern is None:
self.tm.reset()
self.up.reset()
else:
self.tm.compute(sensorPattern,
formInternalConnections=True,
learn=tmLearn,
sequenceLabel=sequenceLabel)
if upLearn is not None:
activeCells, predActiveCells, burstingCols, = self.getUnionPoolerInput()
self.up.compute(activeCells,
predActiveCells,
learn=upLearn,
sequenceLabel=sequenceLabel)
def getUnionPoolerInput(self):
"""
Gets the Union Pooler input from the Temporal Memory
"""
activeCells = numpy.zeros(self.tm.numberOfCells()).astype(realDType)
activeCells[list(self.tm.activeCellsIndices())] = 1
predictedActiveCells = numpy.zeros(self.tm.numberOfCells()).astype(
realDType)
predictedActiveCells[list(self.tm.predictedActiveCellsIndices())] = 1
burstingColumns = numpy.zeros(self.tm.numberOfColumns()).astype(realDType)
burstingColumns[list(self.tm.unpredictedActiveColumns)] = 1
return activeCells, predictedActiveCells, burstingColumns
def getBurstingColumnsStats(self):
"""
Gets statistics on the Temporal Memory's bursting columns. Used as a metric
of Temporal Memory's learning performance.
:return: mean, standard deviation, and max of Temporal Memory's bursting
columns over time
"""
traceData = self.tm.mmGetTraceUnpredictedActiveColumns().data
resetData = self.tm.mmGetTraceResets().data
countTrace = []
for x in xrange(len(traceData)):
if not resetData[x]:
countTrace.append(len(traceData[x]))
mean = numpy.mean(countTrace)
stdDev = numpy.std(countTrace)
maximum = max(countTrace)
return mean, stdDev, maximum
|
tomsilver/nupic.research
|
union_pooling/union_pooling/experiments/union_pooler_experiment.py
|
Python
|
gpl-3.0
| 9,172
|
"""
Test lldb data formatter subsystem.
"""
from __future__ import print_function
import datetime
import os
import time
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class PrintObjectArrayTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
@skipUnlessDarwin
def test_print_array(self):
"""Test that expr -O -Z works"""
self.build()
self.printarray_data_formatter_commands()
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break at.
self.line = line_number('main.mm', 'break here')
def printarray_data_formatter_commands(self):
"""Test that expr -O -Z works"""
self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)
lldbutil.run_break_set_by_file_and_line(
self, "main.mm", self.line, num_expected_locations=1, loc_exact=True)
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
# This is the function to remove the custom formats in order to have a
# clean slate for the next test case.
def cleanup():
self.runCmd('type format clear', check=False)
self.runCmd('type summary clear', check=False)
self.runCmd('type synth clear', check=False)
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
self.expect(
'expr --element-count 3 --object-description -- objects',
substrs=[
'3735928559',
'4276993775',
'3203398366',
'Hello',
'World',
'Two =',
'1 ='])
self.expect(
'poarray 3 objects',
substrs=[
'3735928559',
'4276993775',
'3203398366',
'Hello',
'World',
'Two =',
'1 ='])
self.expect(
'expr --element-count 3 --object-description --description-verbosity=full -- objects',
substrs=[
'[0] =',
'3735928559',
'4276993775',
'3203398366',
'[1] =',
'Hello',
'World',
'[2] =',
'Two =',
'1 ='])
self.expect(
'parray 3 objects',
substrs=[
'[0] = 0x',
'[1] = 0x',
'[2] = 0x'])
self.expect(
'expr --element-count 3 -d run -- objects',
substrs=[
'3 elements',
'2 elements',
'2 key/value pairs'])
self.expect(
'expr --element-count 3 -d run --ptr-depth=1 -- objects',
substrs=[
'3 elements',
'2 elements',
'2 key/value pairs',
'3735928559',
'4276993775',
'3203398366',
'"Hello"',
'"World"'])
|
youtube/cobalt
|
third_party/llvm-project/lldb/packages/Python/lldbsuite/test/functionalities/data-formatter/poarray/TestPrintObjectArray.py
|
Python
|
bsd-3-clause
| 3,381
|
import asyncio
# python 3.6 doesn't have "get_running_loop"
get_running_loop = getattr(asyncio, "get_running_loop", asyncio.get_event_loop)
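# Example usage sketch: the alias lets callers use one spelling on both
# Python 3.6 (falls back to get_event_loop) and 3.7+ (true get_running_loop).
if __name__ == "__main__":
    async def _demo():
        loop = get_running_loop()
        print(loop.time())
    asyncio.get_event_loop().run_until_complete(_demo())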
|
madedotcom/photon-pump
|
photonpump/compat.py
|
Python
|
mit
| 141
|
# Copyright 2012 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Test dist-upgrader custom uploads.
See also lp.soyuz.tests.test_distroseriesqueue_dist_upgrader for high-level
tests of dist-upgrader upload and queue manipulation.
"""
import os
from lp.archivepublisher.customupload import (
CustomUploadAlreadyExists,
CustomUploadBadUmask,
)
from lp.archivepublisher.dist_upgrader import (
DistUpgraderBadVersion,
DistUpgraderUpload,
process_dist_upgrader,
)
from lp.services.tarfile_helpers import LaunchpadWriteTarFile
from lp.testing import TestCase
class FakeConfig:
"""A fake publisher configuration."""
def __init__(self, archiveroot):
self.archiveroot = archiveroot
class TestDistUpgrader(TestCase):
def setUp(self):
super(TestDistUpgrader, self).setUp()
self.temp_dir = self.makeTemporaryDirectory()
self.pubconf = FakeConfig(self.temp_dir)
self.suite = "distroseries"
# CustomUpload.installFiles requires a umask of 022.
old_umask = os.umask(022)
self.addCleanup(os.umask, old_umask)
def openArchive(self, version):
self.path = os.path.join(
self.temp_dir, "dist-upgrader_%s_all.tar.gz" % version)
self.buffer = open(self.path, "wb")
self.archive = LaunchpadWriteTarFile(self.buffer)
def process(self):
self.archive.close()
self.buffer.close()
process_dist_upgrader(self.pubconf, self.path, self.suite)
def getUpgraderPath(self):
return os.path.join(
self.temp_dir, "dists", self.suite, "main", "dist-upgrader-all")
def test_basic(self):
# Processing a simple correct tar file works.
self.openArchive("20060302.0120")
self.archive.add_file("20060302.0120/hello", "world")
self.process()
def test_already_exists(self):
# If the target directory already exists, processing fails.
self.openArchive("20060302.0120")
self.archive.add_file("20060302.0120/hello", "world")
os.makedirs(os.path.join(self.getUpgraderPath(), "20060302.0120"))
self.assertRaises(CustomUploadAlreadyExists, self.process)
def test_bad_umask(self):
# The umask must be 022 to avoid incorrect permissions.
self.openArchive("20060302.0120")
self.archive.add_file("20060302.0120/file", "foo")
os.umask(002) # cleanup already handled by setUp
self.assertRaises(CustomUploadBadUmask, self.process)
def test_current_symlink(self):
# A "current" symlink is created to the last version.
self.openArchive("20060302.0120")
self.archive.add_file("20060302.0120/hello", "world")
self.process()
upgrader_path = self.getUpgraderPath()
self.assertContentEqual(
["20060302.0120", "current"], os.listdir(upgrader_path))
self.assertEqual(
"20060302.0120",
os.readlink(os.path.join(upgrader_path, "current")))
self.assertContentEqual(
["hello"],
os.listdir(os.path.join(upgrader_path, "20060302.0120")))
def test_bad_version(self):
# Bad versions in the tarball are refused.
self.openArchive("20070219.1234")
self.archive.add_file("foobar/foobar/dapper.tar.gz", "")
self.assertRaises(DistUpgraderBadVersion, self.process)
def test_getSeriesKey_extracts_architecture(self):
# getSeriesKey extracts the architecture from an upload's filename.
self.openArchive("20060302.0120")
self.assertEqual("all", DistUpgraderUpload.getSeriesKey(self.path))
def test_getSeriesKey_returns_None_on_mismatch(self):
# getSeriesKey returns None if the filename does not match the
# expected pattern.
self.assertIsNone(DistUpgraderUpload.getSeriesKey("argh_1.0.jpg"))
def test_getSeriesKey_refuses_names_with_wrong_number_of_fields(self):
# getSeriesKey requires exactly three fields.
self.assertIsNone(DistUpgraderUpload.getSeriesKey(
"package_1.0.tar.gz"))
self.assertIsNone(DistUpgraderUpload.getSeriesKey(
"one_two_three_four_5.tar.gz"))
|
abramhindle/UnnaturalCodeFork
|
python/testdata/launchpad/lib/lp/archivepublisher/tests/test_dist_upgrader.py
|
Python
|
agpl-3.0
| 4,270
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def get_version(*file_paths):
"""Retrieves the version from txmoney/__init__.py"""
filename = os.path.join(os.path.dirname(__file__), *file_paths)
version_file = open(filename).read()
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError('Unable to find version string.')
version = get_version("txmoney", "__init__.py")
if sys.argv[-1] == 'publish':
try:
import wheel
print("Wheel version: ", wheel.__version__)
except ImportError:
print('Wheel library missing. Please run "pip install wheel"')
sys.exit()
os.system('python setup.py sdist upload')
os.system('python setup.py bdist_wheel upload')
sys.exit()
if sys.argv[-1] == 'tag':
print("Tagging the version on git:")
os.system("git tag -a %s -m 'version %s'" % (version, version))
os.system("git push --tags")
sys.exit()
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='dj-txmoney',
version=version,
description="""Django package for working with money and currencies with automatic rate updates.""",
long_description=readme + '\n\n' + history,
author='Mateu Cànaves Albertí',
author_email='mateu.canaves@gmail.com',
url='https://github.com/txerpa/dj-txmoney',
packages=[
'txmoney',
],
include_package_data=True,
install_requires=["Django>=1.8", "celery>=3.1.0", "requests>= 2.12"],
license="MIT",
zip_safe=False,
keywords='dj-txmoney txmoney money currency finance rates',
classifiers=[
'Development Status :: 4 - Beta',
'Framework :: Django',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
'Framework :: Django :: 1.10',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
        'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
],
)
|
txerpa/dj-txmoney
|
setup.py
|
Python
|
mit
| 2,449
|
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Views for Rietveld."""
import binascii
import calendar
import datetime
import email # see incoming_mail()
import email.utils
import itertools
import json
import logging
import md5
import os
import random
import re
import tarfile
import tempfile
import time
import urllib
from cStringIO import StringIO
from xml.etree import ElementTree
from google.appengine.api import mail
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from google.appengine.api import urlfetch
from google.appengine.api import users
from google.appengine.datastore import datastore_query
from google.appengine.ext import db
from google.appengine.ext import ndb
from google.appengine.runtime import DeadlineExceededError
from google.appengine.runtime import apiproxy_errors
from django import forms
# Import settings as django_settings to avoid name conflict with settings().
from django.conf import settings as django_settings
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseNotFound
from django.shortcuts import render_to_response
import django.template
from django.template import RequestContext
from django.utils import encoding
from django.utils.safestring import mark_safe
from django.core.urlresolvers import reverse
from django.core.servers.basehttp import FileWrapper
import httplib2
from oauth2client.appengine import _parse_state_value
from oauth2client.appengine import _safe_html
from oauth2client.appengine import CredentialsNDBModel
from oauth2client.appengine import StorageByKeyName
from oauth2client.appengine import xsrf_secret_key
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import OAuth2WebServerFlow
from oauth2client import xsrfutil
from codereview import auth_utils
from codereview import engine
from codereview import library
from codereview import models
from codereview import patching
from codereview import utils
from codereview.common import IS_DEV
from codereview.exceptions import FetchError
from codereview.responses import HttpTextResponse, HttpHtmlResponse, respond
import codereview.decorators as deco
# Add our own custom template tags library.
django.template.add_to_builtins('codereview.library')
### Constants ###
OAUTH_DEFAULT_ERROR_MESSAGE = 'OAuth 2.0 error occurred.'
_ACCESS_TOKEN_TEMPLATE_ROOT = 'http://localhost:%(port)d?'
ACCESS_TOKEN_REDIRECT_TEMPLATE = (_ACCESS_TOKEN_TEMPLATE_ROOT +
'access_token=%(token)s')
ACCESS_TOKEN_FAIL_REDIRECT_TEMPLATE = (_ACCESS_TOKEN_TEMPLATE_ROOT +
'error=%(error)s')
# Maximum forms fields length
MAX_SUBJECT = 100
MAX_DESCRIPTION = 10000
MAX_URL = 2083
MAX_REVIEWERS = 1000
MAX_CC = 2000
MAX_MESSAGE = 10000
MAX_FILENAME = 255
MAX_DB_KEY_LENGTH = 1000
DB_WRITE_TRIES = 3
DB_WRITE_PAUSE = 4
### Form classes ###
class AccountInput(forms.TextInput):
# Associates the necessary css/js files for the control. See
# http://docs.djangoproject.com/en/dev/topics/forms/media/.
#
# Don't forget to place {{formname.media}} into html header
# when using this html control.
class Media:
css = {
'all': ('autocomplete/jquery.autocomplete.css',)
}
js = (
'autocomplete/lib/jquery.js',
'autocomplete/lib/jquery.bgiframe.min.js',
'autocomplete/lib/jquery.ajaxQueue.js',
'autocomplete/jquery.autocomplete.js'
)
def render(self, name, value, attrs=None):
output = super(AccountInput, self).render(name, value, attrs)
if models.Account.current_user_account is not None:
# TODO(anatoli): move this into .js media for this form
data = {'name': name, 'url': reverse(account),
'multiple': 'true'}
if self.attrs.get('multiple', True) == False:
data['multiple'] = 'false'
output += mark_safe(u'''
<script type="text/javascript">
jQuery("#id_%(name)s").autocomplete("%(url)s", {
max: 10,
highlight: false,
multiple: %(multiple)s,
multipleSeparator: ", ",
scroll: true,
scrollHeight: 300,
matchContains: true,
formatResult : function(row) {
return row[0].replace(/ .+/gi, '');
}
});
</script>''' % data)
return output
class IssueBaseForm(forms.Form):
subject = forms.CharField(max_length=MAX_SUBJECT,
widget=forms.TextInput(attrs={'size': 60}))
description = forms.CharField(required=False,
max_length=MAX_DESCRIPTION,
widget=forms.Textarea(attrs={'cols': 60}))
branch = forms.ChoiceField(required=False, label='Base URL')
base = forms.CharField(required=False,
max_length=MAX_URL,
widget=forms.TextInput(attrs={'size': 60}))
reviewers = forms.CharField(required=False,
max_length=MAX_REVIEWERS,
widget=AccountInput(attrs={'size': 60}))
cc = forms.CharField(required=False,
max_length=MAX_CC,
label = 'CC',
widget=AccountInput(attrs={'size': 60}))
private = forms.BooleanField(required=False, initial=False)
def get_base(self):
base = self.cleaned_data.get('base')
if not base:
key = self.cleaned_data['branch']
if key:
branch = models.Branch.get_by_id(key)
if branch is not None:
base = branch.url
if not base:
self.errors['base'] = ['You must specify a base']
return base or None
class UploadForm(forms.Form):
subject = forms.CharField(max_length=MAX_SUBJECT)
description = forms.CharField(max_length=MAX_DESCRIPTION, required=False)
project = forms.CharField(required=False)
content_upload = forms.BooleanField(required=False)
separate_patches = forms.BooleanField(required=False)
base = forms.CharField(max_length=MAX_URL, required=False)
data = forms.FileField(required=False)
issue = forms.IntegerField(required=False)
reviewers = forms.CharField(max_length=MAX_REVIEWERS, required=False)
cc = forms.CharField(max_length=MAX_CC, required=False)
private = forms.BooleanField(required=False, initial=False)
send_mail = forms.BooleanField(required=False)
base_hashes = forms.CharField(required=False)
repo_guid = forms.CharField(required=False, max_length=MAX_URL)
def clean_base(self):
base = self.cleaned_data.get('base')
if not base and not self.cleaned_data.get('content_upload', False):
      raise forms.ValidationError('Base URL is required.')
return self.cleaned_data.get('base')
def get_base(self):
return self.cleaned_data.get('base')
class UploadContentForm(forms.Form):
filename = forms.CharField(max_length=MAX_FILENAME)
status = forms.CharField(required=False, max_length=20)
checksum = forms.CharField(max_length=32)
file_too_large = forms.BooleanField(required=False)
is_binary = forms.BooleanField(required=False)
is_current = forms.BooleanField(required=False)
def clean(self):
# Check presence of 'data'. We cannot use FileField because
# it disallows empty files.
super(UploadContentForm, self).clean()
    if not self.files or 'data' not in self.files:
      raise forms.ValidationError('No content uploaded.')
return self.cleaned_data
def get_uploaded_content(self):
return self.files['data'].read()
class UploadPatchForm(forms.Form):
filename = forms.CharField(max_length=MAX_FILENAME)
content_upload = forms.BooleanField(required=False)
def get_uploaded_patch(self):
return self.files['data'].read()
class EditLocalBaseForm(forms.Form):
subject = forms.CharField(max_length=MAX_SUBJECT,
widget=forms.TextInput(attrs={'size': 60}))
description = forms.CharField(required=False,
max_length=MAX_DESCRIPTION,
widget=forms.Textarea(attrs={'cols': 60}))
reviewers = forms.CharField(required=False,
max_length=MAX_REVIEWERS,
widget=AccountInput(attrs={'size': 60}))
cc = forms.CharField(required=False,
max_length=MAX_CC,
label = 'CC',
widget=AccountInput(attrs={'size': 60}))
private = forms.BooleanField(
required=False, initial=False, label='Protected',
help_text=(
'Only viewable by @chromium and @google accounts.'
'<div class="if_checked">'
'Please, avoid mailing lists in the CC and Reviewers fields.'
'</div>'))
closed = forms.BooleanField(required=False)
def get_base(self):
return None
class RepoForm(forms.Form):
name = forms.CharField()
url = forms.URLField()
guid = forms.CharField(required=False)
class BranchForm(forms.Form):
category = forms.CharField(
widget=forms.Select(choices=[(ch, ch)
for ch in models.BRANCH_CATEGORY_CHOICES]))
name = forms.CharField()
url = forms.URLField()
class PublishForm(forms.Form):
subject = forms.CharField(max_length=MAX_SUBJECT,
widget=forms.TextInput(attrs={'size': 60}))
reviewers = forms.CharField(required=False,
max_length=MAX_REVIEWERS,
widget=AccountInput(attrs={'size': 60}))
cc = forms.CharField(required=False,
max_length=MAX_CC,
label = 'CC',
widget=AccountInput(attrs={'size': 60}))
send_mail = forms.BooleanField(required=False)
message = forms.CharField(required=False,
max_length=MAX_MESSAGE,
widget=forms.Textarea(attrs={'cols': 60}))
message_only = forms.BooleanField(required=False,
widget=forms.HiddenInput())
no_redirect = forms.BooleanField(required=False,
widget=forms.HiddenInput())
in_reply_to = forms.CharField(required=False,
max_length=MAX_DB_KEY_LENGTH,
widget=forms.HiddenInput())
automated = forms.BooleanField(required=False, widget=forms.HiddenInput(),
initial=True)
verbose = forms.BooleanField(required=False, widget=forms.HiddenInput())
class MiniPublishForm(forms.Form):
reviewers = forms.CharField(required=False,
max_length=MAX_REVIEWERS,
widget=AccountInput(attrs={'size': 60}))
cc = forms.CharField(required=False,
max_length=MAX_CC,
label = 'CC',
widget=AccountInput(attrs={'size': 60}))
send_mail = forms.BooleanField(required=False)
message = forms.CharField(required=False,
max_length=MAX_MESSAGE,
widget=forms.Textarea(attrs={'cols': 60}))
message_only = forms.BooleanField(required=False,
widget=forms.HiddenInput())
no_redirect = forms.BooleanField(required=False,
widget=forms.HiddenInput())
automated = forms.BooleanField(required=False, widget=forms.HiddenInput(),
initial=True)
verbose = forms.BooleanField(required=False, widget=forms.HiddenInput())
class BlockForm(forms.Form):
blocked = forms.BooleanField(
required=False,
help_text='Should this user be blocked')
FORM_CONTEXT_VALUES = [(z, '%d lines' % z) for z in models.CONTEXT_CHOICES]
FORM_CONTEXT_VALUES.append(('', 'Whole file'))
class SettingsForm(forms.Form):
nickname = forms.CharField(max_length=30)
context = forms.IntegerField(
widget=forms.Select(choices=FORM_CONTEXT_VALUES),
required=False,
label='Context')
column_width = forms.IntegerField(
initial=django_settings.DEFAULT_COLUMN_WIDTH,
min_value=django_settings.MIN_COLUMN_WIDTH,
max_value=django_settings.MAX_COLUMN_WIDTH)
notify_by_email = forms.BooleanField(required=False,
widget=forms.HiddenInput())
def clean_nickname(self):
nickname = self.cleaned_data.get('nickname')
# Check for allowed characters
match = re.match(r'[\w\.\-_\(\) ]+$', nickname, re.UNICODE|re.IGNORECASE)
if not match:
raise forms.ValidationError('Allowed characters are letters, digits, '
'".-_()" and spaces.')
# Check for sane whitespaces
if re.search(r'\s{2,}', nickname):
raise forms.ValidationError('Use single spaces between words.')
if len(nickname) != len(nickname.strip()):
raise forms.ValidationError('Leading and trailing whitespaces are '
'not allowed.')
if nickname.lower() == 'me':
raise forms.ValidationError('Choose a different nickname.')
# Look for existing nicknames
query = models.Account.query(
models.Account.lower_nickname == nickname.lower())
if any(
account.key != models.Account.current_user_account.key
for account in query):
raise forms.ValidationError('This nickname is already in use.')
return nickname
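# For illustration, the nickname rules above accept e.g. 'J. Doe (work)'
# (letters, dots, parentheses and single spaces), while 'me', 'a  b'
# (double space), ' name' (leading space) and 'nick!' (disallowed
# character) are all rejected.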
class MigrateEntitiesForm(forms.Form):
account = forms.CharField(label='Your previous email address')
_user = None
def set_user(self, user):
"""Sets the _user attribute.
A user object is needed for validation. This method has to be
    called before is_valid() so that we can check whether the email
    address given in 'account' belongs to the same user.
"""
self._user = user
def clean_account(self):
"""Verifies that an account with this emails exists and returns it.
This method is executed by Django when Form.is_valid() is called.
"""
if self._user is None:
raise forms.ValidationError('No user given.')
account = models.Account.get_account_for_email(self.cleaned_data['account'])
if account is None:
raise forms.ValidationError('No such email.')
if account.user.email() == self._user.email():
raise forms.ValidationError(
'Nothing to do. This is your current email address.')
if account.user.user_id() != self._user.user_id():
raise forms.ValidationError(
'This email address isn\'t related to your account.')
return account
ORDER_CHOICES = (
'__key__',
'owner',
'created',
'modified',
)
class SearchForm(forms.Form):
format = forms.ChoiceField(
required=False,
choices=(
('html', 'html'),
('json', 'json')),
widget=forms.HiddenInput(attrs={'value': 'html'}))
keys_only = forms.BooleanField(
required=False,
widget=forms.HiddenInput(attrs={'value': 'False'}))
with_messages = forms.BooleanField(
required=False,
widget=forms.HiddenInput(attrs={'value': 'False'}))
cursor = forms.CharField(
required=False,
widget=forms.HiddenInput(attrs={'value': ''}))
limit = forms.IntegerField(
required=False,
min_value=1,
max_value=1000,
widget=forms.HiddenInput(attrs={'value': '30'}))
closed = forms.NullBooleanField(required=False)
owner = forms.CharField(required=False,
max_length=MAX_REVIEWERS,
widget=AccountInput(attrs={'size': 60,
'multiple': False}))
reviewer = forms.CharField(required=False,
max_length=MAX_REVIEWERS,
widget=AccountInput(attrs={'size': 60,
'multiple': False}))
cc = forms.CharField(required=False,
max_length=MAX_CC,
label = 'CC',
widget=AccountInput(attrs={'size': 60}))
repo_guid = forms.CharField(required=False, max_length=MAX_URL,
label="Repository ID")
base = forms.CharField(required=False, max_length=MAX_URL)
private = forms.NullBooleanField(required=False)
created_before = forms.DateTimeField(
required=False, label='Created before',
help_text='Format: YYYY-MM-DD and optional: hh:mm:ss')
created_after = forms.DateTimeField(
required=False, label='Created on or after')
modified_before = forms.DateTimeField(required=False, label='Modified before')
modified_after = forms.DateTimeField(
required=False, label='Modified on or after')
order = forms.ChoiceField(
required=False, help_text='Order: Name of one of the datastore keys',
choices=sum(
([(x, x), ('-' + x, '-' + x)] for x in ORDER_CHOICES),
[('', '(default)')]))
def _clean_accounts(self, key):
"""Cleans up autocomplete field.
    The input is validated to contain at most one name/email, and it is
    validated that the user exists.
Args:
key: the field name.
    Returns a User instance or raises ValidationError.
"""
accounts = filter(None,
(x.strip()
for x in self.cleaned_data.get(key, '').split(',')))
if len(accounts) > 1:
raise forms.ValidationError('Only one user name is allowed.')
elif not accounts:
return None
account = accounts[0]
if '@' in account:
acct = models.Account.get_account_for_email(account)
else:
acct = models.Account.get_account_for_nickname(account)
if not acct:
raise forms.ValidationError('Unknown user')
return acct.user
def clean_owner(self):
return self._clean_accounts('owner')
def clean_reviewer(self):
user = self._clean_accounts('reviewer')
if user:
return user.email()
class StringListField(forms.CharField):
def prepare_value(self, value):
if value is None:
return ''
return ','.join(value)
def to_python(self, value):
if not value:
return []
return [list_value.strip() for list_value in value.split(',')]
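# For illustration, StringListField round-trips comma-separated values:
# prepare_value(['a', 'b']) renders as 'a,b', to_python('a, b') parses back
# to ['a', 'b'], and an empty value becomes [].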
class ClientIDAndSecretForm(forms.Form):
"""Simple form for collecting Client ID and Secret."""
client_id = forms.CharField()
client_secret = forms.CharField()
additional_client_ids = StringListField()
class UpdateStatsForm(forms.Form):
tasks_to_trigger = forms.CharField(
required=True, max_length=2000,
      help_text='Comma-separated items.',
widget=forms.TextInput(attrs={'size': '100'}))
### Exceptions ###
class InvalidIncomingEmailError(Exception):
"""Exception raised by incoming mail handler when a problem occurs."""
### Helper functions ###
def _random_bytes(n):
"""Helper returning a string of random bytes of given length."""
return ''.join(map(chr, (random.randrange(256) for i in xrange(n))))
def _clean_int(value, default, min_value=None, max_value=None):
"""Helper to cast value to int and to clip it to min or max_value.
Args:
value: Any value (preferably something that can be casted to int).
default: Default value to be used when type casting fails.
min_value: Minimum allowed value (default: None).
max_value: Maximum allowed value (default: None).
Returns:
An integer between min_value and max_value.
"""
if not isinstance(value, (int, long)):
try:
value = int(value)
except (TypeError, ValueError):
value = default
if min_value is not None:
value = max(min_value, value)
if max_value is not None:
value = min(value, max_value)
return value
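# For illustration: _clean_int('25', 10, 1, 100) == 25,
# _clean_int('junk', 10, 1, 100) falls back to the default 10, and
# _clean_int(5000, 10, 1, 100) is clipped to the maximum, 100.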
### Request handlers ###
def index(request):
"""/ - Show a list of review issues"""
if request.user is None:
return view_all(request, index_call=True)
else:
return mine(request)
DEFAULT_LIMIT = 20
def _url(path, **kwargs):
"""Format parameters for query string.
Args:
path: Path of URL.
    kwargs: Keyword parameters are treated as values to add to the query
      string of the URL. If empty, no query parameters are added to the
      path and the '?' is omitted from the URL.
"""
if kwargs:
encoded_parameters = urllib.urlencode(kwargs)
if path.endswith('?'):
# Trailing ? on path. Append parameters to end.
return '%s%s' % (path, encoded_parameters)
elif '?' in path:
# Append additional parameters to existing query parameters.
return '%s&%s' % (path, encoded_parameters)
else:
# Add query parameters to path with no query parameters.
return '%s?%s' % (path, encoded_parameters)
else:
return path
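# For illustration: _url('/all', closed=1) yields '/all?closed=1',
# _url('/all?closed=1', limit=30) appends '&limit=30', and with no keyword
# arguments the path is returned unchanged. Parameter order follows
# urllib.urlencode and is not guaranteed.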
def _inner_paginate(request, issues, template, extra_template_params):
"""Display paginated list of issues.
Takes care of the private bit.
Args:
request: Request containing offset and limit parameters.
issues: Issues to be displayed.
template: Name of template that renders issue page.
extra_template_params: Dictionary of extra parameters to pass to page
rendering.
Returns:
Response for sending back to browser.
"""
visible_issues = [i for i in issues if i.view_allowed]
_optimize_draft_counts(visible_issues)
_load_users_for_issues(visible_issues)
params = {
'issues': visible_issues,
'limit': None,
'newest': None,
'prev': None,
'next': None,
'nexttext': '',
'first': '',
'last': '',
}
if extra_template_params:
params.update(extra_template_params)
return respond(request, template, params)
def _paginate_issues(page_url,
request,
query,
template,
extra_nav_parameters=None,
extra_template_params=None):
"""Display paginated list of issues.
Args:
page_url: Base URL of issue page that is being paginated. Typically
generated by calling 'reverse' with a name and arguments of a view
function.
request: Request containing offset and limit parameters.
query: Query over issues.
template: Name of template that renders issue page.
extra_nav_parameters: Dictionary of extra parameters to append to the
navigation links.
extra_template_params: Dictionary of extra parameters to pass to page
rendering.
Returns:
Response for sending back to browser.
"""
offset = _clean_int(request.GET.get('offset'), 0, 0)
limit = _clean_int(request.GET.get('limit'), DEFAULT_LIMIT, 1, 100)
nav_parameters = {'limit': str(limit)}
if extra_nav_parameters is not None:
nav_parameters.update(extra_nav_parameters)
params = {
'limit': limit,
'first': offset + 1,
'nexttext': 'Older',
}
# Fetch one more to see if there should be a 'next' link
logging.info('query during pagination is %r', query)
issues = query.fetch(limit+1, offset=offset)
if len(issues) > limit:
del issues[limit:]
params['next'] = _url(page_url, offset=offset + limit, **nav_parameters)
params['last'] = len(issues) > 1 and offset+len(issues) or None
if offset > 0:
params['prev'] = _url(page_url, offset=max(0, offset - limit),
**nav_parameters)
if offset > limit:
params['newest'] = _url(page_url, **nav_parameters)
if extra_template_params:
params.update(extra_template_params)
return _inner_paginate(request, issues, template, params)
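# For illustration, with offset=20 and limit=20 the helper fetches 21
# issues; if all 21 arrive, the extra one is dropped, 'next' points at
# offset=40 and 'prev' points back at offset=0.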
def _paginate_issues_with_cursor(page_url,
request,
query,
cursor,
limit,
template,
extra_nav_parameters=None,
extra_template_params=None):
"""Display paginated list of issues using a cursor instead of offset.
Args:
page_url: Base URL of issue page that is being paginated. Typically
generated by calling 'reverse' with a name and arguments of a view
function.
request: Request containing offset and limit parameters.
query: Query over issues
cursor: cursor object passed to web form and back again.
limit: Maximum number of issues to return.
template: Name of template that renders issue page.
extra_nav_parameters: Dictionary of extra parameters to append to the
navigation links.
extra_template_params: Dictionary of extra parameters to pass to page
rendering.
Returns:
Response for sending back to browser.
"""
issues, next_cursor, has_more = query.fetch_page(limit, start_cursor=cursor)
nav_parameters = {}
if extra_nav_parameters:
nav_parameters.update(extra_nav_parameters)
nav_parameters['cursor'] = next_cursor.urlsafe() if next_cursor else ''
params = {
'limit': limit,
'cursor': nav_parameters['cursor'],
'nexttext': 'Next',
}
if has_more:
params['next'] = _url(page_url, **nav_parameters)
if extra_template_params:
params.update(extra_template_params)
return _inner_paginate(request, issues, template, params)
def view_all(request, index_call=False):
"""/all - Show a list of up to DEFAULT_LIMIT recent issues."""
closed = request.GET.get('closed', '')
if closed in ('0', 'false'):
closed = False
elif closed in ('1', 'true'):
closed = True
elif index_call:
# for index we display only open issues by default
closed = False
else:
closed = None
nav_parameters = {}
if closed is not None:
nav_parameters['closed'] = int(closed)
query = models.Issue.query(
models.Issue.private == False).order(-models.Issue.modified)
if closed is not None:
# return only opened or closed issues
query = query.filter(models.Issue.closed == closed)
return _paginate_issues(reverse(view_all),
request,
query,
'all.html',
extra_nav_parameters=nav_parameters,
extra_template_params=dict(closed=closed))
def _optimize_draft_counts(issues):
"""Force _num_drafts to zero for issues that are known to have no drafts.
Args:
issues: list of model.Issue instances.
This inspects the drafts attribute of the current user's Account
  instance, and forces the draft count to zero for those issues in the
  list that aren't mentioned there.
If there is no current user, all draft counts are forced to 0.
"""
account = models.Account.current_user_account
if account is None:
issue_ids = None
else:
issue_ids = account.drafts
for issue in issues:
if issue_ids is None or issue.key.id() not in issue_ids:
issue._num_drafts = issue._num_drafts or {}
if account:
issue._num_drafts[account.email] = 0
@deco.login_required
def mine(request):
"""/mine - Show a list of issues created by the current user."""
request.user_to_show = request.user
return _show_user(request)
@deco.login_required
def starred(request):
"""/starred - Show a list of issues starred by the current user."""
stars = models.Account.current_user_account.stars
if not stars:
issues = []
else:
starred_issue_keys = [ndb.Key(models.Issue, i) for i in stars]
issues = [issue for issue in ndb.get_multi(starred_issue_keys)
if issue and issue.view_allowed]
_load_users_for_issues(issues)
_optimize_draft_counts(issues)
return respond(request, 'starred.html', {'issues': issues})
def _load_users_for_issues(issues):
"""Load all user links for a list of issues in one go."""
user_dict = {}
for i in issues:
for e in i.reviewers + i.cc + [i.owner.email()]:
# keeping a count lets you track total vs. distinct if you want
user_dict[e] = user_dict.setdefault(e, 0) + 1
library.get_links_for_users(user_dict.keys())
@deco.user_key_required
def show_user(request):
"""/user - Show the user's dashboard"""
return _show_user(request)
def _show_user(request):
user = request.user_to_show
if user == request.user:
draft_query = models.Comment.query(
models.Comment.draft == True, models.Comment.author == request.user)
draft_issue_keys = {
draft_key.parent().parent().parent()
for draft_key in draft_query.fetch(100, keys_only=True)}
draft_issues = ndb.get_multi(draft_issue_keys)
# Reduce the chance of someone trying to block himself.
show_block = False
else:
draft_issues = draft_issue_keys = []
show_block = request.user_is_admin
my_issues = [
issue for issue in models.Issue.query(
models.Issue.closed == False, models.Issue.owner == user).order(
-models.Issue.modified).fetch(100)
if issue.key not in draft_issue_keys and issue.view_allowed]
review_issues = [
issue for issue in models.Issue.query(
models.Issue.closed == False,
models.Issue.reviewers == user.email().lower()).order(
-models.Issue.modified).fetch(100)
if (issue.key not in draft_issue_keys and issue.owner != user
and issue.view_allowed)]
earliest_closed = datetime.datetime.utcnow() - datetime.timedelta(days=7)
closed_issues = [
issue for issue in models.Issue.query(
models.Issue.closed == True,
models.Issue.modified > earliest_closed,
models.Issue.owner == user).order(
-models.Issue.modified).fetch(100)
if issue.key not in draft_issue_keys and issue.view_allowed]
cc_issues = [
issue for issue in models.Issue.query(
models.Issue.closed == False, models.Issue.cc == user.email()).order(
-models.Issue.modified).fetch(100)
if (issue.key not in draft_issue_keys and issue.owner != user
and issue.view_allowed)]
all_issues = my_issues + review_issues + closed_issues + cc_issues
# Some of these issues may not have accurate updates_for information,
# so ask each issue to update itself.
futures = []
for issue in itertools.chain(draft_issues, all_issues):
ret = issue.calculate_and_save_updates_if_None()
if ret is not None:
futures.append(ret)
for f in futures:
f.get_result()
# When a CL is sent from upload.py using --send_mail we create an empty
# message. This might change in the future, either by not adding an empty
# message or by populating the message with the content of the email
# that was sent out.
outgoing_issues = [issue for issue in my_issues if issue.num_messages]
unsent_issues = [issue for issue in my_issues if not issue.num_messages]
_load_users_for_issues(all_issues)
_optimize_draft_counts(all_issues)
account = models.Account.get_account_for_user(request.user_to_show)
return respond(request, 'user.html',
{'viewed_account': account,
'outgoing_issues': outgoing_issues,
'unsent_issues': unsent_issues,
'review_issues': review_issues,
'closed_issues': closed_issues,
'cc_issues': cc_issues,
'draft_issues': draft_issues,
'show_block': show_block,
})
@deco.require_methods('POST')
@deco.login_required
@deco.patchset_required
@deco.xsrf_required
def edit_patchset_title(request):
"""/<issue>/edit_patchset_title - Edit the specified patchset's title."""
if request.user.email().lower() != request.issue.owner.email():
return HttpResponseBadRequest(
'Only the issue owner can edit patchset titles')
patchset = request.patchset
patchset.message = request.POST.get('patchset_title')
patchset.put()
return HttpResponse('OK', content_type='text/plain')
@deco.admin_required
@deco.user_key_required
@deco.xsrf_required
def block_user(request):
"""/user/<user>/block - Blocks a specific user."""
account = models.Account.get_account_for_user(request.user_to_show)
if request.method == 'POST':
form = BlockForm(request.POST)
if form.is_valid():
account.blocked = form.cleaned_data['blocked']
logging.debug(
'Updating block bit to %s for user %s',
account.blocked,
account.email)
account.put()
if account.blocked:
# Remove user from existing issues so that he doesn't participate in
# email communication anymore.
tbd = {}
email = account.user.email()
query = models.Issue.query(models.Issue.reviewers == email)
for issue in query:
issue.reviewers.remove(email)
issue.calculate_updates_for()
tbd[issue.key] = issue
# look for issues where blocked user is in cc only
query = models.Issue.query(models.Issue.cc == email)
for issue in query:
if issue.key in tbd:
# Update already changed instance instead. This happens when the
# blocked user is in both reviewers and ccs.
issue = tbd[issue.key]
issue.cc.remove(account.user.email())
tbd[issue.key] = issue
ndb.put_multi(tbd.values())
else:
form = BlockForm()
form.initial['blocked'] = account.blocked
templates = {
'viewed_account': account,
'form': form,
}
return respond(request, 'block_user.html', templates)
@deco.login_required
@deco.xsrf_required
def use_uploadpy(request):
"""Show an intermediate page about upload.py."""
if request.method == 'POST':
return HttpResponseRedirect(reverse(customized_upload_py))
return respond(request, 'use_uploadpy.html')
@deco.require_methods('POST')
@deco.upload_required
def upload(request):
"""/upload - Used by upload.py to create a new Issue and add PatchSet's to
existing Issues.
This generates a text/plain response.
"""
if request.user is None:
if IS_DEV:
request.user = users.User(request.POST.get('user', 'test@example.com'))
else:
return HttpTextResponse('Login required', status=401)
# Check against old upload.py usage.
if request.POST.get('num_parts') > 1:
return HttpTextResponse('Upload.py is too old, get the latest version.')
form = UploadForm(request.POST, request.FILES)
issue = None
patchset = None
if form.is_valid():
issue_id = form.cleaned_data['issue']
if issue_id:
action = 'updated'
issue = models.Issue.get_by_id(issue_id)
if issue is None:
form.errors['issue'] = ['No issue exists with that id (%s)' %
issue_id]
elif not form.cleaned_data.get('content_upload'):
form.errors['issue'] = ['Base files upload required for that issue.']
issue = None
else:
if not issue.edit_allowed:
form.errors['user'] = ['You (%s) don\'t own this issue (%s)' %
(request.user, issue_id)]
issue = None
elif issue.closed:
form.errors['issue'] = ['This issue is closed (%s)' % (issue_id)]
issue = None
else:
patchset = _add_patchset_from_form(request, issue, form, 'subject',
emails_add_only=True)
if not patchset:
issue = None
else:
action = 'created'
issue, patchset = _make_new(request, form)
if issue is None:
msg = 'Issue creation errors: %s' % repr(form.errors)
else:
msg = ('Issue %s. URL: %s' %
(action,
request.build_absolute_uri(
reverse('show_bare_issue_number', args=[issue.key.id()]))))
if (form.cleaned_data.get('content_upload') or
form.cleaned_data.get('separate_patches')):
# Extend the response message: 2nd line is patchset id.
msg +="\n%d" % patchset.key.id()
if form.cleaned_data.get('content_upload'):
# Extend the response: additional lines are the expected filenames.
issue.put()
base_hashes = {}
for file_info in form.cleaned_data.get('base_hashes').split("|"):
if not file_info:
break
checksum, filename = file_info.split(":", 1)
base_hashes[filename] = checksum
logging.info('base_hashes is %r', base_hashes)
content_entities = []
new_content_entities = []
patches = list(patchset.patches)
logging.info('len(patches) = %r', len(patches))
existing_patches = {}
patchsets = list(issue.patchsets)
if len(patchsets) > 1:
# Only check the last uploaded patchset for speed.
last_patch_list = patchsets[-2].patches
patchsets = None # Reduce memory usage.
for opatch in last_patch_list:
if opatch.content_key:
existing_patches[opatch.filename] = opatch
for patch in patches:
# Check if the base file is already uploaded in another patchset.
if (patch.filename in base_hashes and
patch.filename in existing_patches and
(base_hashes[patch.filename] ==
existing_patches[patch.filename].content_key.get().checksum)):
content_key = existing_patches[patch.filename].content_key
patch.status = existing_patches[patch.filename].status
patch.is_binary = existing_patches[patch.filename].is_binary
patch.content_key = content_key
existing_patches = None # Reduce memory usage.
for patch in patches:
id_string = patch.key.id()
if patch.content_key is not None:
# Base file not needed since we reused a previous upload. Send its
# patch id in case it's a binary file and the new content needs to
# be uploaded. We mark this by prepending 'nobase' to the id.
id_string = "nobase_" + str(id_string)
msg += "\n%s %s" % (id_string, patch.filename)
logging.info('upload response is:\n %s\n', msg)
ndb.put_multi(patches)
return HttpTextResponse(msg)
@ndb.transactional()
def _update_patch(patch_key, content_key, is_current, status, is_binary):
"""Store content-related info in a Patch."""
patch = patch_key.get()
patch.status = status
patch.is_binary = is_binary
if is_current:
patch.patched_content_key = content_key
else:
patch.content_key = content_key
patch.put()
@deco.require_methods('POST')
@deco.patch_required
@deco.upload_required
def upload_content(request):
"""/<issue>/upload_content/<patchset>/<patch> - Upload base file contents.
Used by upload.py to upload base files.
"""
form = UploadContentForm(request.POST, request.FILES)
if not form.is_valid():
return HttpTextResponse(
'ERROR: Upload content errors:\n%s' % repr(form.errors))
if request.user is None:
if IS_DEV:
request.user = users.User(request.POST.get('user', 'test@example.com'))
else:
return HttpTextResponse('Error: Login required', status=401)
if not request.issue.edit_allowed:
return HttpTextResponse('ERROR: You (%s) don\'t own this issue (%s).' %
(request.user, request.issue.key.id()))
patch = request.patch
if form.cleaned_data['is_current']:
if patch.patched_content_key:
return HttpTextResponse('ERROR: Already have current content.')
else:
if patch.content_key:
return HttpTextResponse('ERROR: Already have base content.')
content = models.Content(is_uploaded=True, parent=patch.key)
if form.cleaned_data['file_too_large']:
content.file_too_large = True
else:
data = form.get_uploaded_content()
checksum = md5.new(data).hexdigest()
if checksum != request.POST.get('checksum'):
return HttpTextResponse('ERROR: Checksum mismatch.')
if form.cleaned_data['is_binary']:
content.data = data
else:
content.text = utils.to_dbtext(utils.unify_linebreaks(data))
content.checksum = checksum
for try_number in xrange(DB_WRITE_TRIES):
try:
content.put()
_update_patch(
patch.key, content.key, form.cleaned_data['is_current'],
form.cleaned_data['status'], form.cleaned_data['is_binary'])
return HttpTextResponse('OK')
except db.TransactionFailedError as err:
if not err.message.endswith('Please try again.'):
logging.exception(err)
# AppEngine datastore cannot write to the same entity group rapidly.
time.sleep(DB_WRITE_PAUSE + try_number * random.random())
return HttpTextResponse('Error: could not store data', status=500)
@deco.require_methods('POST')
@deco.patchset_required
@deco.upload_required
def upload_patch(request):
"""/<issue>/upload_patch/<patchset> - Upload patch to patchset.
Used by upload.py to upload a patch when the diff is too large to upload all
together.
"""
if request.user is None:
if IS_DEV:
request.user = users.User(request.POST.get('user', 'test@example.com'))
else:
return HttpTextResponse('Error: Login required', status=401)
if not request.issue.edit_allowed:
return HttpTextResponse(
'ERROR: You (%s) don\'t own this issue (%s).' %
(request.user, request.issue.key.id()))
form = UploadPatchForm(request.POST, request.FILES)
if not form.is_valid():
return HttpTextResponse(
'ERROR: Upload patch errors:\n%s' % repr(form.errors))
patchset = request.patchset
if patchset.data:
return HttpTextResponse(
'ERROR: Can\'t upload patches to patchset with data.')
text = utils.to_dbtext(utils.unify_linebreaks(form.get_uploaded_patch()))
patch = models.Patch(
patchset_key=patchset.key, text=text,
filename=form.cleaned_data['filename'], parent=patchset.key)
patch.put()
msg = 'OK\n' + str(patch.key.id())
return HttpTextResponse(msg)
@deco.require_methods('POST')
@deco.issue_editor_required
@deco.upload_required
def upload_complete(request, patchset_id=None):
"""/<issue>/upload_complete/<patchset> - Patchset upload is complete.
/<issue>/upload_complete/ - used when no base files are uploaded.
The following POST parameters are handled:
   - send_mail: If 'yes', a notification mail will be sent.
- attach_patch: If 'yes', the patches will be attached to the mail.
"""
if patchset_id is not None:
patchset = models.PatchSet.get_by_id(int(patchset_id),
parent=request.issue.key)
if patchset is None:
return HttpTextResponse(
'No patch set exists with that id (%s)' % patchset_id, status=403)
# Add delta calculation task.
# TODO(jrobbins): If this task has transient failures, consider using cron.
taskqueue.add(url=reverse(task_calculate_delta),
params={'key': patchset.key.urlsafe()},
queue_name='deltacalculation')
else:
patchset = None
# Check for completeness
errors = []
if patchset is not None:
query = models.Patch.query(
models.Patch.is_binary == False, models.Patch.status == None,
ancestor=patchset.key)
# All uploaded files have a status, any with status==None are missing.
if query.count() > 0:
errors.append('Base files missing.')
if errors:
    msg = ('The following errors occurred:\n%s\n'
'Try to upload the changeset again.' % '\n'.join(errors))
logging.error('Returning error:\n %s', msg)
return HttpTextResponse(msg, status=500)
# Create (and send) a message if needed.
if request.POST.get('send_mail') == 'yes' or request.POST.get('message'):
msg = _make_message(request, request.issue, request.POST.get('message', ''),
send_mail=(request.POST.get('send_mail', '') == 'yes'))
request.issue.put()
msg.put()
return HttpTextResponse('OK')
def _make_new(request, form):
"""Creates new issue and fill relevant fields from given form data.
Sends notification about created issue (if requested with send_mail param).
Returns (Issue, PatchSet) or (None, None).
"""
if not form.is_valid():
return (None, None)
account = models.Account.get_account_for_user(request.user)
if account.blocked:
# Early exit for blocked accounts.
return (None, None)
data_url = _get_data_url(form)
if data_url is None:
return (None, None)
data, url, separate_patches = data_url
reviewers = _get_emails(form, 'reviewers')
if not form.is_valid() or reviewers is None:
return (None, None)
cc = _get_emails(form, 'cc')
if not form.is_valid():
return (None, None)
base = form.get_base()
if base is None:
return (None, None)
first_issue_id, _ = models.Issue.allocate_ids(1)
issue_key = ndb.Key(models.Issue, first_issue_id)
issue = models.Issue(subject=form.cleaned_data['subject'],
description=form.cleaned_data['description'],
project=form.cleaned_data['project'],
base=base,
repo_guid=form.cleaned_data.get('repo_guid', None),
reviewers=reviewers,
cc=cc,
private=form.cleaned_data.get('private', False),
n_comments=0,
key=issue_key)
issue.put()
first_ps_id, _ = models.PatchSet.allocate_ids(1, parent=issue.key)
ps_key = ndb.Key(models.PatchSet, first_ps_id, parent=issue.key)
patchset = models.PatchSet(issue_key=issue.key, data=data, url=url, key=ps_key)
patchset.put()
if not separate_patches:
try:
patches = engine.ParsePatchSet(patchset)
except:
# catch all exceptions happening in engine.ParsePatchSet,
# engine.SplitPatch. With malformed diffs a variety of exceptions could
# happen there.
logging.exception('Exception during patch parsing')
patches = []
if not patches:
patchset.key.delete()
issue.key.delete()
errkey = url and 'url' or 'data'
form.errors[errkey] = ['Patch set contains no recognizable patches']
return (None, None)
ndb.put_multi(patches)
if form.cleaned_data.get('send_mail'):
msg = _make_message(request, issue, '', '', True)
issue.put()
msg.put()
return (issue, patchset)
def _get_data_url(form):
"""Helper for _make_new().
Args:
form: Django form object.
Returns:
3-tuple (data, url, separate_patches).
data: the diff content, if available.
url: the url of the diff, if given.
separate_patches: True iff the patches will be uploaded separately for
each file.
"""
cleaned_data = form.cleaned_data
data = cleaned_data['data']
url = cleaned_data.get('url')
separate_patches = cleaned_data.get('separate_patches')
if not (data or url or separate_patches):
form.errors['data'] = ['You must specify a URL or upload a file (< 1 MB).']
return None
if data and url:
form.errors['data'] = ['You must specify either a URL or upload a file '
'but not both.']
return None
if separate_patches and (data or url):
form.errors['data'] = ['If the patches will be uploaded separately later, '
'you can\'t send some data or a url.']
return None
if data is not None:
data = db.Blob(utils.unify_linebreaks(data.read()))
url = None
elif url:
try:
fetch_result = urlfetch.fetch(url, validate_certificate=True)
except Exception as err:
form.errors['url'] = [str(err)]
return None
if fetch_result.status_code != 200:
form.errors['url'] = ['HTTP status code %s' % fetch_result.status_code]
return None
data = db.Blob(utils.unify_linebreaks(fetch_result.content))
return data, url, separate_patches
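# For illustration, exactly one of an uploaded diff, a fetchable URL, or the
# separate_patches flag must be supplied: a form with only 'data' set comes
# back as (Blob(<diff>), None, False), while supplying both data and a url
# records a form error and the helper returns None.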
def _add_patchset_from_form(request, issue, form, message_key='message',
emails_add_only=False):
"""Helper for upload()."""
if form.is_valid():
data_url = _get_data_url(form)
if not form.is_valid():
return None
account = models.Account.get_account_for_user(request.user)
if account.blocked:
return None
if not issue.edit_allowed:
# This check is done at each call site but check again as a safety measure.
return None
data, url, separate_patches = data_url
message = form.cleaned_data[message_key]
first_id, _ = models.PatchSet.allocate_ids(1, parent=issue.key)
ps_key = ndb.Key(models.PatchSet, first_id, parent=issue.key)
patchset = models.PatchSet(
issue_key=issue.key, message=message, data=data, url=url, key=ps_key)
patchset.put()
if not separate_patches:
try:
patches = engine.ParsePatchSet(patchset)
except:
logging.exception('Exception during patchset parsing')
patches = []
if not patches:
patchset.key.delete()
errkey = url and 'url' or 'data'
form.errors[errkey] = ['Patch set contains no recognizable patches']
return None
ndb.put_multi(patches)
if emails_add_only:
emails = _get_emails(form, 'reviewers')
if not form.is_valid():
return None
issue.reviewers += [reviewer for reviewer in emails
if reviewer not in issue.reviewers]
emails = _get_emails(form, 'cc')
if not form.is_valid():
return None
issue.cc += [cc for cc in emails if cc not in issue.cc]
else:
issue.reviewers = _get_emails(form, 'reviewers')
issue.cc = _get_emails(form, 'cc')
issue.calculate_updates_for()
issue.put()
if form.cleaned_data.get('send_mail'):
msg = _make_message(request, issue, message, '', True)
issue.put()
msg.put()
return patchset
def _get_emails(form, label):
"""Helper to return the list of reviewers, or None for error."""
raw_emails = form.cleaned_data.get(label)
if raw_emails:
return _get_emails_from_raw(raw_emails.split(','), form=form, label=label)
return []
def _get_emails_from_raw(raw_emails, form=None, label=None):
emails = []
for email in raw_emails:
email = email.strip()
if email:
try:
if '@' not in email:
account = models.Account.get_account_for_nickname(email)
if account is None:
raise db.BadValueError('Unknown user: %s' % email)
db_email = account.user.email().lower()
elif email.count('@') != 1:
raise db.BadValueError('Invalid email address: %s' % email)
else:
_, tail = email.split('@')
if '.' not in tail:
raise db.BadValueError('Invalid email address: %s' % email)
db_email = email.lower()
except db.BadValueError as err:
if form:
form.errors[label] = [unicode(err)]
return None
if db_email not in emails:
emails.append(db_email)
# Remove blocked accounts
for account in models.Account.get_multiple_accounts_by_email(emails).values():
if account.blocked:
try:
emails.remove(account.email)
      except ValueError:
pass
return emails
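# For illustration, ['Foo.Bar@Example.COM', 'somenick'] is returned as
# lower-cased addresses, with 'somenick' resolved via
# Account.get_account_for_nickname; an unknown nickname or an address whose
# domain lacks a dot records a form error (when a form is given) and yields
# None, and blocked accounts are dropped from the result.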
@deco.issue_required
def show(request):
"""/<issue> - Show an issue."""
patchsets = request.issue.get_patchset_info(request.user, None)
last_patchset = first_patch = None
if patchsets:
last_patchset = patchsets[-1]
if last_patchset.patches:
first_patch = last_patchset.patches[0]
messages = []
has_draft_message = False
for msg in request.issue.messages:
if not msg.draft:
messages.append(msg)
elif msg.draft and request.user and msg.sender == request.user.email():
has_draft_message = True
num_patchsets = len(patchsets)
return respond(request, 'issue.html', {
'first_patch': first_patch,
'has_draft_message': has_draft_message,
'is_editor': request.issue.edit_allowed,
'issue': request.issue,
'last_patchset': last_patchset,
'messages': messages,
'num_patchsets': num_patchsets,
'patchsets': patchsets,
})
@deco.patchset_required
def patchset(request):
"""/patchset/<key> - Returns patchset information."""
patchsets = request.issue.get_patchset_info(
request.user, request.patchset.key.id())
for ps in patchsets:
if ps.key.id() == request.patchset.key.id():
patchset = ps
return respond(request, 'patchset.html',
{'issue': request.issue,
'patchset': request.patchset,
'patchsets': patchsets,
'is_editor': request.issue.edit_allowed,
})
@deco.login_required
def account(request):
"""/account/?q=blah&limit=10×tamp=blah - Used for autocomplete."""
def searchAccounts(prop, domain, added, response):
prefix = request.GET.get('q').lower()
limit = _clean_int(request.GET.get('limit'), 10, 10, 100)
accounts_query = models.Account.query(
prop >= prefix, prop < prefix + u"\ufffd").order(prop)
for account in accounts_query:
if account.blocked:
continue
if account.key in added:
continue
if domain and not account.email.endswith(domain):
continue
if len(added) >= limit:
break
added.add(account.key)
response += '%s (%s)\n' % (account.email, account.nickname)
return added, response
added = set()
response = ''
domain = os.environ['AUTH_DOMAIN']
if domain != 'gmail.com':
# 'gmail.com' is the value AUTH_DOMAIN is set to if the app is running
# on appspot.com and shouldn't prioritize the custom domain.
added, response = searchAccounts(
models.Account.lower_email, domain, added, response)
added, response = searchAccounts(
models.Account.lower_nickname, domain, added, response)
added, response = searchAccounts(
models.Account.lower_nickname, "", added, response)
added, response = searchAccounts(
models.Account.lower_email, "", added, response)
return HttpTextResponse(response)
@deco.issue_editor_required
@deco.xsrf_required
def edit(request):
"""/<issue>/edit - Edit an issue."""
issue = request.issue
base = issue.base
if request.method != 'POST':
reviewers = [models.Account.get_nickname_for_email(reviewer,
default=reviewer)
for reviewer in issue.reviewers]
ccs = [models.Account.get_nickname_for_email(cc, default=cc)
for cc in issue.cc]
form = EditLocalBaseForm(initial={'subject': issue.subject,
'description': issue.description,
'base': base,
'reviewers': ', '.join(reviewers),
'cc': ', '.join(ccs),
'closed': issue.closed,
'private': issue.private,
})
return respond(request, 'edit.html', {
'issue': issue,
'form': form,
'offer_delete': (issue.owner == request.user
or auth_utils.is_current_user_admin())
})
form = EditLocalBaseForm(request.POST)
if form.is_valid():
reviewers = _get_emails(form, 'reviewers')
if form.is_valid():
cc = _get_emails(form, 'cc')
if not form.is_valid():
return respond(request, 'edit.html', {'issue': issue, 'form': form})
cleaned_data = form.cleaned_data
was_closed = issue.closed
issue.subject = cleaned_data['subject']
issue.description = cleaned_data['description']
issue.closed = cleaned_data['closed']
issue.private = cleaned_data.get('private', False)
base_changed = (issue.base != base)
issue.base = base
issue.reviewers = reviewers
issue.cc = cc
if base_changed:
for patchset in issue.patchsets:
ndb.transaction(lambda: _delete_cached_contents(list(patchset.patches)))
issue.calculate_updates_for()
issue.put()
return HttpResponseRedirect(reverse(show, args=[issue.key.id()]))
def _delete_cached_contents(patch_list):
"""Transactional helper for edit() to delete cached contents."""
# TODO(guido): No need to do this in a transaction.
patches = []
content_keys = []
for patch in patch_list:
try:
content_key = patch.content_key
except db.Error:
content_key = None
try:
patched_content_key = patch.patched_content_key
except db.Error:
patched_content_key = None
if content_key is not None:
content_keys.append(content_key)
if patched_content_key is not None:
content_keys.append(patched_content_key)
patch.content_key = None
patch.patched_content_key = None
patches.append(patch)
if content_keys:
logging.info("Deleting %d contents", len(content_keys))
ndb.delete_multi(content_keys)
if patches:
logging.info("Updating %d patches", len(patches))
ndb.put_multi(patches)
@deco.require_methods('POST')
@deco.issue_editor_required
@deco.xsrf_required
def delete(request):
"""/<issue>/delete - Delete an issue. There is no way back."""
issue = request.issue
tbd = [issue]
for cls in [models.PatchSet, models.Patch, models.Comment,
models.Message, models.Content]:
tbd += cls.query(ancestor=issue.key)
ndb.delete_multi(entity.key for entity in tbd)
return HttpResponseRedirect(reverse(mine))
@deco.require_methods('POST')
@deco.patchset_editor_required
@deco.xsrf_required
def delete_patchset(request):
"""/<issue>/patch/<patchset>/delete - Delete a patchset.
There is no way back.
"""
request.patchset.nuke()
return HttpResponseRedirect(reverse(show, args=[request.issue.key.id()]))
@deco.require_methods('POST')
@deco.issue_editor_required
@deco.xsrf_required
def close(request):
"""/<issue>/close - Close an issue."""
issue = request.issue
issue.closed = True
if request.method == 'POST':
new_description = request.POST.get('description')
if new_description:
issue.description = new_description
issue.put()
return HttpTextResponse('Closed')
@deco.require_methods('POST')
@deco.issue_required
@deco.upload_required
def mailissue(request):
"""/<issue>/mail - Send mail for an issue.
This URL is deprecated and shouldn't be used anymore. However,
  older versions of upload.py or wrapper scripts may still use it.
"""
if not request.issue.edit_allowed:
if not IS_DEV:
return HttpTextResponse('Login required', status=401)
issue = request.issue
msg = _make_message(request, issue, '', '', True)
issue.put()
msg.put()
return HttpTextResponse('OK')
@deco.access_control_allow_origin_star
@deco.patchset_required
def download(request):
"""/download/<issue>_<patchset>.diff - Download a patch set."""
if request.patchset.data is None:
return HttpTextResponse(
'Patch set (%s) is too large.' % request.patchset.key.id(),
status=404)
padding = ''
user_agent = request.META.get('HTTP_USER_AGENT')
if user_agent and 'MSIE' in user_agent:
# Add 256+ bytes of padding to prevent XSS attacks on Internet Explorer.
padding = ('='*67 + '\n') * 4
return HttpTextResponse(padding + request.patchset.data)
@deco.patchset_required
def tarball(request):
"""/tarball/<issue>/<patchset>/[lr] - Returns a .tar.bz2 file
containing a/ and b/ trees of the complete files for the entire patchset."""
patches = (models.Patch
.query(models.Patch.patchset_key == request.patchset.key)
.order(models.Patch.filename)
.fetch(1000))
temp = tempfile.TemporaryFile()
tar = tarfile.open(mode="w|bz2", fileobj=temp)
def add_entry(prefix, content):
data = content.data
if data is None:
data = content.text
if isinstance(data, unicode):
data = data.encode("utf-8", "replace")
if data is None:
return
info = tarfile.TarInfo(prefix + patch.filename)
info.size = len(data)
# TODO(adonovan): set SYMTYPE/0755 when Rietveld supports symlinks.
info.type = tarfile.REGTYPE
info.mode = 0644
# datetime->time_t
delta = request.patchset.modified - datetime.datetime(1970, 1, 1)
info.mtime = int(delta.days * 86400 + delta.seconds)
tar.addfile(info, fileobj=StringIO(data))
for patch in patches:
if not patch.no_base_file:
try:
add_entry('a/', patch.get_content()) # before
except FetchError: # I/O problem?
logging.exception('tarball: patch(%s, %s).get_content failed' %
(patch.key.id(), patch.filename))
try:
add_entry('b/', patch.get_patched_content()) # after
except FetchError: # file deletion? I/O problem?
logging.exception('tarball: patch(%s, %s).get_patched_content failed' %
(patch.key.id(), patch.filename))
tar.close()
temp.flush()
wrapper = FileWrapper(temp)
response = HttpResponse(wrapper, mimetype='application/x-gtar')
response['Content-Disposition'] = (
'attachment; filename=patch%s_%s.tar.bz2' % (request.issue.key.id(),
request.patchset.key.id()))
response['Content-Length'] = temp.tell()
temp.seek(0)
return response
@deco.issue_required
@deco.upload_required
def description(request):
"""/<issue>/description - Gets/Sets an issue's description.
Used by upload.py or similar scripts.
"""
if request.method != 'POST':
description = request.issue.description or ""
return HttpTextResponse(description)
if not request.issue.edit_allowed:
if not IS_DEV:
return HttpTextResponse('Login required', status=401)
issue = request.issue
issue.description = request.POST.get('description')
issue.put()
return HttpTextResponse('')
@deco.issue_required
@deco.upload_required
@deco.json_response
def fields(request):
"""/<issue>/fields - Gets/Sets fields on the issue.
Used by upload.py or similar scripts for partial updates of the issue
  without a patchset.
"""
# Only recognizes a few fields for now.
if request.method != 'POST':
fields = request.GET.getlist('field')
response = {}
if 'reviewers' in fields:
response['reviewers'] = request.issue.reviewers or []
if 'description' in fields:
response['description'] = request.issue.description
if 'subject' in fields:
response['subject'] = request.issue.subject
return response
if not request.issue.edit_allowed:
if not IS_DEV:
return HttpTextResponse('Login required', status=401)
fields = json.loads(request.POST.get('fields'))
issue = request.issue
if 'description' in fields:
issue.description = fields['description']
if 'reviewers' in fields:
issue.reviewers = _get_emails_from_raw(fields['reviewers'])
issue.calculate_updates_for()
if 'subject' in fields:
issue.subject = fields['subject']
issue.put()
return HttpTextResponse('')
@deco.patch_required
def patch(request):
"""/<issue>/patch/<patchset>/<patch> - View a raw patch."""
return patch_helper(request)
def patch_helper(request, nav_type='patch'):
"""Returns a unified diff.
Args:
request: Django Request object.
nav_type: the navigation used in the url (i.e. patch/diff/diff2). Normally
the user looks at either unified or side-by-side diffs at one time, going
through all the files in the same mode. However, if side-by-side is not
      available for some files, we temporarily switch them to unified view, then
switch them back when we can. This way they don't miss any files.
Returns:
Whatever respond() returns.
"""
_add_next_prev(request.patchset, request.patch)
request.patch.nav_type = nav_type
parsed_lines = patching.ParsePatchToLines(request.patch.lines)
if parsed_lines is None:
return HttpTextResponse('Can\'t parse the patch to lines', status=404)
rows = engine.RenderUnifiedTableRows(request, parsed_lines)
return respond(request, 'patch.html',
{'patch': request.patch,
'patchset': request.patchset,
'view_style': 'patch',
'rows': rows,
'issue': request.issue,
'context': _clean_int(request.GET.get('context'), -1),
'column_width': _clean_int(request.GET.get('column_width'),
None),
})
@deco.access_control_allow_origin_star
@deco.image_required
def image(request):
"""/<issue>/content/<patchset>/<patch>/<content> - Return patch's content."""
response = HttpResponse(request.content.data, content_type=request.mime_type)
filename = re.sub(
r'[^\w\.]', '_', request.patch.filename.encode('ascii', 'replace'))
response['Content-Disposition'] = 'attachment; filename="%s"' % filename
response['Cache-Control'] = 'no-cache, no-store'
return response
@deco.access_control_allow_origin_star
@deco.patch_required
def download_patch(request):
"""/download/issue<issue>_<patchset>_<patch>.diff - Download patch."""
return HttpTextResponse(request.patch.text)
def _issue_as_dict(issue, messages, request=None):
"""Converts an issue into a dict."""
values = {
'owner': library.get_nickname(issue.owner, True, request),
'owner_email': issue.owner.email(),
'modified': str(issue.modified),
'created': str(issue.created),
'closed': issue.closed,
'cc': issue.cc,
'reviewers': issue.reviewers,
'patchsets': [p.key.id() for p in issue.patchsets],
'description': issue.description,
'subject': issue.subject,
'project': issue.project,
'issue': issue.key.id(),
'base_url': issue.base,
'private': issue.private,
}
if messages:
values['messages'] = sorted(
({
'sender': m.sender,
'recipients': m.recipients,
'date': str(m.date),
'text': m.text,
'approval': m.approval,
'disapproval': m.disapproval,
}
for m in models.Message.query(ancestor=issue.key)),
key=lambda x: x['date'])
return values
def _patchset_as_dict(patchset, comments, request):
"""Converts a patchset into a dict."""
issue = patchset.issue_key.get()
values = {
'patchset': patchset.key.id(),
'issue': issue.key.id(),
'owner': library.get_nickname(issue.owner, True, request),
'owner_email': issue.owner.email(),
'message': patchset.message,
'url': patchset.url,
'created': str(patchset.created),
'modified': str(patchset.modified),
'num_comments': patchset.num_comments,
'files': {},
}
for patch in models.Patch.query(models.Patch.patchset_key == patchset.key):
    # num_comments and num_drafts are left out for performance reasons:
# they cause a datastore query on first access. They could be added
# optionally if the need ever arises.
values['files'][patch.filename] = {
'id': patch.key.id(),
'is_binary': patch.is_binary,
'no_base_file': patch.no_base_file,
'num_added': patch.num_added,
'num_chunks': patch.num_chunks,
'num_removed': patch.num_removed,
'status': patch.status,
'property_changes': '\n'.join(patch.property_changes),
}
if comments:
visible_comments = []
requester_email = request.user.email() if request.user else 'no email'
query = (models.Comment
.query(models.Comment.patch_key == patch.key)
.order(models.Comment.date))
for c in query:
if not c.draft or requester_email == c.author.email():
visible_comments.append({
'author': library.get_nickname(c.author, True, request),
'author_email': c.author.email(),
'date': str(c.date),
'lineno': c.lineno,
'text': c.text,
'left': c.left,
'draft': c.draft,
'message_id': c.message_id,
})
values['files'][patch.filename]['messages'] = visible_comments
return values
@deco.access_control_allow_origin_star
@deco.issue_required
@deco.json_response
def api_issue(request):
"""/api/<issue> - Gets issue's data as a JSON-encoded dictionary."""
messages = request.GET.get('messages', 'false').lower() == 'true'
values = _issue_as_dict(request.issue, messages, request)
return values
@deco.access_control_allow_origin_star
@deco.patchset_required
@deco.json_response
def api_patchset(request):
"""/api/<issue>/<patchset> - Gets an issue's patchset data as a JSON-encoded
dictionary.
"""
comments = request.GET.get('comments', 'false').lower() == 'true'
values = _patchset_as_dict(request.patchset, comments, request)
return values
def _get_context_for_user(request):
"""Returns the context setting for a user.
The value is validated against models.CONTEXT_CHOICES.
If an invalid value is found, the value is overwritten with
django_settings.DEFAULT_CONTEXT.
"""
get_param = request.GET.get('context') or None
if 'context' in request.GET and get_param is None:
# User wants to see whole file. No further processing is needed.
return get_param
if request.user:
account = models.Account.current_user_account
default_context = account.default_context
else:
default_context = django_settings.DEFAULT_CONTEXT
context = _clean_int(get_param, default_context)
if context is not None and context not in models.CONTEXT_CHOICES:
context = django_settings.DEFAULT_CONTEXT
return context
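# For illustration: '?context=' (present but empty) means "whole file" and
# returns None, '?context=10' returns 10 provided 10 is one of
# models.CONTEXT_CHOICES (otherwise the default is used), and an absent
# parameter falls back to the account's default context, or
# django_settings.DEFAULT_CONTEXT when logged out.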
def _get_column_width_for_user(request):
"""Returns the column width setting for a user."""
if request.user:
account = models.Account.current_user_account
default_column_width = account.default_column_width
else:
default_column_width = django_settings.DEFAULT_COLUMN_WIDTH
column_width = _clean_int(request.GET.get('column_width'),
default_column_width,
django_settings.MIN_COLUMN_WIDTH,
django_settings.MAX_COLUMN_WIDTH)
return column_width
@deco.patch_filename_required
def diff(request):
"""/<issue>/diff/<patchset>/<patch> - View a patch as a side-by-side diff"""
if request.patch.no_base_file:
# Can't show side-by-side diff since we don't have the base file. Show the
# unified diff instead.
return patch_helper(request, 'diff')
patchset = request.patchset
patch = request.patch
patchsets = list(request.issue.patchsets)
context = _get_context_for_user(request)
column_width = _get_column_width_for_user(request)
if patch.is_binary:
rows = None
else:
try:
rows = _get_diff_table_rows(request, patch, context, column_width)
except FetchError as err:
return HttpTextResponse(str(err), status=404)
_add_next_prev(patchset, patch)
return respond(request, 'diff.html',
{'issue': request.issue,
'patchset': patchset,
'patch': patch,
'view_style': 'diff',
'rows': rows,
'context': context,
'context_values': models.CONTEXT_CHOICES,
'column_width': column_width,
'patchsets': patchsets,
})
def _get_diff_table_rows(request, patch, context, column_width):
"""Helper function that returns rendered rows for a patch.
Raises:
FetchError if patch parsing or download of base files fails.
"""
chunks = patching.ParsePatchToChunks(patch.lines, patch.filename)
if chunks is None:
raise FetchError('Can\'t parse the patch to chunks')
# Possible FetchErrors are handled in diff() and diff_skipped_lines().
content = request.patch.get_content()
rows = list(engine.RenderDiffTableRows(request, content.lines,
chunks, patch,
context=context,
colwidth=column_width))
if rows and rows[-1] is None:
del rows[-1]
# Get rid of content, which may be bad
  if content.is_uploaded and content.text is not None:
# Don't delete uploaded content, otherwise get_content()
# will fetch it.
content.is_bad = True
content.text = None
content.put()
else:
content.key.delete()
request.patch.content_key = None
request.patch.put()
return rows
@deco.patch_required
@deco.json_response
def diff_skipped_lines(request, id_before, id_after, where, column_width):
"""/<issue>/diff/<patchset>/<patch> - Returns a fragment of skipped lines.
*where* indicates which lines should be expanded:
'b' - move marker line to bottom and expand above
't' - move marker line to top and expand below
'a' - expand all skipped lines
"""
patch = request.patch
if where == 'a':
context = None
else:
context = _get_context_for_user(request) or 100
column_width = _clean_int(column_width, django_settings.DEFAULT_COLUMN_WIDTH,
django_settings.MIN_COLUMN_WIDTH,
django_settings.MAX_COLUMN_WIDTH)
try:
rows = _get_diff_table_rows(request, patch, None, column_width)
except FetchError as err:
return HttpTextResponse('Error: %s; please report!' % err, status=500)
return _get_skipped_lines_response(rows, id_before, id_after, where, context)
# there's no easy way to put a control character into a regex, so brute-force it
# this is all control characters except \r, \n, and \t
_badchars_re = re.compile(
r'[\000\001\002\003\004\005\006\007\010\013\014\016\017'
r'\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037]')
def _strip_invalid_xml(s):
"""Remove control chars other than \r\n\t from a string to be put in XML."""
if _badchars_re.search(s):
return ''.join(c for c in s if c >= ' ' or c in '\r\n\t')
else:
return s
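# For illustration, _strip_invalid_xml('<td>a\x00b</td>') returns
# '<td>ab</td>', while text containing only \r, \n, \t and printable
# characters is returned unchanged.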
def _get_skipped_lines_response(rows, id_before, id_after, where, context):
"""Helper function that returns response data for skipped lines"""
response_rows = []
id_before_start = int(id_before)
id_after_end = int(id_after)
if context is not None:
id_before_end = id_before_start+context
id_after_start = id_after_end-context
else:
id_before_end = id_after_start = None
for row in rows:
    m = re.match(r'^<tr( name="hook")? id="pair-(?P<rowcount>\d+)">', row)
if m:
curr_id = int(m.groupdict().get("rowcount"))
# expand below marker line
if (where == 'b'
and curr_id > id_after_start and curr_id <= id_after_end):
response_rows.append(row)
# expand above marker line
elif (where == 't'
and curr_id >= id_before_start and curr_id < id_before_end):
response_rows.append(row)
# expand all skipped lines
elif (where == 'a'
and curr_id >= id_before_start and curr_id <= id_after_end):
response_rows.append(row)
if context is not None and len(response_rows) >= 2*context:
break
# Create a usable structure for the JS part
response = []
response_rows = [_strip_invalid_xml(r) for r in response_rows]
dom = ElementTree.parse(StringIO('<div>%s</div>' % "".join(response_rows)))
for node in dom.getroot().getchildren():
content = [[x.items(), x.text] for x in node.getchildren()]
response.append([node.items(), content])
return response
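# For illustration, with id_before=100, id_after=200 and context=10:
# where='b' returns rows whose pair id lies in (190, 200], where='t'
# returns ids in [100, 110), and where='a' returns everything in
# [100, 200]; at most 2*context rows are collected when a context is given.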
def _get_diff2_data(request, ps_left_id, ps_right_id, patch_id, context,
column_width, patch_filename=None):
"""Helper function that returns objects for diff2 views"""
ps_left = models.PatchSet.get_by_id(int(ps_left_id), parent=request.issue.key)
if ps_left is None:
return HttpTextResponse(
'No patch set exists with that id (%s)' % ps_left_id, status=404)
ps_left.issue_key = request.issue.key
ps_right = models.PatchSet.get_by_id(
int(ps_right_id), parent=request.issue.key)
if ps_right is None:
return HttpTextResponse(
'No patch set exists with that id (%s)' % ps_right_id, status=404)
ps_right.issue_key = request.issue.key
if patch_id is not None:
patch_right = models.Patch.get_by_id(int(patch_id), parent=ps_right.key)
else:
patch_right = None
if patch_right is not None:
patch_right.patchset_key = ps_right.key
if patch_filename is None:
patch_filename = patch_right.filename
# Now find the corresponding patch in ps_left
patch_left = models.Patch.query(
models.Patch.patchset_key == ps_left.key,
models.Patch.filename == patch_filename).get()
if patch_left:
try:
new_content_left = patch_left.get_patched_content()
except FetchError as err:
return HttpTextResponse(str(err), status=404)
lines_left = new_content_left.lines
elif patch_right:
lines_left = patch_right.get_content().lines
else:
lines_left = []
if patch_right:
try:
new_content_right = patch_right.get_patched_content()
except FetchError as err:
return HttpTextResponse(str(err), status=404)
lines_right = new_content_right.lines
elif patch_left:
lines_right = patch_left.get_content().lines
else:
lines_right = []
rows = engine.RenderDiff2TableRows(request,
lines_left, patch_left,
lines_right, patch_right,
context=context,
colwidth=column_width)
rows = list(rows)
if rows and rows[-1] is None:
del rows[-1]
return dict(patch_left=patch_left, patch_right=patch_right,
ps_left=ps_left, ps_right=ps_right, rows=rows)
@deco.issue_required
def diff2(request, ps_left_id, ps_right_id, patch_filename):
"""/<issue>/diff2/... - View the delta between two different patch sets."""
context = _get_context_for_user(request)
column_width = _get_column_width_for_user(request)
ps_right = models.PatchSet.get_by_id(
int(ps_right_id), parent=request.issue.key)
patch_right = None
if ps_right:
patch_right = models.Patch.query(
models.Patch.patchset_key == ps_right.key,
models.Patch.filename == patch_filename).get()
if patch_right:
patch_id = patch_right.key.id()
elif patch_filename.isdigit():
# Perhaps it's an ID that's passed in, based on the old URL scheme.
patch_id = int(patch_filename)
else: # patch doesn't exist in this patchset
patch_id = None
data = _get_diff2_data(request, ps_left_id, ps_right_id, patch_id, context,
column_width, patch_filename)
if isinstance(data, HttpResponse) and data.status_code != 302:
return data
patchsets = list(request.issue.patchsets)
if data["patch_right"]:
_add_next_prev2(data["ps_left"], data["ps_right"], data["patch_right"])
return respond(request, 'diff2.html',
{'issue': request.issue,
'ps_left': data["ps_left"],
'patch_left': data["patch_left"],
'ps_right': data["ps_right"],
'patch_right': data["patch_right"],
'rows': data["rows"],
'patch_id': patch_id,
'context': context,
'context_values': models.CONTEXT_CHOICES,
'column_width': column_width,
'patchsets': patchsets,
'filename': patch_filename,
})
@deco.issue_required
@deco.json_response
def diff2_skipped_lines(request, ps_left_id, ps_right_id, patch_id,
id_before, id_after, where, column_width):
"""/<issue>/diff2/... - Returns a fragment of skipped lines"""
column_width = _clean_int(column_width, django_settings.DEFAULT_COLUMN_WIDTH,
django_settings.MIN_COLUMN_WIDTH,
django_settings.MAX_COLUMN_WIDTH)
if where == 'a':
context = None
else:
context = _get_context_for_user(request) or 100
data = _get_diff2_data(request, ps_left_id, ps_right_id, patch_id, 10000,
column_width)
if isinstance(data, HttpResponse) and data.status_code != 302:
return data
return _get_skipped_lines_response(data["rows"], id_before, id_after,
where, context)
def _get_comment_counts(account, patchset):
"""Helper to get comment counts for all patches in a single query.
The helper returns two dictionaries, comments_by_patch and
drafts_by_patch, keyed by patch key with the comment count as the
value. Patches without comments or drafts are not present in those
dictionaries.
"""
# A key-only query won't work because we need to fetch the patch key
# in the for loop further down.
comment_query = models.Comment.query(ancestor=patchset.key)
# Get all comment counts with one query rather than one per patch.
comments_by_patch = {}
drafts_by_patch = {}
for c in comment_query:
pkey = c.patch_key
if not c.draft:
comments_by_patch[pkey] = comments_by_patch.setdefault(pkey, 0) + 1
elif account and c.author == account.user:
drafts_by_patch[pkey] = drafts_by_patch.setdefault(pkey, 0) + 1
return comments_by_patch, drafts_by_patch
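# Illustrative result of the helper above: both dictionaries map Patch keys
# to counts, e.g. comments_by_patch == {some_patch_key: 3}; draft counts are
# only collected for the signed-in account, and patches without comments or
# drafts have no entry at all.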
def _add_next_prev(patchset, patch):
"""Helper to add .next and .prev attributes to a patch object."""
patch.prev = patch.next = None
patches = list(patchset.patches)
patchset.patches_cache = patches # Required to render the jump to select.
comments_by_patch, drafts_by_patch = _get_comment_counts(
models.Account.current_user_account, patchset)
last_patch = None
next_patch = None
last_patch_with_comment = None
next_patch_with_comment = None
found_patch = False
for p in patches:
if p.filename == patch.filename:
found_patch = True
continue
p._num_comments = comments_by_patch.get(p.key, 0)
p._num_drafts = drafts_by_patch.get(p.key, 0)
if not found_patch:
last_patch = p
if p.num_comments > 0 or p.num_drafts > 0:
last_patch_with_comment = p
else:
if next_patch is None:
next_patch = p
if p.num_comments > 0 or p.num_drafts > 0:
next_patch_with_comment = p
# safe to stop scanning now because the next patch without a comment
# will already have been filled in by some earlier patch
break
patch.prev = last_patch
patch.next = next_patch
patch.prev_with_comment = last_patch_with_comment
patch.next_with_comment = next_patch_with_comment
def _add_next_prev2(ps_left, ps_right, patch_right):
"""Helper to add .next and .prev attributes to a patch object."""
patch_right.prev = patch_right.next = None
patches = list(ps_right.patches)
ps_right.patches_cache = patches # Required to render the jump to select.
n_comments, n_drafts = _get_comment_counts(
models.Account.current_user_account, ps_right)
last_patch = None
next_patch = None
last_patch_with_comment = None
next_patch_with_comment = None
found_patch = False
for p in patches:
if p.filename == patch_right.filename:
found_patch = True
continue
p._num_comments = n_comments.get(p.key, 0)
p._num_drafts = n_drafts.get(p.key, 0)
if not found_patch:
last_patch = p
if ((p.num_comments > 0 or p.num_drafts > 0) and
ps_left.key.id() in p.delta):
last_patch_with_comment = p
else:
if next_patch is None:
next_patch = p
if ((p.num_comments > 0 or p.num_drafts > 0) and
ps_left.key.id() in p.delta):
next_patch_with_comment = p
# safe to stop scanning now because the next patch without a comment
# will already have been filled in by some earlier patch
break
patch_right.prev = last_patch
patch_right.next = next_patch
patch_right.prev_with_comment = last_patch_with_comment
patch_right.next_with_comment = next_patch_with_comment
def _add_or_update_comment(user, issue, patch, lineno, left, text, message_id):
comment = None
if message_id:
comment = models.Comment.get_by_id(message_id, parent=patch.key)
if comment is None or not comment.draft or comment.author != user:
comment = None
message_id = None
if not message_id:
# Prefix with 'z' to avoid key names starting with digits.
message_id = 'z' + binascii.hexlify(_random_bytes(16))
if not text.rstrip():
if comment is not None:
assert comment.draft and comment.author == user
comment.key.delete() # Deletion
comment = None
# Re-query the comment count.
models.Account.current_user_account.update_drafts(issue)
else:
if comment is None:
comment = models.Comment(id=message_id, parent=patch.key)
comment.patch_key = patch.key
comment.lineno = lineno
comment.left = left
comment.text = text
comment.message_id = message_id
comment.put()
# The actual count doesn't matter, just that there's at least one.
models.Account.current_user_account.update_drafts(issue, 1)
return comment
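# Summary of the helper above: whitespace-only text deletes an existing
# draft and returns None; otherwise a draft is created or updated under an
# id of 'z' plus 32 hex characters, and the account's draft bookkeeping is
# refreshed in both cases.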
@deco.login_required
@deco.patchset_required
@deco.require_methods('POST')
@deco.json_response
def api_draft_comments(request):
"""/api/<issue>/<patchset>/draft_comments - Store a number of draft
comments for a particular issue and patchset.
This API differs from inline_draft in two ways:
1) api_draft_comments handles multiple comments at once so that
clients can upload draft comments in bulk.
2) api_draft_comments returns a response in JSON rather than
in HTML, which lets clients process the response programmatically.
Note: creating or editing draft comments is *not* XSRF-protected,
because it is not unusual to come back after hours; the XSRF tokens
time out after 1 or 2 hours. The final submit of the drafts for
others to view *is* XSRF-protected.
"""
try:
def sanitize(comment):
patch = models.Patch.get_by_id(int(comment['patch_id']),
parent=request.patchset.key)
assert patch is not None
message_id = (str(comment['message_id'])
if 'message_id' in comment else None)
return {
'user': request.user,
'issue': request.issue,
'patch': patch,
'lineno': int(comment['lineno']),
'left': bool(comment['left']),
'text': str(comment['text']),
'message_id': message_id,
}
return [
{'message_id': _add_or_update_comment(**comment).message_id}
for comment in map(sanitize, json.load(request.data))
]
except Exception:
logging.exception('Exception in api_draft_comments processing:')
return HttpTextResponse('An error occurred.', status=500)
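# Illustrative request body for the API above (field names as read by the
# sanitize() helper); the response carries one {'message_id': ...} entry per
# stored draft:
#
#   [{"patch_id": 42, "lineno": 10, "left": false,
#     "text": "Nit: rename this.", "message_id": "z<32 hex chars>"}]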
@deco.require_methods('POST')
def inline_draft(request):
"""/inline_draft - Ajax handler to submit an in-line draft comment.
This wraps _inline_draft(); all exceptions are logged and cause an
abbreviated response indicating something went wrong.
Note: creating or editing draft comments is *not* XSRF-protected,
because it is not unusual to come back after hours; the XSRF tokens
time out after 1 or 2 hours. The final submit of the drafts for
others to view *is* XSRF-protected.
"""
try:
return _inline_draft(request)
except Exception as err:
logging.exception('Exception in inline_draft processing:')
# TODO(guido): return some kind of error instead?
# Return HttpResponse for now because the JS part expects
# a 200 status code.
return HttpHtmlResponse(
'<font color="red">Error: %s; please report!</font>' %
err.__class__.__name__)
def _inline_draft(request):
"""Helper to submit an in-line draft comment."""
# TODO(guido): turn asserts marked with XXX into errors
# Don't use @login_required, since the JS doesn't understand redirects.
if not request.user:
# Don't log this, spammers have started abusing this.
return HttpTextResponse('Not logged in')
snapshot = request.POST.get('snapshot')
assert snapshot in ('old', 'new'), repr(snapshot)
left = (snapshot == 'old')
side = request.POST.get('side')
assert side in ('a', 'b'), repr(side) # Display left (a) or right (b)
issue_id = int(request.POST['issue'])
issue = models.Issue.get_by_id(issue_id)
assert issue # XXX
patchset_id = int(request.POST.get('patchset') or
request.POST[side == 'a' and 'ps_left' or 'ps_right'])
patchset = models.PatchSet.get_by_id(int(patchset_id), parent=issue.key)
assert patchset # XXX
patch_id = int(request.POST.get('patch') or
request.POST[side == 'a' and 'patch_left' or 'patch_right'])
patch = models.Patch.get_by_id(int(patch_id), parent=patchset.key)
assert patch # XXX
text = request.POST.get('text')
lineno = int(request.POST['lineno'])
message_id = request.POST.get('message_id')
comment = _add_or_update_comment(user=request.user, issue=issue, patch=patch,
lineno=lineno, left=left,
text=text, message_id=message_id)
issue.calculate_draft_count_by_user()
issue_fut = issue.put_async()
query = models.Comment.query(
models.Comment.patch_key == patch.key, models.Comment.lineno == lineno,
models.Comment.left == left).order(models.Comment.date)
comments = list(c for c in query if not c.draft or c.author == request.user)
if comment is not None and comment.author is None:
# Show anonymous draft even though we don't save it
comments.append(comment)
issue_fut.get_result()
if not comments:
return HttpTextResponse(' ')
for c in comments:
c.complete()
return render_to_response('inline_comment.html',
{'user': request.user,
'patch': patch,
'patchset': patchset,
'issue': issue,
'comments': comments,
'lineno': lineno,
'snapshot': snapshot,
'side': side,
},
context_instance=RequestContext(request))
def _get_affected_files(issue, full_diff=False):
"""Helper to return a list of affected files from the latest patchset.
Args:
issue: Issue instance.
full_diff: If true, include the entire diff even if it exceeds 100 lines.
Returns:
2-tuple containing a list of affected files, and the diff contents if it
is less than 100 lines (otherwise the second item is an empty string).
"""
files = []
modified_count = 0
diff = ''
patchsets = list(issue.patchsets)
if len(patchsets):
patchset = patchsets[-1]
for patch in patchset.patches:
file_str = ''
if patch.status:
file_str += patch.status + ' '
file_str += patch.filename
files.append(file_str)
# No point in loading patches if the patchset is too large for email.
if full_diff or modified_count < 100:
modified_count += patch.num_added + patch.num_removed
if full_diff or modified_count < 100:
diff = patchset.data
return files, diff
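# Illustrative shape of the first return value above: each entry is
# '<status> <filename>' when the patch carries a status, e.g.
# 'M codereview/views.py', and just the filename otherwise.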
def _get_mail_template(request, issue, full_diff=False):
"""Helper to return the template and context for an email.
If this is the first email sent by the owner, a template that lists the
reviewers, description and files is used.
"""
context = {}
template = 'mails/comment.txt'
if request.user == issue.owner:
query = models.Message.query(
models.Message.sender == request.user.email(), ancestor=issue.key)
if query.count(1) == 0:
template = 'mails/review.txt'
files, patch = _get_affected_files(issue, full_diff)
context.update({'files': files, 'patch': patch, 'base': issue.base})
return template, context
@deco.login_required
@deco.issue_required
@deco.xsrf_required
def publish(request):
""" /<issue>/publish - Publish draft comments and send mail."""
issue = request.issue
if issue.edit_allowed:
form_class = PublishForm
else:
form_class = MiniPublishForm
draft_message = None
if not request.POST.get('message_only', None):
query = models.Message.query(
models.Message.issue_key == issue.key,
models.Message.sender == request.user.email(),
models.Message.draft == True)
draft_message = query.get()
if request.method != 'POST':
reviewers = issue.reviewers[:]
cc = issue.cc[:]
if (request.user != issue.owner and
request.user.email() not in issue.reviewers and
not issue.is_collaborator(request.user)):
reviewers.append(request.user.email())
if request.user.email() in cc:
cc.remove(request.user.email())
reviewers = [models.Account.get_nickname_for_email(reviewer,
default=reviewer)
for reviewer in reviewers]
ccs = [models.Account.get_nickname_for_email(addr, default=addr)
for addr in cc]
tbd, comments = _get_draft_comments(request, issue, True)
preview = _get_draft_details(request, comments)
if draft_message is None:
msg = ''
else:
msg = draft_message.text
form = form_class(initial={'subject': issue.subject,
'reviewers': ', '.join(reviewers),
'cc': ', '.join(ccs),
'send_mail': True,
'message': msg,
})
return respond(request, 'publish.html', {'form': form,
'issue': issue,
'preview': preview,
'draft_message': draft_message,
})
# Supply subject so that if this is a bare request to /publish, it won't
# fail out if we've selected PublishForm (which requires a subject).
augmented_POST = request.POST.copy()
if issue.subject:
augmented_POST.setdefault('subject', issue.subject)
form = form_class(augmented_POST)
# If the user is blocked, intentionally redirect them to the form again to
# confuse them.
account = models.Account.get_account_for_user(request.user)
if account.blocked or not form.is_valid():
return respond(request, 'publish.html', {'form': form, 'issue': issue})
if issue.edit_allowed:
issue.subject = form.cleaned_data['subject']
if form.is_valid() and not form.cleaned_data.get('message_only', False):
reviewers = _get_emails(form, 'reviewers')
else:
reviewers = issue.reviewers
if (request.user != issue.owner and
request.user.email() not in reviewers and
not issue.is_collaborator(request.user)):
reviewers.append(db.Email(request.user.email()))
if form.is_valid() and not form.cleaned_data.get('message_only', False):
cc = _get_emails(form, 'cc')
else:
cc = issue.cc
# The user is in the reviewer list, remove them from CC if they're there.
if request.user.email() in cc:
cc.remove(request.user.email())
if not form.is_valid():
return respond(request, 'publish.html', {'form': form, 'issue': issue})
issue.reviewers = reviewers
issue.cc = cc
if not form.cleaned_data.get('message_only', False):
tbd, comments = _get_draft_comments(request, issue)
else:
tbd = []
comments = []
issue.update_comment_count(len(comments))
tbd.append(issue)
if comments:
logging.warn('Publishing %d comments', len(comments))
msg = _make_message(request, issue,
form.cleaned_data['message'],
comments,
form.cleaned_data['send_mail'],
draft=draft_message,
in_reply_to=form.cleaned_data.get('in_reply_to'))
tbd.append(msg)
for obj in tbd:
obj.put()
# There are now no comments here (modulo race conditions)
models.Account.current_user_account.update_drafts(issue, 0)
if form.cleaned_data.get('no_redirect', False):
return HttpTextResponse('OK')
return HttpResponseRedirect(reverse(show, args=[issue.key.id()]))
@deco.login_required
@deco.issue_required
@deco.xsrf_required
def delete_drafts(request):
"""Deletes all drafts of the current user for an issue."""
query = models.Comment.query(
models.Comment.author == request.user, models.Comment.draft == True,
ancestor=request.issue.key)
keys = query.fetch(keys_only=True)
ndb.delete_multi(keys)
request.issue.calculate_draft_count_by_user()
request.issue.put()
return HttpResponseRedirect(
reverse(publish, args=[request.issue.key.id()]))
def _encode_safely(s):
"""Helper to turn a unicode string into 8-bit bytes."""
if isinstance(s, unicode):
s = s.encode('utf-8')
return s
def _get_draft_comments(request, issue, preview=False):
"""Helper to return objects to put() and a list of draft comments.
If preview is True, the list of objects to put() is empty to avoid changes
to the datastore.
Args:
request: Django Request object.
issue: Issue instance.
preview: Preview flag (default: False).
Returns:
2-tuple (put_objects, comments).
"""
comments = []
tbd = []
# XXX Should request all drafts for this issue once, now we can.
for patchset in issue.patchsets:
ps_comments = list(models.Comment.query(
models.Comment.author == request.user,
models.Comment.draft == True, ancestor=patchset.key))
if ps_comments:
patches = dict((p.key, p) for p in patchset.patches)
for p in patches.itervalues():
p.patchset_key = patchset.key
for c in ps_comments:
c.draft = False
# Get the patch key value without loading the patch entity.
# NOTE: Unlike the old version of this code, this is the
# recommended and documented way to do this!
pkey = c.patch_key
if pkey in patches:
patch = patches[pkey]
c.patch_key = patch.key
if not preview:
tbd.extend(ps_comments)
patchset.update_comment_count(len(ps_comments))
tbd.append(patchset)
ps_comments.sort(key=lambda c: (c.patch_key.get().filename, not c.left,
c.lineno, c.date))
comments += ps_comments
return tbd, comments
def _patchlines2cache(patchlines, left):
"""Helper that converts return value of ParsePatchToLines for caching.
Each line in patchlines is (old_line_no, new_line_no, line). When the
comment is on the left we key by old_line_no, otherwise by
new_line_no.
"""
if left:
it = ((old, line) for old, _, line in patchlines)
else:
it = ((new, line) for _, new, line in patchlines)
return dict(it)
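# Illustrative example of the helper above: with
# patchlines = [(10, 12, 'some line')], left=True yields {10: 'some line'}
# and left=False yields {12: 'some line'}.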
def _get_draft_details(request, comments):
"""Helper to display comments with context in the email message."""
last_key = None
output = []
linecache = {} # Maps (c.patch_key, c.left) to mapping (lineno, line)
modified_patches = []
fetch_base_failed = False
for c in comments:
patch = c.patch_key.get()
if (patch.key, c.left) != last_key:
url = request.build_absolute_uri(
reverse(diff, args=[request.issue.key.id(),
patch.patchset_key.id(),
patch.filename]))
output.append('\n%s\nFile %s (%s):' % (url, patch.filename,
c.left and "left" or "right"))
last_key = (patch.key, c.left)
if patch.no_base_file:
linecache[last_key] = _patchlines2cache(
patching.ParsePatchToLines(patch.lines), c.left)
else:
try:
if c.left:
old_lines = patch.get_content().text.splitlines(True)
linecache[last_key] = dict(enumerate(old_lines, 1))
else:
new_lines = patch.get_patched_content().text.splitlines(True)
linecache[last_key] = dict(enumerate(new_lines, 1))
except FetchError:
linecache[last_key] = _patchlines2cache(
patching.ParsePatchToLines(patch.lines), c.left)
fetch_base_failed = True
context = linecache[last_key].get(c.lineno, '').strip()
url = request.build_absolute_uri(
'%s#%scode%d' % (reverse(diff, args=[request.issue.key.id(),
patch.patchset_key.id(),
patch.filename]),
c.left and "old" or "new",
c.lineno))
output.append('\n%s\n%s:%d: %s\n%s' % (url, patch.filename, c.lineno,
context, c.text.rstrip()))
if modified_patches:
ndb.put_multi(modified_patches)
return '\n'.join(output)
def _get_modified_counts(issue):
"""Helper to determine the modified line counts of the latest patch set."""
modified_added_count = 0
modified_removed_count = 0
# Count the modified lines in the patchset.
patchsets = list(issue.patchsets)
if patchsets:
for patch in patchsets[-1].patches:
modified_added_count += patch.num_added
modified_removed_count += patch.num_removed
return modified_added_count, modified_removed_count
def _make_message(request, issue, message, comments=None, send_mail=False,
draft=None, in_reply_to=None):
"""Helper to create a Message instance and optionally send an email."""
attach_patch = request.POST.get("attach_patch") == "yes"
template, context = _get_mail_template(request, issue, full_diff=attach_patch)
# Decide who should receive mail
my_email = db.Email(request.user.email())
to = ([db.Email(issue.owner.email())] +
issue.reviewers +
[db.Email(email) for email in issue.collaborator_emails()])
cc = issue.cc[:]
if django_settings.RIETVELD_INCOMING_MAIL_ADDRESS:
cc.append(db.Email(django_settings.RIETVELD_INCOMING_MAIL_ADDRESS))
reply_to = to + cc
if my_email in to and len(to) > 1: # send_mail() wants a non-empty to list
to.remove(my_email)
if my_email in cc:
cc.remove(my_email)
issue_id = issue.key.id()
subject = issue.mail_subject()
patch = None
if attach_patch:
subject = 'PATCH: ' + subject
if 'patch' in context:
patch = context['patch']
del context['patch']
if issue.num_messages:
subject = 'Re: ' + subject
if comments:
details = _get_draft_details(request, comments)
else:
details = ''
message = message.replace('\r\n', '\n')
text = ((message.strip() + '\n\n' + details.strip())).strip()
if draft is None:
msg = models.Message(issue_key=issue.key,
subject=subject,
sender=my_email,
recipients=reply_to,
text=text,
parent=issue.key,
issue_was_closed=issue.closed)
else:
msg = draft
msg.subject = subject
msg.recipients = reply_to
msg.text = text
msg.draft = False
msg.date = datetime.datetime.now()
msg.issue_was_closed = issue.closed
issue.calculate_updates_for(msg)
if in_reply_to:
try:
replied_msg_id = int(in_reply_to)
replied_msg = models.Message.get_by_id(replied_msg_id, parent=issue.key)
msg.in_reply_to_key = replied_msg.key
replied_issue_id = replied_msg.issue_key.id()
if replied_issue_id != issue_id:
logging.warn('In-reply-to Message is for a different issue: '
'%s instead of %s', replied_issue_id, issue_id)
msg.in_reply_to_key = None
except (db.KindError, db.BadKeyError, ValueError):
logging.warn('Invalid in-reply-to Message or key given: %s', in_reply_to)
if send_mail:
# Limit the list of files in the email to approximately 200
if 'files' in context and len(context['files']) > 210:
num_trimmed = len(context['files']) - 200
del context['files'][200:]
context['files'].append('[[ %d additional files ]]' % num_trimmed)
url = request.build_absolute_uri(reverse(show, args=[issue.key.id()]))
reviewer_nicknames = ', '.join(library.get_nickname(rev_temp, True,
request)
for rev_temp in issue.reviewers)
cc_nicknames = ', '.join(library.get_nickname(cc_temp, True, request)
for cc_temp in cc)
my_nickname = library.get_nickname(request.user, True, request)
reply_to = ', '.join(reply_to)
description = (issue.description or '').replace('\r\n', '\n')
home = request.build_absolute_uri(reverse(index))
modified_added_count, modified_removed_count = _get_modified_counts(issue)
context.update({'reviewer_nicknames': reviewer_nicknames,
'cc_nicknames': cc_nicknames,
'my_nickname': my_nickname, 'url': url,
'message': message, 'details': details,
'description': description, 'home': home,
'added_lines' : modified_added_count,
'removed_lines': modified_removed_count,
})
for key, value in context.iteritems():
if isinstance(value, str):
try:
encoding.force_unicode(value)
except UnicodeDecodeError:
logging.error('Key %s is not valid unicode. value: %r' % (key, value))
# The content failed to be decoded as utf-8. Enforce it as ASCII.
context[key] = value.decode('ascii', 'replace')
body = django.template.loader.render_to_string(
template, context, context_instance=RequestContext(request))
logging.warn('Mail: to=%s; cc=%s', ', '.join(to), ', '.join(cc))
send_args = {'sender': my_email,
'to': [_encode_safely(address) for address in to],
'subject': _encode_safely(subject),
'body': _encode_safely(body),
'reply_to': _encode_safely(reply_to)}
if cc:
send_args['cc'] = [_encode_safely(address) for address in cc]
if patch:
send_args['attachments'] = [('issue_%s_patch.diff' % issue.key.id(),
patch)]
attempts = 0
while True:
try:
mail.send_mail(**send_args)
break
except mail.InvalidSenderError:
if django_settings.RIETVELD_INCOMING_MAIL_ADDRESS:
previous_sender = send_args['sender']
if previous_sender not in send_args['to']:
send_args['to'].append(previous_sender)
send_args['sender'] = django_settings.RIETVELD_INCOMING_MAIL_ADDRESS
else:
raise
except apiproxy_errors.DeadlineExceededError:
# apiproxy_errors.DeadlineExceededError is raised when the
# deadline of an API call is reached (for mail it's roughly
# 5 seconds). It's not the same as the lethal
# runtime.DeadlineExceededError.
attempts += 1
if attempts >= 3:
raise
if attempts:
logging.warning("Retried sending email %s times", attempts)
return msg
@deco.require_methods('POST')
@deco.login_required
@deco.xsrf_required
@deco.issue_required
def star(request):
"""Add a star to an Issue."""
account = models.Account.current_user_account
account.user_has_selected_nickname() # This will preserve account.fresh.
if account.stars is None:
account.stars = []
keyid = request.issue.key.id()
if keyid not in account.stars:
account.stars.append(keyid)
account.put()
return respond(request, 'issue_star.html', {'issue': request.issue})
@deco.require_methods('POST')
@deco.login_required
@deco.issue_required
@deco.xsrf_required
def unstar(request):
"""Remove the star from an Issue."""
account = models.Account.current_user_account
account.user_has_selected_nickname() # This will preserve account.fresh.
if account.stars is None:
account.stars = []
keyid = request.issue.key.id()
if keyid in account.stars:
account.stars[:] = [i for i in account.stars if i != keyid]
account.put()
return respond(request, 'issue_star.html', {'issue': request.issue})
@deco.login_required
@deco.issue_required
def draft_message(request):
"""/<issue>/draft_message - Retrieve, modify and delete draft messages.
Note: creating or editing draft messages is *not* XSRF-protected,
because it is not unusual to come back after hours; the XSRF tokens
time out after 1 or 2 hours. The final submit of the drafts for
others to view *is* XSRF-protected.
"""
query = models.Message.query(
models.Message.issue_key == request.issue.key,
models.Message.sender == request.user.email(),
models.Message.draft == True)
draft_message = query.get()
if request.method == 'GET':
return _get_draft_message(draft_message)
elif request.method == 'POST':
return _post_draft_message(request, draft_message)
elif request.method == 'DELETE':
return _delete_draft_message(draft_message)
return HttpTextResponse('An error occurred.', status=500)
def _get_draft_message(draft):
"""Handles GET requests to /<issue>/draft_message.
Arguments:
draft: A Message instance or None.
Returns the content of a draft message or an empty string if draft is None.
"""
return HttpTextResponse(draft.text if draft else '')
def _post_draft_message(request, draft):
"""Handles POST requests to /<issue>/draft_message.
If draft is None a new message is created.
Arguments:
request: The current request.
draft: A Message instance or None.
"""
if draft is None:
draft = models.Message(
issue_key=request.issue.key, parent=request.issue.key,
sender=request.user.email(), draft=True)
draft.text = request.POST.get('reviewmsg')
draft.put()
return HttpTextResponse(draft.text)
def _delete_draft_message(draft):
"""Handles DELETE requests to /<issue>/draft_message.
Deletes a draft message.
Arguments:
draft: A Message instance or None.
"""
if draft is not None:
draft.key.delete()
return HttpTextResponse('OK')
@deco.json_response
def search(request):
"""/search - Search for issues or patchset.
Returns HTTP 500 if the corresponding index is missing.
"""
if request.method == 'GET':
form = SearchForm(request.GET)
if not form.is_valid() or not request.GET:
return respond(request, 'search.html', {'form': form})
else:
form = SearchForm(request.POST)
if not form.is_valid():
return HttpTextResponse('Invalid arguments', status=400)
logging.info('%s' % form.cleaned_data)
keys_only = form.cleaned_data['keys_only'] or False
requested_format = form.cleaned_data['format'] or 'html'
limit = form.cleaned_data['limit']
with_messages = form.cleaned_data['with_messages']
if requested_format == 'html':
keys_only = False
limit = limit or DEFAULT_LIMIT
else:
if not limit:
if keys_only:
# It's a fast query.
limit = 1000
elif with_messages:
# It's a heavy query.
limit = 10
else:
limit = 100
q = models.Issue.query(default_options=ndb.QueryOptions(keys_only=keys_only))
encoded_cursor = form.cleaned_data['cursor'] or None
if encoded_cursor:
cursor = datastore_query.Cursor(urlsafe=encoded_cursor)
else:
cursor = None
if form.cleaned_data['closed'] is not None:
q = q.filter(models.Issue.closed == form.cleaned_data['closed'])
if form.cleaned_data['owner']:
q = q.filter(models.Issue.owner == form.cleaned_data['owner'])
if form.cleaned_data['reviewer']:
q = q.filter(models.Issue.reviewers == form.cleaned_data['reviewer'])
if form.cleaned_data['cc']:
q = q.filter(models.Issue.cc == form.cleaned_data['cc'])
if form.cleaned_data['private'] is not None:
q = q.filter(models.Issue.private == form.cleaned_data['private'])
if form.cleaned_data['repo_guid']:
q = q.filter(models.Issue.repo_guid == form.cleaned_data['repo_guid'])
if form.cleaned_data['base']:
q = q.filter(models.Issue.base == form.cleaned_data['base'])
# Calculate a default value depending on the query parameter.
# Prefer sorting by modified date over created date and showing
# newest first over oldest.
default_sort = '-modified'
if form.cleaned_data['created_after']:
q = q.filter(models.Issue.created >= form.cleaned_data['created_after'])
default_sort = 'created'
if form.cleaned_data['modified_after']:
q = q.filter(models.Issue.modified >= form.cleaned_data['modified_after'])
default_sort = 'modified'
if form.cleaned_data['created_before']:
q = q.filter(models.Issue.created < form.cleaned_data['created_before'])
default_sort = '-created'
if form.cleaned_data['modified_before']:
q = q.filter(models.Issue.modified < form.cleaned_data['modified_before'])
default_sort = '-modified'
sorted_by = form.cleaned_data['order'] or default_sort
direction = (
datastore_query.PropertyOrder.DESCENDING
if sorted_by.startswith('-') else datastore_query.PropertyOrder.ASCENDING)
q = q.order(datastore_query.PropertyOrder(sorted_by.lstrip('-'), direction))
# Update the cursor value in the result.
if requested_format == 'html':
nav_params = dict(
(k, v) for k, v in form.cleaned_data.iteritems() if v is not None)
return _paginate_issues_with_cursor(
reverse(search),
request,
q,
cursor,
limit,
'search_results.html',
extra_nav_parameters=nav_params)
# We do not simply use fetch_page() because we do some post-filtering which
# could lead to under-filled pages. Instead, we iterate, filter and keep
# going until we have enough post-filtered results, then return those along
# with the cursor after the last item.
filtered_results = []
next_cursor = None
query_iter = q.iter(start_cursor=cursor, produce_cursors=True)
for result in query_iter:
if keys_only:
# There's not enough information to filter. The only thing that is leaked
# is the issue's key.
filtered_results.append(result)
elif result.view_allowed:
filtered_results.append(result)
if len(filtered_results) >= limit:
break
# If any results are returned, also include a cursor to try to get more.
if filtered_results:
next_cursor = query_iter.cursor_after()
data = {
'cursor': next_cursor.urlsafe() if next_cursor else '',
}
if keys_only:
data['results'] = [i.id() for i in filtered_results]
else:
data['results'] = [_issue_as_dict(i, with_messages, request)
for i in filtered_results]
return data
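# Illustrative JSON payload returned above: {'cursor': '<urlsafe cursor>',
# 'results': [...]}, where results holds bare issue ids for keys_only
# queries and full issue dictionaries (via _issue_as_dict) otherwise.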
### Repositories and Branches ###
def repos(request):
"""/repos - Show the list of known Subversion repositories."""
# Clean up garbage created by buggy edits
bad_branch_keys = models.Branch.query(models.Branch.owner == None).fetch(
100, keys_only=True)
if bad_branch_keys:
ndb.delete_multi(bad_branch_keys)
repo_map = {}
for repo in models.Repository.query().fetch(1000, batch_size=100):
repo_map[repo.key] = repo
branches = []
for branch in models.Branch.query().fetch(2000, batch_size=100):
repo_key = branch.repo_key
if repo_key in repo_map:
branch.repository = repo_map[repo_key]
branches.append(branch)
branches.sort(key=lambda b: map(
unicode.lower, (b.repository.name, b.category, b.name)))
return respond(request, 'repos.html', {'branches': branches})
@deco.login_required
@deco.xsrf_required
def repo_new(request):
"""/repo_new - Create a new Subversion repository record."""
if request.method != 'POST':
form = RepoForm()
return respond(request, 'repo_new.html', {'form': form})
form = RepoForm(request.POST)
errors = form.errors
if not errors:
try:
repo = models.Repository(
name=form.cleaned_data.get('name'),
url=form.cleaned_data.get('url'),
guid=form.cleaned_data.get('guid'),
)
except (db.BadValueError, ValueError) as err:
errors['__all__'] = unicode(err)
if errors:
return respond(request, 'repo_new.html', {'form': form})
repo.put()
branch_url = repo.url
if not branch_url.endswith('/'):
branch_url += '/'
branch_url += 'trunk/'
branch = models.Branch(repo_key=repo.key, repo_name=repo.name,
category='*trunk*', name='Trunk',
url=branch_url)
branch.put()
return HttpResponseRedirect(reverse(repos))
SVN_ROOT = 'http://svn.python.org/view/*checkout*/python/'
BRANCHES = [
# category, name, url suffix
('*trunk*', 'Trunk', 'trunk/'),
('branch', '2.5', 'branches/release25-maint/'),
('branch', 'py3k', 'branches/py3k/'),
]
# TODO: Make this a POST request to avoid XSRF attacks.
@deco.admin_required
def repo_init(_request):
"""/repo_init - Initialze the list of known Subversion repositories."""
python = models.Repository.query(models.Repository.name == 'Python').get()
if python is None:
python = models.Repository(name='Python', url=SVN_ROOT)
python.put()
pybranches = []
else:
pybranches = list(models.Branch.query(models.Branch.repo_key == python.key))
for category, name, url in BRANCHES:
url = python.url + url
for br in pybranches:
if (br.category, br.name, br.url) == (category, name, url):
break
else:
br = models.Branch(repo_key=python.key, repo_name='Python',
category=category, name=name, url=url)
br.put()
return HttpResponseRedirect(reverse(repos))
@deco.login_required
@deco.xsrf_required
def branch_new(request, repo_id):
"""/branch_new/<repo> - Add a new Branch to a Repository record."""
repo = models.Repository.get_by_id(int(repo_id))
if request.method != 'POST':
form = BranchForm(initial={'url': repo.url,
'category': 'branch',
})
return respond(request, 'branch_new.html', {'form': form, 'repo': repo})
form = BranchForm(request.POST)
errors = form.errors
if not errors:
try:
branch = models.Branch(
repo_key=repo.key,
category=form.cleaned_data.get('category'),
name=form.cleaned_data.get('name'),
url=form.cleaned_data.get('url'),
)
except (db.BadValueError, ValueError) as err:
errors['__all__'] = unicode(err)
if errors:
return respond(request, 'branch_new.html', {'form': form, 'repo': repo})
branch.repo_name = repo.name
branch.put()
return HttpResponseRedirect(reverse(repos))
@deco.login_required
@deco.xsrf_required
def branch_edit(request, branch_id):
"""/branch_edit/<branch> - Edit a Branch record."""
branch = models.Branch.get_by_id(int(branch_id))
if branch.owner != request.user:
return HttpTextResponse('You do not own this branch', status=403)
if request.method != 'POST':
form = BranchForm(initial={'category': branch.category,
'name': branch.name,
'url': branch.url,
})
return respond(request, 'branch_edit.html',
{'branch': branch, 'form': form})
form = BranchForm(request.POST)
errors = form.errors
if not errors:
try:
branch.category = form.cleaned_data.get('category')
branch.name = form.cleaned_data.get('name')
branch.url = form.cleaned_data.get('url')
except (db.BadValueError, ValueError) as err:
errors['__all__'] = unicode(err)
if errors:
return respond(request, 'branch_edit.html',
{'branch': branch, 'form': form})
branch.put()
return HttpResponseRedirect(reverse(repos))
@deco.require_methods('POST')
@deco.login_required
@deco.xsrf_required
def branch_delete(request, branch_id):
"""/branch_delete/<branch> - Delete a Branch record."""
branch = models.Branch.get_by_id(int(branch_id))
if branch.owner != request.user:
return HttpTextResponse('You do not own this branch', status=403)
repo_key = branch.repo_key
branch.key.delete()
num_branches = models.Branch.query(models.Branch.repo_key == repo_key).count()
if not num_branches:
# Even if we don't own the repository? Yes, I think so! Empty
# repositories have no representation on screen.
repo_key.delete()
return HttpResponseRedirect(reverse(repos))
### User Profiles ###
@deco.login_required
@deco.xsrf_required
def settings(request):
account = models.Account.current_user_account
if request.method != 'POST':
nickname = account.nickname
default_context = account.default_context
default_column_width = account.default_column_width
form = SettingsForm(initial={'nickname': nickname,
'context': default_context,
'column_width': default_column_width,
'notify_by_email': account.notify_by_email,
})
return respond(request, 'settings.html', {'form': form})
form = SettingsForm(request.POST)
if form.is_valid():
account.nickname = form.cleaned_data.get('nickname')
account.default_context = form.cleaned_data.get('context')
account.default_column_width = form.cleaned_data.get('column_width')
account.notify_by_email = form.cleaned_data.get('notify_by_email')
account.fresh = False
account.put()
else:
return respond(request, 'settings.html', {'form': form})
return HttpResponseRedirect(reverse(mine))
@deco.require_methods('POST')
@deco.login_required
@deco.xsrf_required
def account_delete(_request):
account = models.Account.current_user_account
account.key.delete()
return HttpResponseRedirect(users.create_logout_url(reverse(index)))
@deco.login_required
@deco.xsrf_required
def migrate_entities(request):
"""Migrates entities from the specified user to the signed in user."""
msg = None
if request.method == 'POST':
form = MigrateEntitiesForm(request.POST)
form.set_user(request.user)
if form.is_valid():
# verify that the account belongs to the user
old_account = form.cleaned_data['account']
old_account_key = str(old_account.key)
new_account_key = str(models.Account.current_user_account.key)
for kind in ('Issue', 'Repository', 'Branch'):
taskqueue.add(url=reverse(task_migrate_entities),
params={'kind': kind,
'old': old_account_key,
'new': new_account_key},
queue_name='migrate-entities')
msg = (u'Migration job started. The issues, repositories and branches'
u' created with your old account (%s) will be moved to your'
u' current account (%s) in a background task and should'
u' be visible for your current account shortly.'
% (old_account.user.email(), request.user.email()))
else:
form = MigrateEntitiesForm()
return respond(request, 'migrate_entities.html', {'form': form, 'msg': msg})
@deco.task_queue_required('migrate-entities')
def task_migrate_entities(request):
"""/restricted/tasks/migrate_entities - Migrates entities from one account to
another.
"""
kind = request.POST.get('kind')
old = request.POST.get('old')
new = request.POST.get('new')
batch_size = 20
if kind is None or old is None or new is None:
logging.warning('Missing parameters')
return HttpResponse()
if kind not in ('Issue', 'Repository', 'Branch'):
logging.warning('Invalid kind: %s' % kind)
return HttpResponse()
old_account = ndb.Key(models.Account, old).get()
new_account = ndb.Key(models.Account, new).get()
if old_account is None or new_account is None:
logging.warning('Invalid accounts')
return HttpResponse()
# make sure that accounts match
if old_account.user.user_id() != new_account.user.user_id():
logging.warning('Accounts don\'t match')
return HttpResponse()
model = getattr(models, kind)
encoded_key = request.POST.get('key')
# model is already the ndb model class; query its properties directly.
query = model.query(model.owner == old_account.user).order(model.key)
if encoded_key:
query = query.filter(model.key > ndb.Key(urlsafe=encoded_key))
tbd = []
for entity in query.fetch(batch_size):
entity.owner = new_account.user
tbd.append(entity)
if tbd:
ndb.put_multi(tbd)
taskqueue.add(url=reverse(task_migrate_entities),
params={'kind': kind, 'old': old, 'new': new,
'key': str(tbd[-1].key)},
queue_name='migrate-entities')
return HttpResponse()
@deco.user_key_required
def user_popup(request):
"""/user_popup - Pop up to show the user info."""
try:
return _user_popup(request)
except Exception as err:
logging.exception('Exception in user_popup processing:')
# Return HttpResponse because the JS part expects a 200 status code.
return HttpHtmlResponse(
'<font color="red">Error: %s; please report!</font>' %
err.__class__.__name__)
def _user_popup(request):
user = request.user_to_show
popup_html = memcache.get('user_popup:' + user.email())
if popup_html is None:
num_issues_created = models.Issue.query(
models.Issue.closed == False, models.Issue.owner == user).count()
num_issues_reviewed = models.Issue.query(
models.Issue.closed == False,
models.Issue.reviewers == user.email()).count()
user.nickname = models.Account.get_nickname_for_email(user.email())
popup_html = render_to_response('user_popup.html',
{'user': user,
'num_issues_created': num_issues_created,
'num_issues_reviewed': num_issues_reviewed,
},
context_instance=RequestContext(request))
# Use a time-limited cache entry because the number of issues changes over time
memcache.add('user_popup:' + user.email(), popup_html, 60)
return popup_html
@deco.require_methods('POST')
def incoming_mail(request, recipients):
"""/_ah/mail/(.*)
Handle incoming mail messages.
The issue is not modified. No reviewers or CC's will be added or removed.
"""
try:
_process_incoming_mail(request.raw_post_data, recipients)
except InvalidIncomingEmailError as err:
logging.debug(str(err))
return HttpTextResponse('')
def _process_incoming_mail(raw_message, recipients):
"""Process an incoming email message."""
recipients = [x[1] for x in email.utils.getaddresses([recipients])]
incoming_msg = mail.InboundEmailMessage(raw_message)
if 'X-Google-Appengine-App-Id' in incoming_msg.original:
raise InvalidIncomingEmailError('Mail sent by App Engine')
# Use the subject to find the issue number.
# Originally the tag was (issueNNN).
# Then we changed it to be (issue NNN by WHO).
# We want to match either of these, and we need to deal with
# the fact that some mail readers will fold the long subject,
# turning a single space into "\r\n ".
# We use "issue\s*" to handle all these forms,
# and we omit the closing ) to accept both the original and the "by WHO" form.
subject = incoming_msg.subject or ''
match = re.search(r'\(issue\s*(?P<id>\d+)', subject)
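# Illustrative subjects accepted by the regex above: '... (issue123)'
# (old form) and 'Re: Fix frobnicator (issue 1234 by someone@example.com)'
# (new form); both yield the numeric issue id.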
if match is None:
raise InvalidIncomingEmailError('No issue id found: %s' % subject)
issue_id = int(match.groupdict()['id'])
issue = models.Issue.get_by_id(issue_id)
if issue is None:
raise InvalidIncomingEmailError('Unknown issue ID: %d' % issue_id)
sender = email.utils.parseaddr(incoming_msg.sender)[1]
body = None
for _, payload in incoming_msg.bodies('text/plain'):
# FIXME(andi): Remove this when issue 2383 is fixed.
# 8bit encoding results in UnknownEncodingError, see
# http://code.google.com/p/googleappengine/issues/detail?id=2383
# As a workaround we try to decode the payload ourselves.
if payload.encoding == '8bit' and payload.charset:
body = payload.payload.decode(payload.charset)
# If neither encoding nor charset is set, but the payload contains
# non-ASCII chars, we can't use payload.decode() because it returns
# payload.payload unmodified. The subsequent cast to db.Text then
# fails with a UnicodeDecodeError.
elif payload.encoding is None and payload.charset is None:
# assume utf-8 but set replace flag to go for sure.
body = payload.payload.decode('utf-8', 'replace')
else:
body = payload.decode()
break
if body is None or not body.strip():
raise InvalidIncomingEmailError('Ignoring empty message.')
elif len(body) > django_settings.RIETVELD_INCOMING_MAIL_MAX_SIZE:
# see issue325, truncate huge bodies
trunc_msg = '... (message truncated)'
end = django_settings.RIETVELD_INCOMING_MAIL_MAX_SIZE - len(trunc_msg)
body = body[:end]
body += trunc_msg
# If the subject is long, this might come wrapped into more than one line.
subject = ' '.join([x.strip() for x in subject.splitlines()])
msg = models.Message(issue_key=issue.key, parent=issue.key,
subject=subject,
sender=sender,
recipients=[x for x in recipients],
date=datetime.datetime.now(),
text=body,
draft=False)
# Add sender to reviewers if needed.
all_emails = [str(x).lower()
for x in ([issue.owner.email()] +
issue.reviewers +
issue.cc +
issue.collaborator_emails())]
if sender.lower() not in all_emails:
query = models.Account.query(models.Account.lower_email == sender.lower())
account = query.get()
if account is not None:
issue.reviewers.append(account.email) # e.g. account.email is CamelCase
else:
issue.reviewers.append(db.Email(sender))
issue.calculate_updates_for(msg)
issue.put()
msg.put()
@deco.login_required
def xsrf_token(request):
"""/xsrf_token - Return the user's XSRF token.
This is used by tools like git-cl that need to be able to interact with the
site on the user's behalf. A custom header named X-Requesting-XSRF-Token must
be included in the HTTP request; an error is returned otherwise.
"""
if 'HTTP_X_REQUESTING_XSRF_TOKEN' not in request.META:
return HttpTextResponse(
'Please include a header named X-Requesting-XSRF-Token '
'(its content doesn\'t matter).',
status=400)
return HttpTextResponse(models.Account.current_user_account.get_xsrf_token())
def customized_upload_py(request):
"""/static/upload.py - Return patched upload.py with appropiate auth type and
default review server setting.
This is used to let the user download a customized upload.py script
for hosted Rietveld instances.
"""
with open(django_settings.UPLOAD_PY_SOURCE) as f:
source = f.read()
# When served from a Google Apps instance, the account namespace needs to be
# switched to "Google Apps only".
if ('AUTH_DOMAIN' in request.META
and request.META['AUTH_DOMAIN'] != 'gmail.com'):
source = source.replace('AUTH_ACCOUNT_TYPE = "GOOGLE"',
'AUTH_ACCOUNT_TYPE = "HOSTED"')
# On a non-standard instance, the default review server is changed to the
# current hostname. This might give weird results when using versioned appspot
# URLs (e.g. 1.latest.codereview.appspot.com), but this should only affect
# testing.
if request.META['HTTP_HOST'] != 'codereview.appspot.com':
review_server = request.META['HTTP_HOST']
if request.is_secure():
review_server = 'https://' + review_server
source = source.replace('DEFAULT_REVIEW_SERVER = "codereview.appspot.com"',
'DEFAULT_REVIEW_SERVER = "%s"' % review_server)
return HttpResponse(source, content_type='text/x-python; charset=utf-8')
@deco.task_queue_required('deltacalculation')
def task_calculate_delta(request):
"""/restricted/tasks/calculate_delta - Calculate deltas for a patchset.
This URL is called by taskqueue to calculate deltas behind the
scenes. Returning an HttpResponse with any 2xx status means that the
task finished successfully. Raising an exception makes the taskqueue
retry the task.
"""
ps_key = request.POST.get('key')
if not ps_key:
logging.error('No patchset key given.')
return HttpResponse()
try:
patchset = ndb.Key(urlsafe=ps_key).get()
except (db.KindError, db.BadKeyError) as err:
logging.error('Invalid PatchSet key %r: %s' % (ps_key, err))
return HttpResponse()
if patchset is None: # e.g. PatchSet was deleted in the meantime
logging.error('Missing PatchSet key %r' % ps_key)
return HttpResponse()
patchset.calculate_deltas()
return HttpResponse()
def _build_state_value(django_request, user):
"""Composes the value for the 'state' parameter.
Packs the current request URI and an XSRF token into an opaque string that
can be passed to the authentication server via the 'state' parameter.
Meant to be similar to oauth2client.appengine._build_state_value.
Args:
django_request: Django HttpRequest object, The request.
user: google.appengine.api.users.User, The current user.
Returns:
The state value as a string.
"""
relative_path = django_request.get_full_path().encode('utf-8')
uri = django_request.build_absolute_uri(relative_path)
token = xsrfutil.generate_token(xsrf_secret_key(), user.user_id(),
action_id=str(uri))
return uri + ':' + token
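# Illustrative state value produced above: the request URI and the XSRF
# token joined by a ':', e.g.
# 'https://example.com/get-access-token?port=8001:<token>'.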
def _create_flow(django_request):
"""Create the Flow object.
The Flow is calculated using mostly fixed values and constants retrieved
from other modules.
Args:
django_request: Django HttpRequest object, The request.
Returns:
oauth2client.client.OAuth2WebServerFlow object.
"""
redirect_path = reverse(oauth2callback)
redirect_uri = django_request.build_absolute_uri(redirect_path)
client_id, client_secret, _ = auth_utils.SecretKey.get_config()
return OAuth2WebServerFlow(client_id, client_secret, auth_utils.EMAIL_SCOPE,
redirect_uri=redirect_uri,
approval_prompt='force')
def _validate_port(port_value):
"""Makes sure the port value is valid and can be used by a non-root user.
Args:
port_value: Integer or string version of integer.
Returns:
Integer version of port_value if valid, otherwise None.
"""
try:
port_value = int(port_value)
except (ValueError, TypeError):
return None
if not (1024 <= port_value <= 49151):
return None
return port_value
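# Illustrative behaviour of the helper above: _validate_port('8001') -> 8001,
# _validate_port(80) -> None (privileged port), _validate_port('abc') -> None.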
@deco.login_required
def get_access_token(request):
"""/get-access-token - Facilitates OAuth 2.0 dance for client.
Meant to take a 'port' query parameter and redirect to localhost with that
port and the user's access token appended.
"""
user = request.user
flow = _create_flow(request)
flow.params['state'] = _build_state_value(request, user)
credentials = StorageByKeyName(
CredentialsNDBModel, user.user_id(), 'credentials').get()
authorize_url = flow.step1_get_authorize_url()
redirect_response_object = HttpResponseRedirect(authorize_url)
if credentials is None or credentials.invalid:
return redirect_response_object
# Find out if credentials is expired
refresh_failed = False
if credentials.access_token is None or credentials.access_token_expired:
try:
credentials.refresh(httplib2.Http())
except AccessTokenRefreshError:
return redirect_response_object
except:
refresh_failed = True
port_value = _validate_port(request.GET.get('port'))
if port_value is None:
return HttpTextResponse('Access Token: %s' % (credentials.access_token,))
# Send access token along to localhost client
redirect_template_args = {'port': port_value}
if refresh_failed:
quoted_error = urllib.quote(OAUTH_DEFAULT_ERROR_MESSAGE)
redirect_template_args['error'] = quoted_error
client_uri = ACCESS_TOKEN_FAIL_REDIRECT_TEMPLATE % redirect_template_args
else:
quoted_access_token = urllib.quote(credentials.access_token)
redirect_template_args['token'] = quoted_access_token
client_uri = ACCESS_TOKEN_REDIRECT_TEMPLATE % redirect_template_args
return HttpResponseRedirect(client_uri)
@deco.login_required
def oauth2callback(request):
"""/oauth2callback - Callback handler for OAuth 2.0 redirect.
Handles redirect and moves forward to the rest of the application.
"""
error = request.GET.get('error')
if error:
error_msg = request.GET.get('error_description', error)
return HttpTextResponse(
'The authorization request failed: %s' % _safe_html(error_msg))
else:
user = request.user
flow = _create_flow(request)
credentials = flow.step2_exchange(request.GET)
StorageByKeyName(
CredentialsNDBModel, user.user_id(), 'credentials').put(credentials)
redirect_uri = _parse_state_value(str(request.GET.get('state')),
user)
return HttpResponseRedirect(redirect_uri)
@deco.admin_required
def set_client_id_and_secret(request):
"""/restricted/set-client-id-and-secret - Allows admin to set Client ID and
Secret.
These values, from the Google APIs console, are required to validate
OAuth 2.0 tokens within auth_utils.py.
"""
if request.method == 'POST':
form = ClientIDAndSecretForm(request.POST)
if form.is_valid():
client_id = form.cleaned_data['client_id']
client_secret = form.cleaned_data['client_secret']
additional_client_ids = form.cleaned_data['additional_client_ids']
auth_utils.SecretKey.set_config(client_id, client_secret,
additional_client_ids)
return HttpResponseRedirect(reverse(set_client_id_and_secret))
else:
form = ClientIDAndSecretForm()
return respond(request, 'set_client_id_and_secret.html', {'form': form})
### Statistics.
DATE_FORMAT = '%Y-%m-%d'
def update_stats(request):
"""Endpoint that will trigger a taskqueue to update the score of all
AccountStatsBase derived entities.
"""
if IS_DEV:
# Sadly, there is no way to know the admin port.
dashboard = 'http://%s:8000/taskqueue' % os.environ['SERVER_NAME']
else:
# Do not use app_identity.get_application_id() since we need the 's~'.
appid = os.environ['APPLICATION_ID']
versionid = os.environ['CURRENT_VERSION_ID']
dashboard = (
'https://appengine.google.com/queues?queue_name=update-stats&'
'app_id=%s&version_id=%s&' % (appid, versionid))
msg = ''
if request.method != 'POST':
form = UpdateStatsForm()
return respond(
request,
'admin_update_stats.html',
{'form': form, 'dashboard': dashboard, 'msg': msg})
form = UpdateStatsForm(request.POST)
if not form.is_valid():
form = UpdateStatsForm()
msg = 'Invalid form data.'
return respond(
request,
'admin_update_stats.html',
{'form': form, 'dashboard': dashboard, 'msg': msg})
tasks_to_trigger = form.cleaned_data['tasks_to_trigger'].split(',')
tasks_to_trigger = filter(None, (t.strip().lower() for t in tasks_to_trigger))
today = datetime.datetime.utcnow().date()
tasks = []
if not tasks_to_trigger:
msg = 'No task to trigger.'
# Special case 'refresh'.
elif (len(tasks_to_trigger) == 1 and
tasks_to_trigger[0] in ('destroy', 'refresh')):
taskqueue.add(
url=reverse(task_refresh_all_stats_score),
params={'destroy': str(int(tasks_to_trigger[0] == 'destroy'))},
queue_name='refresh-all-stats-score')
msg = 'Triggered %s.' % tasks_to_trigger[0]
else:
tasks = []
for task in tasks_to_trigger:
if task in ('monthly', '30'):
tasks.append(task)
elif models.verify_account_statistics_name(task):
if task.count('-') == 2:
tasks.append(task)
else:
# It's a month. Add every single day of the month as long as it's
# before today.
year, month = map(int, task.split('-'))
days = calendar.monthrange(year, month)[1]
tasks.extend(
'%s-%02d' % (task, d + 1) for d in range(days)
if datetime.date(year, month, d + 1) < today)
else:
msg = 'Invalid item.'
break
else:
if len(set(tasks)) != len(tasks):
msg = 'Duplicate items found.'
else:
taskqueue.add(
url=reverse(task_update_stats),
params={'tasks': json.dumps(tasks), 'date': str(today)},
queue_name='update-stats')
msg = 'Triggered the following tasks: %s.' % ', '.join(tasks)
logging.info(msg)
return respond(
request,
'admin_update_stats.html',
{'form': form, 'dashboard': dashboard, 'msg': msg})
def cron_update_yesterday_stats(_request):
"""Daily cron job to trigger all the necessary task queue.
- Triggers a task to update daily summaries.
- This task will then trigger a task to update rolling summaries.
- This task will then trigger a task to update monthly summaries.
Using 3 separate tasks spaces out datastore contention and reduces the
scope of each task so they complete in under 10 minutes, making retries
softer on the system when the datastore throws exceptions or the load for
the day is high.
"""
today = datetime.datetime.utcnow().date()
day = str(today - datetime.timedelta(days=1))
tasks = [day, '30', 'monthly']
taskqueue.add(
url=reverse(task_update_stats),
params={'tasks': json.dumps(tasks), 'date': str(today)},
queue_name='update-stats')
out = 'Triggered tasks for day %s: %s' % (day, ', '.join(tasks))
logging.info(out)
return HttpTextResponse(out)
def figure_out_real_accounts(people_involved, people_caches):
"""Removes people that are known to be role accounts or mailing lists.
Sadly, Account instances are created even for mailing lists (!), but mailing
lists never create an issue, so assume that a reviewer who never created an
issue is a nobody.
Arguments:
people_involved: set or list of email addresses to scan.
people_caches: a lookup cache of already resolved email addresses.
Returns:
list of the email addresses that are not nobodies.
"""
# Using '+' as a filter removes a fair number of WATCHLISTS entries.
people_involved = set(
i for i in people_involved
if ('+' not in i and
not i.startswith('commit-bot') and
not i.endswith('gserviceaccount.com')))
people_involved -= people_caches['fake']
# People we are still unsure about that need to be looked up.
people_to_look_for = list(people_involved - people_caches['real'])
futures = [
models.Issue.query(models.Issue.owner == users.User(r)).fetch(
limit=1, keys_only=True)
for r in people_to_look_for
]
for i, future in enumerate(futures):
account_email = people_to_look_for[i]
if not list(future):
people_caches['fake'].add(account_email)
people_involved.remove(account_email)
else:
people_caches['real'].add(account_email)
return people_involved
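def _example_static_address_filter():
  """Illustrative sketch (not part of the original module) of the cheap static
  filter applied above before any datastore lookup; the addresses are made up."""
  people = set([
      'dev@chromium.org', 'commit-bot@chromium.org',
      'foo+watch@example.com', 'robot@appspot.gserviceaccount.com'])
  kept = set(
      i for i in people
      if ('+' not in i and
          not i.startswith('commit-bot') and
          not i.endswith('gserviceaccount.com')))
  assert kept == set(['dev@chromium.org'])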
def search_relevant_first_email_for_user(
issue_owner, messages, user, people_caches):
"""Calculates which Message is representative for the request latency for this
review for this user.
Returns:
- index in |messages| that is the most representative for this user as a
reviewer or None if no Message is relevant at all. In that case, the caller
should fall back to Issue.created.
- bool if it looks like a drive-by.
It is guaranteed that the index returned is a Message sent either by
|issue_owner| or |user|.
"""
# Shortcut. No need to calculate the value.
if issue_owner == user:
return None, False
# Search for the first of:
# - message by the issue owner sent to the user or to mailing lists.
# - message by the user, for DRIVE_BY and NOT_REQUESTED.
# Otherwise, return None.
last_owner_message_index = None
for i, m in enumerate(messages):
if m.sender == issue_owner:
last_owner_message_index = i
if user in m.recipients:
return i, False
# Detect the use case where a request for review is sent to a mailing list
# and a random reviewer picks it up. We don't want to downgrade the
# reviewer from a proper review down to DRIVE_BY, so mark it as the
      # important message for everyone. A common use case is code reviews on
# golang-dev@googlegroups.com.
recipients = set(m.recipients) - set([m.sender, issue_owner])
if not figure_out_real_accounts(recipients, people_caches):
return i, False
elif m.sender == user:
# The issue owner didn't send a request specifically to this user but the
# dude replied anyway. It can happen if the user was on the cc list with
# user+cc@example.com. In that case, use the last issue owner email.
# We want to use this message for latency calculation DRIVE_BY and
# NOT_REQUESTED.
if last_owner_message_index is not None:
return last_owner_message_index, True
# issue_owner is MIA.
return i, True
else:
# Maybe a reviewer added 'user' on the review on its behalf. Likely
# m.sender wants to defer the review to someone else.
if user in m.recipients:
return i, False
# Sends the last Message index if there is any.
return last_owner_message_index, False
def process_issue(
start, day_to_process, message_index, drive_by, issue_owner, messages,
user):
"""Calculates 'latency', 'lgtms' and 'review_type' for a reviewer on an Issue.
Arguments:
- start: moment to use to calculate the latency. Can be either the moment a
Message was sent or Issue.created if no other signal exists.
  - day_to_process: the day to look for new 'events'.
- message_index: result of search_relevant_first_email_for_user().
- drive_by: the state of things looks like a DRIVE_BY or a NOT_REQUESTED.
- issue_owner: shortcut for issue.owner.email().
- messages: shortcut for issue.messages sorted by date. Cannot be empty.
- user: user to calculate latency.
  A Message must have been sent on day_to_process to imply new data,
  otherwise (-1, None, None) is returned.
"""
assert isinstance(start, datetime.datetime), start
assert isinstance(day_to_process, datetime.date), day_to_process
assert message_index is None or 0 <= message_index < len(messages), (
message_index)
assert drive_by in (True, False), drive_by
assert issue_owner.count('@') == 1, issue_owner
assert all(isinstance(m, models.Message) for m in messages), messages
assert user.count('@') == 1, user
lgtms = sum(
m.sender == user and
m.find(models.Message.LGTM_RE, owner_allowed=True) and
not m.find(models.Message.NOT_LGTM_RE, owner_allowed=True)
for m in messages)
# TODO(maruel): Check for the base username part, e.g.:
# if user.split('@', 1)[0] == issue_owner.split('@', 1)[0]:
# For example, many people have both matching @google.com and @chromium.org
# accounts.
if user == issue_owner:
if not any(m.date.date() == day_to_process for m in messages):
return -1, None, None
# There's no concept of review latency for OUTGOING reviews.
return -1, lgtms, models.AccountStatsBase.OUTGOING
if message_index is None:
# Neither issue_owner nor user sent an email, ignore.
return -1, None, None
if drive_by:
# Tricky case. Need to determine the difference between NOT_REQUESTED and
# DRIVE_BY. To determine if an issue is NOT_REQUESTED, look if the owner
# never sent a request for review in the previous messages.
review_type = (
models.AccountStatsBase.NOT_REQUESTED
if messages[message_index].sender == user
else models.AccountStatsBase.DRIVE_BY)
else:
review_type = models.AccountStatsBase.NORMAL
for m in messages[message_index:]:
if m.sender == user:
if m.date.date() < day_to_process:
# It was already updated on a previous day. Skip calculation.
return -1, None, None
return int((m.date - start).total_seconds()), lgtms, review_type
# 'user' didn't send a message, so no latency can be calculated.
assert not lgtms, lgtms
return -1, lgtms, models.AccountStatsBase.IGNORED
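def _example_latency_seconds():
  """Minimal sketch (not part of the original module) of the latency arithmetic
  used above: seconds between the representative request and the first reply."""
  request_sent = datetime.datetime(2013, 5, 1, 12, 0, 0)
  first_reply = datetime.datetime(2013, 5, 1, 14, 30, 0)
  assert int((first_reply - request_sent).total_seconds()) == 9000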
def yield_people_issue_to_update(day_to_process, issues, messages_looked_up):
"""Yields all the combinations of user-day-issue that needs to be updated.
Arguments:
- issues: set() of all the Issue touched.
- messages_looked_up: list of one int to count the number of Message looked
up.
Yields:
- tuple user, day, issue_id, latency, lgtms, review_type.
"""
assert isinstance(day_to_process, datetime.datetime), day_to_process
assert not issues and isinstance(issues, set), issues
assert [0] == messages_looked_up, messages_looked_up
day_to_process_date = day_to_process.date()
# Cache people that are valid accounts or not to reduce datastore lookups.
people_caches = {'fake': set(), 'real': set()}
# dict((user, day) -> set(issue_id)) mapping of
# the AccountStatsDay that will need to be recalculated.
need_to_update = {}
# TODO(maruel): Use asynchronous programming to start moving on to the next
# issue right away. This means creating our own Future instances.
cursor = None
while True:
query = models.Message.query(
models.Message.date >= day_to_process,
default_options=ndb.QueryOptions(keys_only=True)).order(
models.Message.date)
# Someone sane would ask: why the hell do this? I don't know either but
# that's the only way to not have it throw an exception after 60 seconds.
message_keys, cursor, more = query.fetch_page(100, start_cursor=cursor)
if not message_keys:
# We're done, no more cursor.
break
for message_key in message_keys:
# messages_looked_up may be overcounted, as the messages on the next day
# on issues already processed will be accepted as valid, until a new issue
# is found.
messages_looked_up[0] += 1
issue_key = message_key.parent()
issue_id = issue_key.id()
if issue_id in issues:
# This issue was already processed.
continue
# Aggressively fetch data concurrently.
message_future = message_key.get_async()
issue_future = issue_key.get_async()
messages_future = models.Message.query(ancestor=issue_key).fetch_async(
batch_size=1000)
if message_future.get_result().date.date() > day_to_process_date:
# Now on the next day. It is important to stop, especially when looking
# at very old CLs.
messages_looked_up[0] -= 1
cursor = None
break
# Make sure to not process this issue a second time.
issues.add(issue_id)
issue = issue_future.get_result()
# Sort manually instead of using .order('date') to save one index. Strips
# off any Message after day_to_process.
messages = sorted(
(m for m in messages_future.get_result()
if m.date.date() <= day_to_process_date),
key=lambda x: x.date)
# Updates the dict of the people-day pairs that will need to be updated.
issue_owner = issue.owner.email()
# Ignore issue.reviewers since it can change over time. Sadly m.recipients
# also contains people cc'ed so take care of these manually.
people_to_consider = set(m.sender for m in messages)
people_to_consider.add(issue_owner)
for m in messages:
for r in m.recipients:
if (any(n.sender == r for n in messages) or
r in issue.reviewers or
r not in issue.cc):
people_to_consider.add(r)
# 'issue_owner' is by definition a real account. Save one datastore
# lookup.
people_caches['real'].add(issue_owner)
for user in figure_out_real_accounts(people_to_consider, people_caches):
message_index, drive_by = search_relevant_first_email_for_user(
issue_owner, messages, user, people_caches)
        if (message_index is None or
            (drive_by and
             messages[message_index].sender == user and
             not any(m.sender == issue_owner
                     for m in messages[:message_index]))):
# There's no important message, calculate differently by using the
# issue creation date.
start = issue.created
else:
start = messages[message_index].date
# Note that start != day_to_process_date
start_str = str(start.date())
user_issue_set = need_to_update.setdefault((user, start_str), set())
        if issue_id not in user_issue_set:
user_issue_set.add(issue_id)
latency, lgtms, review_type = process_issue(
start, day_to_process_date, message_index, drive_by, issue_owner,
messages, user)
if review_type is None:
# process_issue() determined there is nothing to update.
continue
yield user, start_str, issue_id, latency, lgtms, review_type
if not cursor:
break
@deco.task_queue_required('update-stats')
def task_update_stats(request):
"""Dispatches the relevant task to execute.
Can dispatch either update_daily_stats, update_monthly_stats or
update_rolling_stats.
"""
tasks = json.loads(request.POST.get('tasks'))
date_str = request.POST.get('date')
cursor = ndb.Cursor(urlsafe=request.POST.get('cursor'))
countdown = 15
if not tasks:
msg = 'Nothing to execute!?'
logging.warning(msg)
out = HttpTextResponse(msg)
else:
# Dispatch the task to execute.
task = tasks.pop(0)
logging.info('Running %s.', task)
if task.count('-') == 2:
out, cursor = update_daily_stats(
cursor, datetime.datetime.strptime(task, DATE_FORMAT))
elif task == 'monthly':
      # The only reason day is used is in case a task queue spills over to the
      # next day.
day = datetime.datetime.strptime(date_str, DATE_FORMAT)
out, cursor = update_monthly_stats(cursor, day)
elif task == '30':
yesterday = (
datetime.datetime.strptime(date_str, DATE_FORMAT)
- datetime.timedelta(days=1)).date()
out, cursor = update_rolling_stats(cursor, yesterday)
else:
msg = 'Unknown task %s, ignoring.' % task
cursor = ''
logging.error(msg)
out = HttpTextResponse(msg)
if cursor:
# Not done yet!
tasks.insert(0, task)
countdown = 0
if out.status_code == 200 and tasks:
logging.info('%d tasks to go!\n%s', len(tasks), ', '.join(tasks))
# Space out the task queue execution by 15s to reduce the risk of
# datastore inconsistency to get in the way, since no transaction is used.
# This means to process a full month, it'll include 31*15s = 7:45 minutes
      # delay. 15s is not a lot but we are in a hurry!
taskqueue.add(
url=reverse(task_update_stats),
params={
'tasks': json.dumps(tasks),
'date': date_str,
'cursor': cursor.urlsafe() if cursor else ''},
queue_name='update-stats',
countdown=countdown)
return out
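def _example_task_name_dispatch():
  """Minimal sketch (not part of the original module) of how task names are
  classified by task_update_stats() above; the sample names are hypothetical."""
  assert '2013-05-01'.count('-') == 2   # a specific day: update_daily_stats
  assert '30'.count('-') == 0           # 30-day rolling: update_rolling_stats
  assert 'monthly'.count('-') == 0      # monthly summaries: update_monthly_stats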
def update_daily_stats(cursor, day_to_process):
"""Updates the statistics about every reviewer for the day.
Note that joe@google != joe@chromium, so make sure to always review with the
right email address or your stats will suffer.
The goal here is:
- detect all the active reviewers in the past day.
- for each of them, update their statistics for the past day.
There can be thousands of CLs modified in a single day so throughput
efficiency is important here, as it has only 10 minutes to complete.
"""
start = time.time()
# Look at all messages sent in the day. The issues associated to these
# messages are the issues we care about.
issues = set()
# Use a list so it can be modified inside the generator.
messages_looked_up = [0]
total = 0
try:
chunk_size = 10
max_futures = 200
futures = []
items = []
for packet in yield_people_issue_to_update(
day_to_process, issues, messages_looked_up):
user, day, issue_id, latency, lgtms, review_type = packet
account_key = ndb.Key('Account', models.Account.get_id_for_email(user))
found = False
for item in items:
# A user could touch multiple issues in a single day.
if item.key.id() == day and item.key.parent() == account_key:
found = True
break
else:
# Find the object and grab it. Do not use get_or_insert() to save a
# transaction and double-write.
item = models.AccountStatsDay.get_by_id(
day, parent=account_key, use_cache=False)
if not item:
# Create a new one.
item = models.AccountStatsDay(id=day, parent=account_key)
if issue_id in item.issues:
# It was already there, update.
i = item.issues.index(issue_id)
if (item.latencies[i] == latency and
item.lgtms[i] == lgtms and
item.review_types[i] == review_type):
# Was already calculated, skip.
continue
# Make sure to not "downgrade" the object.
if item.lgtms[i] > lgtms:
# Never lower the number of lgtms.
continue
if item.latencies[i] >= 0 and latency == -1:
# Unchanged or "lower priority", no need to store again.
continue
if (item.latencies[i] >= 0 and latency >= 0 and
item.latencies[i] != latency):
# That's rare, the new calculated latency doesn't match the previously
# calculated latency. File an error but let it go.
logging.error(
'New calculated latency doesn\'t match previously calculated '
'value.\n%s != %s\nItem %d in:\n%s',
item.latencies[i], latency, i, item)
item.latencies[i] = latency
item.lgtms[i] = lgtms
item.review_types[i] = review_type
else:
# TODO(maruel): Sort?
item.issues.append(issue_id)
item.latencies.append(latency)
item.lgtms.append(lgtms)
item.review_types.append(review_type)
if not found:
items.append(item)
if len(items) == chunk_size:
futures.extend(ndb.put_multi_async(items, use_cache=False))
total += chunk_size
items = []
futures = [f for f in futures if not f.done()]
while len(futures) > max_futures:
# Slow down to limit memory usage.
ndb.Future.wait_any(futures)
futures = [f for f in futures if not f.done()]
if items:
futures.extend(ndb.put_multi_async(items, use_cache=False))
total += len(items)
ndb.Future.wait_all(futures)
result = 200
except (db.Timeout, DeadlineExceededError):
result = 500
out = (
'%s\n'
'%d messages\n'
'%d issues\n'
'Updated %d items\n'
'In %.1fs\n') % (
day_to_process.date(), messages_looked_up[0], len(issues),
total, time.time() - start)
if result == 200:
logging.info(out)
else:
logging.error(out)
return HttpTextResponse(out, status=result), ''
def update_rolling_stats(cursor, reference_day):
"""Looks at all accounts and recreates all the rolling 30 days
AccountStatsMulti summaries.
Note that during the update, the leaderboard will be inconsistent.
Only do 1000 accounts at a time since there's a memory leak in the function.
"""
assert isinstance(cursor, ndb.Cursor), cursor
assert isinstance(reference_day, datetime.date), reference_day
start = time.time()
total = 0
total_deleted = 0
try:
# Process *all* the accounts.
duration = '30'
chunk_size = 10
futures = []
items = []
to_delete = []
accounts = 0
while True:
query = models.Account.query()
account_keys, next_cursor, more = query.fetch_page(
100, keys_only=True, start_cursor=cursor)
if not account_keys:
# We're done, no more cursor.
next_cursor = None
break
a_key = ''
for a_key in account_keys:
accounts += 1
# TODO(maruel): If date of each issue was saved in the entity, this
# would not be necessary, assuming the entity doesn't become itself
# corrupted.
rolling_future = models.AccountStatsMulti.get_by_id_async(
duration, parent=a_key)
days = [
str(reference_day - datetime.timedelta(days=i))
for i in xrange(int(duration))
]
days_keys = [
ndb.Key(models.AccountStatsDay, d, parent=a_key) for d in days
]
valid_days = filter(None, ndb.get_multi(days_keys))
if not valid_days:
rolling = rolling_future.get_result()
if rolling:
to_delete.append(rolling.key)
if len(to_delete) == chunk_size:
futures.extend(ndb.delete_multi_async(to_delete))
total_deleted += chunk_size
to_delete = []
futures = [f for f in futures if not f.done()]
continue
# Always override the content.
rolling = models.AccountStatsMulti(id=duration, parent=a_key)
# Sum all the daily instances into the rolling summary. Always start
# over because it's not just adding data, it's also removing data from
# the day that got excluded from the rolling summary.
if models.sum_account_statistics(rolling, valid_days):
items.append(rolling)
if len(items) == chunk_size:
futures.extend(ndb.put_multi_async(items))
total += chunk_size
items = []
futures = [f for f in futures if not f.done()]
        if accounts == 1000 or (time.time() - start) > 300:
          # Limit memory usage.
          logging.info('%d accounts, last was %s', accounts, a_key.id()[1:-1])
          more = False
          break
      if items:
        futures.extend(ndb.put_multi_async(items))
        total += len(items)
        items = []
      if to_delete:
        futures.extend(ndb.delete_multi_async(to_delete))
        total_deleted += len(to_delete)
        to_delete = []
      if not more:
        # Either the account/time limit was reached or there are no more
        # accounts to page through; stop and resume from the cursor later.
        break
      # Move on to the next page of accounts.
      cursor = next_cursor
ndb.Future.wait_all(futures)
result = 200
except (db.Timeout, DeadlineExceededError):
result = 500
out = '%s\nLooked up %d accounts\nStored %d items\nDeleted %d\nIn %.1fs\n' % (
reference_day, accounts, total, total_deleted, time.time() - start)
if result == 200:
logging.info(out)
else:
logging.error(out)
return HttpTextResponse(out, status=result), next_cursor
def update_monthly_stats(cursor, day_to_process):
"""Looks at all AccountStatsDay instance updated on that day and updates the
corresponding AccountStatsMulti instance.
This taskqueue updates all the corresponding monthly AccountStatsMulti
summaries by looking at all AccountStatsDay.modified.
"""
today = datetime.datetime.utcnow().date()
start = time.time()
total = 0
skipped = 0
try:
# The biggest problem here is not time but memory usage so limit the number
# of ongoing futures.
max_futures = 200
futures = []
days_stats_fetched = 0
yielded = 0
q = models.AccountStatsDay.query(
models.AccountStatsDay.modified >= day_to_process,
default_options=ndb.QueryOptions(keys_only=True))
months_to_regenerate = set()
while True:
day_stats_keys, cursor, more = q.fetch_page(100, start_cursor=cursor)
if not day_stats_keys:
cursor = None
break
days_stats_fetched += len(day_stats_keys)
if not (days_stats_fetched % 1000):
logging.info('Scanned %d AccountStatsDay.', days_stats_fetched)
# Create a batch of items to process.
batch = []
for key in day_stats_keys:
month_name = key.id().rsplit('-', 1)[0]
account_name = key.parent().id()
lookup_key = '%s-%s' % (month_name, account_name)
if not lookup_key in months_to_regenerate:
batch.append((month_name, account_name))
months_to_regenerate.add(lookup_key)
for month_name, account_id in batch:
yielded += 1
if not (yielded % 1000):
logging.info(
'%d items done, %d skipped, %d yielded %d futures.',
total, skipped, yielded, len(futures))
account_key = ndb.Key(models.Account, account_id)
monthly = models.AccountStatsMulti.get_by_id(
month_name, parent=account_key, use_cache=False)
if not monthly:
# Create a new one.
monthly = models.AccountStatsMulti(id=month_name, parent=account_key)
elif monthly.modified.date() == today:
# It was modified today, skip it.
skipped += 1
continue
days_in_month = calendar.monthrange(*map(int, month_name.split('-')))[1]
days_name = [
month_name + '-%02d' % (i + 1) for i in range(days_in_month)
]
days_keys = [
ndb.Key(models.AccountStatsDay, d, parent=account_key)
for d in days_name
]
days = [d for d in ndb.get_multi(days_keys, use_cache=False) if d]
assert days, (month_name, account_id)
if models.sum_account_statistics(monthly, days):
futures.extend(ndb.put_multi_async([monthly], use_cache=False))
total += 1
while len(futures) > max_futures:
# Slow down to limit memory usage.
ndb.Future.wait_any(futures)
futures = [f for f in futures if not f.done()]
else:
skipped += 1
if (time.time() - start) > 400:
break
ndb.Future.wait_all(futures)
result = 200
except (db.Timeout, DeadlineExceededError) as e:
logging.error(str(e))
result = 500
out = '%s\nStored %d items\nSkipped %d\nIn %.1fs\n' % (
day_to_process.date(), total, skipped, time.time() - start)
if result == 200:
logging.info(out)
else:
logging.error(out)
return HttpTextResponse(out, status=result), cursor
@deco.task_queue_required('refresh-all-stats-score')
def task_refresh_all_stats_score(request):
"""Updates all the scores or destroy them all.
- Updating score is necessary when models.compute_score() is changed.
- Destroying the instances is necessary if
search_relevant_first_email_for_user() or process_issue() are modified.
"""
start = time.time()
cls_name = request.POST.get('cls') or 'Day'
destroy = int(request.POST.get('destroy', '0'))
cursor = datastore_query.Cursor(urlsafe=request.POST.get('cursor'))
task_count = int(request.POST.get('task_count', '0'))
assert cls_name in ('Day', 'Multi'), cls_name
cls = (
models.AccountStatsDay
if cls_name == 'Day' else models.AccountStatsMulti)
# Task queues are given 10 minutes. Do it in 9 minutes chunks to protect
# against most timeout conditions.
timeout = 540
updated = 0
skipped = 0
try:
futures = []
chunk_size = 10
items = []
more = True
if destroy:
options = ndb.QueryOptions(keys_only=True)
else:
options = ndb.QueryOptions()
while more:
batch, cursor, more = cls.query(default_options=options).fetch_page(
20, start_cursor=cursor)
if destroy:
futures.extend(ndb.delete_multi_async(batch))
updated += len(batch)
else:
for i in batch:
score = models.compute_score(i)
if i.score != score:
items.append(i)
if len(items) == chunk_size:
futures.extend(ndb.put_multi_async(items))
updated += chunk_size
items = []
futures = [f for f in futures if not f.done()]
else:
skipped += 1
if time.time() - start >= timeout:
break
if items:
futures.extend(ndb.put_multi_async(items))
      updated += len(items)
ndb.Future.wait_all(futures)
if not more and cls_name == 'Day':
# Move to the Multi instances.
more = True
cls_name = 'Multi'
cursor = datastore_query.Cursor()
if more:
taskqueue.add(
url=reverse(task_refresh_all_stats_score),
params={
'cls': cls_name,
'cursor': cursor.urlsafe() if cursor else '',
'destroy': str(destroy),
'task_count': str(task_count+1),
},
queue_name='refresh-all-stats-score')
result = 200
except (db.Timeout, DeadlineExceededError):
result = 500
out = 'Index: %d\nType = %s\nStored %d items\nSkipped %d\nIn %.1fs\n' % (
task_count, cls.__name__, updated, skipped, time.time() - start)
if result == 200:
logging.info(out)
else:
logging.error(out)
return HttpTextResponse(out, status=result)
def quarter_to_months(when):
"""Manually handles the forms 'YYYY' or 'YYYY-QX'."""
today = datetime.datetime.utcnow().date()
if when.isdigit() and 2008 <= int(when) <= today.year:
# Select the whole year.
year = int(when)
if year == today.year:
out = ['%04d-%02d' % (year, i + 1) for i in range(today.month)]
else:
out = ['%04d-%02d' % (year, i + 1) for i in range(12)]
else:
quarter = re.match(r'^(\d\d\d\d-)[qQ]([1-4])$', when)
if not quarter:
return None
prefix = quarter.group(1)
# Convert the quarter into 3 months group.
base = (int(quarter.group(2)) - 1) * 3 + 1
out = ['%s%02d' % (prefix, i) for i in range(base, base+3)]
logging.info('Expanded to %s' % ', '.join(out))
return out
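def _example_quarter_expansion():
  """Minimal sketch (not part of the original module) of quarter_to_months()
  above; the inputs are hypothetical."""
  assert quarter_to_months('2013-q2') == ['2013-04', '2013-05', '2013-06']
  assert quarter_to_months('junk') is None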
def show_user_impl(user, when):
months = None
if not models.verify_account_statistics_name(when):
months = quarter_to_months(when)
if not months:
return None
account_key = ndb.Key(models.Account, models.Account.get_id_for_email(user))
# Determines which entity class should be loaded by the number of '-'.
cls = (
models.AccountStatsDay
if when.count('-') == 2 else models.AccountStatsMulti)
if months:
# Normalize to 'q'.
when = when.lower()
# Loads the stats for the 3 months and merge them.
keys = [ndb.Key(cls, i, parent=account_key) for i in months]
values = filter(None, ndb.get_multi(keys))
stats = cls(id=when, parent=account_key)
models.sum_account_statistics(stats, values)
else:
stats = cls.get_by_id(when, parent=account_key)
if not stats:
# It's a valid date or rolling summary key, so if there's nothing, just
# return the fact there's no data with an empty object.
stats = cls(id=when, parent=account_key)
return stats
@deco.user_key_required
def show_user_stats(request, when):
stats = show_user_impl(request.user_to_show.email(), when)
if not stats:
return HttpResponseNotFound()
incoming = [
{
'issue': stats.issues[i],
'latency': stats.latencies[i],
'lgtms': stats.lgtms[i],
'review_type':
models.AccountStatsBase.REVIEW_TYPES[stats.review_types[i]],
} for i in xrange(len(stats.issues))
if stats.review_types[i] != models.AccountStatsBase.OUTGOING
]
outgoing = [
{
'issue': stats.issues[i],
'lgtms': stats.lgtms[i],
} for i in xrange(len(stats.issues))
if stats.review_types[i] == models.AccountStatsBase.OUTGOING
]
return respond(
request,
'user_stats.html',
{
'viewed_account': request.user_to_show,
'incoming': incoming,
'outgoing': outgoing,
'stats': stats,
'when': when,
})
@deco.json_response
@deco.user_key_required
def show_user_stats_json(request, when):
stats = show_user_impl(request.user_to_show.email(), when)
if not stats:
return {deco.STATUS_CODE: 404}
return stats.to_dict()
def leaderboard_impl(when, limit):
"""Returns the leaderboard for this Rietveld instance on |when|.
It returns the list of the reviewers sorted by their score for
the past weeks, a specific day or month or a quarter.
"""
when = when.lower()
months = None
if not models.verify_account_statistics_name(when):
months = quarter_to_months(when)
if not months:
return None
cls = (
models.AccountStatsDay
if when.count('-') == 2 else models.AccountStatsMulti)
if months:
# Use the IN operator to simultaneously select the 3 months.
results = cls.query(cls.name.IN(months)).order(cls.score).fetch(limit)
# Then merge all the results accordingly.
tops = {}
for i in results:
tops.setdefault(i.user, []).append(i)
for key, values in tops.iteritems():
values.sort(key=lambda x: x.name)
out = models.AccountStatsMulti(id=when, parent=values[0].key.parent())
models.sum_account_statistics(out, values)
tops[key] = out
tops = sorted(tops.itervalues(), key=lambda x: x.score)
else:
# Grabs the pre-calculated entities or daily entity.
tops = cls.query(cls.name == when).order(cls.score).fetch(limit)
# Remove anyone with a None score.
return [t for t in tops if t.score is not None]
def stats_to_dict(t):
"""Adds value 'user'.
It is a meta property so it is not included in to_dict() by default.
"""
o = t.to_dict()
o['user'] = t.user
return o
@deco.json_response
def leaderboard_json(request, when):
limit = _clean_int(request.GET.get('limit'), 300, 1, 1000)
data = leaderboard_impl(when, limit)
if data is None:
return {deco.STATUS_CODE: 404}
return [stats_to_dict(t) for t in data]
def leaderboard(request, when):
"""Prints the leaderboard for this Rietveld instance."""
limit = _clean_int(request.GET.get('limit'), 300, 1, 1000)
data = leaderboard_impl(when, limit)
if data is None:
return HttpResponseNotFound()
tops = []
shame = []
for i in data:
if i.score == models.AccountStatsBase.NULL_SCORE:
shame.append(i)
else:
tops.append(i)
return respond(
request, 'leaderboard.html', {'tops': tops, 'shame': shame, 'when': when})
|
rietveld-codereview/rietveld
|
codereview/views.py
|
Python
|
apache-2.0
| 173,739
|
from django import forms
class CartForm(forms.Form):
email = forms.CharField(max_length=100, widget=forms.TextInput(
attrs={'class':"autosave required", 'placeholder':""}))
name = forms.CharField(max_length=100, widget=forms.TextInput(
attrs={'class':"autosave required", 'placeholder':""}))
    address_name = forms.CharField(max_length=100, widget=forms.TextInput(
attrs={'class':"autosave", 'placeholder':""}))
address1 = forms.CharField(max_length=100, widget=forms.TextInput(
attrs={'class':"autosave required", 'placeholder':"address line 1"}))
address2 = forms.CharField(max_length=100, widget=forms.TextInput(
attrs={'class':"autosave", 'placeholder':"address line 2"}))
city = forms.CharField(max_length=50, widget=forms.TextInput(
attrs={'class':"autosave required", 'placeholder':"city"}))
state = forms.CharField(max_length=50, widget=forms.TextInput(
attrs={'class':"autosave", 'placeholder':"state/region"}))
postal_code = forms.CharField(max_length=15, widget=forms.TextInput(
attrs={'class':"autosave required", 'placeholder':"zip code/post code"}))
country = forms.CharField(max_length=50, widget=forms.TextInput(
attrs={'class':"autosave required", 'placeholder':"country"}))
class CheckoutForm(forms.Form):
receipt = forms.CharField(widget=forms.Textarea(
attrs={'placeholder': "payment data (and URL if available)",
'rows': "7"
}))
notes = forms.CharField(widget=forms.Textarea(
attrs={'placeholder': "sale details",
'rows': "7"
}))
total_charge = forms.DecimalField(widget=forms.TextInput())
total_discount = forms.DecimalField(widget=forms.TextInput())
total_paid = forms.DecimalField(widget=forms.TextInput())
total_refunded = forms.DecimalField(widget=forms.TextInput())
currency = forms.CharField(max_length=3, widget=forms.TextInput())
|
tomcounsell/Cobra
|
apps/public/views/forms.py
|
Python
|
gpl-2.0
| 2,154
|
import ConfigParser
import logging
from miro import api
from miro import app
from miro.test.framework import MiroTestCase
from miro.test import mock
# define stubs to allow us to use this module as an extension module
def unload():
pass
ext_context = None
def load(context):
global ext_context
ext_context = context
class ExtensionTestBase(MiroTestCase):
def setUp(self):
MiroTestCase.setUp(self)
self.reset_ext_storage_manager()
def tearDown(self):
self.reset_ext_storage_manager()
MiroTestCase.tearDown(self)
def reset_ext_storage_manager(self):
global ext_context
ext_context = None
def make_extension_config(self):
"""Generate a SafeConfigParser object to use as a test extension."""
config = ConfigParser.SafeConfigParser()
config.add_section('extension')
config.set('extension', 'name', 'Unittest Extension')
config.set('extension', 'version', 'core')
config.set('extension', 'enabled_by_default', 'False')
config.set('extension', 'module', 'miro.test.extensiontest')
return config
def create_extension(self):
"""Write a .miroext file using a SafeConfigParser object."""
config = self.make_extension_config()
self.ext_path, fp = self.make_temp_path_fileobj('.miroext')
config.write(fp)
fp.close()
app.extension_manager.load_extensions()
def load_extension(self):
ext = app.extension_manager.get_extension_by_name(
"Unittest Extension")
app.extension_manager.import_extension(ext)
app.extension_manager.load_extension(ext)
self.storage_manager = ext_context.storage_manager
def unload_extension(self):
ext = app.extension_manager.get_extension_by_name(
"Unittest Extension")
app.extension_manager.unload_extension(ext)
class ExtensionStorageTest(ExtensionTestBase):
# test extensions storing data
def setUp(self):
ExtensionTestBase.setUp(self)
# load our extension
self.create_extension()
self.load_extension()
def check_simple_set(self, key, value):
self.storage_manager.set_value(key, value)
stored_value = self.storage_manager.get_value(key)
if stored_value != value:
raise AssertionError("Error storing %s: set %r, got %r" % (key,
value, stored_value))
def test_simple_store(self):
self.check_simple_set('a', 'foo')
self.check_simple_set('b', 200)
self.check_simple_set('c', 3.0)
# try some unicode values
self.check_simple_set(u'd', [1, 2, 'three'])
self.check_simple_set(u'e\u03a0', {'key': 'value'})
# try clearing a value
self.storage_manager.clear_value("a")
self.assertRaises(KeyError, self.storage_manager.get_value, 'a')
# test key_exists()
self.assertEquals(self.storage_manager.key_exists('b'), True)
self.assertEquals(self.storage_manager.key_exists('c'), True)
self.assertEquals(self.storage_manager.key_exists('z'), False)
self.assertEquals(self.storage_manager.key_exists('a'), False)
def test_sqlite_store(self):
# test that the sqlite connection works
conn = self.storage_manager.get_sqlite_connection()
# all we need to test is if we have a real sqlite connection. Let's
# assume that if we can run a few SQL commands, we're good
cursor = conn.cursor()
cursor.execute("CREATE TABLE foo(a, b)")
cursor.execute("INSERT INTO foo (a, b) VALUES (?, ?)", (1, 'two'))
cursor.execute("INSERT INTO foo (a, b) VALUES (?, ?)", (3, 'four'))
cursor.execute("SELECT a, b FROM foo ORDER BY a ASC")
self.assertEquals(cursor.fetchall(), [(1, 'two'), (3, 'four')])
class ExtensionHookTest(ExtensionTestBase):
def setUp(self):
ExtensionTestBase.setUp(self)
# Make a Mock object to use as a hook function. Nest inside another
# Mock object to test hook parser better
global hook_holder
hook_holder = mock.Mock()
hook_holder.hook_func = mock.Mock()
self.mock_hook = hook_holder.hook_func
# make our extension
self.create_extension()
def make_extension_config(self):
config = ExtensionTestBase.make_extension_config(self)
# add hooks
config.add_section('hooks')
config.set('hooks', 'test_hook',
'miro.test.extensiontest:hook_holder.hook_func')
return config
def test_hook_invoke(self):
# test calling hook functions
self.load_extension()
# setup our mock function to return a value
self.mock_hook.return_value = 123
# invoke the hook
results1 = api.hook_invoke('test_hook', 1, 2, foo=3)
results2 = api.hook_invoke('test_hook', 4, 5, bar=6)
        # check that the function was called and the results are correct
self.assertEquals(self.mock_hook.call_count, 2)
self.assertEquals(self.mock_hook.call_args_list[0],
((1, 2), {'foo': 3}))
self.assertEquals(self.mock_hook.call_args_list[1],
((4, 5), {'bar': 6}))
self.assertEquals(results1, [123])
self.assertEquals(results2, [123])
def test_hook_exception(self):
# test hook functions raising exceptions
self.load_extension()
self.log_filter.reset_records()
# setup our mock function to throw an error
self.mock_hook.side_effect = ValueError("Bad Value")
# invoke the hook
results = api.hook_invoke('test_hook')
# check that the error isn't included in the results and that we
# logged the exception
self.log_filter.check_record_count(1)
self.log_filter.check_record_level(logging.ERROR)
self.assertEquals(results, [])
def test_unloaded_extension(self):
# check that unloaded extensions don't provide hooks
# before we load our extension, the hook shouldn't be registered
# invoking the hook shouldn't do anything now
results = api.hook_invoke('test_hook')
self.assertEquals(self.mock_hook.call_count, 0)
self.assertEquals(results, [])
# if we load, then unload our extension, the hook shouldn't be
# registered
self.load_extension()
        self.unload_extension()
results = api.hook_invoke('test_hook')
self.assertEquals(self.mock_hook.call_count, 0)
self.assertEquals(results, [])
|
debugger06/MiroX
|
lib/test/extensiontest.py
|
Python
|
gpl-2.0
| 6,603
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Copyright (C) 2008-2008 凯源吕鑫 lvxin@gmail.com <basic chart data>
# 维智众源 oldrev@gmail.com <states data>
# Copyright (C) 2012-2012 南京盈通 ccdos@intoerp.com <small business chart>
# Copyright (C) 2008-now 开阖软件 jeff@osbzr.com < PM and LTS >
# Copyright (C) 2018-now jeffery9@gmail.com
{
'name': 'China - Accounting',
'version': '1.8',
'category': 'Localization',
'author': 'www.openerp-china.org',
'maintainer': 'jeff@osbzr.com',
'website': 'http://openerp-china.org',
'description': """
Includes the following data for the Chinese localization
========================================================
Account Type/科目类型
State Data/省份数据
""",
'depends': ['base', 'account', 'l10n_multilang'],
'data': [
'data/res_country_state_data.xml',
'data/account_account_type_data.xml',
'data/account_tax_group_data.xml',
],
}
|
t3dev/odoo
|
addons/l10n_cn/__manifest__.py
|
Python
|
gpl-3.0
| 1,061
|
import pymel.core as pm
import os
import metautil.json_util as json_u
class Follicle():
'''Creates a hair Follicle in Maya and attaches it to a mesh.'''
def __init__(self, follicle):
if follicle and not follicle.listRelatives(shapes=True) and type(follicle) == pm.nt.Follicle:
self.follicle = follicle
self.follicle_transform = self.follicle.getParent()
elif follicle and follicle.listRelatives(shapes=True) and type(follicle.getShape()) == pm.nt.Follicle:
self.follicle = follicle.getShape()
            self.follicle_transform = follicle
else:
raise ValueError('The node is not a follicle.')
self.u = self.follicle.parameterU.get()
self.v = self.follicle.parameterV.get()
self.uv = [self.follicle.parameterU.get(), self.follicle.parameterV.get()]
@classmethod
def create(cls, mesh, obj_position=None, follicle_name='follicle_constraint', uv=[]):
'''
Creates a Follicle attached to a mesh using an object position or the uv position
        :param transform mesh: A transform mesh to which you are attaching a follicle
:param transform obj_position: gets the position of the object to set as the UV position for the follicle
        :param string follicle_name: name of the follicle transform and follicle shape
:param float list uv: uv coordinates
:return object follicle: Return the shape node of the created follicle
'''
mesh_shape = mesh.getShape()
if not mesh_shape:
raise StandardError('You can only attach the follicle to an object that has a shape.')
if obj_position and type(obj_position) == pm.nt.Transform:
obj_position = pm.xform(obj_position, q=True, ws=True, t=True)
# else:
# raise StandardError('The object you are using for a position needs to be a transform.')
mesh_shape = None
if mesh and type(mesh) == pm.nt.Transform:
mesh_shape = mesh.getShape()
if isinstance(mesh_shape, pm.nt.Shape):
follicle = pm.createNode('follicle', n=follicle_name + '_shape')
follicle_parent = follicle.getParent()
follicle_parent.rename(follicle_name)
follicle.outTranslate >> follicle_parent.translate
follicle.outRotate >> follicle_parent.rotate
mesh_shape.worldMatrix[0] >> follicle.inputWorldMatrix
mesh_shape.worldMesh >> follicle.inputMesh
if not uv:
closest_node = pm.createNode('closestPointOnMesh', n='cpm_' + follicle_name)
mesh_shape.outMesh >> closest_node.inMesh
mesh_shape.worldMatrix[0] >> closest_node.inputMatrix
closest_node.inPositionX.set(obj_position[0])
closest_node.inPositionY.set(obj_position[1])
closest_node.inPositionZ.set(obj_position[2])
uv = [closest_node.parameterU.get(), closest_node.parameterV.get()]
pm.delete(closest_node)
follicle.parameterU.set(uv[0])
follicle.parameterV.set(uv[1])
return cls(follicle)
def __str__(self):
'''
Will return the string name rather than the class instance
'''
return self.follicle.__str__()
def __getattr__(self, attrname):
'''Returns the Pymel attributes'''
if attrname == 'pynode':
raise AttributeError("this instance of {0} has no pynode".format(self.__class__.__name__))
return getattr(self.follicle, attrname)
def __melobject__(self):
return self.follicle.__melobject__()
def __repr__(self):
'''Returns the Pymel string rather than the class instance'''
return self.follicle.__repr__()
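# Illustrative usage sketch (not part of the original module). It assumes it is
# run inside a Maya session where pymel is available; the node names are made up.
def _example_attach_follicle_at_uv():
    demo_mesh = pm.polySphere(name='follicle_demo_mesh')[0]
    # Explicit UV coordinates skip the closestPointOnMesh lookup in create().
    return Follicle.create(demo_mesh, follicle_name='demo_follicle', uv=[0.5, 0.5])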
class ExportFolliclePositions():
'''Exports Follicle data to a Json file. This class deals with multiple Follicles.'''
    def __init__(self, data=None):
        self.data = data if data is not None else {}
@classmethod
def create(cls, mesh, joints):
data = {}
if mesh and type(mesh) == pm.nt.Transform:
pm.select(joints)
locator = None
if pm.objExists('joint_position_loc'):
locator = pm.PyNode('joint_position_loc')
else:
locator = pm.spaceLocator(n='joint_position_loc')
            if not pm.objExists('follicle_grp'):
                follicle_grp = pm.group(em=True, n='follicle_grp')
            else:
                follicle_grp = pm.PyNode('follicle_grp')
for joint in joints:
follicles = []
follicle = None
name = None
pm.delete(pm.pointConstraint(joint, locator, w=1, mo=False))
name = str(joint.replace(str(joint), 'follicle_'+str(joint)))
follicle = Follicle.create(mesh=mesh, obj_position=locator, follicle_name=name)
follicles.append(follicle)
pm.parentConstraint(follicle.follicle_transform, joint, w=True, mo=False)
pm.parent(follicle.follicle_transform, follicle_grp)
data[str(joint)] = {}
data[str(joint)]['follicle'] = str(follicle.follicle_transform)
data[str(joint)]['uv'] = (follicle.parameterU.get(), follicle.parameterV.get())
data[str(joint)]['joint_ws_position'] = pm.xform(joint, q=True, ws=True, t=True)
pm.delete(locator)
pm.select(cl=True)
return cls(data)
def export(self, path, file_name):
''' Export follicle name, uv position, and joint ws position to a JSON File'''
if self.data:
if not os.path.isdir(path):
os.makedirs(path)
json_u.write_json(path, file_name, self.data)
return os.path.join(path, file_name)
|
deathglitch/metarigging
|
python/model/follicle.py
|
Python
|
mit
| 4,928
|
"""
This little helper takes Lebedev point and weight data from [1] and produces JSON files.
[1]
https://people.sc.fsu.edu/~jburkardt/datasets/sphere_lebedev_rule/sphere_lebedev_rule.html
"""
import os
import re
import numpy as np
def read(filename):
data = np.loadtxt(filename)
azimuthal_polar = data[:, :2] / 180.0
weights = data[:, 2]
return azimuthal_polar, weights
def chunk_data(weights):
# divide the weight vector into chunks of 6, 8, 12, 24, or 48
chunks = []
k = 0
ref_weight = 0.0
tol = 1.0e-12
while k < len(weights):
if len(chunks) > 0 and abs(weights[k] - ref_weight) < tol:
chunks[-1].append(k)
else:
chunks.append([k])
ref_weight = weights[k]
k += 1
return chunks
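def _example_chunk_data():
    # Illustrative sketch (not part of the original tool): consecutive points
    # sharing the same weight are grouped into one chunk of indices.
    weights = [0.1, 0.1, 0.1, 0.2, 0.2]
    assert chunk_data(weights) == [[0, 1, 2], [3, 4]]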
def sort_into_symmetry_classes(chunks, weights, azimuthal_polar):
data = {"a1": [], "a2": [], "a3": [], "pq0": [], "llm": [], "rsw": []}
for c in chunks:
if len(c) == 6:
data["a1"].append([weights[c[0]]])
elif len(c) == 12:
data["a2"].append([weights[c[0]]])
elif len(c) == 8:
data["a3"].append([weights[c[0]]])
elif len(c) == 24:
if any(abs(azimuthal_polar[c, 1] - 0.5) < 1.0e-12):
# polar == pi/2 => X == [p, q, 0].
# Find the smallest positive phi that's paired with `polar ==
# pi/2`; the symmetry is fully characterized by that phi.
k = np.where(abs(azimuthal_polar[c, 1] - 0.5) < 1.0e-12)[0]
assert len(k) == 8
k2 = np.where(azimuthal_polar[c, 0][k] > 0.0)[0]
azimuthal_min = np.min(azimuthal_polar[c, 0][k][k2])
data["pq0"].append([weights[c[0]], azimuthal_min])
else:
# X = [l, l, m].
# In this case, there must by exactly two phi with the value
# pi/4. Take the value of the smaller corresponding `polar`;
# all points are characterized by it.
k = np.where(abs(azimuthal_polar[c, 0] - 0.25) < 1.0e-12)[0]
assert len(k) == 2
k2 = np.where(azimuthal_polar[c, 1][k] > 0.0)[0]
polar_min = np.min(azimuthal_polar[c, 1][k][k2])
data["llm"].append([weights[c[0]], polar_min])
else:
assert len(c) == 48
# This most general symmetry is characterized by two angles; one
# could take any two here.
# To make things easier later on, out of the 6 smallest polar
# angle, take the one with the smallest positive phi.
min_polar = np.min(azimuthal_polar[c, 1])
k = np.where(abs(azimuthal_polar[c, 1] - min_polar) < 1.0e-12)[0]
k2 = np.where(azimuthal_polar[c, 0][k] > 0.0)[0]
min_azimuthal = np.min(azimuthal_polar[c, 0][k][k2])
data["rsw"].append([weights[c[0]], min_azimuthal, min_polar])
return data
def write_json(filename, d):
    # Getting floats in scientific notation out of Python's json module is almost impossible, so do
# some work here. Compare with <https://stackoverflow.com/a/1733105/353337>.
class PrettyFloat(float):
def __repr__(self):
return f"{self:.16e}"
def pretty_floats(obj):
if isinstance(obj, float):
return PrettyFloat(obj)
elif isinstance(obj, dict):
return {k: pretty_floats(v) for k, v in obj.items()}
elif isinstance(obj, (list, tuple)):
return list(map(pretty_floats, obj))
return obj
with open(filename, "w") as f:
string = (
pretty_floats(d)
.__repr__()
.replace("'", '"')
.replace("{", "{\n ")
.replace("[[", "[\n [")
.replace("], [", "],\n [")
.replace(']], "', ']\n ],\n "')
.replace("}", "\n}")
.replace("]]", "]\n ]")
)
f.write(string)
return
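def _example_scientific_repr():
    # Illustrative sketch (not part of the original tool): the fixed-width
    # scientific notation that the PrettyFloat wrapper above forces, compared
    # with the default float repr that plain json would emit.
    assert f"{0.0001:.16e}" == "1.0000000000000000e-04"
    assert repr(0.0001) == "0.0001"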
if __name__ == "__main__":
directory = "data/"
for k, file in enumerate(os.listdir(directory)):
filename = os.fsdecode(file)
m = re.match("lebedev_([0-9]+)\\.txt", filename)
degree = int(m.group(1))
azimuthal_polar, weights = read(os.path.join("data", filename))
chunks = chunk_data(weights)
        data = sort_into_symmetry_classes(chunks, weights, azimuthal_polar)
delete_list = []
for key in data:
if len(data[key]) == 0:
delete_list.append(key)
for key in delete_list:
data.pop(key)
data["degree"] = degree
write_json(f"lebedev_{degree:03d}.json", data)
|
nschloe/quadpy
|
tools/lebedev/import_lebedev.py
|
Python
|
mit
| 4,694
|
# -*- coding: utf-8 -*-
"""
Define some helper functions.
"""
import io
import csv
import datetime
import config
import os
def format_stocks(stocks_result):
input_stream = io.StringIO(stocks_result)
csv_f = csv.reader(input_stream)
header_str = '{:<6} {:\u3000<5} {:\u3000<5} {:<7} {:<10} {:<10}'
format_str = '{:<10} {:\u3000<5} {:\u3000<5} {:<10} {:<14} {:<10}'
i = 0
for row in csv_f:
if i == 0:
print(header_str.format(*row))
else:
print(format_str.format(*row))
i += 1
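def _example_fullwidth_padding():
    # Illustrative sketch (not part of the original module): the format specs
    # above pad CJK columns with U+3000 (ideographic space) so that mixed-width
    # text lines up; the sample value is made up.
    assert '{:\u3000<5}'.format('股票') == '股票\u3000\u3000\u3000'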
def save_csv(stocks_result, date=None):
if not date:
date = datetime.date.today().strftime('%Y-%m-%d')
filename = 'stock_{}.csv'.format(date)
_check_folder(config.GURYUN_DATA_PATH)
filepath = os.path.join(config.GURYUN_DATA_PATH, filename)
input_stream = io.StringIO(stocks_result)
csv_f = csv.reader(input_stream)
    with open(filepath, 'w', newline='') as f:
writer = csv.writer(f)
writer.writerows(csv_f)
def _check_folder(folder_path):
if not os.path.exists(folder_path):
os.makedirs(folder_path)
|
guryun/guryunSDK
|
utils.py
|
Python
|
mit
| 1,095
|
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django.db import models
from django.utils import timezone
from utils.models import CNPJField
from django.utils.translation import ugettext_lazy as _
from datetime import datetime
import re
class Contato(models.Model):
"""
    An instance of this class represents a contact.
    The '__unicode__' method returns the name.
    The 'contato_ent' method returns the contact's entities.
    The 'class Meta' orders the data by the 'nome' field.
"""
primeiro_nome = models.CharField(_(u'Primeiro nome'), max_length=100, help_text=_(u'ex. João Andrade'))
ultimo_nome = models.CharField(_(u'Último nome'), max_length=45)
email = models.CharField(_(u'E-mail'), max_length=100, blank=True, help_text=_(u'ex. joao@joao.br'))
ativo = models.BooleanField(_(u'Ativo'), default=True)
tel = models.CharField(_(u'Telefone'), max_length=100,
help_text=_(u'ex. Com. (11)2222-2222, Cel. (11)9999-9999, Fax (11)3333-3333, ...'))
documento = models.CharField(max_length=30, null=True, blank=True)
@property
def nome(self):
if self.ultimo_nome:
return '%s %s' % (self.primeiro_nome, self.ultimo_nome or '')
else:
return self.primeiro_nome
    # Returns the name.
def __unicode__(self):
return self.nome
    # Returns the contact's entities.
def contato_ent(self):
ident = self.identificacao_set.all()
ent = []
for i in ident:
if i.endereco.entidade not in ent:
ent.append(i.endereco.entidade)
l = [e.sigla for e in ent]
e = ', '.join(l)
return u'%s' % e
contato_ent.short_description = _(u'Entidade')
    # Defines the data ordering by name.
class Meta:
ordering = ('primeiro_nome', 'ultimo_nome')
class TipoDetalhe(models.Model):
nome = models.CharField(max_length=40)
def __unicode__(self):
return self.nome
class Meta:
ordering = ('nome',)
class EnderecoDetalhe(models.Model):
endereco = models.ForeignKey('identificacao.Endereco', limit_choices_to={'data_inatividade__isnull': True},
null=True, blank=True)
tipo = models.ForeignKey('identificacao.TipoDetalhe')
complemento = models.TextField()
detalhe = models.ForeignKey('identificacao.EnderecoDetalhe', verbose_name=u'ou Detalhe pai', null=True, blank=True)
ordena = models.CharField(editable=False, max_length=1000, null=True)
mostra_bayface = models.BooleanField(_(u'Mostra no bayface'), help_text=_(u''), default=False)
def save(self, *args, **kwargs):
if self.endereco is None and self.detalhe is None:
return
if self.endereco is not None and self.detalhe is not None:
return
self.ordena = self.__unicode__()
super(EnderecoDetalhe, self).save(*args, **kwargs)
def __unicode__(self):
if self.endereco_id:
return u'%s - %s' % (self.endereco, self.complemento)
else:
return u'%s - %s' % (self.detalhe, self.complemento)
def detalhes(self):
if self.endereco:
return self.complemento
else:
return u'%s - %s' % (self.detalhe.detalhes(), self.complemento)
def entidade(self):
"""
        Finds the Entidade related to this EnderecoDetalhe.
        Since the Entidade is related to the Endereco, it walks up the parent
        EnderecoDetalhe chain until an Endereco is found.
"""
retorno = ''
if self.endereco_id:
            # If there is an address, return the address's entity.
if self.endereco.entidade_id:
retorno = self.endereco.entidade.sigla
elif self.detalhe_id:
            # If it belongs to another EnderecoDetalhe, walk up to get the parent address.
retorno = self.detalhe.entidade()
return retorno
@property
def end(self):
if self.endereco:
return self.endereco
return self.detalhe.end
class Meta:
ordering = ('ordena', )
# verbose_name = u'Detalhe do endereço'
# verbose_name_plural = u'Detalhes dos endereços'
class Endereco(models.Model):
"""
    An instance of this class represents the address of an identification.
    The '__unicode__' method returns the 'rua', 'num' and 'compl' (if any) fields.
    The 'class Meta' defines the model description (singular and plural), the data ordering and the uniqueness of an
    address through the fields 'identificacao', 'rua', 'num', 'compl', 'bairro', 'cidade', 'cep', 'estado' and 'pais'.
"""
entidade = models.ForeignKey('identificacao.Entidade', verbose_name=_(u'Entidade'))
rua = models.CharField(_(u'Logradouro'), max_length=100, help_text=_(u'ex. R. Dr. Ovídio Pires de Campos'))
num = models.IntegerField(_(u'Num.'), help_text=_(u'ex. 215'), null=True, blank=True)
compl = models.CharField(_(u'Complemento'), max_length=100, blank=True,
help_text=_(u'ex. 2. andar - Prédio da PRODESP'))
bairro = models.CharField(_(u'Bairro'), max_length=50, blank=True, help_text=_(u'ex. Cerqueira César'))
cidade = models.CharField(_(u'Cidade'), max_length=50, blank=True, help_text=_(u'ex. São Paulo'))
cep = models.CharField(_(u'CEP'), max_length=8, blank=True, help_text=_(u'ex. 05403010'))
estado = models.CharField(_(u'Estado'), max_length=50, blank=True, help_text=_(u'ex. SP'))
pais = models.CharField(_(u'País'), max_length=50, blank=True, help_text=_(u'ex. Brasil'))
data_inatividade = models.DateField(_(u'Data de inatividade'), blank=True, null=True)
    # Returns the rua, num and compl (if any) fields.
def __unicode__(self):
return u'%s - %s' % (self.entidade.sigla, self.logradouro())
__unicode__.short_description = _(u'Logradouro')
def logradouro(self):
num = ', %s' % self.num if self.num else ''
compl = ', %s' % self.compl if self.compl else ''
return u'%s%s%s' % (self.rua, num, compl)
    # Defines the model description, the data ordering by city and the data uniqueness.
class Meta:
verbose_name = _(u'Endereço')
verbose_name_plural = _(u'Endereços')
ordering = ('entidade', )
unique_together = (('rua', 'num', 'compl', 'bairro', 'cidade', 'cep', 'estado', 'pais'),)
class ASN(models.Model):
"""
    Table of the Internet ASNs.
"""
numero = models.IntegerField(u'Número do AS')
entidade = models.ForeignKey('identificacao.Entidade', null=True, blank=True)
pais = models.CharField(u'País', null=True, blank=True, max_length=3)
def __unicode__(self):
if self.entidade:
return u'%s - %s' % (self.numero, self.entidade)
else:
            return u'%s' % self.numero
class Meta:
verbose_name = u'ASN'
verbose_name_plural = u'ASNs'
ordering = ('numero', )
class Entidade(models.Model):
"""
    An instance of this class represents an entity registered in the system.
    The '__unicode__' method returns the entity's acronym (sigla).
    The 'sigla_nome' method returns the entity's acronym and name.
    The 'save' method validates the CNPJ and converts all characters of the acronym to uppercase.
    The 'class Meta' defines the data ordering by acronym.
    Data uniqueness is enforced through the 'sigla' field.
"""
TERREMARK_ID = 1
entidade = models.ForeignKey('identificacao.Entidade', verbose_name=_(u'Faz parte de'), null=True, blank=True,
related_name='entidade_em')
nome = models.CharField(_(u'Nome'), max_length=255,
help_text=_(u'Razão Social (ex. Telecomunicações de São Paulo S.A.)'))
url = models.URLField(_(u'URL'), blank=True, help_text=_(u'ex. www.telefonica.com.br'))
sigla = models.CharField(_(u'Sigla'), max_length=20, help_text=_(u'Nome Fantasia (ex. TELEFÔNICA)'), unique=True)
# asn = models.IntegerField(_(u'ASN'), blank=True, null=True, help_text=_(u' '))
cnpj = CNPJField(_(u'CNPJ'), blank=True, help_text=_(u'ex. 00.000.000/0000-00'))
fisco = models.BooleanField(_(u'Fisco'), help_text=_(u'ex. Ativo no site da Receita Federal?'), default=False)
recebe_doacao = models.BooleanField(_(u'Recebe doação de equipamentos?'), default=False)
    # Returns the acronym.
def __unicode__(self):
return self.sigla
def sigla_completa(self):
if self.entidade_id:
return u'%s - %s' % (self.entidade.sigla_completa(), self.sigla)
else:
return u'%s' % self.sigla
sigla_completa.short_description = _(u'Faz parte de')
    # Returns the acronym prefixed with 4 leading spaces for each parent entity level
def sigla_tabulada(self):
if self.entidade_id:
entidade_pai = self.entidade.sigla_tabulada()
            # replace any string that does not start with spaces with four spaces
retorno = re.sub('[^\s]+.+', ' ', entidade_pai)
return u'%s%s' % (retorno, self.sigla)
else:
return u'%s' % self.sigla
sigla_tabulada.short_description = _(u'Faz parte de')
    # Returns the acronym and the name.
def sigla_nome(self):
return u'%s - %s' % (self.sigla, self.nome)
sigla_nome.short_description = _(u'Entidade')
    # Stores the CNPJ in the database with the proper punctuation and converts the acronym to uppercase.
def save(self, force_insert=False, force_update=False, using=None):
if self.cnpj and len(self.cnpj) < 18:
a = list(self.cnpj)
p = [(2, '.'), (6, '.'), (10, '/'), (15, '-')]
for i in p:
if i[1] != a[i[0]]:
a.insert(i[0], i[1])
self.cnpj = ''.join(a)
self.sigla = self.sigla.upper()
super(Entidade, self).save(force_insert, force_update)
    # Defines the data ordering by acronym.
class Meta:
ordering = ('sigla', )
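# Illustrative sketch (not part of the original module) of the punctuation
# insertion performed by Entidade.save() above on a bare 14-digit CNPJ; the
# sample number is only an example.
def _example_cnpj_formatting():
    cnpj = list(u'00000000000191')
    for pos, char in [(2, '.'), (6, '.'), (10, '/'), (15, '-')]:
        if char != cnpj[pos]:
            cnpj.insert(pos, char)
    assert u''.join(cnpj) == u'00.000.000/0001-91'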
class EntidadeHistorico(models.Model):
inicio = models.DateField()
termino = models.DateField(null=True, blank=True)
ativo = models.BooleanField(_(u'Ativo'), default=False)
obs = models.TextField(_(u'Observação'), blank=True, null=True)
entidade = models.ForeignKey('identificacao.Entidade')
tipo = models.ForeignKey('identificacao.TipoEntidade')
def __unicode__(self):
return u'%s %s %s' % (self.entidade.sigla, self.tipo.nome, self.inicio)
class Identificacao(models.Model):
"""
    An instance of this class represents the identification of a company, supplier or contact.
    The 'formata_historico' method returns the history in the dd/mm/yy hh:mm format.
    The '__unicode__' method returns the entity's acronym and the contact's name.
    The 'class Meta' defines the model description (singular and plural), the data ordering by entity and the
    data uniqueness through the contato and entidade fields.
"""
# monitor = models.ForeignKey('rede.Monitor', verbose_name=_(u'Monitor'))
# entidade = models.ForeignKey('identificacao.Entidade', null=True, blank=True)
endereco = models.ForeignKey('identificacao.Endereco', limit_choices_to={'data_inatividade__isnull': True},
verbose_name=_(u'Entidade'))
contato = models.ForeignKey('identificacao.Contato', verbose_name=_(u'Contato'))
historico = models.DateTimeField(_(u'Histórico'), default=datetime.now, editable=False)
area = models.CharField(_(u'Área'), max_length=50, blank=True, help_text=_(u'ex. Administração'))
funcao = models.CharField(_(u'Função'), max_length=50, blank=True, help_text=_(u'ex. Gerente Administrativo'))
ativo = models.BooleanField(_(u'Ativo'), default=False)
    # Defines the model description, the data ordering by entity and the data uniqueness.
class Meta:
verbose_name = _(u'Identificação')
verbose_name_plural = _(u'Identificações')
ordering = ('endereco', 'contato')
unique_together = (('endereco', 'contato'),)
    # Returns the entity's acronym and the contact's name.
def __unicode__(self):
if self.area:
return u'%s - %s - %s' % (self.endereco.entidade, self.area, self.contato.nome)
else:
return u'%s - %s' % (self.endereco.entidade, self.contato.nome)
    # Returns the formatted history.
def formata_historico(self):
        historico = timezone.localtime(self.historico) if timezone.is_aware(self.historico) else self.historico
return historico.strftime('%d/%m/%y %H:%M')
formata_historico.short_description = _(u'Histórico')
class TipoArquivoEntidade(models.Model):
nome = models.CharField(max_length=100)
def __unicode__(self):
return self.nome
class Meta:
ordering = ('nome',)
class ArquivoEntidade(models.Model):
arquivo = models.FileField(upload_to='entidade')
entidade = models.ForeignKey('identificacao.Entidade')
data = models.DateField()
tipo = models.ForeignKey('identificacao.TipoArquivoEntidade')
def __unicode__(self):
return u'%s - %s' % (self.entidade.sigla, self.arquivo.name)
class Meta:
ordering = ('tipo', '-data')
class TipoEntidade(models.Model):
nome = models.CharField(max_length=100)
def __unicode__(self):
return self.nome
class Meta:
ordering = ('nome',)
class PapelEntidade(models.Model):
data = models.DateField()
tipo = models.ForeignKey('identificacao.TipoEntidade')
entidade = models.ForeignKey('identificacao.Entidade')
def __unicode__(self):
return u'%s - %s' % (self.entidade, self.tipo)
class Agenda(models.Model):
nome = models.CharField(max_length=40)
entidades = models.ManyToManyField('identificacao.Entidade', through='Agendado')
def __unicode__(self):
return u'%s' % self.nome
class Agendado(models.Model):
agenda = models.ForeignKey('identificacao.Agenda')
entidade = models.ForeignKey('identificacao.Entidade')
ativo = models.BooleanField(default=False)
def __unicode__(self):
return u'%s - %s' % (self.agenda.nome, self.entidade.sigla)
class Acesso(models.Model):
identificacao = models.ForeignKey('identificacao.Identificacao', verbose_name=u'Identificação')
niveis = models.ManyToManyField('identificacao.NivelAcesso', verbose_name=u'Níveis de acesso', null=True,
blank=True)
liberacao = models.DateTimeField(u'Liberação', null=True, blank=True)
encerramento = models.DateTimeField(null=True, blank=True)
obs = models.TextField(null=True, blank=True)
detalhe = models.ManyToManyField('identificacao.EnderecoDetalhe', null=True, blank=True)
def __unicode__(self):
return u'%s - %s' % (self.identificacao, self.lista_niveis())
def lista_niveis(self):
lista = ', '.join([n.nome for n in self.niveis.all()])
return lista
lista_niveis.short_description = 'Nivel de acesso'
class NivelAcesso(models.Model):
nome = models.CharField(max_length=50)
explicacao = models.TextField('Explicação')
def __unicode__(self):
return u'%s' % self.nome
class Meta:
verbose_name = u'Nível de acesso'
verbose_name_plural = u'Níveis de acesso'
class Ecossistema(models.Model):
identificacao = models.ForeignKey('identificacao.Identificacao', verbose_name=u'Entidade/contato')
incentivo = models.BooleanField(u'Incentivar a dar palestra?', default=False)
monitora = models.BooleanField(u'Monitorar o convite?', default=False)
data_envio = models.DateField(u'Data de envio do e-mail', null=True, blank=True)
data_resposta = models.DateField(u'Data de resposta ao e-mail', null=True, blank=True)
dar_palestra = models.BooleanField(u'Vai dar palestra?', default=False)
palestrante = models.CharField(max_length=50, null=True, blank=True)
tema = models.CharField(max_length=50, null=True, blank=True)
temas_adicionais = models.TextField(u'Temas adicionais sugeridos', null=True, blank=True)
data_envio_postal = models.DateField(u'Data de envio do material postal', null=True, blank=True)
inscricoes_solicitadas = models.IntegerField(u'Número de inscrições solicitadas', null=True, blank=True)
inscricoes_aceitas = models.IntegerField(u'Número de inscrições aceitas', null=True, blank=True)
comentarios = models.TextField(u'Comentários', null=True, blank=True)
hotel = models.BooleanField(u'Quer hotel?', default=False)
contato_pat = models.BooleanField(u'Contato para patrocínio?', default=False)
vip = models.BooleanField(default=False)
chaser = models.CharField(max_length=40, null=True, blank=True)
vai_pat = models.BooleanField(u'Vai patrocinar?', default=False)
def __unicode__(self):
return u'%s' % self.identificacao.endereco.entidade.sigla
class Meta:
ordering = ('identificacao__endereco__entidade__sigla',)
# Class that defines permissions for the views and reports of the identificacao app
class Permission(models.Model):
class Meta:
# remove the default permissions, since this class only configures custom permissions
default_permissions = ()
permissions = (
("rel_adm_agenda", "Rel. Adm. - Agenda"), # /identificacao/agenda
("rel_adm_ecossistema", "Rel. Adm. - Ecossistema"), # /identificacao/ecossistema/par
# moved from the technical reports to the administrative reports
("rel_tec_arquivos", "Rel. Adm. - Documentos por entidade"), # /identificacao/relatorios/arquivos
)
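# Usage sketch (names assumed for illustration): once these permissions are
# migrated, a view can be guarded with a standard Django check, e.g.
#     request.user.has_perm('identificacao.rel_adm_agenda')
# before rendering the /identificacao/agenda report.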
|
ansp-2015/arquea
|
identificacao/models.py
|
Python
|
mpl-2.0
| 17,924
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SharedAccessAuthorizationRuleProperties(Model):
"""SharedAccessAuthorizationRule properties.
:param rights: The rights associated with the rule.
:type rights: list of str or :class:`AccessRights
<azure.mgmt.notificationhubs.models.AccessRights>`
"""
_attribute_map = {
'rights': {'key': 'rights', 'type': '[AccessRights]'},
}
def __init__(self, rights=None):
self.rights = rights
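# Minimal usage sketch (the rights values below are assumptions; see the
# AccessRights enum shipped with this package for the authoritative names):
#     props = SharedAccessAuthorizationRuleProperties(rights=['Listen', 'Send'])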
|
rjschwei/azure-sdk-for-python
|
azure-mgmt-notificationhubs/azure/mgmt/notificationhubs/models/shared_access_authorization_rule_properties.py
|
Python
|
mit
| 954
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.conf.urls import url
from . import views
urlpatterns = [
# URL pattern for the NotifyView
url(
regex=r'^$',
view=views.NotifyView.as_view(),
name='notify'
),
url(
regex=r'^me/$',
view=views.NotifyMeView.as_view(),
name='me'
),
]
|
TexasLAN/texaslan.org
|
texaslan/notify/urls.py
|
Python
|
mit
| 395
|
from unittest import TestCase
class TestAtributosDisponibles(TestCase):
@classmethod
def setUpClass(cls):
class Vacio:
pass
cls.fichero = Vacio()
cls.fichero.name = "/home/usuario/README.md"
def test_cambio_extension_fichero_nuevo(self):
from mzbackup.utils.pato import Pato
pato = Pato({}, {}, {'fichero': self.fichero})
pato.extension = "mkv"
self.assertEqual(str(pato), "/home/usuario/README.mkv")
def test_cambio_extension_fichero_existente(self):
from mzbackup.utils.pato import Pato
pato = Pato('cos', '13-12-11', {'fichero': None, 'base': '/opt/backup'})
pato.extension = "mkv"
self.assertEqual(str(pato), "/opt/backup/cos-13-12-11/cos.mkv")
|
VTacius/MZBackup
|
tests/utils/pato/test_atributos_disponibles.py
|
Python
|
lgpl-3.0
| 771
|
import _io
import collections
import unittest
from mock import patch, Mock, call, MagicMock, sentinel
import low_level
def mock_open(mock=None, read_data='', lines=None):
"""
A helper function to create a mock to replace the use of `open`. It works
for `open` called directly or used as a context manager.
The `mock` argument is the mock object to configure. If `None` (the
default) then a `MagicMock` will be created for you, with the API limited
to methods or attributes available on standard file handles.
`read_data` is a string for the `read` method of the file handle to return.
This is an empty string by default.
`lines` is an optional iterable of lines; if given, iterating over the file
handle yields those lines.
"""
file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO))))
if mock is None:
    mock = MagicMock(name='open', spec=open)
handle = MagicMock(spec=file_spec)
handle.write.return_value = None
handle.__enter__.return_value = handle
handle.read.return_value = read_data
if lines is not None:
handle.__iter__.return_value = iter(lines)
mock.return_value = handle
return mock
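# Usage sketch (illustrative only; 'mymodule.open' and the asserted arguments
# are assumed names, not part of this test module):
#     m = mock_open(read_data='payload')
#     with patch('mymodule.open', m, create=True):
#         run_code_under_test()   # any open() call now returns the fake handle
#     m.assert_called_once_with('some_file', 'r')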
class TestLowLevel(unittest.TestCase):
@patch('urllib.request.urlopen')
def test_do_http_get(self, urlopen_mock):
# setup
url = 'http://www.google.ca'
http_response_string = 'response'
urlopen_mock.return_value.read.return_value.decode.return_value = http_response_string
# call method-under-test
ret = low_level.do_http_get(url)
# verify
urlopen_mock.assert_called_once_with(url)
urlopen_mock.return_value.read.return_value.decode.assert_called_once_with('utf8')
self.assertEqual(ret, http_response_string)
@patch('urllib.request.urlopen')
def test_do_http_get_with_params(self, urlopen_mock):
# setup
url = 'http://www.google.ca'
http_response_string = 'response'
params = collections.OrderedDict()
params['a'] = 1
params['b'] = 2
params['c'] = 3
urlopen_mock.return_value.read.return_value.decode.return_value = http_response_string
# call method-under-test
ret = low_level.do_http_get(url, params)
# verify
urlopen_mock.assert_called_once_with(url + "?a=1&b=2&c=3")
urlopen_mock.return_value.read.return_value.decode.assert_called_once_with('utf8')
self.assertEqual(ret, http_response_string)
@patch('csv.writer')
def test_write_csv_file(self, csv_writer_mock):
rows = [['a', 'b'], ['c', 'd']]
m = mock_open()
with patch('low_level.open', m, create=True):
low_level.write_csv_file('filename', rows)
m.assert_called_with('filename', 'w')
#handle = m()
#csv_writer_mock.assert_called_once_with(handle, delimeter=',')
expected_calls = [call(['a', 'b']), call(['c', 'd'])]
writer = csv_writer_mock.return_value
self.assertEqual(writer.writerow.call_args_list, expected_calls)
@patch('csv.writer')
def test_write_csv_file_with_header(self, csv_writer_mock):
# Setup
rows = [['a', 'b'], ['c', 'd']]
header = ['head', 'er']
m = mock_open()
handle = m()
with patch('low_level.open', m, create=True):
# Call method-under-test
low_level.write_csv_file('filename', rows, header=header)
# Verification
m.assert_called_with('filename', 'w')
#csv_writer_mock.assert_called_with(handle, delimeter=',')
expected_calls = [call(['head', 'er']), call(['a', 'b']), call(['c', 'd'])]
self.assertEqual(csv_writer_mock.return_value.writerow.call_args_list, expected_calls)
@patch('csv.reader')
def test_read_csv_file(self, csv_reader_mock):
# Setup
rows = [['a', 'b'], ['c', 'd']]
m = mock_open()
handle = m()
csv_reader_mock.return_value.__iter__.return_value = iter(rows)
with patch('low_level.open', m, create=True):
# Call method-under-test
ret = low_level.read_csv_file('filename')
# Verification
m.assert_called_with('filename')
csv_reader_mock.assert_called_once_with(handle, delimiter=',')
self.assertEqual(ret, rows)
@patch('hashlib.md5')
def test_md5sum_file(self, md5_mock):
# Setup
m = mock_open(lines=['a', 'b'])
md5_instance = md5_mock.return_value
md5_instance.digest.return_value = sentinel.digest
with patch('builtins.open', m, create=True):
# Call method-under-test
digest = low_level.md5sum_file('filename')
# Verification
m.assert_called_once_with('filename')
self.assertEqual(md5_instance.update.mock_calls,
[call('a'.encode('utf8')), call('b'.encode('utf8'))])
self.assertEqual(digest, sentinel.digest)
|
dgrant/brickowl2rebrickable
|
tests/test_low_level.py
|
Python
|
gpl-2.0
| 4,886
|
#!c:\users\домашний\dropbox\pycharmprojects\github\django_test\dj_env\scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
Daneev/Django_test
|
dj_env/Scripts/django-admin.py
|
Python
|
apache-2.0
| 203
|
# ==========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==========================================================================*/
import itk
itk.auto_progress(2)
dim = 2
SOType = itk.SpatialObject[dim]
InternalImageType = itk.Image[itk.F, dim]
OutputPixelType = itk.UC
OutputImageType = itk.Image[OutputPixelType, dim]
ellipse = itk.EllipseSpatialObject[dim].New(RadiusInObjectSpace=[10, 5])
ellipse.GetObjectToParentTransform().SetOffset([20, 20])
ellipse.Update()
box = itk.BoxSpatialObject[dim].New(SizeInObjectSpace=20)
box.GetObjectToParentTransform().SetOffset([20, 40])
box.Update()
gaussian = itk.GaussianSpatialObject[dim].New(RadiusInObjectSpace=100)
gaussian.GetObjectToParentTransform().SetOffset([60, 60])
gaussian.GetObjectToParentTransform().Scale(10)
gaussian.Update()
group = itk.GroupSpatialObject[dim].New()
group.AddChild(ellipse)
group.AddChild(box)
group.AddChild(gaussian)
filter_ = itk.SpatialObjectToImageFilter[SOType, InternalImageType].New(
group, Size=[100, 100], UseObjectValue=True
)
filter_.Update()
|
thewtex/ITK
|
Modules/Core/SpatialObjects/wrapping/test/SpatialObjectTest.py
|
Python
|
apache-2.0
| 1,668
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Shape-Out - meta data functionalities"""
from __future__ import division, unicode_literals
import hashlib
import pathlib
import warnings
import h5py
import imageio
import nptdms
from dclab.rtdc_dataset import config as rt_config
from dclab.rtdc_dataset import fmt_tdms
from .util import path_to_str
from . import settings
def collect_data_tree(directories):
"""Return projects (folders) and measurements therein
This is a convenience function for the GUI
"""
if not isinstance(directories, list):
directories = [directories]
directories = list(set(directories))
projectdict = {}
treelist = []
for directory in directories:
files = find_data(directory)
cols = ["Measurement"]
for ff in files:
if not verify_dataset(ff):
# Ignore broken measurements
continue
path = path_to_str(ff.parent)
project = get_sample_name(ff)
pid = path+project
# try to find the path in pathdict
if pid in projectdict:
dirindex = projectdict[pid]
else:
treelist.append([])
dirindex = len(treelist) - 1
projectdict[pid] = dirindex
# The first element of a tree contains the measurement name
treelist[dirindex].append((project, path))
# Get data from filename
mx = get_run_index(ff)
chip_region = get_chip_region(ff)
dn = u"M{} {}".format(mx, chip_region)
if chip_region.lower() not in ["reservoir"]:
# not a reservoir measurement, so the flow rate is relevant and shown
dn += u" {:.5f} µls⁻¹".format(get_flow_rate(ff))
dn += " ({} events)".format(get_event_count(ff))
treelist[dirindex].append((dn, path_to_str(ff)))
return treelist, cols
def find_data(path):
"""Find tdms and rtdc data files in a directory"""
path = pathlib.Path(path)
def sort_path(path):
"""Sorting key for intuitive file sorting
This sorts a list of RT-DC files according to measurement number,
e.g. (M2_*.tdms is not sorted after M11_*.tdms):
/path/to/M1_*.tdms
/path/to/M2_*.tdms
/path/to/M10_*.tdms
/path/to/M11_*.tdms
Note that the measurement number of .rtdc files is extracted from
the hdf5 metadata and not from the file name.
"""
try:
# try to get measurement number as an integer
idx = get_run_index(path)
except BaseException:
# just use the given path
name = path.name
else:
# assign new "name" for sorting
name = "{:09d}_{}".format(idx, path.name)
return path.with_name(name)
tdmsfiles = fmt_tdms.get_tdms_files(path)
tdmsfiles = sorted(tdmsfiles, key=sort_path)
rtdcfiles = [r for r in path.rglob("*.rtdc") if r.is_file()]
rtdcfiles = sorted(rtdcfiles, key=sort_path)
files = [pathlib.Path(ff) for ff in rtdcfiles + tdmsfiles]
return files
def get_event_count(fname):
"""Get the number of events in a data set
Parameters
----------
fname: str
Path to an experimental data file. The file format is
determined from the file extension (tdms or rtdc).
Returns
-------
event_count: int
The number of events in the data set
Notes
-----
For tdms-based data sets, there are multiple ways of determining
the number of events, which are used in the following order
(according to which is faster):
1. The MX_log.ini file "Events" tag
2. The number of frames in the avi file
3. The tdms file (very slow, because it loads the entire tdms file)
The values obtained with this method are cached on disk to
speed up future calls with the same argument.
See Also
--------
get_event_count_cache: cached event counts from tdms/avi files
"""
fname = pathlib.Path(fname).resolve()
ext = fname.suffix
if ext == ".rtdc":
with h5py.File(path_to_str(fname), mode="r") as h5:
event_count = h5.attrs["experiment:event count"]
elif ext == ".tdms":
mdir = fname.parent
mid = fname.name.split("_")[0]
# possible data sources
logf = mdir / (mid + "_log.ini")
avif = mdir / (mid + "_imaq.avi")
if logf.exists():
# 1. The MX_log.ini file "Events" tag
with logf.open(encoding='utf-8') as fd:
logd = fd.readlines()
for l in logd:
if l.strip().startswith("Events:"):
event_count = int(l.split(":")[1])
break
elif avif.exists():
# 2. The number of frames in the avi file
event_count = get_event_count_cache(avif)
else:
# 3. Open the tdms file
event_count = get_event_count_cache(fname)
else:
raise ValueError("`fname` must be an .rtdc or .tdms file!")
return event_count
def get_event_count_cache(fname):
"""Get the number of events from a tdms or avi file
Parameters
----------
fname: str
Path to an experimental data file (tdms or avi)
Returns
-------
event_count: int
The number of events in the data set
Notes
-----
The values for a file name are cached on disk using
the file name and the first 100kB of the file as a
key.
"""
fname = pathlib.Path(fname).resolve()
ext = fname.suffix
# Generate key
with fname.open(mode="rb") as fd:
data = fd.read(100 * 1024)
strfname = str(fname).encode("zip")
fhash = hashlib.md5(data + strfname).hexdigest()
cfgec = settings.SettingsFileCache(name="shapeout_tdms_event_counts.txt")
try:
event_count = cfgec.get_int(fhash)
except KeyError:
if ext == ".avi":
with imageio.get_reader(fname) as video:
event_count = len(video)
elif ext == ".tdms":
tdmsfd = nptdms.TdmsFile(path_to_str(fname))
event_count = len(tdmsfd["Cell Track"]["time"].data)
else:
raise ValueError("unsupported file extension: {}".format(ext))
cfgec.set_int(fhash, event_count)
return event_count
def get_flow_rate(fname):
"""Get the flow rate of a data set
Parameters
----------
fname: str
Path to an experimental data file. The file format is
determined from the file extension (tdms or rtdc).
Returns
-------
flow_rate: float
The flow rate [µL/s] of the data set
"""
fname = pathlib.Path(fname).resolve()
ext = fname.suffix
if ext == ".rtdc":
with h5py.File(path_to_str(fname), mode="r") as h5:
flow_rate = h5.attrs["setup:flow rate"]
elif ext == ".tdms":
name = fname.name
path = fname.parent
mx = name.split("_")[0]
para = path / (mx + "_para.ini")
if para.exists():
camcfg = rt_config.load_from_file(path_to_str(para))
flow_rate = camcfg["general"]["flow rate [ul/s]"]
else:
# analyze the filename
warnings.warn("{}: trying to manually find flow rate.".
format(fname))
flow_rate = float(fname.name.split("ul_s")[0].split("_")[-1])
else:
raise ValueError("`fname` must be an .rtdc or .tdms file!")
return flow_rate
def get_chip_region(fname):
"""Get the chip region of a data set
Parameters
----------
fname: str
Path to an experimental data file. The file format is
determined from the file extension (tdms or rtdc).
Returns
-------
chip_region: str
The chip region ("channel" or "reservoir")
"""
fname = pathlib.Path(fname).resolve()
ext = fname.suffix
if ext == ".rtdc":
with h5py.File(path_to_str(fname), mode="r") as h5:
chip_region = h5.attrs["setup:chip region"]
elif ext == ".tdms":
name = fname.name
path = fname.parent
mx = name.split("_")[0]
para = path / (mx + "_para.ini")
if para.exists():
camcfg = rt_config.load_from_file(path_to_str(para))
chip_region = camcfg["General"]["Region"]
return chip_region.lower()
def get_run_index(fname):
fname = pathlib.Path(fname).resolve()
ext = fname.suffix
if ext == ".rtdc":
with h5py.File(path_to_str(fname), mode="r") as h5:
run_index = h5.attrs["experiment:run index"]
elif ext == ".tdms":
name = fname.name
run_index = int(name.split("_")[0].strip("Mm "))
return run_index
def get_sample_name(fname):
fname = pathlib.Path(fname).resolve()
ext = fname.suffix
if ext == ".rtdc":
with h5py.File(path_to_str(fname), mode="r") as h5:
sample = h5.attrs["experiment:sample"]
elif ext == ".tdms":
sample = fmt_tdms.get_project_name_from_path(fname)
return sample
def verify_dataset(path, verbose=False):
"""Returns `True` if the data set is complete/usable"""
path = pathlib.Path(path).resolve()
if path.suffix == ".tdms":
is_ok = True
parent = path.parent
name = path.name
mx = name.split("_")[0]
# Check if all config files are present
if ((not (parent / (mx + "_para.ini")).exists()) or
(not (parent / (mx + "_camera.ini")).exists()) or
(not path.exists())
):
if verbose:
print("config files missing")
is_ok = False
# Check if we can perform all standard file operations
for test in [get_chip_region, get_flow_rate, get_event_count]:
try:
test(path)
except:
if verbose:
print("standard file operations failed")
is_ok = False
break
elif path.suffix == ".rtdc":
try:
with h5py.File(path_to_str(path), mode="r") as h5:
for key in ["experiment:event count",
"experiment:sample",
"experiment:run index",
"imaging:pixel size",
"setup:channel width",
"setup:chip region",
"setup:flow rate",
]:
if key not in h5.attrs:
if verbose:
print("fmt_rtdc keys missing")
is_ok = False
break
else:
is_ok = True
except IOError:
if verbose:
print("data file broken")
is_ok = False
else:
if verbose:
print("unsupported format")
is_ok = False
return is_ok
|
ZellMechanik-Dresden/ShapeOut
|
shapeout/meta_tool.py
|
Python
|
gpl-2.0
| 11,078
|
# Copyright 2015 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import os
from os_win import utilsfactory
from oslo_log import log as logging
import six
from nova import exception
from nova.i18n import _LI, _LE
from nova import utils
from nova.virt.hyperv import pathutils
from nova.virt.hyperv import serialconsolehandler
LOG = logging.getLogger(__name__)
_console_handlers = {}
def instance_synchronized(func):
@functools.wraps(func)
def wrapper(self, instance_name, *args, **kwargs):
@utils.synchronized(instance_name)
def inner():
return func(self, instance_name, *args, **kwargs)
return inner()
return wrapper
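# A minimal sketch of how the decorator is applied (hypothetical method name):
#
#     class Example(object):
#         @instance_synchronized
#         def do_work(self, instance_name):
#             # concurrent calls with the same instance_name are serialized by
#             # utils.synchronized(instance_name); different names may run in parallel
#             pass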
class SerialConsoleOps(object):
def __init__(self):
self._vmutils = utilsfactory.get_vmutils()
self._pathutils = pathutils.PathUtils()
@instance_synchronized
def start_console_handler(self, instance_name):
# Cleanup existing workers.
self.stop_console_handler_unsync(instance_name)
handler = None
try:
handler = serialconsolehandler.SerialConsoleHandler(
instance_name)
handler.start()
_console_handlers[instance_name] = handler
except Exception as exc:
LOG.error(_LE('Instance %(instance_name)s serial console handler '
'could not start. Exception %(exc)s'),
{'instance_name': instance_name,
'exc': exc})
if handler:
handler.stop()
@instance_synchronized
def stop_console_handler(self, instance_name):
self.stop_console_handler_unsync(instance_name)
def stop_console_handler_unsync(self, instance_name):
handler = _console_handlers.get(instance_name)
if handler:
LOG.info(_LI("Stopping instance %(instance_name)s "
"serial console handler."),
{'instance_name': instance_name})
handler.stop()
del _console_handlers[instance_name]
@instance_synchronized
def get_serial_console(self, instance_name):
handler = _console_handlers.get(instance_name)
if not handler:
raise exception.ConsoleTypeUnavailable(console_type='serial')
return handler.get_serial_console()
@instance_synchronized
def get_console_output(self, instance_name):
console_log_paths = self._pathutils.get_vm_console_log_paths(
instance_name)
try:
log = b''
# Start with the oldest console log file.
for log_path in console_log_paths[::-1]:
if os.path.exists(log_path):
with open(log_path, 'rb') as fp:
log += fp.read()
return log
except IOError as err:
raise exception.ConsoleLogOutputException(
instance_id=instance_name, reason=six.text_type(err))
def start_console_handlers(self):
active_instances = self._vmutils.get_active_instances()
for instance_name in active_instances:
instance_path = self._pathutils.get_instance_dir(instance_name)
# Skip instances that are not created by Nova
if not os.path.exists(instance_path):
continue
self.start_console_handler(instance_name)
|
bigswitch/nova
|
nova/virt/hyperv/serialconsoleops.py
|
Python
|
apache-2.0
| 3,970
|
# Copyright 2014, Rackspace, US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest2
from openstack_dashboard.api.rest import utils
class RestUtilsTestCase(unittest2.TestCase):
def assertStatusCode(self, response, expected_code):
if response.status_code == expected_code:
return
self.fail('status code %r != %r: %s' % (response.status_code,
expected_code,
response.content))
def _construct_request(self, **args):
mock_args = {
'user.is_authenticated.return_value': True,
'is_ajax.return_value': True,
'policy.check.return_value': True,
'body': ''
}
mock_args.update(args)
return mock.Mock(**mock_args)
def test_api_success(self):
@utils.ajax()
def f(self, request):
return 'ok'
request = self._construct_request()
response = f(None, request)
request.is_authenticated.assert_called_once()
self.assertStatusCode(response, 200)
self.assertEqual(response.content, '"ok"')
def test_api_success_no_auth_ok(self):
@utils.ajax(authenticated=False)
def f(self, request):
return 'ok'
request = self._construct_request()
response = f(None, request)
request.is_authenticated.assert_not_called_once()
self.assertStatusCode(response, 200)
self.assertEqual(response.content, '"ok"')
def test_api_auth_required(self):
@utils.ajax()
def f(self, request):
return 'ok'
request = self._construct_request(**{
'user.is_authenticated.return_value': False
})
response = f(None, request)
request.is_authenticated.assert_called_once()
self.assertStatusCode(response, 401)
self.assertEqual(response.content, '"not logged in"')
def test_api_success_204(self):
@utils.ajax()
def f(self, request):
pass
request = self._construct_request()
response = f(None, request)
self.assertStatusCode(response, 204)
self.assertEqual(response.content, '')
def test_api_error(self):
@utils.ajax()
def f(self, request):
raise utils.AjaxError(500, 'b0rk')
request = self._construct_request()
response = f(None, request)
self.assertStatusCode(response, 500)
self.assertEqual(response.content, '"b0rk"')
def test_api_malformed_json(self):
@utils.ajax()
def f(self, request):
assert False, "don't get here"
request = self._construct_request(**{'body': 'spam'})
response = f(None, request)
self.assertStatusCode(response, 400)
self.assertEqual(response.content, '"malformed JSON request: No JSON '
'object could be decoded"')
def test_api_not_found(self):
@utils.ajax()
def f(self, request):
raise utils.AjaxError(404, 'b0rk')
request = self._construct_request()
response = f(None, request)
self.assertStatusCode(response, 404)
self.assertEqual(response.content, '"b0rk"')
def test_post_with_no_data(self):
@utils.ajax(method='POST')
def f(self, request):
assert False, "don't get here"
request = self._construct_request()
response = f(None, request)
self.assertStatusCode(response, 400)
self.assertEqual(response.content, '"POST requires JSON body"')
def test_post_with_no_post_action(self):
self._test_bad_post('data')
def test_post_with_no_post_data(self):
self._test_bad_post('action')
def _test_bad_post(self, arg):
@utils.ajax(method='POST')
def f(self, request):
assert False, "don't get here"
request = self._construct_request(**{'body': '{"%s": true}' % arg})
response = f(None, request)
self.assertStatusCode(response, 400)
self.assertEqual(response.content, '"POST JSON missing action/data"')
def test_valid_post(self):
@utils.ajax(method='POST')
def f(self, request):
return 'OK'
request = self._construct_request(**{'body': '''
{"action": true, "data": true}
'''})
response = f(None, request)
self.assertStatusCode(response, 200)
self.assertEqual(response.content, '"OK"')
def test_put_with_no_data(self):
@utils.ajax(method='PUT')
def f(self, request):
assert False, "don't get here"
request = self._construct_request()
response = f(None, request)
self.assertStatusCode(response, 400)
self.assertEqual(response.content, '"PUT requires JSON body"')
def test_valid_put(self):
@utils.ajax(method='PUT')
def f(self, request):
return 'OK'
request = self._construct_request(**{'body': '''
{"current": true, "update": true}
'''})
response = f(None, request)
self.assertStatusCode(response, 200)
self.assertEqual(response.content, '"OK"')
def test_api_created_response(self):
@utils.ajax()
def f(self, request):
return utils.CreatedResponse('/api/spam/spam123')
request = self._construct_request()
response = f(None, request)
request.is_authenticated.assert_called_once()
self.assertStatusCode(response, 201)
self.assertEqual(response['location'], '/api/spam/spam123')
self.assertEqual(response.content, '')
def test_api_created_response_content(self):
@utils.ajax()
def f(self, request):
return utils.CreatedResponse('/api/spam/spam123', 'spam!')
request = self._construct_request()
response = f(None, request)
request.is_authenticated.assert_called_once()
self.assertStatusCode(response, 201)
self.assertEqual(response['location'], '/api/spam/spam123')
self.assertEqual(response.content, '"spam!"')
|
AlexOugh/horizon
|
openstack_dashboard/test/api_tests/rest_util_tests.py
|
Python
|
apache-2.0
| 6,677
|
# -*- coding: utf-8 -*-
# Copyright 2014 Leonidas Poulopoulos
# Copyright 2014 Costas Drogos
# Copyright 2014 Stavros Kroustouris
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from bs4 import BeautifulSoup
import urllib
from datetime import timedelta
from django.utils import timezone
from django.core.management.base import NoArgsCommand
from django.core.cache import cache
from django.conf import settings
from django.contrib.auth.models import User
from muparse.models import *
from utils import get_v2_nodes, get_v1_nodes
class Command(NoArgsCommand):
def parseUrlSoup_v1(self, baseUrl, urlPage):
serverListPage = urllib.urlopen("%s/%s" % (baseUrl, urlPage))
htmlText = serverListPage.read()
serverListPage.close()
return BeautifulSoup(htmlText)
def parseUrlSoup_v2(self, baseUrl, urlPage):
if urlPage:
serverListPage = urllib.urlopen('%s/%s' % (baseUrl, urlPage))
else:
serverListPage = urllib.urlopen('%s' % (baseUrl))
htmlText = serverListPage.read()
serverListPage.close()
return BeautifulSoup(htmlText)
def delete_garbage(self):
self.stdout.write('Deleting junk from %s day(s) ago.\n' % settings.DATA_EXPIRES)
date_N_days_ago = timezone.now() - timedelta(days=int(settings.DATA_EXPIRES))
NodeGroup.objects.filter(updated__lt=date_N_days_ago).delete()
GraphCategory.objects.filter(updated__lt=date_N_days_ago).delete()
Node.objects.filter(updated__lt=date_N_days_ago).delete()
Graph.objects.filter(updated__lt=date_N_days_ago).delete()
NodeGraphs.objects.filter(updated__lt=date_N_days_ago).delete()
# delete cache
for u in User.objects.all():
cache.delete('user_%s_tree' % (u.pk))
cache.delete('user_%s_tree_cat' % (u.pk))
self.stdout.write('Done...\n')
def parse_v1(self):
mnodes = get_v1_nodes()
for mnode in mnodes:
mnode_dict = mnode[1]
baseUrl = mnode_dict['url']
cgiPath = mnode_dict['cgi_path']
soup = self.parseUrlSoup_v1(baseUrl, "index.html")
homePage = soup.find_all('span', attrs={'class': 'domain'})
for nodeGroup in homePage:
ng_url = "%s/%s" % (baseUrl, nodeGroup.a.get('href'))
ng, created = NodeGroup.objects.get_or_create(
name="%s@%s" % (
nodeGroup.a.text,
mnode_dict['name']
),
url=ng_url
)
self.stdout.write('Added nodeGroup: %s\n' % ng.name.encode('utf8'))
# print i.a.text, i.a.get('href')
for node in nodeGroup.findParent('li').findChildren(
'span',
attrs={'class': 'host'}
):
#print node.a.text, node.a.get('href')
n_url = "%s/%s" % (baseUrl, node.a.get('href'))
n, created = Node.objects.get_or_create(
name=node.a.text,
url=n_url,
group=ng
)
self.stdout.write('-Added node: %s\n' % n.name.encode('utf8'))
nodeSoup = self.parseUrlSoup_v1(baseUrl, node.a.get('href'))
metricsTable = nodeSoup.find_all(
'td',
{'class': 'graphbox'}
)
for metricGroup in metricsTable:
metricCategory = metricGroup.get('id')
graphCategories = metricGroup.find_all(
'div',
{'class': 'lighttext'}
)
gc, created = GraphCategory.objects.get_or_create(
name=metricCategory
)
self.stdout.write(
'-Added Category: %s\n' % gc.name.encode('utf8')
)
for graphCategory in graphCategories:
pageUrl = "%s/%s/%s/%s" % (
baseUrl,
nodeGroup.a.text,
n.name,
graphCategory.a.get('href')
)
self.stdout.write(
'-Page URL: %s\n' % pageUrl.encode('utf8')
)
t = graphCategory.findParent('tr').find_next_sibling('tr')
g, created = Graph.objects.get_or_create(
name=graphCategory.a.text,
slug=t.img.get('src').split('/')[-1:][0].split('-')[0],
category=gc
)
self.stdout.write(
'--Added Graph: %s\n' % g.name.encode('utf8')
)
imageUrl = "%s/%s%s/%s/%s" % (
baseUrl,
cgiPath,
nodeGroup.a.text,
n.name,
g.slug
)
nodegraph, created = NodeGraphs.objects.get_or_create(
node=n,
graph=g,
baseurl=imageUrl,
pageurl=pageUrl
)
self.stdout.write('--Added NodeGraph: %s\n' % nodegraph)
def parse_v2(self):
MNODES = get_v2_nodes()
for mnode in MNODES:
mnode_dict = mnode[1]
baseUrl = mnode_dict['url']
soup = self.parseUrlSoup_v2(baseUrl, "index.html")
homePage = soup.find_all('span', attrs={'class': 'domain'})
for nodeGroup in homePage:
ng_url = '%s/%s' % (baseUrl, nodeGroup.a.get('href'))
ng, created = NodeGroup.objects.get_or_create(name='%s@%s' % (nodeGroup.a.text, mnode_dict['name']), url=ng_url)
self.stdout.write('NodeCategory: %s\n' % ng.name)
nodegroupSoup = self.parseUrlSoup_v2(ng_url, "")
nodes = nodegroupSoup.find('div', attrs={'id': 'content'})\
.find('ul', recursive=False).find_all('li', recursive=False)
for node in nodes:
node_name = node.find('span', attrs={'class': 'domain'}, recursive=False)
n_url = '%s/%s/%s' % (baseUrl, nodeGroup.a.text, node_name.a.get('href'))
n, created = Node.objects.get_or_create(name=node.a.text, url=n_url, group=ng)
self.stdout.write('-Node: %s\n' % (n.name))
services_categories = node.find_all('ul', recursive=False)
# get inside node
for service_category in services_categories:
category_group = service_category.find_all('li')
# a node has graph categories (class=host) that have services (class=service)
last_cat = None
for category in category_group:
cat = category.find('span', attrs={'class': 'host'})
if cat:
gc, created = GraphCategory.objects.get_or_create(name=cat.text)
self.stdout.write('--GraphCategory: %s\n' % gc.name)
last_cat = gc
else:
slug = category.a.get('href').replace(node_name.text, '').replace('.html', '')[1:].replace('/index', '')
g, created = Graph.objects.get_or_create(name=category.a.text.partition('for')[0], slug=slug.partition('/')[0], category=last_cat)
self.stdout.write('---Graph: %s\n' % (g.name))
services = category.findChildren('span', attrs={'class': 'service'})
for service in services:
if 'for' not in service.a.text:
graph_base_url = baseUrl + '/munin-cgi/munin-cgi-graph/' + nodeGroup.a.text + '/' + service.a.get('href').replace('.html', '').replace('/index', '')
pageurl = baseUrl + '/' + nodeGroup.a.text + '/' + service.a.get('href')
nodegraph, created = NodeGraphs.objects.get_or_create(node=n, graph=g, baseurl=graph_base_url, pageurl=pageurl)
# this is the link for the four final graphs (day, week, month, year)
self.stdout.write('---Service: %s, %s\n' % (service.a.text, pageurl))
def handle_noargs(self, **options):
self.delete_garbage()
nodes = {}
self.parse_v1()
self.parse_v2()
|
grnet/mupy
|
muparse/management/commands/parse_munin.py
|
Python
|
gpl-3.0
| 9,781
|
import pytest
from swimlane import exceptions
@pytest.fixture(autouse=True, scope='module')
def my_fixture(helpers):
# setup stuff
defaultApp = 'selection fields'
pytest.swimlane_instance = helpers.swimlane_instance
pytest.py_ver = helpers.py_ver
pytest.app, pytest.appid = helpers.findCreateApp(defaultApp)
pytest.single_select_values = ['four', 'three', 'two', 'one']
yield
# teardown stuff
helpers.cleanupData()
class TestRequiredSelectionField:
def test_required_field(helpers):
theRecord = pytest.app.records.create(
**{"Required Single-select": "a"})
assert theRecord["Required Single-select"] == "a"
def test_required_field_not_set(helpers):
with pytest.raises(exceptions.ValidationError) as excinfo:
pytest.app.records.create(**{"Single-select": "two"})
assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: Required field "Required Single-select" is not set' % pytest.app.acronym
def test_required_field_not_set_on_save(helpers):
theRecord = pytest.app.records.create(
**{"Required Single-select": "a"})
theRecord["Required Single-select"] = None
with pytest.raises(exceptions.ValidationError) as excinfo:
theRecord.save()
assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: Required field "Required Single-select" is not set' % theRecord.tracking_id
class TestSingleSelectField:
def test_single_select_field(helpers):
theRecord = pytest.app.records.create(
**{"Required Single-select": "a", "Single-select": "two"})
assert theRecord["Single-select"] == "two"
def test_single_select_field_on_save(helpers):
theRecord = pytest.app.records.create(
**{"Required Single-select": "a"})
theRecord["Single-select"] = "two"
theRecord.save()
def test_single_select_field_multiple(helpers):
with pytest.raises(exceptions.ValidationError) as excinfo:
pytest.app.records.create(
**{"Required Single-select": "a", "Single-select": ["two", "three"]})
assert str(excinfo.value) == "Validation failed for <Record: {} - New>. Reason: Field 'Single-select' expects one of '{}', got 'list' instead".format(
pytest.app.acronym, ("str", "basestring")[pytest.py_ver() == 2])
def test_single_select_field_multiple_on_save(helpers):
theRecord = pytest.app.records.create(
**{"Required Single-select": "a"})
with pytest.raises(exceptions.ValidationError) as excinfo:
theRecord["Single-select"] = ["two", "three"]
assert str(excinfo.value) == "Validation failed for <Record: {}>. Reason: Field 'Single-select' expects one of '{}', got 'list' instead".format(
theRecord.tracking_id, ("str", "basestring")[pytest.py_ver() == 2])
def test_single_select_field_empty(helpers):
with pytest.raises(exceptions.ValidationError) as excinfo:
pytest.app.records.create(
**{"Required Single-select": "a", "Single-select": ""})
assert str(excinfo.value) == 'Validation failed for <Record: {} - New>. Reason: Field "Single-select" invalid value "". Valid options: {}'.format(
pytest.app.acronym, ', '.join(pytest.single_select_values[::(-1, 1)[pytest.py_ver() == 2]]))
def test_single_select_field_empty_on_save(helpers):
theRecord = pytest.app.records.create(
**{"Required Single-select": "a"})
with pytest.raises(exceptions.ValidationError) as excinfo:
theRecord["Single-select"] = ""
assert str(excinfo.value) == 'Validation failed for <Record: {}>. Reason: Field "Single-select" invalid value "". Valid options: {}'.format(
theRecord.tracking_id, ', '.join(pytest.single_select_values[::(-1, 1)[pytest.py_ver() == 2]]))
def test_single_select_field_none(helpers):
theRecord = pytest.app.records.create(
**{"Required Single-select": "a", "Single-select": None})
assert theRecord["Single-select"] == None
def test_single_select_field_none_on_save(helpers):
theRecord = pytest.app.records.create(
**{"Required Single-select": "a"})
theRecord["Single-select"] = None
theRecord.save()
def test_single_select_field_case_sensitive(helpers):
with pytest.raises(exceptions.ValidationError) as excinfo:
pytest.app.records.create(
**{"Required Single-select": "a", "Single-select": "ONE"})
assert str(excinfo.value) == 'Validation failed for <Record: {} - New>. Reason: Field "Single-select" invalid value "ONE". Valid options: {}'.format(
pytest.app.acronym, ', '.join(pytest.single_select_values[::(-1, 1)[pytest.py_ver() == 2]]))
def test_single_select_field_case_sensitive_on_save(helpers):
theRecord = pytest.app.records.create(
**{"Required Single-select": "a"})
with pytest.raises(exceptions.ValidationError) as excinfo:
theRecord["Single-select"] = "ONE"
assert str(excinfo.value) == 'Validation failed for <Record: {}>. Reason: Field "Single-select" invalid value "ONE". Valid options: {}'.format(
theRecord.tracking_id, ', '.join(pytest.single_select_values[::(-1, 1)[pytest.py_ver() == 2]]))
def test_single_select_field_int(helpers):
with pytest.raises(exceptions.ValidationError) as excinfo:
pytest.app.records.create(
**{"Required Single-select": "a", "Single-select": 1})
assert str(excinfo.value) == "Validation failed for <Record: {} - New>. Reason: Field 'Single-select' expects one of '{}', got 'int' instead".format(
pytest.app.acronym, ("str", "basestring")[pytest.py_ver() == 2])
def test_single_select_field_int_on_save(helpers):
theRecord = pytest.app.records.create(
**{"Required Single-select": "a"})
with pytest.raises(exceptions.ValidationError) as excinfo:
theRecord["Single-select"] = 123
assert str(excinfo.value) == "Validation failed for <Record: {}>. Reason: Field 'Single-select' expects one of '{}', got 'int' instead".format(
theRecord.tracking_id, ("str", "basestring")[pytest.py_ver() == 2])
class TestMultiSelectField:
def test_multi_select_field(helpers):
theRecord = pytest.app.records.create(
**{"Required Single-select": "a", "Multi-select": ["first", "fourth"]})
assert list(theRecord["Multi-select"]) == ["first", "fourth"]
def test_multi_select_field_on_save(helpers):
theRecord = pytest.app.records.create(
**{"Required Single-select": "a"})
theRecord["Multi-select"] = ["first", "fourth"]
theRecord.save()
@pytest.mark.xfail(reason="SPT-6356: Should we turn a string into a list??")
def test_multi_select_field_single_value(helpers):
theRecord = pytest.app.records.create(
**{"Required Single-select": "a", "Multi-select": "first"})
assert list(theRecord["Multi-select"]) == ["first"]
@pytest.mark.xfail(reason="SPT-6356: Should we turn a string into a list??")
def test_multi_select_field_single_value_on_save(helpers):
theRecord = pytest.app.records.create(
**{"Required Single-select": "a"})
theRecord["Multi-select"] = "first"
theRecord.save()
def test_multi_select_field_deselect_value_on_save(helpers):
theRecord = pytest.app.records.create(
**{"Required Single-select": "a", "Multi-select": ["first", "fourth"]})
theRecord["Multi-select"].deselect("fourth")
theRecord.save()
assert list(theRecord["Multi-select"]) == ["first"]
def test_multi_select_field_deselect_unused_value_on_save(helpers):
theRecord = pytest.app.records.create(
**{"Required Single-select": "a", "Multi-select": ["first", "fourth"]})
with pytest.raises(KeyError) as excinfo:
theRecord["Multi-select"].deselect("third")
assert str(excinfo.value) == "'third'"
def test_multi_select_field_select_unused_value_on_save(helpers):
theRecord = pytest.app.records.create(
**{"Required Single-select": "a", "Multi-select": ["first", "second"]})
theRecord["Multi-select"].select("fourth")
theRecord.save()
assert list(theRecord["Multi-select"]) == ["first", "fourth", "second"]
def test_multi_select_field_select_used_value_on_save(helpers):
theRecord = pytest.app.records.create(
**{"Required Single-select": "a", "Multi-select": ["first", "second"]})
theRecord["Multi-select"].select("second")
theRecord.save()
assert list(theRecord["Multi-select"]) == ["first", "second"]
def test_multi_select_field_select_value_int_on_save(helpers):
theRecord = pytest.app.records.create(
**{"Required Single-select": "a", "Multi-select": ["first", "second"]})
with pytest.raises(exceptions.ValidationError) as excinfo:
theRecord["Multi-select"].select(2)
assert str(excinfo.value) == "Validation failed for <Record: {}>. Reason: Field 'Multi-select' expects one of '{}', got 'int' instead".format(
theRecord.tracking_id, ("str", "basestring")[pytest.py_ver() == 2])
def test_multi_select_field_deselect_value_int_on_save(helpers):
theRecord = pytest.app.records.create(
**{"Required Single-select": "a", "Multi-select": ["first", "second"]})
with pytest.raises(KeyError) as excinfo:
theRecord["Multi-select"].deselect(2)
assert str(excinfo.value) == "2"
class TestReadOnlySelectField:
def test_read_only_field(helpers):
with pytest.raises(exceptions.ValidationError) as excinfo:
pytest.app.records.create(
**{"Required Single-select": "a", "Read-only Single-select": "aa"})
assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: Cannot set readonly field \'Read-only Single-select\'' % pytest.app.acronym
def test_read_only_field_on_save(helpers):
theRecord = pytest.app.records.create(
**{"Required Single-select": "a"})
with pytest.raises(exceptions.ValidationError) as excinfo:
theRecord["Read-only Single-select"] = "aa"
assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: Cannot set readonly field \'Read-only Single-select\'' % theRecord.tracking_id
class TestDefaultSelectField:
def test_default_value_select_field(helpers):
theRecord = pytest.app.records.create(
**{"Required Single-select": "a"})
assert theRecord["Default Value Single-select"] == "y"
def test_default_value_select_field_set_on_create(helpers):
theRecord = pytest.app.records.create(
**{"Required Single-select": "a", "Default Value Single-select": "z"})
assert theRecord["Default Value Single-select"] == "z"
def test_default_value_select_field_on_save(helpers):
theRecord = pytest.app.records.create(
**{"Required Single-select": "a"})
theRecord["Default Value Single-select"] = "x"
theRecord.save()
assert theRecord["Default Value Single-select"] == "x"
class TestDefaultMultiSelectField:
def test_default_value_multi_select_field(helpers):
theRecord = pytest.app.records.create(
**{"Required Single-select": "a"})
assert list(theRecord["Default Value Multi-select"]
) == ["Adam", "Charlie"]
def test_default_value_multi_select_field_set_on_create(helpers):
theRecord = pytest.app.records.create(
**{"Required Single-select": "a", "Default Value Multi-select": ["Brad"]})
assert list(theRecord["Default Value Multi-select"]) == ["Brad"]
def test_default_value_multi_select_field_on_save(helpers):
theRecord = pytest.app.records.create(
**{"Required Single-select": "a"})
theRecord["Default Value Multi-select"] = ["Davis"]
theRecord.save()
assert list(theRecord["Default Value Multi-select"]) == ["Davis"]
|
Swimlane/sw-python-client
|
functional_tests/driver_tests/test_selection_fields.py
|
Python
|
mit
| 12,296
|
# Copyright (C) 2008-2010, 2013-2014 Rocky Bernstein <rocky@gnu.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Debugger packaging information"""
# To the extent possible we make this file look more like a
# configuration file rather than code like setup.py. I find putting
# configuration stuff in the middle of a function call in setup.py,
# which for example requires commas in between parameters, is a little
# less elegant than having it here with reduced code, albeit there
# still is some room for improvement.
# Things that change more often go here.
copyright = '''Copyright (C) 2008-2010, 2013 Rocky Bernstein <rocky@gnu.org>.'''
classifiers = ['Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Debuggers',
'Topic :: Software Development :: Libraries :: Python Modules',
]
# The rest in alphabetic order
author = "Rocky Bernstein"
author_email = "rocky@gnu.org"
ftp_url = None
install_requires = ['columnize >= 0.3.4',
'import_relative >= 0.2.3',
'pyficache >= 0.2.3',
'pygments',
'tracer >= 0.3.2']
license = 'GPL'
mailing_list = 'python-debugger@googlegroups.com'
modname = 'trepan'
namespace_packages = [
'trepan',
'trepan.bwprocessor',
'trepan.interfaces',
'trepan.inout',
'trepan.lib',
'trepan.processor',
'trepan.processor.command',
# 'trepan.processor.command.ipython_magic',
'trepan.processor.command.info_subcmd',
'trepan.processor.command.set_subcmd',
'trepan.processor.command.show_subcmd'
]
packages = namespace_packages
py_modules = None
short_desc = 'Modular Python Debugger'
import os
import os.path, sys
def get_srcdir():
"""Get directory of caller as an absolute file name. *level* is
the number of frames to look back. So for import file which is
really doing work on behalf of *its* caller, we go back 2.
NB: f_code.co_filenames and thus this code kind of broken for
zip'ed eggs circa Jan 2009
"""
caller = sys._getframe(1)
filename = caller.f_code.co_filename
filename = os.path.normcase(os.path.dirname(os.path.abspath(filename)))
return os.path.realpath(filename)
# VERSION.py sets variable VERSION.
execfile(os.path.join(get_srcdir(), 'trepan', 'VERSION.py'))
version = VERSION
web = 'http://code.google.com/p/pydbgr/'
# tracebacks in zip files are funky and not debuggable
zip_safe = False
def read(*rnames):
return open(os.path.join(os.path.dirname(__file__), *rnames)).read()
long_description = ( read("README.txt") + '\n\n' + read("NEWS") )
|
kamawanu/pydbgr
|
__pkginfo__.py
|
Python
|
gpl-3.0
| 3,628
|
from sandbox.dalz.data import ArticleCommentCountFileData, ArticlePublicationDateFileData, ArticleAuthorFileData, \
ArticleWordCountFileData, CommentAuthorCommentCountFilesDatas, AuthorArticleCountFilesData, \
AuthorArticlesCommentsCountAverageFilesData, AuthorArticlesWordsCountAverageFilesData, \
ArticlePublicationHourFileData, ArticlePatriceCommentCountFileData
from tde.Implode import Implode
class ArticleImplode(Implode):
_name = 'Articles'
_data_classes = [ArticleWordCountFileData,
ArticleCommentCountFileData,
ArticlePublicationDateFileData,
ArticlePublicationHourFileData,
ArticleAuthorFileData,
ArticlePatriceCommentCountFileData]
class AuthorImplode(Implode):
_name = 'Authors'
_data_classes = [AuthorArticleCountFilesData,
AuthorArticlesCommentsCountAverageFilesData,
AuthorArticlesWordsCountAverageFilesData]
|
buxx/TextDataExtractor
|
sandbox/dalz/implode.py
|
Python
|
gpl-2.0
| 1,003
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Connector for longurl (URL shortener)
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
import urllib
DEBUG = config.get_setting("debug")
def get_server_list():
servers =[]
data = scrapertools.downloadpage("http://longurl.org/services")
data = scrapertools.unescape(data)
data = scrapertools.get_match(data,'<ol>(.*?)</ol>')
patron='<li>(.*?)</li>'
matches = re.compile(patron,re.DOTALL).findall(data)
# manually add a few servers that do not appear on the web page
servers.append("sh.st")
for server in matches:
servers.append(server)
return servers
def get_long_urls(data):
logger.info("[longurl.py] get_long_urls ")
patron = '<a href="http://([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for short_url in matches:
if short_url.startswith(tuple(get_server_list())):
short_url="http://"+short_url
logger.info(short_url)
longurl_data = scrapertools.downloadpage("http://longurl.org/expand?url="+urllib.quote_plus(short_url))
if DEBUG: logger.info(longurl_data)
longurl_data = scrapertools.get_match(longurl_data,'<dt>Long URL:</dt>(.*?)</dd>')
long_url = scrapertools.get_match(longurl_data,'<a href="(.*?)">')
if (long_url<> ""):data=data.replace(short_url,long_url)
return data
def test():
location = get_long_urls("http://sh.st/saBL8")
ok = ("meuvideos.com" in location)
print "Funciona:",ok
return ok
|
prds21/repository-barrialTV
|
servers/longurl.py
|
Python
|
gpl-2.0
| 1,796
|
"""Base email backend class."""
class BaseEmailBackend(object):
"""
Base class for email backend implementations.
Subclasses must at least overwrite send_messages().
open() and close() can be called indirectly by using a backend object as a
context manager:
with backend as connection:
# do something with connection
pass
"""
def __init__(self, fail_silently=False, **kwargs):
self.fail_silently = fail_silently
def open(self):
"""Open a network connection.
This method can be overwritten by backend implementations to
open a network connection.
It's up to the backend implementation to track the status of
a network connection if it's needed by the backend.
This method can be called by applications to force a single
network connection to be used when sending mails. See the
send_messages() method of the SMTP backend for a reference
implementation.
The default implementation does nothing.
"""
pass
def close(self):
"""Close a network connection."""
pass
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def send_messages(self, email_messages):
"""
Sends one or more EmailMessage objects and returns the number of email
messages sent.
"""
raise NotImplementedError('subclasses of BaseEmailBackend must override send_messages() method')
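# A minimal sketch of a conforming subclass (illustrative only, not part of Django):
#
#     class ConsoleBackend(BaseEmailBackend):
#         def send_messages(self, email_messages):
#             # "send" by printing the subjects and report how many were handled
#             for message in email_messages:
#                 print(message.subject)
#             return len(email_messages)
#
# Used as a context manager, open() and close() are invoked automatically:
#
#     with ConsoleBackend() as connection:
#         connection.send_messages(messages)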
|
diego-d5000/MisValesMd
|
env/lib/python2.7/site-packages/django/core/mail/backends/base.py
|
Python
|
mit
| 1,627
|
import utcdatetime
from nose.tools import assert_equal
import datetime
TEST_CASES = [
(
utcdatetime.utcdatetime(2015, 5, 11, 16, 43, 10, 45),
datetime.date(2015, 5, 11)
),
]
def test_date_method():
for utc_dt, expected_date in TEST_CASES:
yield _assert_date_equals, utc_dt, expected_date
def _assert_date_equals(utc_dt, expected_date):
got = utc_dt.date()
assert_equal(expected_date, got)
|
paulfurley/python-utcdatetime
|
utcdatetime/tests/test_date.py
|
Python
|
mit
| 442
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# rcpy documentation build configuration file, created by
# sphinx-quickstart on Sat May 20 09:36:24 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.mathjax']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'rcpy'
copyright = '2017, Mauricio C. de Oliveira'
author = 'Mauricio C. de Oliveira'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.5'
# The full version, including alpha/beta/rc tags.
release = '0.5'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'rcpydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'rcpy.tex', 'Python 3 Interface for the Robotics Cape on the Beaglebone Black and the Beaglebone Blue',
'Mauricio C. de Oliveira', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'rcpy', 'rcpy Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'rcpy', 'rcpy Documentation',
author, 'rcpy', 'One line description of project.',
'Miscellaneous'),
]
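With this conf.py in the doc/ directory, the HTML documentation is normally built with sphinx-build. A sketch of the equivalent programmatic call, assuming Sphinx >= 1.7 and that it is run from the repository root (the output path is illustrative):
from sphinx.cmd.build import build_main

# Equivalent to: sphinx-build -b html doc doc/_build/html
build_main(["-b", "html", "doc", "doc/_build/html"])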
|
mcdeoliveira/rcpy
|
doc/conf.py
|
Python
|
mit
| 4,832
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_config import cfg
DashboardGroup = [
cfg.StrOpt('dashboard_url',
default='http://localhost/',
help="Where the dashboard can be found"),
cfg.StrOpt('login_url',
default='http://localhost/auth/login/',
help="Login page for the dashboard"),
cfg.StrOpt('help_url',
default='http://docs.openstack.org/',
help="Dashboard help page url"),
]
IdentityGroup = [
cfg.StrOpt('username',
default='demo',
help="Username to use for non-admin API requests."),
cfg.StrOpt('password',
default='secretadmin',
help="API key to use when authenticating.",
secret=True),
cfg.StrOpt('admin_username',
default='admin',
help="Administrative Username to use for admin API "
"requests."),
cfg.StrOpt('admin_password',
default='secretadmin',
help="API key to use when authenticating as admin.",
secret=True),
]
ImageGroup = [
cfg.StrOpt('http_image',
default='http://download.cirros-cloud.net/0.3.1/'
'cirros-0.3.1-x86_64-uec.tar.gz',
help='http accessible image'),
]
AvailableServiceGroup = [
cfg.BoolOpt('sahara',
default=True,
help='Whether Sahara is expected to be available')
]
SeleniumGroup = [
cfg.IntOpt('implicit_wait',
default=10,
help="Implicit wait timeout in seconds"),
cfg.IntOpt('explicit_wait',
default=300,
help="Explicit wait timeout in seconds"),
cfg.IntOpt('page_timeout',
default=30,
help="Page load timeout in seconds"),
]
ScenarioGroup = [
cfg.StrOpt('ssh_user',
default='cirros',
help='ssh username for image file'),
]
def _get_config_files():
conf_dir = os.path.join(
os.path.abspath(os.path.dirname(os.path.dirname(__file__))),
'integration_tests')
conf_file = os.environ.get('HORIZON_INTEGRATION_TESTS_CONFIG_FILE',
"%s/horizon.conf" % conf_dir)
return [conf_file]
def get_config():
cfg.CONF([], project='horizon', default_config_files=_get_config_files())
cfg.CONF.register_opts(DashboardGroup, group="dashboard")
cfg.CONF.register_opts(IdentityGroup, group="identity")
cfg.CONF.register_opts(AvailableServiceGroup, group="service_available")
cfg.CONF.register_opts(SeleniumGroup, group="selenium")
cfg.CONF.register_opts(ImageGroup, group="image")
cfg.CONF.register_opts(ScenarioGroup, group="scenario")
return cfg.CONF
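Callers retrieve the registered options through get_config(); the group names passed to register_opts() become attributes on the returned CONF object, with each option reachable by name. A minimal usage sketch, assuming the module import path matches the file location above (the printed values are just the defaults defined in this file):
from openstack_dashboard.test.integration_tests import config

conf = config.get_config()
print(conf.dashboard.dashboard_url)   # default: http://localhost/
print(conf.identity.username)         # default: demo
print(conf.selenium.page_timeout)     # default: 30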
|
FNST-OpenStack/horizon
|
openstack_dashboard/test/integration_tests/config.py
|
Python
|
apache-2.0
| 3,298
|