code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
from setuptools import setup, find_packages

# Packaging metadata for the pulp_ostree "common" distribution: code
# shared between the pulp-ostree server plugins and admin extensions.
setup(
    name='pulp_ostree_common',
    version='1.0.0a1',                   # pre-release: 1.0.0 alpha 1
    packages=find_packages(),            # auto-discover every package below this dir
    url='http://www.pulpproject.org',
    license='GPLv2+',
    author='Pulp Team',
    author_email='pulp-list@redhat.com',
    description='common code for pulp\'s ostree support',
)
| dkliban/pulp_ostree | common/setup.py | Python | gpl-2.0 | 321 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (C) 2005-2008 Francisco José Rodríguez Bogado, #
# Diego Muñoz Escalante. #
# (pacoqueen@users.sourceforge.net, escalant3@users.sourceforge.net) #
# #
# This file is part of GeotexInn. #
# #
# GeotexInn is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
# GeotexInn is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with GeotexInn; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA #
###############################################################################
###################################################################
## trazabilidad_articulos.py - Trazabilidad de rollo, bala o bigbag
###################################################################
## NOTAS:
##
## ----------------------------------------------------------------
##
###################################################################
## Changelog:
## 24 de mayo de 2006 -> Inicio
## 24 de mayo de 2006 -> It's alive!
###################################################################
## DONE: Imprimir toda la información en PDF sería lo suyo.
###################################################################
from ventana import Ventana
import utils
import pygtk
pygtk.require('2.0')
import gtk, gtk.glade, time, sqlobject
import sys, os
try:
import pclases
except ImportError:
sys.path.append(os.path.join('..', 'framework'))
import pclases
import mx, mx.DateTime
sys.path.append(os.path.join('..', 'informes'))
from barcode import code39
from barcode.EANBarCode import EanBarCode
from reportlab.lib.units import cm
class TrazabilidadArticulos(Ventana):
    def __init__(self, objeto = None, usuario = None):
        """
        Build the traceability window.

        objeto: optional traceable object (rollo, bala, bigbag, lote,
            partida...) whose data is shown immediately on startup.
        usuario: logged-in user, forwarded to the Ventana base class.

        NOTE(review): this constructor blocks — it ends by entering
        gtk.main(), so it only returns when the GTK main loop quits.
        """
        self.usuario = usuario
        # The glade file defines every widget reachable via self.wids.
        Ventana.__init__(self, 'trazabilidad_articulos.glade', objeto,
                         self.usuario)
        # Map "widget/signal" -> handler; wiring is done by the base class.
        connections = {'b_salir/clicked': self.salir,
                       'b_buscar/clicked': self.buscar,
                       'b_imprimir/clicked': self.imprimir
                      }
        self.add_connections(connections)
        #self.wids['e_num'].connect("key_press_event", self.pasar_foco)
        self.wids['ventana'].resize(800, 600)
        self.wids['ventana'].set_position(gtk.WIN_POS_CENTER)
        if objeto != None:
            self.rellenar_datos(objeto)
        # Focus the code entry so a barcode scan lands there directly.
        self.wids['e_num'].grab_focus()
        gtk.main()
def imprimir(self, boton):
"""
Vuelca toda la información de pantalla en bruto a un PDF.
"""
import informes, geninformes
datos = "Código de trazabilidad: %s\n\n"%self.wids['e_num'].get_text()
for desc, txt in (("Producto:\n", self.wids['txt_producto']),
("Lote/Partida:\n", self.wids['txt_lp']),
("Albarán de salida:\n", self.wids['txt_albaran']),
("Producción:\n", self.wids['txt_produccion'])):
buffer = txt.get_buffer()
texto = buffer.get_text(buffer.get_start_iter(),
buffer.get_end_iter())
datos += desc + texto + "\n\n"
informes.abrir_pdf(geninformes.trazabilidad(datos))
def pasar_foco(self, widget, event):
if event.keyval == 65293 or event.keyval == 65421:
self.wids['b_buscar'].grab_focus()
    def chequear_cambios(self):
        """
        Hook required by the Ventana base class, called before the
        window closes or changes object.  This window is read-only, so
        there is never anything unsaved to check: intentionally a no-op.
        """
        pass
def buscar_bigbag(self, txt):
ar = None
if isinstance(txt, str):
txt = utils.parse_numero(txt)
ars = pclases.Bigbag.select(pclases.Bigbag.q.numbigbag == txt)
if ars.count() == 1:
ar = ars[0]
elif ars.count() > 1:
filas = [(a.id, a.numbigbag, a.codigo) for a in ars]
idbigbag = utils.dialogo_resultado(filas,
titulo = "Seleccione bigbag",
cabeceras = ('ID', 'Número de bigbag', 'Código'),
padre = self.wids['ventana'])
if idbigbag > 0:
ar = pclases.Bigbag.get(idbigbag)
return ar
def buscar_bala(self, txt):
ar = None
if isinstance(txt, str):
txt = utils.parse_numero(txt)
ars = pclases.Bala.select(pclases.Bala.q.numbala == txt)
if ars.count() == 1:
ar = ars[0]
elif ars.count() > 1:
filas = [(a.id, a.numbala, a.codigo) for a in ars]
idbala = utils.dialogo_resultado(filas,
titulo = "Seleccione bala",
cabeceras = ('ID', 'Número de bala', 'Código'),
padre = self.wids['ventana'])
if idbala > 0:
ar = pclases.Bala.get(idbala)
return ar
def buscar_rollo(self, txt):
ar = None
if isinstance(txt, str):
txt = utils.parse_numero(txt)
ars = pclases.Rollo.select(pclases.Rollo.q.numrollo == txt)
if ars.count() == 1:
ar = ars[0]
elif ars.count() > 1:
filas = [(a.id, a.numrollo, a.codigo) for a in ars]
idrollo = utils.dialogo_resultado(filas,
titulo = "Seleccione rollo",
cabeceras = ('ID', 'Número de rollo', 'Código'),
padre = self.wids['ventana'])
if idrollo > 0:
ar = pclases.Rollo.get(idrollo)
return ar
    def buscar_articulo(self, txt):
        """
        Generic lookup used when the scanned code carries no known
        prefix.  Rollos are tried first: if no rollo has that number
        the search falls through to balas; exactly one match wins
        outright; several matches defer to buscar_rollo, which shows a
        disambiguation dialog.  Returns the object found or None.
        """
        ar = None
        if isinstance(txt, str):
            # NOTE(review): a unicode value would skip the parse here —
            # presumably the GTK entry always yields str in this py2 app.
            txt = utils.parse_numero(txt)
        ars = pclases.Rollo.select(pclases.Rollo.q.numrollo == txt)
        if ars.count() == 0:
            ar = self.buscar_bala(txt)    # no rollo: maybe it is a bala
        elif ars.count() == 1:
            ar = ars[0]
        else:
            ar = self.buscar_rollo(txt)   # ambiguous: ask the user
        return ar
    def buscar(self, b):
        """
        Search-button callback.  Reads the code typed or scanned into
        e_num, dispatches on its prefix to decide which traceable kind
        it names (rollo, bala, lote, partida, bigbag, palé, caja...),
        fetches the object and displays it; shows an info dialog when
        nothing matches.

        NOTE(review): branch order matters if any prefixes share a
        leading character — confirm the PREFIJO_* constants are
        mutually non-overlapping in pclases.
        """
        a_buscar = self.wids['e_num'].get_text().strip().upper()
        # --- Rollos: exact code first, then by bare number -------------
        if a_buscar.startswith(pclases.PREFIJO_ROLLO):
            try:
                objeto = pclases.Rollo.select(
                    pclases.Rollo.q.codigo == a_buscar)[0]
            except IndexError:
                # Full code not found: retry by number (prefix stripped).
                objeto = self.buscar_rollo(a_buscar[1:])
        # --- Balas -----------------------------------------------------
        elif a_buscar.startswith(pclases.PREFIJO_BALA):
            try:
                objeto = pclases.Bala.select(
                    pclases.Bala.q.codigo == a_buscar)[0]
            except IndexError:
                objeto = self.buscar_bala(a_buscar[1:])
        # --- Lotes of cement fibre -------------------------------------
        elif a_buscar.startswith(pclases.PREFIJO_LOTECEM):
            try:
                loteCem = pclases.LoteCem.select(
                    pclases.LoteCem.q.codigo == a_buscar)[0]
            except IndexError:
                utils.dialogo_info(titulo = "LOTE NO ENCONTRADO",
                    texto = "El lote de fibra de cemento %s no se encontró."
                        % (a_buscar),
                    padre = self.wids['ventana'])
                loteCem = None
            objeto = loteCem
        # --- Fibre lotes: by number after the 2-char prefix, then code -
        elif a_buscar.startswith(pclases.PREFIJO_LOTE):
            try:
                lote = pclases.Lote.select(
                    pclases.Lote.q.numlote == int(a_buscar[2:]))[0]
            except IndexError:
                try:
                    lote = pclases.Lote.select(
                        pclases.Lote.q.codigo == a_buscar)[0]
                except IndexError:
                    utils.dialogo_info(titulo = "LOTE NO ENCONTRADO",
                        texto = "El lote de fibra %s no se encontró." % (
                            a_buscar),
                        padre = self.wids['ventana'])
                    lote = None
            except ValueError:
                # int() failed: text after the prefix was not numeric.
                utils.dialogo_info(titulo = "ERROR BUSCANDO LOTE",
                    texto = "El texto %s provocó un error en la búsqueda." % (
                        a_buscar),
                    padre = self.wids['ventana'])
                lote = None
            objeto = lote
        # --- Geotextile partidas ---------------------------------------
        elif a_buscar.startswith(pclases.PREFIJO_PARTIDA):
            try:
                partida = pclases.Partida.select(
                    pclases.Partida.q.numpartida == int(a_buscar[2:]))[0]
            except IndexError:
                try:
                    partida = pclases.Partida.select(
                        pclases.Partida.q.codigo == a_buscar)[0]
                except IndexError:
                    utils.dialogo_info(titulo = "PARTIDA NO ENCONTRADA",
                        texto = "La partida de geotextiles %s "
                                "no se encontró." % (a_buscar),
                        padre = self.wids['ventana'])
                    partida = None
            except ValueError:
                utils.dialogo_info(titulo = "ERROR BUSCANDO PARTIDA",
                    texto = "El texto %s provocó un error en la búsqueda." % (
                        a_buscar),
                    padre = self.wids['ventana'])
                partida = None
            objeto = partida
        # --- Bagged cement-fibre partidas ------------------------------
        elif a_buscar.startswith(pclases.PREFIJO_PARTIDACEM):
            try:
                partidaCem = pclases.PartidaCem.select(
                    pclases.PartidaCem.q.numpartida == int(a_buscar[2:]))[0]
            except IndexError:
                try:
                    partidaCem = pclases.PartidaCem.select(
                        pclases.PartidaCem.q.codigo == a_buscar)[0]
                except IndexError:
                    utils.dialogo_info(
                        titulo = "PARTIDA DE FIBRA EMBOLSADA NO ENCONTRADA",
                        texto = "La partida de fibra embolsada %s "
                                "no se encontró." % (a_buscar),
                        padre = self.wids['ventana'])
                    partidaCem = None
            except ValueError:
                utils.dialogo_info(
                    titulo = "ERROR BUSCANDO PARTIDA DE FIBRA EMBOLSADA",
                    texto = "El texto %s provocó un error en la búsqueda." % (
                        a_buscar),
                    padre = self.wids['ventana'])
                partidaCem = None
            objeto = partidaCem
        # --- Loading partidas ------------------------------------------
        elif a_buscar.startswith(pclases.PREFIJO_PARTIDACARGA):
            try:
                partidacarga = pclases.PartidaCarga.select(
                    pclases.PartidaCarga.q.numpartida == int(a_buscar[2:]))[0]
            except IndexError:
                try:
                    partidacarga = pclases.PartidaCarga.select(
                        pclases.PartidaCarga.q.codigo == a_buscar)[0]
                except IndexError:
                    utils.dialogo_info(titulo="PARTIDA DE CARGA NO ENCONTRADA",
                        texto = "La partida de carga %s no se encontró." % (
                            a_buscar),
                        padre = self.wids['ventana'])
                    partidacarga = None
            except ValueError:
                utils.dialogo_info(titulo = "ERROR BUSCANDO PARTIDA DE CARGA",
                    texto = "El texto %s provocó un error en la búsqueda." % (
                        a_buscar),
                    padre = self.wids['ventana'])
                partidacarga = None
            objeto = partidacarga
        # --- Bigbags ---------------------------------------------------
        elif a_buscar.startswith(pclases.PREFIJO_BIGBAG):
            try:
                objeto = pclases.Bigbag.select(
                    pclases.Bigbag.q.codigo == a_buscar)[0]
            except IndexError:
                objeto = self.buscar_bigbag(a_buscar[1:])
        # --- Kinds found only by exact code (no numeric fallback) ------
        elif a_buscar.startswith(pclases.PREFIJO_ROLLODEFECTUOSO):
            try:
                objeto = pclases.RolloDefectuoso.select(
                    pclases.RolloDefectuoso.q.codigo == a_buscar)[0]
            except IndexError:
                objeto = None   # Exact code or nothing.
        elif a_buscar.startswith(pclases.PREFIJO_BALACABLE):
            try:
                objeto = pclases.BalaCable.select(
                    pclases.BalaCable.q.codigo == a_buscar)[0]
            except IndexError:
                objeto = None   # Exact code or nothing.
        elif a_buscar.startswith(pclases.PREFIJO_ROLLOC):
            try:
                objeto = pclases.RolloC.select(
                    pclases.RolloC.q.codigo == a_buscar)[0]
            except IndexError:
                objeto = None   # Exact code or nothing.
        elif a_buscar.startswith(pclases.PREFIJO_PALE):
            try:
                objeto = pclases.Pale.select(
                    pclases.Pale.q.codigo == a_buscar)[0]
            except IndexError:
                objeto = None   # Exact code or nothing.
        elif a_buscar.startswith(pclases.PREFIJO_CAJA):
            try:
                objeto = pclases.Caja.select(
                    pclases.Caja.q.codigo == a_buscar)[0]
            except IndexError:
                objeto = None   # Exact code or nothing.
        elif a_buscar.startswith(pclases.PREFIJO_BOLSA):
            # Bolsas are no longer tracked as standalone objects.
            # try:
            #     objeto = pclases.Bolsa.select(
            #         pclases.Bolsa.q.codigo == a_buscar.upper())[0]
            # except IndexError:
            #     objeto = None # Exact code or nothing.
            # Resolve instead the caja the bolsa belongs to:
            objeto = pclases.Caja.get_caja_from_bolsa(a_buscar)
        else:
            # No recognized prefix: generic rollo/bala number search.
            objeto = self.buscar_articulo(a_buscar)
        if objeto != None:
            objeto.sync()
            # Normalize the entry to the canonical code of what we found.
            if hasattr(objeto, "codigo"):
                self.wids['e_num'].set_text(objeto.codigo)
            else:
                self.wids['e_num'].set_text("DEBUG: __hash__ %s" % (
                    objeto.__hash__()))
            self.rellenar_datos(objeto)
        else:
            utils.dialogo_info(titulo = "NO ENCONTRADO",
                texto = "Producto no encontrado",
                padre = self.wids['ventana'])
    def rellenar_datos(self, objeto):
        """
        "objeto" is the object whose information will be displayed.
        Individual bultos (bala, rollo, bigbag...) are rendered
        directly; partidas de carga and lote/partida aggregates go
        through their dedicated renderers behind a progress window,
        since they touch many database rows.
        """
        objeto.sync()   # Refresh from the DB before displaying anything.
        if isinstance(objeto, (pclases.Bala, pclases.Rollo, pclases.Bigbag,
                               pclases.RolloDefectuoso, pclases.BalaCable,
                               pclases.RolloC, pclases.Pale, pclases.Caja,
                              )):
                              # pclases.Bolsa)):
            try:
                objeto.articulo.sync()
            except AttributeError:
                pass # It's a caja or a palé: no single articulo
                     # to synchronize.
            self.rellenar_producto(objeto)
            self.rellenar_lotepartida(objeto)
            self.rellenar_albaran(objeto)
            self.rellenar_produccion(objeto)
            self.wids['e_num'].set_text(objeto.codigo)
        elif isinstance(objeto, pclases.PartidaCarga):
            from ventana_progreso import VentanaActividad, VentanaProgreso
            vpro = VentanaProgreso(padre = self.wids['ventana'])
            vpro.mostrar()
            i = 0.0
            tot = 5
            vpro.set_valor(i/tot, "Buscando..."); i += 1
            # NOTE(review): "time" is already imported at module level;
            # the half-second pause just lets the progress window paint.
            import time; time.sleep(0.5)
            vpro.set_valor(i/tot, "Producto..."); i += 1
            self.rellenar_producto_partida_carga(objeto)
            vpro.set_valor(i/tot, "Lote/Partida..."); i += 1
            self.rellenar_partida_carga(objeto)
            vpro.set_valor(i/tot, "Albarán de salida..."); i += 1
            self.rellenar_albaran_partida_carga(objeto)
            vpro.set_valor(i/tot, "Producción..."); i += 1
            self.rellenar_produccion_partida_carga(objeto)
            self.wids['e_num'].set_text(objeto.codigo)
            vpro.ocultar()
        elif isinstance(objeto,(pclases.Lote, pclases.LoteCem,
                                pclases.Partida, pclases.PartidaCem)):
            from ventana_progreso import VentanaActividad, VentanaProgreso
            vpro = VentanaProgreso(padre = self.wids['ventana'])
            vpro.mostrar()
            i = 0.0
            tot = 5
            vpro.set_valor(i/tot, "Buscando..."); i += 1
            import time; time.sleep(0.5)
            vpro.set_valor(i/tot, "Producto..."); i += 1
            self.rellenar_producto_lote_o_partida(objeto)
            vpro.set_valor(i/tot, "Lote/Partida..."); i += 1
            self.rellenar_lote_o_partida(objeto)
            vpro.set_valor(i/tot, "Albarán de salida..."); i += 1
            self.rellenar_albaran_lote_o_partida(objeto)
            vpro.set_valor(i/tot, "Producción..."); i += 1
            self.rellenar_produccion_lote_o_partida(objeto)
            self.wids['e_num'].set_text(objeto.codigo)
            vpro.ocultar()
def rellenar_producto_partida_carga(self, pcarga):
"""
Muestra los productos de fibra que componen la partida de carga.
"""
txtvw = self.wids['txt_producto']
borrar_texto(txtvw)
prodsfibra = {}
total = 0.0
for bala in pcarga.balas:
a = bala.articulo
pv = a.productoVenta
l = bala.lote
if pv not in prodsfibra:
prodsfibra[pv] = {}
if bala.lote not in prodsfibra[pv]:
prodsfibra[pv][l] = {'balas': [], 'total': 0.0}
prodsfibra[pv][l]['balas'].append(bala)
prodsfibra[pv][l]['total'] += bala.pesobala
total += bala.pesobala
for pv in prodsfibra:
escribir(txtvw,
"%s kg de %s\n" % (utils.float2str(sum(
[prodsfibra[pv][l]['total'] for l in prodsfibra[pv]])),
pv.descripcion),
("negrita"))
for l in prodsfibra[pv]:
escribir(txtvw, "\t%d balas del lote %s; %s kg\n" % (
len(prodsfibra[pv][l]['balas']),
l.codigo,
utils.float2str(prodsfibra[pv][l]['total'])))
escribir(txtvw,
"Total cargado: %s kg\n" % (utils.float2str(total)),
("negrita", "grande"))
def rellenar_partida_carga(self, pcarga):
"""
Muestra la partida de carga, fecha y partidas de geotextiles
relacionados con ella.
"""
txtvw = self.wids['txt_lp']
borrar_texto(txtvw)
escribir(txtvw, "Partida de carga %s (%s)\n" % (
pcarga.numpartida, pcarga.codigo), ("negrita"))
escribir(txtvw, "Fecha y hora de creación: %s %s\n" % (
utils.str_fecha(pcarga.fecha),
utils.str_hora_corta(pcarga.fecha)),
("cursiva"))
escribir(txtvw, "Partidas de geotextiles relacionados:\n")
for partida in pcarga.partidas:
escribir(txtvw, "\tPartida %s (%s)\n" % (partida.numpartida,
partida.codigo))
def rellenar_albaran_partida_carga(self, pcarga):
"""
Muestra los albaranes internos relacionados con las balas de la
partida de carga.
"""
txtvw = self.wids['txt_albaran']
borrar_texto(txtvw)
albs = []
for bala in pcarga.balas:
alb = bala.articulo.albaranSalida
if alb != None and alb not in albs:
albs.append(alb)
escribir(txtvw, "Albarán %s (%s): %s\n" % (alb.numalbaran,
utils.str_fecha(alb.fecha),
alb.cliente and alb.cliente.nombre or "-"))
    def rellenar_produccion_partida_carga(self, pcarga):
        """
        Show the rollos manufactured out of this partida de carga:
        per-product counts, real / unpacked / theoretical weights and
        square metres, followed by the grand totals in bold.
        """
        txtvw = self.wids['txt_produccion']
        borrar_texto(txtvw)
        fab = {}    # productoVenta -> accumulated production figures
        for partida in pcarga.partidas:
            for rollo in partida.rollos:
                pv = rollo.productoVenta
                # m² per rollo comes from the product spec sheet.
                metros = pv.camposEspecificosRollo.get_metros_cuadrados()
                if pv not in fab:
                    fab[pv] = {'rollos': [rollo],
                               'peso_real': rollo.peso,
                               'peso_sin': rollo.peso_sin,
                               'peso_teorico': rollo.peso_teorico,
                               'metros': metros}
                else:
                    fab[pv]['rollos'].append(rollo)
                    fab[pv]['peso_real'] += rollo.peso
                    fab[pv]['peso_sin'] += rollo.peso_sin
                    fab[pv]['peso_teorico'] += rollo.peso_teorico
                    fab[pv]['metros'] += metros
        total_peso_real=total_peso_sin=total_peso_teorico=total_metros=0.0
        total_rollos = 0
        # One block per product, accumulating the grand totals as we go.
        for pv in fab:
            escribir(txtvw, "%s:\n" % (pv.descripcion), ("negrita"))
            escribir(txtvw, "\t%d rollos.\n" % (len(fab[pv]['rollos'])))
            total_rollos += len(fab[pv]['rollos'])
            escribir(txtvw, "\t%s kg peso real.\n" % (
                utils.float2str(fab[pv]['peso_real'])))
            total_peso_real += fab[pv]['peso_real']
            escribir(txtvw, "\t%s kg peso sin embalaje.\n" % (
                utils.float2str(fab[pv]['peso_sin'])))
            total_peso_sin += fab[pv]['peso_sin']
            escribir(txtvw, "\t%s kg peso teórico.\n" % (
                utils.float2str(fab[pv]['peso_teorico'])))
            total_peso_teorico += fab[pv]['peso_teorico']
            escribir(txtvw, "\t%s m².\n" % utils.float2str(fab[pv]['metros']))
            total_metros += fab[pv]['metros']
        escribir(txtvw, "Total fabricado:\n", ("negrita", "grande"))
        escribir(txtvw, "\t%d rollos.\n" % (total_rollos), ("negrita"))
        escribir(txtvw, "\t%s kg peso real.\n" % (
            utils.float2str(total_peso_real)), ("negrita"))
        escribir(txtvw, "\t%s kg peso sin embalaje.\n" % (
            utils.float2str(total_peso_sin)), ("negrita"))
        escribir(txtvw, "\t%s kg peso teórico.\n" % (
            utils.float2str(total_peso_teorico)), ("negrita"))
        escribir(txtvw, "\t%s m².\n" % (utils.float2str(total_metros)),
                 ("negrita"))
def rellenar_producto_lote_o_partida(self, objeto):
"""
Determina el producto (o productos, para lotes antiguos)
al que pertenece el lote, lote de cemento o partida y lo
muestra en el cuadro correspondiente.
"""
txtvw = self.wids['txt_producto']
borrar_texto(txtvw)
productos = []
if isinstance(objeto, pclases.Lote):
for bala in objeto.balas:
if (bala.articulo and bala.articulo.productoVenta
and bala.articulo.productoVenta not in productos):
productos.append(bala.articulo.productoVenta)
elif isinstance(objeto, pclases.LoteCem):
for bigbag in objeto.bigbags:
if (bigbag.articulo and bigbag.articulo.productoVenta
and bigbag.articulo.productoVenta not in productos):
productos.append(bigbag.articulo.productoVenta)
elif isinstance(objeto, pclases.Partida):
for rollo in objeto.rollos:
if (rollo.articulo and rollo.articulo.productoVenta
and rollo.articulo.productoVenta not in productos):
productos.append(rollo.articulo.productoVenta)
elif isinstance(objeto, pclases.PartidaCem):
for pale in objeto.pales:
if (pale.productoVenta
and pale.productoVenta not in productos):
productos.append(pale.productoVenta)
for producto in productos:
self.rellenar_info_producto_venta(producto, txtvw)
def rellenar_lote_o_partida(self, objeto):
"""
Muestra la información del lote o partida
"""
txtvw = self.wids['txt_lp']
borrar_texto(txtvw)
if isinstance(objeto, pclases.Lote):
self.rellenar_info_lote(objeto, txtvw)
elif isinstance(objeto, pclases.LoteCem):
self.rellenar_info_lote_cemento(objeto, txtvw)
elif isinstance(objeto, pclases.Partida):
self.rellenar_info_partida(objeto, txtvw)
elif isinstance(objeto, pclases.PartidaCem):
self.rellenar_info_partidaCem(objeto, txtvw)
def rellenar_albaran_lote_o_partida(self, objeto):
"""
Busca los artículos que siguen en almacén del lote o partida y muestra
también la relación de albaranes de los que han salido.
"""
txtvw = self.wids['txt_albaran']
borrar_texto(txtvw)
albs = {}
en_almacen = 0
if isinstance(objeto, pclases.LoteCem):
bultos = objeto.bigbags
elif isinstance(objeto, pclases.Lote):
bultos = objeto.balas
elif isinstance(objeto, pclases.Partida):
bultos = objeto.rollos
elif isinstance(objeto, pclases.PartidaCem):
bultos = []
for pale in objeto.pales:
for caja in pale.cajas:
bultos.append(caja)
for bulto in bultos:
albaran = bulto.articulo.albaranSalida
if albaran == None:
en_almacen += 1
else:
if albaran not in albs:
albs[albaran] = 1
else:
albs[albaran] += 1
self.mostrar_info_albaranes_lote_o_partida(txtvw,
bultos,
albs,
en_almacen)
def mostrar_info_albaranes_lote_o_partida(self,
txtvw,
bultos,
albs,
en_almacen):
"""
Introduce la información en sí en el TextView.
"""
escribir(txtvw, "Bultos en almacén: %d\n" % (en_almacen))
escribir(txtvw, "Bultos vendidos: %d\n" % (len(bultos) - en_almacen))
for albaran in albs:
escribir(txtvw, "\t%d en el albarán %s.\n" % (albs[albaran],
albaran.numalbaran))
def rellenar_produccion_lote_o_partida(self, objeto):
"""
Muestra el número de bultos y kg o metros fabricados
para el lote, loteCem o partida en cuestión.
Si el objeto es una partida, muestra además el consumo
de materia prima.
"""
txtvw = self.wids['txt_produccion']
borrar_texto(txtvw)
if isinstance(objeto, pclases.LoteCem):
self.mostrar_produccion_lote_cemento(objeto, txtvw)
elif isinstance(objeto, pclases.Lote):
self.mostrar_produccion_lote(objeto, txtvw)
elif isinstance(objeto, pclases.Partida):
self.mostrar_produccion_partida(objeto, txtvw)
elif isinstance(objeto, pclases.PartidaCem):
self.mostrar_produccion_partidaCem(objeto, txtvw)
def mostrar_produccion_lote(self, objeto, txtvw):
"""
Muestra las balas del lote producidas, desglosadas en
clase A y clase B; y los partes donde se fabricaron.
"""
ba = {'bultos': 0, 'peso': 0.0}
bb = {'bultos': 0, 'peso': 0.0}
partes = []
for b in objeto.balas:
if (b.articulo.parteDeProduccion != None
and b.articulo.parteDeProduccion not in partes):
partes.append(b.articulo.parteDeProduccion)
if b.claseb:
bb['bultos'] += 1
bb['peso'] += b.pesobala
else:
ba['bultos'] += 1
ba['peso'] += b.pesobala
escribir(txtvw,
"Balas clase A: %d; %s kg\n" % (ba['bultos'],
utils.float2str(ba['peso'])))
escribir(txtvw,
"Balas clase B: %d; %s kg\n" % (bb['bultos'],
utils.float2str(bb['peso'])))
escribir(txtvw,
"TOTAL: %d balas; %s kg\n" % (ba['bultos'] + bb['bultos'],
utils.float2str(
ba['peso'] + bb['peso'])))
escribir(txtvw, "\nLote de fibra fabricado en los partes:\n")
partes.sort(lambda p1, p2: int(p1.id - p2.id))
for parte in partes:
escribir(txtvw,
"%s (%s-%s)\n" % (utils.str_fecha(parte.fecha),
utils.str_hora_corta(parte.horainicio),
utils.str_hora_corta(parte.horafin)))
def mostrar_produccion_partida(self, objeto, txtvw):
"""
Muestra la producción de los rollos de la partida.
"""
rs = {'bultos': 0,
'peso': 0.0,
'peso_sin': 0.0,
'metros2': 0.0,
'bultos_d': 0,
'peso_d': 0.0,
'peso_sin_d': 0.0,
'peso_teorico': objeto.get_kilos_teorico(
contar_defectuosos = False),
'peso_teorico_d': objeto.get_kilos_teorico(
contar_defectuosos = True),
'metros2_d': 0.0}
partes = []
for r in objeto.rollos:
if (r.articulo.parteDeProduccion != None
and r.articulo.parteDeProduccion not in partes):
partes.append(r.articulo.parteDeProduccion)
rs['bultos'] += 1
rs['peso'] += r.peso
rs['peso_sin'] += r.peso_sin
rs['metros2'] += r.articulo.superficie
for r in objeto.rollosDefectuosos:
if (r.articulo.parteDeProduccion != None
and r.articulo.parteDeProduccion not in partes):
partes.append(r.articulo.parteDeProduccion)
rs['bultos_d'] += 1
rs['peso_d'] += r.peso
rs['peso_sin_d'] += r.peso_sin
rs['metros2_d'] += r.articulo.superficie
escribir(txtvw,
"TOTAL:\n\t%d rollos;\n"
"\t%d rollos defectuosos;\n"
"\t\t%d rollos en total.\n"
"\t%s kg reales (%s kg + %s kg en rollos defectuosos).\n"
"\t%s kg sin embalaje (%s kg + %s kg en rollos defectuosos)."
"\n""\t%s kg teóricos (%s kg teóricos incluyendo rollos "
"defectuosos).\n"
"\t%s m² (%s m² + %s m² en rollos defectuosos).\n" % (
rs['bultos'],
rs['bultos_d'],
rs['bultos'] + rs['bultos_d'],
utils.float2str(rs['peso'] + rs['peso_d']),
utils.float2str(rs['peso']),
utils.float2str(rs['peso_d']),
utils.float2str(rs['peso_sin'] + rs['peso_sin_d']),
utils.float2str(rs['peso_sin']),
utils.float2str(rs['peso_sin_d']),
utils.float2str(rs['peso_teorico']),
utils.float2str(rs['peso_teorico_d']),
utils.float2str(rs['metros2'] + rs['metros2_d']),
utils.float2str(rs['metros2']),
utils.float2str(rs['metros2_d'])))
escribir(txtvw, "\nPartida de geotextiles fabricada en los partes:\n")
partes.sort(lambda p1, p2: int(p1.id - p2.id))
for parte in partes:
escribir(txtvw,
"%s (%s-%s)\n" % (utils.str_fecha(parte.fecha),
utils.str_hora_corta(parte.horainicio),
utils.str_hora_corta(parte.horafin)))
escribir(txtvw, "\n\nConsumos:\n", ("rojo, negrita"))
import geninformes
escribir(txtvw, geninformes.consumoPartida(objeto), ("rojo"))
def mostrar_produccion_lote_cemento(self, objeto, txtvw):
"""
Muestra el total de bigbags del lote y su peso total,
también muestra los de clase A y B y las fechas y turnos
de los partes en los que se fabricaron.
"""
bba = {'bultos': 0, 'peso': 0.0}
bbb = {'bultos': 0, 'peso': 0.0}
partes = []
for bb in objeto.bigbags:
if (bb.articulo.parteDeProduccion != None
and bb.articulo.parteDeProduccion not in partes):
partes.append(bb.articulo.parteDeProduccion)
if bb.claseb:
bbb['bultos'] += 1
bbb['peso'] += bb.pesobigbag
else:
bba['bultos'] += 1
bba['peso'] += bb.pesobigbag
escribir(txtvw, "Bigbags clase A: %d; %s kg\n" % (
bba['bultos'], utils.float2str(bba['peso'])))
escribir(txtvw, "Bigbags clase B: %d; %s kg\n" % (
bbb['bultos'], utils.float2str(bbb['peso'])))
escribir(txtvw, "TOTAL: %d bigbags; %s kg\n" % (
bba['bultos'] + bbb['bultos'],
utils.float2str(bba['peso'] + bbb['peso'])))
escribir(txtvw,
"\nLote de fibra de cemento fabricado en los partes:\n")
partes.sort(lambda p1, p2: int(p1.id - p2.id))
for parte in partes:
escribir(txtvw, "%s (%s-%s)\n" % (
utils.str_fecha(parte.fecha),
utils.str_hora_corta(parte.horainicio),
utils.str_hora_corta(parte.horafin)))
def mostrar_produccion_partidaCem(self, objeto, txtvw):
"""
Muestra el total de palés de la partida de cemento y su peso total,
también muestra los de clase A y B y las fechas y turnos
de los partes en los que se fabricaron.
"""
palesa = {'cajas': 0, 'bultos': 0, 'peso': 0.0}
palesb = {'cajas': 0, 'bultos': 0, 'peso': 0.0}
partes = []
for pale in objeto.pales:
if (pale.parteDeProduccion != None
and pale.parteDeProduccion not in partes):
partes.append(pale.parteDeProduccion)
if pale.claseb:
palesb['bultos'] += 1
palesb['peso'] += pale.calcular_peso()
palesb['cajas'] += len(pale.cajas) # Más realista en caso de
# incoherencias en la base de datos que pale.numcajas.
else:
palesa['bultos'] += 1
palesa['peso'] += pale.calcular_peso()
palesa['cajas'] += len(pale.cajas)
escribir(txtvw, "Palés clase A: %d; %s kg (%d cajas)\n" % (
palesa['bultos'], utils.float2str(palesa['peso']),
palesa['cajas']))
escribir(txtvw, "Palés clase B: %d; %s kg (%d cajas)\n" % (
palesb['bultos'], utils.float2str(palesb['peso']),
palesb['cajas']))
escribir(txtvw, "TOTAL: %d palés; %s kg (%d cajas)\n" % (
palesa['bultos'] + palesb['bultos'],
utils.float2str(palesa['peso'] + palesb['peso']),
palesa['cajas'] + palesb['cajas']))
escribir(txtvw, "\nPartida de fibra de cemento embolsada "
"fabricada en los partes:\n")
partes.sort(lambda p1, p2: int(p1.id - p2.id))
for parte in partes:
escribir(txtvw, "%s (%s-%s)\n" % (
utils.str_fecha(parte.fecha),
utils.str_hora_corta(parte.horainicio),
utils.str_hora_corta(parte.horafin)))
    def rellenar_info_lote(self, lote, txtvw):
        """
        Write the full quality information of a fibre lote into txtvw.
        Each nominal value stored on the lote is followed, in blue, by
        the mean actually computed from its laboratory tests; the tail
        lists how many samples and tests of each kind exist.
        """
        escribir(txtvw, "Lote número: %d\n" % (lote.numlote))
        escribir(txtvw, "Código de lote: %s\n" % (lote.codigo),
                 ("negrita", ))
        # Pattern below: nominal value first, computed mean (blue) next.
        escribir(txtvw, "Tenacidad: %s" % (lote.tenacidad))
        escribir(txtvw, " (%s)\n" % (
            utils.float2str(lote.calcular_tenacidad_media())), ("azul"))
        escribir(txtvw, "Elongación: %s" % (lote.elongacion))
        escribir(txtvw, " (%s)\n" % (
            utils.float2str(lote.calcular_elongacion_media())), ("azul"))
        escribir(txtvw, "Rizo: %s" % (lote.rizo))
        escribir(txtvw, " (%s)\n" % (
            utils.float2str(lote.calcular_rizo_medio())), ("azul"))
        escribir(txtvw, "Encogimiento: %s" % (lote.encogimiento))
        escribir(txtvw, " (%s)\n" % (
            utils.float2str(lote.calcular_encogimiento_medio())), ("azul"))
        escribir(txtvw, "Grasa: %s" % (utils.float2str(lote.grasa)))
        escribir(txtvw, " (%s)\n" % (
            utils.float2str(lote.calcular_grasa_media())), ("azul"))
        escribir(txtvw, "Media de título: %s" % (
            utils.float2str(lote.mediatitulo)))
        escribir(txtvw, " (%s)\n" % (
            utils.float2str(lote.calcular_titulo_medio())), ("azul"))
        escribir(txtvw, "Tolerancia: %s\n" % (
            utils.float2str(lote.tolerancia)))
        # Counters of samples and lab tests linked to the lote.
        escribir(txtvw, "Muestras extraídas en el lote: %d\n" %
                 len(lote.muestras))
        escribir(txtvw, "Pruebas de estiramiento realizadas: %d\n" %
                 len(lote.pruebasElongacion))
        escribir(txtvw, "Pruebas de medida de rizo realizadas: %d\n" %
                 len(lote.pruebasRizo))
        escribir(txtvw, "Pruebas de encogimiento realizadas: %d\n" %
                 len(lote.pruebasEncogimiento))
        escribir(txtvw, "Pruebas de tenacidad realizadas: %d\n" %
                 len(lote.pruebasTenacidad))
        escribir(txtvw, "Pruebas de grasa realizadas: %d\n" %
                 len(lote.pruebasGrasa))
        escribir(txtvw, "Pruebas de título realizadas: %d\n" %
                 len(lote.pruebasTitulo))
def rellenar_info_partida_cemento(self, partida, txtvw):
"""
Rellena la información de la partida de fibra de cemento.
"""
escribir(txtvw, "Partida de cemento número: %s\n" % (
partida and str(partida.numpartida) or "Sin partida relacionada."))
escribir(txtvw, "Código de partida: %s\n" % (
partida and partida.codigo or "Sin partida relacionada."),
("negrita", ))
    def rellenar_info_lote_cemento(self, lote, txtvw):
        """
        Write the full quality information of a cement-fibre lote into
        txtvw.  Each stored nominal value is followed, in blue, by the
        mean computed from its lab tests; optional values (grasa,
        mediatitulo, tolerancia) fall back to "-" when unset.
        """
        escribir(txtvw, "Lote número: %d\n" % (lote.numlote))
        escribir(txtvw, "Código de lote: %s\n" % (lote.codigo),
                 ("negrita", ))
        # Pattern below: nominal value first, computed mean (blue) next.
        escribir(txtvw, "Tenacidad: %s" % (lote.tenacidad))
        escribir(txtvw, " (%s)\n" % (lote.calcular_tenacidad_media()),
                 ("azul"))
        escribir(txtvw, "Elongación: %s" % (lote.elongacion))
        escribir(txtvw, " (%s)\n" % (lote.calcular_elongacion_media()),
                 ("azul"))
        escribir(txtvw, "Encogimiento: %s" % (lote.encogimiento))
        escribir(txtvw, " (%s)\n" % (lote.calcular_encogimiento_medio()),
                 ("azul"))
        escribir(txtvw, "Grasa: %s" % (
            lote.grasa and utils.float2str(lote.grasa) or "-"))
        escribir(txtvw, " (%s)\n" % (lote.calcular_grasa_media()),
                 ("azul"))
        escribir(txtvw, "Humedad: %s" % (lote.humedad))
        escribir(txtvw, " (%s)\n" % (lote.calcular_humedad_media()),
                 ("azul"))
        escribir(txtvw, "Media de título: %s" % (
            lote.mediatitulo and utils.float2str(lote.mediatitulo) or "-"))
        escribir(txtvw, " (%s)\n" % (lote.calcular_titulo_medio()),
                 ("azul"))
        escribir(txtvw, "Tolerancia: %s\n" % (
            lote.tolerancia and utils.float2str(lote.tolerancia) or "-"))
        # Counters of samples and lab tests linked to the lote.
        escribir(txtvw, "Muestras extraídas en el lote: %d\n" % (
            len(lote.muestras)))
        escribir(txtvw, "Pruebas de estiramiento realizadas: %d\n" % (
            len(lote.pruebasElongacion)))
        escribir(txtvw, "Pruebas de encogimiento realizadas: %d\n" % (
            len(lote.pruebasEncogimiento)))
        escribir(txtvw, "Pruebas de grasa realizadas: %d\n" % (
            len(lote.pruebasGrasa)))
        escribir(txtvw, "Pruebas de medida de humedad realizadas: %d\n" %
                 (len(lote.pruebasHumedad)))
        escribir(txtvw, "Pruebas de tenacidad realizadas: %d\n" % (
            len(lote.pruebasTenacidad)))
        escribir(txtvw, "Pruebas de título realizadas: %d\n" % (
            len(lote.pruebasTitulo)))
def rellenar_info_partida(self, partida, txtvw):
    """
    Write the geotextile partida's (production batch) information into
    the TextView *txtvw*.

    For each physical property the nominal value stored on the partida
    is written first and then, in parentheses and in blue, the mean
    computed from the laboratory tests (the ``calcular_*`` methods).
    A summary of how many samples/tests exist closes the report.
    """
    escribir(txtvw, "Número de partida: %d\n" % (partida.numpartida))
    # NOTE(review): ("negrita") is a plain string, not a 1-tuple; escribir()
    # accepts it anyway because it tests styles with the `in` operator.
    escribir(txtvw, "Código de partida: %s\n" % (partida.codigo),
             ("negrita"))
    escribir(txtvw, "Gramaje: %s" % (utils.float2str(partida.gramaje)))
    escribir(txtvw,
        " (%s)\n" % utils.float2str(partida.calcular_gramaje_medio()),
        ("azul"))
    escribir(txtvw,
        "Resistencia longitudinal: %s" % (
            utils.float2str(partida.longitudinal)))
    escribir(txtvw,
        " (%s)\n" % (utils.float2str(
            partida.calcular_resistencia_longitudinal_media())),
        ("azul"))
    escribir(txtvw,
        "Alargamiento longitudinal: %s" % (
            utils.float2str(partida.alongitudinal)))
    escribir(txtvw,
        " (%s)\n" % (utils.float2str(
            partida.calcular_alargamiento_longitudinal_medio())),
        ("azul"))
    escribir(txtvw,
        "Resistencia transversal: %s" % (
            utils.float2str(partida.transversal)))
    escribir(txtvw,
        " (%s)\n" % (utils.float2str(
            partida.calcular_resistencia_transversal_media())),
        ("azul"))
    escribir(txtvw,
        "Alargamiento transversal: %s" % (
            utils.float2str(partida.atransversal)))
    escribir(txtvw,
        " (%s)\n" % (utils.float2str(
            partida.calcular_alargamiento_transversal_medio())),
        ("azul"))
    escribir(txtvw, "CBR: %s" % (utils.float2str(partida.compresion)))
    escribir(txtvw,
        " (%s)\n" % (
            utils.float2str(partida.calcular_compresion_media())),
        ("azul"))
    escribir(txtvw,
        "Perforación: %s" % (utils.float2str(partida.perforacion)))
    escribir(txtvw,
        " (%s)\n" % (
            utils.float2str(partida.calcular_perforacion_media())),
        ("azul"))
    escribir(txtvw,
        "Espesor: %s" % (utils.float2str(partida.espesor)))
    escribir(txtvw,
        " (%s)\n" % (
            utils.float2str(partida.calcular_espesor_medio())),
        ("azul"))
    escribir(txtvw,
        "Permeabilidad: %s" % (
            utils.float2str(partida.permeabilidad)))
    escribir(txtvw,
        " (%s)\n" % (
            utils.float2str(partida.calcular_permeabilidad_media())),
        ("azul"))
    escribir(txtvw,
        "Apertura de poros: %s" % (utils.float2str(partida.poros)))
    escribir(txtvw,
        " (%s)\n" % (
            utils.float2str(partida.calcular_poros_medio())),
        ("azul"))
    escribir(txtvw,
        "Resistencia al punzonado piramidal: %s" % (
            utils.float2str(partida.piramidal)))
    escribir(txtvw,
        " (%s)\n" % (
            utils.float2str(partida.calcular_piramidal_media())),
        ("azul"))
    # How many laboratory samples/tests back the means shown above.
    escribir(txtvw,
        "Muestras extraídas en la partida: %d\n" % len(
            partida.muestras))
    escribir(txtvw,
        "Pruebas de alargamiento longitudinal realizadas: %d\n" % len(
            partida.pruebasAlargamientoLongitudinal))
    escribir(txtvw,
        "Pruebas de alargamiento transversal realizadas: %d\n" % len(
            partida.pruebasAlargamientoTransversal))
    escribir(txtvw,
        "Pruebas de compresión realizadas: %d\n" % len(
            partida.pruebasCompresion))
    escribir(txtvw,
        "Pruebas de espesor realizadas: %d\n" % len(
            partida.pruebasEspesor))
    escribir(txtvw,
        "Pruebas de gramaje realizadas: %d\n" % len(
            partida.pruebasGramaje))
    escribir(txtvw,
        "Pruebas de perforación realizadas: %d\n" % len(
            partida.pruebasPerforacion))
    escribir(txtvw,
        "Pruebas de permeabilidad realizadas: %d\n" % len(
            partida.pruebasPermeabilidad))
    escribir(txtvw,
        "Pruebas de poros realizadas: %d\n"%len(partida.pruebasPoros))
    escribir(txtvw,
        "Pruebas de resistencia longitudinal realizadas: %d\n" % len(
            partida.pruebasResistenciaLongitudinal))
    escribir(txtvw,
        "Pruebas de resistencia transversal realizadas: %d\n" % len(
            partida.pruebasResistenciaTransversal))
    escribir(txtvw,
        "Pruebas de punzonado piramidal realizadas: %d\n" % len(
            partida.pruebasPiramidal))
def rellenar_info_partidaCem(self, partida, txtvw):
    """
    Write the cement partida's identification into the TextView *txtvw*:
    its number, then its traceability code in bold.
    """
    numero = "Número de partida: %d\n" % partida.numpartida
    codigo = "Código de partida: %s\n" % partida.codigo
    escribir(txtvw, numero)
    escribir(txtvw, codigo, ("negrita"))
def rellenar_lotepartida(self, articulo):
    """
    Fill the lote/partida tab with the information of the batch the
    received article belongs to: lote for balas, partida for rollos,
    cement lote for bigbags and cement partida for palés/cajas.
    Cable bales and cable rollos are not grouped in lotes, so nothing
    is shown for them.
    """
    txtvw = self.wids['txt_lp']
    borrar_texto(txtvw)
    if isinstance(articulo, pclases.Bala):
        self.rellenar_info_lote(articulo.lote, txtvw)
    elif isinstance(articulo, (pclases.Rollo, pclases.RolloDefectuoso)):
        self.rellenar_info_partida(articulo.partida, txtvw)
    elif isinstance(articulo, pclases.Bigbag):
        self.rellenar_info_lote_cemento(articulo.loteCem, txtvw)
    elif isinstance(articulo, (pclases.BalaCable, pclases.RolloC)):
        pass  # Cable bales are not grouped by lotes: nothing to show.
    elif isinstance(articulo, (pclases.Pale, pclases.Caja)):
        #, pclases.Bolsa)):
        # BUGFIX: this used to call self.rellenar_info_partida_cemento,
        # a method that does not exist (the defined one is
        # rellenar_info_partidaCem), raising AttributeError for palés
        # and cajas.
        self.rellenar_info_partidaCem(articulo.partidaCem, txtvw)
    else:
        escribir(txtvw, "¡NO SE ENCONTRÓ INFORMACIÓN!\n"
                 "Posible inconsistencia de la base de datos. "
                 "Contacte con el administrador.")
        self.logger.error("trazabilidad_articulos.py::"
                          "rellenar_lotepartida -> "
                          "No se encontró información acerca del "
                          "artículo ID %d." % (articulo.id))
def rellenar_albaran(self, articulo):
    """
    Write the delivery-note history of *articulo* into the 'txt_albaran'
    TextView: outgoing delivery notes (albaranes de salida), returns
    (abonos) and consumptions in partidas de carga, plus the current
    warehouse when the article is still in stock.

    For a Pale the method recurses once per caja that left in a distinct
    outgoing delivery note.
    """
    txtvw = self.wids['txt_albaran']
    borrar_texto(txtvw)
    if isinstance(articulo, pclases.Pale):
        pale = articulo
        # Keep one representative caja per distinct outgoing delivery
        # note so each albarán of the palé is reported only once.
        cajas_a_mostrar = []
        albaranes_tratados = []
        for caja in pale.cajas:
            articulo_caja = caja.articulo
            alb = articulo_caja.albaranSalida
            if alb not in albaranes_tratados:
                albaranes_tratados.append(alb)
                cajas_a_mostrar.append(caja)
        # NOTE(review): each recursive call clears the TextView first, so
        # only the last caja's history remains visible — confirm whether
        # that is the intended behaviour.
        for caja in cajas_a_mostrar:
            self.rellenar_albaran(caja)
    else:
        try:
            a = articulo.articulos[0]
        except IndexError, msg:
            self.logger.error("ERROR trazabilidad_articulos.py "
                              "(rellenar_albaran): %s" % (msg))
        else:
            # Walk the traceability history chronologically and report
            # each kind of event.
            for fecha, objeto, almacen in a.get_historial_trazabilidad():
                if isinstance(objeto, pclases.AlbaranSalida):
                    escribir(txtvw, "Albarán número: %s (%s)\n" % (
                                objeto.numalbaran,
                                objeto.get_str_tipo()),
                             ("_rojoclaro", ))
                    escribir(txtvw, "Fecha: %s\n" %
                             utils.str_fecha(objeto.fecha))
                    escribir(txtvw, "Transportista: %s\n" % (
                                objeto.transportista
                                and objeto.transportista.nombre or ''))
                    escribir(txtvw, "Cliente: %s\n" % (
                                objeto.cliente and objeto.cliente.nombre or ''),
                             ("negrita", ))
                    # NOTE(review): the fallback is objeto.nombre (an
                    # attribute of the albarán itself?) — looks odd;
                    # confirm it was not meant to be the client's name.
                    destino = (objeto.almacenDestino and
                               objeto.almacenDestino.nombre or
                               objeto.nombre)
                    escribir(txtvw, "Origen: %s\n" % (
                                objeto.almacenOrigen
                                and objeto.almacenOrigen.nombre
                                or "ERROR - ¡Albarán sin almacén de origen!"))
                    escribir(txtvw, "Destino: %s\n" % (destino))
                elif isinstance(objeto, pclases.Abono):
                    escribir(txtvw,
                        "El artículo fue devuelto el %s a %s en el abono"
                        " %s.\n" % (utils.str_fecha(fecha),
                                    almacen.nombre,
                                    objeto.numabono),
                        ("rojo", ))
                    # If it is effectively back in the warehouse, say so:
                    adeda = None
                    for ldd in objeto.lineasDeDevolucion:
                        if ldd.articulo == a:  # Found the matching line.
                            adeda = ldd.albaranDeEntradaDeAbono
                    if not adeda:
                        escribir(txtvw,"El artículo aún no ha entrado"
                            " en almacén. El abono no ha generado albarán "
                            "de entrada de mercancía.\n",
                            ("negrita", ))
                    else:
                        escribir(txtvw, "El artículo se recibió en "
                            "el albarán de entrada de abono %s el día "
                            "%s.\n" % (
                                adeda.numalbaran,
                                utils.str_fecha(adeda.fecha)))
                elif isinstance(objeto, pclases.PartidaCarga):
                    escribir(txtvw,
                        "Se consumió el %s en la partida de carga %s.\n"%(
                            utils.str_fecha(fecha),
                            objeto.codigo),
                        ("_rojoclaro", "cursiva"))
        # Current location, highlighted in green when still in stock.
        if articulo.articulo.en_almacen():
            escribir(txtvw,
                "El artículo está en almacén: %s.\n" % (
                    articulo.articulo.almacen
                    and articulo.articulo.almacen.nombre
                    or "¡Error de coherencia en la BD!"),
                ("_verdeclaro", ))
        if (hasattr(articulo, "parteDeProduccionID")
                and articulo.parteDeProduccionID):
            # Bigbags can now be consumed in production too.
            pdp = articulo.parteDeProduccion
            if pdp:
                if isinstance(articulo, pclases.Bigbag):
                    escribir(txtvw,
                        "\nBigbag consumido el día %s para producir la"
                        " partida de fibra de cemento embolsado %s."%(
                            utils.str_fecha(pdp.fecha),
                            pdp.partidaCem.codigo),
                        ("_rojoclaro", "cursiva"))
def func_orden_ldds_por_albaran_salida(self, ldd1, ldd2):
    """
    cmp-style comparator for devolution lines (LDDs).

    Orders primarily by the date of the related outgoing delivery note
    (lines without one sort after lines that have one) and breaks ties
    by LDD id. Returns -1, 1 or 0.
    """
    alb1 = ldd1.albaranSalida
    alb2 = ldd2.albaranSalida
    if alb1 and (alb2 == None or alb1.fecha < alb2.fecha):
        return -1
    if alb2 and (alb1 == None or alb1.fecha > alb2.fecha):
        return 1
    # Equal dates (or neither has an albarán): fall back to the id.
    if ldd1.id != ldd2.id:
        return -1 if ldd1.id < ldd2.id else 1
    return 0
def mostrar_info_abonos(self, articulo):
    """
    Write the return (abono) history of *articulo*: for every devolution
    line, the outgoing delivery note it left in and the return delivery
    note that brought it back, in chronological order.
    """
    if articulo.lineasDeDevolucion:
        txtvw = self.wids['txt_albaran']
        # Copy before sorting so the SQLObject result list is untouched.
        ldds = articulo.lineasDeDevolucion[:]
        # Python 2 cmp-style sort: chronological by outgoing albarán.
        ldds.sort(self.func_orden_ldds_por_albaran_salida)
        for ldd in ldds:
            try:
                escribir(txtvw,
                    "Salida del almacén el día %s en el albarán "
                    "%s para %s.\n" % (
                        utils.str_fecha(ldd.albaranSalida.fecha),
                        ldd.albaranSalida.numalbaran,
                        ldd.albaranSalida.cliente
                        and ldd.albaranSalida.cliente.nombre
                        or "?"),
                    ("_rojoclaro", "cursiva"))
                escribir(txtvw,
                    "Devuelto el día %s en el albarán de entrada "
                    "de abono %s.\n" % (
                        utils.str_fecha(
                            ldd.albaranDeEntradaDeAbono.fecha),
                        ldd.albaranDeEntradaDeAbono.numalbaran),
                    ("_verdeclaro", "cursiva"))
            except AttributeError, msg:
                # A missing related albarán (None) ends up here: report
                # the inconsistency in the UI and in the log, then go on.
                escribir(txtvw,
                    "ERROR DE INCONSISTENCIA. Contacte con el "
                    "administrador de la base de datos.\n",
                    ("negrita", ))
                txterror="trazabilidad_articulos.py::mostrar_info_abonos"\
                         " -> Excepción capturada con artículo "\
                         "ID %d: %s." % (articulo.id, msg)
                print txterror
                self.logger.error(txterror)
            escribir(txtvw, "\n")
def rellenar_produccion(self, articulo):
    """
    Write the manufacturing information of *articulo* (bala, bigbag,
    rollo — possibly defective — or cable bale/rollo) into the
    'txt_produccion' TextView, followed, when it applies, by the data of
    the production report (parte de producción) it was made in.
    """
    txtvw = self.wids['txt_produccion']
    borrar_texto(txtvw)
    mostrar_parte = True
    if isinstance(articulo, pclases.Bala):
        escribir(txtvw, "Bala número: %s\n" % articulo.numbala)
        escribir(txtvw, "Código de trazabilidad: %s\n" % articulo.codigo)
        escribir(txtvw, "Fecha y hora de fabricación: %s\n"
            % articulo.fechahora.strftime('%d/%m/%Y %H:%M'))
        escribir(txtvw, "Peso: %s\n" % (
            utils.float2str(articulo.pesobala, 1)), ("negrita",))
        escribir(txtvw, "Se extrajo muestra para laboratorio: %s\n" % (
            articulo.muestra and "Sí" or "No"))
        # Empty string when not "clase B", so nothing is printed then.
        escribir(txtvw, articulo.claseb and "CLASE B\n" or "")
        escribir(txtvw, "Observaciones: %s\n" % (articulo.motivo or "-"))
    elif isinstance(articulo, pclases.Bigbag):
        escribir(txtvw, "BigBag número: %s\n" % articulo.numbigbag)
        escribir(txtvw, "Código de trazabilidad: %s\n" % articulo.codigo)
        escribir(txtvw, "Fecha y hora de fabricación: %s\n" %
            articulo.fechahora.strftime('%d/%m/%Y %H:%M'))
        escribir(txtvw,
            "Peso: %s\n" % (utils.float2str(articulo.pesobigbag, 1)),
            ("negrita",))
        escribir(txtvw,
            "Se extrajo muestra para laboratorio: %s\n" % (
                articulo.muestra and "Sí" or "No"))
        escribir(txtvw, articulo.claseb and "CLASE B\n" or "")
        escribir(txtvw, "Observaciones: %s\n" % (articulo.motivo or "-"))
    elif isinstance(articulo, (pclases.Rollo, pclases.RolloDefectuoso)):
        escribir(txtvw, "Rollo número: %d\n" % articulo.numrollo)
        escribir(txtvw, "Código de trazabilidad: %s\n" % articulo.codigo)
        escribir(txtvw,
            "Fecha y hora de fabricación: %s\n" %
            articulo.fechahora.strftime('%d/%m/%Y %H:%M'))
        # A RolloDefectuoso is defective by definition; a plain Rollo is
        # defective when its "rollob" flag is set.
        escribir(txtvw,
            "Marcado como defectuoso: %s\n" % (
                (isinstance(articulo, pclases.RolloDefectuoso) and "Sí")
                or (articulo.rollob and "Sí" or "No")
                )
            )
        escribir(txtvw, "Observaciones: %s\n" % articulo.observaciones)
        escribir(txtvw, "Se extrajo muestra para laboratorio: %s\n" % (
            hasattr(articulo, "muestra")
            and articulo.muestra and "Sí" or "No"))
        escribir(txtvw,
            "Peso: %s\n" % (utils.float2str(articulo.peso, 1)),
            ("negrita",))
        escribir(txtvw,
            "Densidad: %s\n" % (
                utils.float2str(articulo.densidad, 1)))
    elif isinstance(articulo, (pclases.BalaCable, pclases.RolloC)):
        escribir(txtvw, "Código de trazabilidad: %s\n" % articulo.codigo)
        escribir(txtvw, "Peso: %s\n" % (utils.float2str(articulo.peso, 1)),
                 ("negrita",))
        escribir(txtvw,
            "Observaciones: %s\n" % (articulo.observaciones or "-"))
        escribir(txtvw,
            "Fecha y hora de embalado: %s\n" %
            utils.str_fechahora(articulo.fechahora))
        mostrar_parte = False  # Mainly because they specifically
                               # have no production report.
        # Instead, list the fibre-line crew on duty when it was packed.
        pdps = buscar_partes_fibra_fecha_y_hora(articulo.fechahora)
        opers = operarios_de_partes(pdps)
        if opers:
            escribir(txtvw,
                "\nOperarios del turno en la línea de fibra:\n")
            for oper in opers:
                escribir(txtvw,
                    "  %s, %s\n" % (oper.apellidos, oper.nombre))
    else:
        self.logger.error("trazabilidad_articulos.py::rellenar_produccion"
                          " -> Objeto ID %d no es de la clase bala, rollo"
                          " ni bigbag." % (articulo.id))
    if mostrar_parte:
        escribir(txtvw, "\n-----------------------------------\n",
                 ("cursiva"))
        escribir(txtvw, "Información del parte de producción\n",
                 ("cursiva"))
        escribir(txtvw, "-----------------------------------\n",
                 ("cursiva"))
        try:
            pdp = articulo.articulos[0].parteDeProduccion
        except IndexError, msg:
            self.logger.error("ERROR trazabilidad_articulos.py "
                              "(rellenar_produccion): %s" % (msg))
            pdp = None
        except AttributeError:
            # Presumably articles without an "articulos" collection hang
            # directly from the parte — TODO confirm.
            pdp = articulo.parteDeProduccion
        if pdp == None:
            escribir(txtvw,
                "¡No se econtró el parte de producción para la "
                "fabricación del artículo!\n",
                ("rojo", ))
        else:
            escribir(txtvw,
                "Fecha del parte: %s\n" % utils.str_fecha(pdp.fecha))
            escribir(txtvw,
                "Hora de inicio: %s\n"
                % pdp.horainicio.strftime('%H:%M'))
            escribir(txtvw,
                "Hora de fin: %s\n" % pdp.horafin.strftime('%H:%M'))
            escribir(txtvw,
                "Parte verificado y bloqueado: %s\n" % (
                    pdp.bloqueado and "Sí" or "No"))
            escribir(txtvw, "Empleados:\n")
            for ht in pdp.horasTrabajadas:
                escribir(txtvw,
                    "\t%s, %s (%s)\n" % (ht.empleado.apellidos,
                                         ht.empleado.nombre,
                                         ht.horas.strftime('%H:%M')))
def rellenar_producto(self, articulo):
    """
    *articulo* is a pclases.Rollo, pclases.Bala, pclases.Bigbag,
    pclases.BalaCable, pclases.Pale or pclases.Caja.

    Writes the article's traceability code (plus a Code39 barcode image
    of it), per-container details for palés and cajas, and finally the
    sale product information into the 'txt_producto' TextView.
    """
    txtvw = self.wids['txt_producto']
    borrar_texto(txtvw)
    if isinstance(articulo, (pclases.Caja, pclases.Pale)):
        producto = articulo.productoVenta
    else:
        try:
            producto = articulo.articulos[0].productoVenta
        except IndexError, msg:
            # NOTE(review): the log text says "rellenar_albaran" but we
            # are in rellenar_producto — looks like a copy/paste slip.
            self.logger.error("ERROR trazabilidad_articulos.py"
                              " (rellenar_albaran): %s" % (msg))
            producto = None
    if producto == None:
        escribir(txtvw,
            "¡NO SE ENCONTRÓ INFORMACIÓN!\nPosible inconsistencia "
            "de la base de datos. Contacte con el administrador.")
        self.logger.error("trazabilidad_articulos.py::rellenar_producto"
                          " -> Objeto %s no tiene producto asociado." % (
                            articulo))
    else:
        escribir(txtvw,
            "\nCódigo de trazabilidad: %s\n" % articulo.codigo,
            ("negrita", ))
        try:
            # Render the traceability code as a Code39 barcode PNG and
            # load it as a pixbuf for the TextView.
            codigobarras39 = code39.Extended39(articulo.codigo,
                xdim = .070 * cm).guardar_a_png()
            codigobarras39 = gtk.gdk.pixbuf_new_from_file(codigobarras39)
        except Exception, e:
            self.logger.error("trazabilidad_articulos::rellenar_producto"
                              " -> No se pudo guardar o mostrar el código"
                              " %s. Excepción: %s" % (articulo.codigo, e))
        else:
            insertar_imagen(txtvw, codigobarras39)
        if isinstance(articulo, pclases.Pale):
            # List every caja of the palé; cajas already shipped out are
            # shown in red.
            escribir(txtvw, "\nPalé de %d cajas (salidas en rojo):\n\t"
                % len(articulo.cajas))
            cajas = articulo.cajas[:]
            # Python 2 cmp-sort by numeric caja number.
            cajas.sort(lambda c1, c2: int(c1.numcaja) - int(c2.numcaja))
            i = 0
            for caja in cajas:
                codcaja = caja.codigo
                if caja.en_almacen():
                    escribir(txtvw, codcaja, ("negrita", "cursiva",))
                else:
                    escribir(txtvw, codcaja, ("negrita","cursiva","rojo"))
                i += 1
                # Comma-separate the codes; close the list after the last.
                if i < len(cajas):
                    escribir(txtvw, ", ")
                else:
                    escribir(txtvw, "\n\n")
        elif isinstance(articulo, pclases.Caja):
            dict_bolsas = articulo.get_bolsas()
            codsbolsas = ", ".join([dict_bolsas[b]['código']
                                    for b in dict_bolsas])
            escribir(txtvw, "\nCaja de %d bolsas: %s\n\n"
                % (articulo.numbolsas, codsbolsas),
                ("negrita", "cursiva"))
        self.rellenar_info_producto_venta(producto, txtvw)
def rellenar_info_producto_venta(self, producto, txtvw):
    """
    Append the sale product's information to *txtvw*: an EAN barcode of
    its code, its name/description, and whichever group of specific
    fields the product defines (rollo, bala, user-defined or "especial").
    """
    escribir(txtvw, "\n\t")
    # EAN barcode image generated from the product code.
    insertar_imagen(txtvw, gtk.gdk.pixbuf_new_from_file(
        EanBarCode().getImage(producto.codigo)))
    escribir(txtvw, "\nProducto: ", ("negrita", "azul"))
    escribir(txtvw, "%s\n" % (producto.nombre),
             ("negrita", "azul", "grande"))
    escribir(txtvw, "Descripción: %s\n" % (producto.descripcion),
             ("negrita"))
    escribir(txtvw, "Código: %s\n" % (producto.codigo))
    escribir(txtvw, "Arancel: %s\n" % (producto.arancel))
    # Geotextile roll products: roll-specific fields.
    if producto.camposEspecificosRollo != None:
        escribir(txtvw, "Código de Composán: %s\n" %
            producto.camposEspecificosRollo.codigoComposan)
        escribir(txtvw, "gr/m²: %d\n" %
            producto.camposEspecificosRollo.gramos)
        escribir(txtvw, "Ancho: %s\n" % (utils.float2str(
            producto.camposEspecificosRollo.ancho, 2)))
        escribir(txtvw, "Diámetro: %d\n" %
            producto.camposEspecificosRollo.diametro)
        escribir(txtvw, "Metros lineales: %d\n" %
            producto.camposEspecificosRollo.metrosLineales)
        escribir(txtvw, "Rollos por camión: %d\n" %
            producto.camposEspecificosRollo.rollosPorCamion)
        escribir(txtvw, "Peso del embalaje: %s\n" % (
            utils.float2str(producto.camposEspecificosRollo.pesoEmbalaje)))
    # Fibre bale products: bale-specific fields.
    if producto.camposEspecificosBala != None:
        escribir(txtvw, "Material: %s\n" % (
            producto.camposEspecificosBala.tipoMaterialBala and
            producto.camposEspecificosBala.tipoMaterialBala.descripcion or
            "-"))
        escribir(txtvw, "DTEX: %s\n" % (utils.float2str(
            producto.camposEspecificosBala.dtex)))
        escribir(txtvw, "Corte: %d\n" % (
            producto.camposEspecificosBala.corte))
        escribir(txtvw, "Color: %s\n" % (
            producto.camposEspecificosBala.color))
        escribir(txtvw, "Antiuv: %s\n" % (
            producto.camposEspecificosBala.antiuv and "SÍ" or "NO"))
    # Free, user-defined name/value fields.
    if producto.camposEspecificos != []:
        escribir(txtvw, "Campos definidos por el usuario:\n")
        for cee in producto.camposEspecificos:
            escribir(txtvw, "\t%s: %s\n" % (cee.nombre, cee.valor))
    # "Especial" products (presumably non-manufactured goods — TODO
    # confirm): stock-related fields.
    if producto.camposEspecificosEspecial != None:
        escribir(txtvw, "Stock: %s\n" % (
            utils.float2str(producto.camposEspecificosEspecial.stock)))
        escribir(txtvw, "Existencias: %s\n" % (utils.float2str(
            producto.camposEspecificosEspecial.existencias, 0)))
        escribir(txtvw, "Unidad: %s\n" % (
            producto.camposEspecificosEspecial.unidad))
        escribir(txtvw, "Observaciones: %s\n" % (
            producto.camposEspecificosEspecial.observaciones))
def borrar_texto(txt):
    """Empty the buffer of the given gtk.TextView."""
    buf = txt.get_buffer()
    buf.set_text('')
def insertar_imagen(txt, imagen):
    """
    Insert the pixbuf *imagen* into the TextView *txt* at the current
    cursor position, followed by a newline.
    """
    buf = txt.get_buffer()
    cursor = buf.get_iter_at_mark(buf.get_insert())
    buf.insert_pixbuf(cursor, imagen)
    buf.insert_at_cursor("\n")
def buscar_partes_fibra_fecha_y_hora(fechahora):
    """
    Return the list of fibre-line production reports whose time span
    contains the received date and time.
    """
    candidatos = pclases.ParteDeProduccion.select(pclases.AND(
        pclases.ParteDeProduccion.q.fechahorainicio <= fechahora,
        pclases.ParteDeProduccion.q.fechahorafin >= fechahora))
    return [pdp for pdp in candidatos if pdp.es_de_fibra()]
def operarios_de_partes(partes):
    """
    Given a list of production reports, return the distinct employees
    that worked in them, preserving first-seen order.
    """
    vistos = []
    for parte in partes:
        for horas in parte.horasTrabajadas:
            empleado = horas.empleado
            if empleado not in vistos:
                vistos.append(empleado)
    return vistos
def escribir(txt, texto, estilos=()):
    """
    Write *texto* into the buffer of the TextView *txt*.

    *estilos* is a tuple (or string) of style keywords; when empty the
    text is inserted plain, otherwise an anonymous tag carrying the
    matching pango/colour properties is applied.
    """
    buf = txt.get_buffer()
    if estilos == ():
        buf.insert_at_cursor(texto)
        return
    import pango
    insercion = buf.get_iter_at_mark(buf.get_insert())
    tag = buf.create_tag()
    # Keyword -> (tag property, value). Iterated in a fixed order so
    # that, as before, "azul" overrides "rojo" when both are given.
    propiedades = (
        ("negrita", "weight", pango.WEIGHT_BOLD),
        ("cursiva", "style", pango.STYLE_ITALIC),
        ("rojo", "foreground", "red"),
        ("azul", "foreground", "blue"),
        ("_rojoclaro", "background", "pale violet red"),
        ("_verdeclaro", "background", "pale green"),
        ("grande", "size_points", 14),
    )
    for clave, propiedad, valor in propiedades:
        if clave in estilos:
            tag.set_property(propiedad, valor)
    buf.insert_with_tags(insercion, texto, tag)
if __name__ == '__main__':
    # Standalone launch of the traceability window, logged in as user 1.
    t = TrazabilidadArticulos(usuario = pclases.Usuario.get(1))
| pacoqueen/bbinn | formularios/trazabilidad_articulos.py | Python | gpl-2.0 | 71,269 |
#
# Copyright (C) 2015 Uninett AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 3 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details. You should have received a copy of the GNU General Public License
# along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
from twisted.internet import defer
from nav.smidumps import get_mib
from nav.mibs.mibretriever import MibRetriever
class HPHTTPManageableMib(MibRetriever):
    """HP-httpManageable-MIB (SEMI-MIB) MibRetriever"""

    mib = get_mib('SEMI-MIB')

    @defer.inlineCallbacks
    def get_serial_number(self):
        """Tries to get a chassis serial number from old HP switches.

        Returns (via the Deferred) the serial number as a text string,
        or no result when the agent does not answer.
        """
        # GETNEXT on the object name — presumably to reach the scalar's
        # .0 instance without spelling out the suffix; TODO confirm.
        serial = yield self.get_next('hpHttpMgSerialNumber')
        if serial:
            # Agents may return raw octets; normalize to a text string.
            if isinstance(serial, bytes):
                serial = serial.decode("utf-8")
            defer.returnValue(serial)
| UNINETT/nav | python/nav/mibs/hp_httpmanageable_mib.py | Python | gpl-2.0 | 1,236 |
"""
Django settings for central_service project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
PROJECT_PATH = os.path.dirname(os.path.realpath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'd$^6$7ybljkbz@b#7j&4cz_46dhe$=uiqnxuz+h3yoyj6u$$fk'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True

# Application definition
INSTALLED_APPS = (
    'django_admin_bootstrapped',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'services',
    'service_pages',
    'rest_framework',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'central_service.urls'
WSGI_APPLICATION = 'central_service.wsgi.application'

# Templates
TEMPLATE_DIRS = (
    os.path.join(PROJECT_PATH, 'templates'),
)

# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# Development database: a single SQLite file next to the project.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Cache
# Per-process in-memory cache; entries are not shared between workers.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': 'unique-snowflake',
    }
}

# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'

# Used in production to define where collectstatic stores stuff
STATIC_ROOT = os.path.join(PROJECT_PATH, '../static')
ADMIN_MEDIA_PREFIX = '/static/admin/'

# STATICFILES_FINDERS = (
#     'django.contrib.staticfiles.finders.FileSystemFinder',
#     'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# )

# Used in development to force django to serve static files
STATICFILES_DIRS = [
    os.path.join(PROJECT_PATH, "static"),
]

LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/'

REST_FRAMEWORK = {
    # Use Django's standard `django.contrib.auth` permissions,
    # or allow read-only access for unauthenticated users.
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.IsAdminUser',
    ]
}
| Antikythera/hoot | Application/central_service/settings.py | Python | gpl-2.0 | 3,154 |
import chanutils.torrent
from chanutils import get_json, movie_title_year
from playitem import TorrentPlayItem, PlayItemList
# Endpoint used for free-text searches.
_SEARCH_URL = 'https://yts.mx/api/v2/list_movies.json'

# Static feed definitions: each entry hits the "list_movies" endpoint
# with a different genre/sort filter, limited to 50 results.
_FEEDLIST = [
    {'title':'Latest', 'url':'https://yts.mx/api/v2/list_movies.json?limit=50'},
    {'title':'Highest Rated', 'url':'https://yts.mx/api/v2/list_movies.json?sort_by=rating&limit=50'},
    {'title':'Action', 'url':'https://yts.mx/api/v2/list_movies.json?genre=action&sort_by=rating&limit=50'},
    {'title':'Adventure', 'url':'https://yts.mx/api/v2/list_movies.json?genre=adventure&sort_by=rating&limit=50'},
    {'title':'Animation', 'url':'https://yts.mx/api/v2/list_movies.json?genre=animation&sort_by=rating&limit=50'},
    {'title':'Biography', 'url':'https://yts.mx/api/v2/list_movies.json?genre=biography&sort_by=rating&limit=50'},
    {'title':'Comedy', 'url':'https://yts.mx/api/v2/list_movies.json?genre=comedy&sort_by=rating&limit=50'},
    {'title':'Crime', 'url':'https://yts.mx/api/v2/list_movies.json?genre=crime&sort_by=rating&limit=50'},
    {'title':'Documentary', 'url':'https://yts.mx/api/v2/list_movies.json?genre=documentary&sort_by=rating&limit=50'},
    {'title':'Drama', 'url':'https://yts.mx/api/v2/list_movies.json?genre=drama&sort_by=rating&limit=50'},
    {'title':'Family', 'url':'https://yts.mx/api/v2/list_movies.json?genre=family&sort_by=rating&limit=50'},
    {'title':'Fantasy', 'url':'https://yts.mx/api/v2/list_movies.json?genre=fantasy&sort_by=rating&limit=50'},
    {'title':'Film-Noir', 'url':'https://yts.mx/api/v2/list_movies.json?genre=film-noir&sort_by=rating&limit=50'},
    {'title':'History', 'url':'https://yts.mx/api/v2/list_movies.json?genre=history&sort_by=rating&limit=50'},
    {'title':'Horror', 'url':'https://yts.mx/api/v2/list_movies.json?genre=horror&sort_by=rating&limit=50'},
    {'title':'Music', 'url':'https://yts.mx/api/v2/list_movies.json?genre=music&sort_by=rating&limit=50'},
    {'title':'Musical', 'url':'https://yts.mx/api/v2/list_movies.json?genre=musical&sort_by=rating&limit=50'},
    {'title':'Mystery', 'url':'https://yts.mx/api/v2/list_movies.json?genre=mystery&sort_by=rating&limit=50'},
    {'title':'Romance', 'url':'https://yts.mx/api/v2/list_movies.json?genre=romance&sort_by=rating&limit=50'},
    {'title':'Sci-Fi', 'url':'https://yts.mx/api/v2/list_movies.json?genre=sci-fi&sort_by=rating&limit=50'},
    {'title':'Sport', 'url':'https://yts.mx/api/v2/list_movies.json?genre=sport&sort_by=rating&limit=50'},
    {'title':'Thriller', 'url':'https://yts.mx/api/v2/list_movies.json?genre=thriller&sort_by=rating&limit=50'},
    {'title':'War', 'url':'https://yts.mx/api/v2/list_movies.json?genre=war&sort_by=rating&limit=50'},
    {'title':'Western', 'url':'https://yts.mx/api/v2/list_movies.json?genre=western&sort_by=rating&limit=50'},
]
def name():
    """Channel display name."""
    return "YTS Torrents"
def image():
    """Icon file name for this channel."""
    return "icon.png"
def description():
    """Short HTML description shown in the channel list."""
    return ("YTS Torrents Channel (<a target='_blank' "
            "href='https://yts.mx'>https://yts.mx</a>).")
def feedlist():
    """Return the static feed definitions for this channel."""
    return _FEEDLIST
def feed(idx):
    """Download and parse the predefined feed at position *idx*."""
    entry = _FEEDLIST[idx]
    return _extract(get_json(entry['url'], proxy=True))
def search(q):
    """Query the YTS search API for *q* (at most 50 results)."""
    payload = {'query_term': q, 'limit': 50}
    return _extract(get_json(_SEARCH_URL, params=payload, proxy=True))
def _extract(data):
    """
    Build a PlayItemList from a YTS "list_movies" API response.

    Movies without torrents are skipped; for each movie the smallest
    available torrent is used. Returns an empty PlayItemList when the
    response carries no data/movies.
    """
    results = PlayItemList()
    if 'data' not in data:
        return results
    if 'movies' not in data['data']:
        return results
    rtree = data['data']['movies']
    for r in rtree:
        if 'torrents' not in r:
            continue
        torrent = _smallest_size(r['torrents'])
        title = r['title_long']
        img = r['medium_cover_image']
        # Proxy: swap the API host prefix for the image mirror host.
        img = "https://img.yts.mx" + img[14:]
        url = torrent['url']
        size = torrent['size']
        seeds = torrent['seeds']
        peers = torrent['peers']
        subtitle = chanutils.torrent.subtitle(size, seeds, peers)
        rating = str(r['rating'])
        # Show "7" instead of "7.0". The previous code removed only the
        # final "0", leaving a dangling dot ("7.") and even truncating an
        # integer rating of 10 or 0 to "1" / "".
        if rating.endswith('.0'):
            rating = rating[:-2]
        imdb = "<a target='_blank' href='http://www.imdb.com/title/" + r['imdb_code'] + "/'>IMDB Rating: " + rating + "</a>"
        synopsis = imdb
        subs = movie_title_year(title)
        subs['imdb'] = r['imdb_code']
        results.add(TorrentPlayItem(title, img, url, subtitle, synopsis, subs))
    return results
def _smallest_size(torrlist):
size = torrlist[0]['size_bytes']
torrent = torrlist[0]
for t in torrlist:
if t['size_bytes'] < size:
size = t['size_bytes']
torrent = t
return torrent
| blissland/blissflixx | chls/bfch_yts_torrents/__init__.py | Python | gpl-2.0 | 4,386 |
#!/usr/bin/env python
from phamerator import *
from phamerator.phamerator_manage_db import *
from phamerator.db_conf import db_conf
import sys, getpass
# Command-line argument: the GeneID whose relatives are to be listed.
GeneID = sys.argv[1]
# Prompt interactively for the MySQL root password and database name.
password = getpass.getpass()
db = raw_input('database: ')
# Open a cursor against the phamerator database server.
c = db_conf(username='root', password=password, server='134.126.132.72', db=db).get_cursor()
print get_relatives(c, GeneID, alignmentType='both', clustalwThreshold=0.275, blastThreshold=0.0001)
| byuphamerator/phamerator-dev | phamerator/plugins/get_relatives.py | Python | gpl-2.0 | 429 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2015-2016 James Clark <james.clark@ligo.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
nrburst_pickle_preserve.py
Crunch together pickles from nrburst_match.py
"""
import sys
import glob
import cPickle as pickle
import numpy as np
# Gather every result pickle whose name starts with the given prefix.
pickle_files = glob.glob(sys.argv[1]+'*pickle')
user_tag = sys.argv[2]

# Posterior samples were produced in chunks of 100; these are the
# (min, max) sample-index pairs encoded in the chunked runs' file names.
delta_samp=100
sample_pairs=zip(range(0,1000,delta_samp), range(delta_samp-1,1000,delta_samp))

# Get numbers for pre-allocation
sim_instances = [name.split('-')[1] for name in pickle_files]
sim_names = np.unique(sim_instances)

# XXX: assume all sample runs have same number of jobs..
n_sample_runs = sim_instances.count(sim_names[0])

# Load first one to extract data for preallocation
current_matches, current_masses, current_inclinations, config, \
        simulations = pickle.load(open(pickle_files[0],'r'))
nSims = len(sim_names)
nsampls = config.nsampls * n_sample_runs

# --- Preallocate
matches = np.zeros(shape=(nSims, nsampls))
masses = np.zeros(shape=(nSims, nsampls))
inclinations = np.zeros(shape=(nSims, nsampls))

# be a bit careful with the simulations object
setattr(simulations, 'simulations', [])
setattr(simulations, 'nsimulations', nSims)

for f, name in enumerate(sim_names):
    startidx=0
    endidx=len(current_matches[0])
    for s in xrange(n_sample_runs):
        # NOTE(review): 'file' shadows the Python 2 builtin; harmless
        # here, but a different name would be clearer.
        if n_sample_runs>1:
            file = glob.glob('*%s-minsamp_%d-maxsamp_%d*'%(
                name, min(sample_pairs[s]), max(sample_pairs[s])))[0]
        else:
            file = pickle_files[f]
        current_matches, current_masses, current_inclinations, config, \
                current_simulations = pickle.load(open(file,'r'))
        # Copy this chunk's samples into the per-simulation row.
        matches[f,startidx:endidx] = current_matches[0]
        masses[f,startidx:endidx] = current_masses[0]
        inclinations[f,startidx:endidx] = current_inclinations[0]
        # Advance the slice window for the next chunk of samples.
        startidx += len(current_matches[0])
        endidx = startidx + len(current_matches[0])
    simulations.simulations.append(current_simulations.simulations[0])

# Dump the merged arrays plus the (patched) config/simulations objects.
filename=user_tag+'_'+config.algorithm+'.pickle'
pickle.dump([matches, masses, inclinations, config, simulations],
            open(filename, "wb"))
| astroclark/numrel_bursts | nrburst_utils/nrburst_pickle_preserve.py | Python | gpl-2.0 | 2,870 |
# elastos_cast_checker.py
# encoding: UTF-8
# usages:
# sudo chmod a+x elastos_cast_checker.py
# python elastos_cast_checker.py
import re
import os
import sys
def read_file(path):
    """
    Return the whitespace-stripped lines of *path* when it is an
    existing C++ implementation (.cpp) or header (.h) file; otherwise
    return an empty list.
    """
    lines = []
    if path.endswith(('.cpp', '.h')):
        if os.path.isfile(path):
            # 'with' guarantees the handle is closed even if reading
            # raises; the original leaked the descriptor on errors.
            with open(path, 'r') as handle:
                lines = [line.strip() for line in handle]
    return lines
def find_declare_match(param, line):
    """
    Search *line* for an AutoPtr declaration list that declares *param*.

    Returns the re match object (group 1 = template type, group 2 = the
    declarators preceding *param*) or None when there is no match.
    """
    # re.escape keeps the pattern valid even if the identifier ever
    # contains a regex metacharacter; it was interpolated verbatim before.
    pattern = re.compile(r'AutoPtr\s*<(.*)>\s*(.*)' + '[, ]'
                         + re.escape(param) + '[; ,]')
    return pattern.search(line)
def check_declare_match(usedType, param, declLine):
    """
    Check whether *declLine* declares *param* as AutoPtr<*usedType*>.

    Returns the re match object or None when the declared type differs.
    """
    # Both names come from earlier regex captures; escape them so stray
    # metacharacters (e.g. '*') cannot corrupt or crash the pattern.
    pattern = re.compile(r'AutoPtr\s*<\s*' + re.escape(usedType)
                         + r'\s*>\s*(.*)' + '[, ]'
                         + re.escape(param) + '[; ,]')
    return pattern.search(declLine)
def find_declare_line(param, lines, lineIndex):
    """Search *lines* backwards from *lineIndex* for *param*'s AutoPtr declaration.

    Args:
        param: parameter name to look for.
        lines: stripped source lines of the file.
        lineIndex: index to start the backward search from (inclusive).

    Returns:
        Index of the declaration line, or -1 when not found.
    """
    if len(lines) == 0:
        return -1
    # Search back to AND INCLUDING index 0: the original range(lineIndex, 0, -1)
    # stopped at index 1, missing declarations on the first line of the file.
    for i in range(lineIndex, -1, -1):
        line = lines[i]
        # skip empty/one-char lines and full-line comments
        if (len(line) > 1) and (line.startswith("//") == False):
            match = find_declare_match(param, line)
            if match:
                return i
    return -1
def check_match(firstLog, logFile, cppFilepath, usedMatch, usedLineNum, declLine, declLineNum, isHeader = True):
    """Verify that a `(IType**)&param` cast agrees with param's AutoPtr declaration.

    Logs an error (to logFile and stdout) when the declared type differs.
    `firstLog` tracks whether the file header line still has to be emitted.

    NOTE(review): on the error path the function falls off the end and
    implicitly returns None; callers rebind `firstLog` to the result, which
    only works because None is falsy like False -- confirm intentional.
    """
    usedType = usedMatch.group(2)
    param = usedMatch.group(4)
    matchInfo = usedMatch.group()
    match = check_declare_match(usedType, param, declLine)
    if match == None:
        if firstLog:
            firstLog = False
            logInfo ='\n>> process file: ' + cppFilepath + '\n'
            logFile.write(logInfo)
            print logInfo
        fileInfo = ''
        if isHeader:
            fileInfo = 'in .h file'
        logInfo = " > error: invalid using of {0} at line {1:d}, it is declared as {2} '{3}' at line {4:d}.\n" \
            .format(matchInfo, usedLineNum + 1, declLine, fileInfo, declLineNum + 1)
        logFile.write(logInfo)
        print logInfo
    else:
        #print 'match ', matchInfo, declLine
        return firstLog
def process_declare_line_in_header(logFile, firstLog, cppFilepath, match, lines, lineNum, headerFilepath):
    """Fall back to the companion .h file when a declaration was not found in the .cpp.

    Searches `headerFilepath` bottom-up for `param`'s declaration and checks
    the cast against it; otherwise emits a warning. Returns the updated
    `firstLog` flag.
    """
    headerLines = read_file(headerFilepath)
    param = match.group(4)
    matchInfo = match.group()
    # search the whole header, bottom-up
    declLineNum = find_declare_line(param, headerLines, len(headerLines)-1)
    if (declLineNum != -1):
        declLine = headerLines[declLineNum]
        #print 'declLine', declLine
        firstLog = check_match(firstLog, logFile, cppFilepath, match, lineNum, declLine, declLineNum)
    else:
        logInfo = ''
        if firstLog:
            firstLog = False
            logInfo ='\n>> process file: ' + cppFilepath + '\n'
            logFile.write(logInfo)
            print logInfo
        # member variables (m-prefixed) may be declared in a base class header
        if param.startswith('m'):
            logInfo = " = warning: declaration for {0} at line {1:d} not found! is it declared in super class's .h file?\n".format(matchInfo, lineNum + 1)
        else:
            logInfo = " = warning: declaration for {0} at line {1:d} not found!\n".format(matchInfo, lineNum + 1)
        logFile.write(logInfo)
        print logInfo
    return firstLog
def process_file(path, logFile):
    """Scan one .cpp file for `(IType**)&param` casts whose AutoPtr
    declaration uses a different interface type, logging mismatches."""
    if path.endswith('.cpp') == False:
        return
    firstLog = True;
    lines = read_file(path)
    lineNum = 0
    for eachLine in lines:
        if (len(eachLine) > 1) and (eachLine.startswith("//") == False):
            # groups: 2 = interface type used in the cast, 4 = parameter name
            pattern = re.compile(r'(\()(I\w*)(\*\*\)&)([a-zA-Z]\w*)(\))')
            match = pattern.search(eachLine)
            if match:
                #print match.group() match.groups()
                #print match.group(2), match.group(4)
                usedType = match.group(2)
                param = match.group(4)
                # do not check weak-reference Resolve
                if usedType == 'IInterface' and eachLine.find('->Resolve(') != -1:
                    pass
                else:
                    # look for the declaration above the usage in this file
                    declLineNum = find_declare_line(param, lines, lineNum)
                    if (declLineNum != -1):
                        declLine = lines[declLineNum]
                        #print 'declLine', declLine
                        firstLog = check_match(firstLog, logFile, path, match, lineNum, declLine, declLineNum, False)
                    else:
                        # not declared in the .cpp: try the matching header
                        headerFilepath = path.replace("/src/", "/inc/").replace(".cpp", ".h")
                        firstLog = process_declare_line_in_header(logFile, firstLog, path, match, lines, lineNum, headerFilepath)
        lineNum = lineNum +1
def process_dir(path, logFile):
    """Recursively run `process_file` on every file below `path`,
    skipping hidden directories."""
    listfile = os.listdir(path)
    for filename in listfile:
        filepath = path + '/' + filename
        if(os.path.isdir(filepath)):
            # exclude hidden dirs
            if(filename[0] == '.'):
                pass
            else:
                process_dir(filepath, logFile)
        elif(os.path.isfile(filepath)):
            process_file(filepath, logFile)
def summarize_log(logPath):
    """Count '> error:' / '= warning:' lines in the log file and append a
    'total: N errors, M warnings.' line to it."""
    if(os.path.isfile(logPath)):
        errorCount = 0
        warningCount = 0
        # summarize
        logFile = open(logPath, 'r')
        for line in logFile:
            # strip() removes the leading indentation written by check_match
            line = line.strip()
            if line.startswith('> error:') == True:
                errorCount = errorCount + 1
            elif line.startswith('= warning:') == True:
                warningCount = warningCount + 1
        logFile.close()
        # log
        logFile = open(logPath, 'a')
        logInfo = '\ntotal: {0:d} errors, {1:d} warnings.'.format(errorCount, warningCount)
        logFile.write(logInfo)
        print logInfo
        logFile.close()
def process(path, logPath):
    """Run the cast checker on a file or directory tree.

    The log file at `logPath` is recreated on every run; afterwards the
    error/warning totals are appended by `summarize_log`.
    """
    if(os.path.isfile(logPath)):
        os.remove(logPath)
    logFile = open(logPath, 'a')
    print 'output to', logPath
    if(os.path.isdir(path)):
        process_dir(path, logFile)
    elif(os.path.isfile(path)):
        process_file(path, logFile)
    else:
        print 'invalid path:', path
    logFile.close()
    summarize_log(logPath)
# Previous runs kept for reference (with their recorded totals):
#process('/home/kesalin/test/python/test.cpp', 'elastos_cast_checker.log')
#total: 2 errors, 10 warnings.
#process('/home/kesalin/Elastos5/Sources/Elastos/LibCore/src', '/home/kesalin/elastos_cast_checker.log')
#process('/home/kesalin/Elastos5/Sources/Elastos/Frameworks/Droid/Base/Core/src/', '/home/kesalin/elastos_cast_checker.log')
#total: 7 errors, 0 warnings.
# Entry point: scan the Services/Server source tree.
process('/home/kesalin/Elastos5/Sources/Elastos/Frameworks/Droid/Base/Services/Server/src', '/home/kesalin/elastos_cast_checker.log')
| kesalin/PythonSnippet | elastos_cast_checker.py | Python | gpl-2.0 | 6,505 |
# LPTHW exercise 31 (Python 2): a small branching text adventure driven by
# raw_input().  Each top-level branch handles one door choice; unmatched
# answers fall through to the else branches.
print "You enter a dark room with three doors. Do you go through door #1, door #2 or door #3?"
door = raw_input("> ")
if door == "1":
    print "There's a giant bear here eating a cheese cake. What do you do?"
    print "1. Take the cake."
    print "2. Scream at the bear."
    bear = raw_input("> ")
    if bear == "1":
        print "The bear eats your face off. Good job!"
    elif bear == "2":
        print "The bear eats your legs off. Good job!"
    else:
        print "Well, doing %s is probably better. Bear runs away." % bear
elif door == "2":
    print "You stare into the endless abyss at Cthulhu's retina."
    print "1. Blueberries."
    print "2. Yellow jacket clothespins."
    print "3. Understanding revolvers yelling melodies."
    insanity = raw_input("> ")
    if insanity == "1" or insanity == "2":
        print "Your body survives powered by a mind of jello. Good job!"
    else:
        print "The insanity rots your eyes into a pool of muck. Good job!"
elif door == "3":
    print " You are on the field with unicorns and faries"
    print "1. I am happy dancing with faries"
    print "2. How awfull! Where is my bear?"
    what_happens = raw_input("> ")
    if what_happens == "1":
        print " Wow! You are discostar!"
    elif what_happens == "2":
        print " No bear, only unicorns. Suffer"
    else:
        print "You are bore."
# any other door number falls through to the catch-all below
else:
print "You stumble around and fall on a knife and die. Good job!" | tridvaodin/Assignments-Valya-Maskaliova | LPTHW/ex31.py | Python | gpl-2.0 | 1,470 |
# Sweep a 256x256 column region of the Minecraft world from y=2 up to y=62,
# replacing every air block (id 0) with block id 8 (flowing water).
# NOTE(review): despite the file name, id 8 is water, not obsidian (49) --
# confirm the intended block id.  `random` and `time` are imported but unused.
import minecraft as minecraft
import random
import time
x = 128
y = 2
z = 128
mc = minecraft.Minecraft.create()
while y < 63:
    j = mc.getBlock(x,y,z)
    if j == 0:
        mc.setBlock(x,y,z,8)
    # walk z from 128 down to -127, then step x, then step y (layer sweep)
    z = z - 1
    if z <= -128:
        z = 128
        x = x - 1
        if x<= -128:
            x = 128
            y = y + 1
| mohsraspi/mhscs14 | jay/wowobsidian.py | Python | gpl-2.0 | 437 |
#!/usr/bin/env python
# Legacy Django management entry point.  NOTE(review): `execute_manager` was
# deprecated in Django 1.4 and removed in 1.6; this boilerplate only works on
# old project layouts with a top-level settings.py.
from django.core.management import execute_manager
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in"
                     " the directory containing %r. It appears you've customized "
                     "things.\nYou'll have to run django-admin.py, passing it your"
                     " settings module.\n(If the file settings.py does indeed exist,"
                     " it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)
if __name__ == "__main__":
    execute_manager(settings)
| liveaverage/baruwa | src/baruwa/manage.py | Python | gpl-2.0 | 575 |
# -*- coding: utf-8 -*-
# SyConn - Synaptic connectivity inference toolkit
#
# Copyright (c) 2016 - now
# Max Planck Institute of Neurobiology, Martinsried, Germany
# Authors: Philipp Schubert, Joergen Kornfeld
import itertools
from typing import List, Any, Optional, TYPE_CHECKING
import networkx as nx
import numpy as np
import tqdm
from knossos_utils.skeleton import Skeleton, SkeletonAnnotation, SkeletonNode
from scipy import spatial
if TYPE_CHECKING:
from ..reps.super_segmentation import SuperSegmentationObject
from .. import global_params
from ..mp.mp_utils import start_multiprocess_imap as start_multiprocess
def bfs_smoothing(vertices, vertex_labels, max_edge_length=120, n_voting=40):
    """
    Smooth vertex labels by applying a majority vote on a
    BFS subset of nodes for every node in the graph.

    Args:
        vertices: np.array
            (N, 3) vertex coordinates.
        vertex_labels: np.array
            (N, 1) one label per vertex.
        max_edge_length: float
            maximum distance between vertices to consider them connected in the
            graph.
        n_voting: int
            Number of collected nodes during BFS used for majority vote.

    Returns: np.array
        smoothed vertex labels (same shape as `vertex_labels`)
    """
    G = create_graph_from_coords(vertices, max_dist=max_edge_length, mst=False,
                                 force_single_cc=False)
    # create BFS subset
    bfs_nn = split_subcc(G, max_nb=n_voting, verbose=False)
    new_vertex_labels = np.zeros_like(vertex_labels)
    for ii in range(len(vertex_labels)):
        # majority vote over the labels of vertex ii's BFS neighborhood
        curr_labels = vertex_labels[bfs_nn[ii]]
        labels, counts = np.unique(curr_labels, return_counts=True)
        majority_label = labels[np.argmax(counts)]
        new_vertex_labels[ii] = majority_label
    return new_vertex_labels
def split_subcc(g, max_nb, verbose=False, start_nodes=None):
    """
    Creates subgraph for each node consisting of nodes until maximum number of
    nodes is reached.

    Args:
        g: Graph
        max_nb: int
            Maximum number of BFS edges traversed per start node.
        verbose: bool
        start_nodes: iterable
            node ID's; defaults to all nodes of `g`.

    Returns: dict
        Maps each start node to the list of collected nodes (start node first).
    """
    subnodes = {}
    if verbose:
        nb_nodes = g.number_of_nodes()
        pbar = tqdm.tqdm(total=nb_nodes, leave=False)
    if start_nodes is None:
        iter_ixs = g.nodes()
    else:
        iter_ixs = start_nodes
    for n in iter_ixs:
        n_subgraph = [n]
        nb_edges = 0
        # collect BFS targets until max_nb edges were traversed
        for e in nx.bfs_edges(g, n):
            n_subgraph.append(e[1])
            nb_edges += 1
            if nb_edges == max_nb:
                break
        subnodes[n] = n_subgraph
        if verbose:
            pbar.update(1)
    if verbose:
        pbar.close()
    return subnodes
def chunkify_contiguous(l, n):
    """Yield consecutive slices of *l*, each of length *n* (last may be shorter).

    https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks
    """
    for start in range(0, len(l), n):
        chunk = l[start:start + n]
        yield chunk
def split_subcc_join(g: nx.Graph, subgraph_size: int, lo_first_n: int = 1) -> List[List[Any]]:
    """
    Creates a subgraph for each node consisting of nodes until maximum number of
    nodes is reached.

    Args:
        g: Supervoxel graph
        subgraph_size: Size of subgraphs. The difference between `subgraph_size` and `lo_first_n` defines the
            supervoxel overlap.
        lo_first_n: Leave out first n nodes: will collect `subgraph_size` nodes starting from center node and then
            omit the first lo_first_n nodes, i.e. not use them as new starting nodes.

    Returns:
        List of node lists, one per base chunk, each extended by BFS context.
    """
    # prefer a degree-1 node as DFS root so the traversal follows branches
    start_node = list(g.nodes())[0]
    for n, d in dict(g.degree).items():
        if d == 1:
            start_node = n
            break
    dfs_nodes = list(nx.dfs_preorder_nodes(g, start_node))
    # get subgraphs via splicing of traversed node list into equally sized fragments. they might
    # be unconnected if branch sizes mod subgraph_size != 0, then a chunk will contain multiple connected components.
    # NOTE(review): the base chunk length used here is `lo_first_n`, not
    # `subgraph_size` -- confirm this matches the intended overlap semantics.
    chunks = list(chunkify_contiguous(dfs_nodes, lo_first_n))
    sub_graphs = []
    for ch in chunks:
        # collect all connected component subgraphs
        sg = g.subgraph(ch).copy()
        sub_graphs += list((sg.subgraph(c) for c in nx.connected_components(sg)))
    # add more context to subgraphs
    subgraphs_withcontext = []
    for sg in sub_graphs:
        # add context but omit artificial start node
        context_nodes = []
        for n in list(sg.nodes()):
            subgraph_nodes_with_context = []
            nb_edges = sg.number_of_nodes()
            # BFS in the full graph until `subgraph_size` nodes are counted
            for e in nx.bfs_edges(g, n):
                subgraph_nodes_with_context += list(e)
                nb_edges += 1
                if nb_edges == subgraph_size:
                    break
            context_nodes += subgraph_nodes_with_context
        # add original nodes
        context_nodes = list(set(context_nodes))
        for n in list(sg.nodes()):
            if n in context_nodes:
                context_nodes.remove(n)
        subgraph_nodes_with_context = list(sg.nodes()) + context_nodes
        subgraphs_withcontext.append(subgraph_nodes_with_context)
    return subgraphs_withcontext
def merge_nodes(G, nodes, new_node):
    """Collapse *nodes* of graph G in-place into a single *new_node*.

    Every neighbor of any merged node becomes a neighbor of *new_node*;
    the merged nodes are removed afterwards.
    FOR UNWEIGHTED, UNDIRECTED GRAPHS ONLY.
    """
    if G.is_directed():
        raise ValueError('Method "merge_nodes" is only valid for undirected graphs.')
    G.add_node(new_node)
    for old in nodes:
        for edge in G.edges(old):
            # re-attach the edge's far endpoint to the merged node
            endpoints = list(edge)
            endpoints.remove(old)
            G.add_edge(new_node, endpoints[0])
    # remove the merged nodes only after all edges were re-attached
    for old in nodes:
        G.remove_node(old)
def split_glia_graph(nx_g, thresh, clahe=False, nb_cpus=1, pred_key_appendix=""):
    """
    Split graph into glia and non-glia connected components.

    Args:
        nx_g: nx.Graph
        thresh: float
            Glia probability threshold.
        clahe: bool
            Use CLAHE-normalized glia predictions.
        nb_cpus: int
        pred_key_appendix: str
            Suffix selecting a specific prediction run.

    Returns: list, list
        Neuron, glia connected components.
    """
    # assemble the attribute-dict key of the requested glia prediction
    glia_key = "glia_probas"
    if clahe:
        glia_key += "_clahe"
    glia_key += pred_key_appendix
    glianess, size = get_glianess_dict(list(nx_g.nodes()), thresh, glia_key,
                                       nb_cpus=nb_cpus)
    return remove_glia_nodes(nx_g, size, glianess, return_removed_nodes=True)
def split_glia(sso, thresh, clahe=False, pred_key_appendix=""):
    """
    Split SuperSegmentationObject into glia and non glia
    SegmentationObjects.

    Args:
        sso: SuperSegmentationObject
        thresh: float
            Glia probability threshold.
        clahe: bool
        pred_key_appendix: str
            Defines type of glia predictions

    Returns: list, list (of SegmentationObject)
        Neuron, glia nodes
    """
    # operate on the cell's region adjacency (supervoxel) graph
    nx_G = sso.rag
    nonglia_ccs, glia_ccs = split_glia_graph(nx_G, thresh=thresh, clahe=clahe,
                                             nb_cpus=sso.nb_cpus, pred_key_appendix=pred_key_appendix)
    return nonglia_ccs, glia_ccs
def create_ccsize_dict(g: 'nx.Graph', bbs: dict, is_connected_components: bool = False) -> dict:
    """
    Map every node of `g` to the bounding-box diagonal (physical units) of the
    connected component it belongs to.

    Args:
        g: Supervoxel graph, or an iterable of connected components
            (see `is_connected_components`).
        bbs: Bounding boxes per node (physical units).
        is_connected_components: If True, `g` already is the connected
            components; otherwise ``nx.connected_components`` is applied.

    Returns:
        Look-up which stores the connected component bounding box diagonal for
        every single node in the input Graph `g`.
    """
    components = g if is_connected_components else nx.connected_components(g)
    node2cssize_dict = {}
    for cc in components:
        # if an ID is not in bbs, it was skipped due to low voxel count
        boxes = [bbs[node] for node in cc if node in bbs]
        if len(boxes) == 0:
            raise ValueError(f'Could not find a single bounding box for connected component with IDs: {cc}.')
        points = np.concatenate(boxes)
        diagonal = np.linalg.norm(np.max(points, axis=0) -
                                  np.min(points, axis=0), ord=2)
        for node in cc:
            node2cssize_dict[node] = diagonal
    return node2cssize_dict
def get_glianess_dict(seg_objs, thresh, glia_key, nb_cpus=1,
                      use_sv_volume=False, verbose=False):
    """Fetch glia prediction and a size proxy for every segmentation object.

    Args:
        seg_objs: Sequence of SegmentationObjects.
        thresh: Probability threshold forwarded to `so.glia_pred`.
        glia_key: Attribute-dict key of the glia probabilities.
        nb_cpus: Worker count for the multiprocess map.
        use_sv_volume: Use voxel count (`so.size`) instead of mesh bounding box.
        verbose: Show progress of the multiprocess map.

    Returns:
        Two dicts keyed by object: glia prediction and size proxy.
    """
    glianess = {}
    sizes = {}
    params = [[so, glia_key, thresh, use_sv_volume] for so in seg_objs]
    res = start_multiprocess(glia_loader_helper, params, nb_cpus=nb_cpus,
                             verbose=verbose, show_progress=verbose)
    # results come back in input order, so re-associate by index
    for ii, el in enumerate(res):
        so = seg_objs[ii]
        glianess[so] = el[0]
        sizes[so] = el[1]
    return glianess, sizes
def glia_loader_helper(args):
    """Worker for `get_glianess_dict`.

    Args:
        args: tuple ``(so, glia_key, thresh, use_sv_volume)``.

    Returns:
        Tuple of (glia prediction, size proxy: voxel count if
        `use_sv_volume` else mesh bounding box).
    """
    so, glia_key, thresh, use_sv_volume = args
    # lazily load the attribute dict if the prediction key is missing
    if glia_key not in so.attr_dict.keys():
        so.load_attr_dict()
    prediction = so.glia_pred(thresh)
    size_proxy = so.size if use_sv_volume else so.mesh_bb
    return prediction, size_proxy
def remove_glia_nodes(g, size_dict, glia_dict, return_removed_nodes=False):
    """
    Calculate distance weights for shortest path analysis or similar, based on
    glia and size vertex properties and removes unsupporting glia nodes.

    Nodes with `glia_dict[n] != 0` are treated as glia. Components smaller
    than ``global_params.config['min_cc_size_ssv']`` (bounding-box diagonal)
    are considered insignificant: tiny glia fragments are kept inside the
    neuron graph, and orphaned tiny neuron fragments are handed to the glia
    graph.

    Args:
        g: Graph
        size_dict: node -> bounding box (passed to `create_ccsize_dict`)
        glia_dict: node -> glia prediction (0 == neuron)
        return_removed_nodes: bool
            Also return the glia connected components.

    Returns: list of list of nodes
        Remaining connected components of type neuron (and, optionally,
        the glia connected components).
    """
    # set up node weights based on glia prediction and size
    # weights = {}
    # e_weights = {}
    # for n in g.nodes():
    #     weights[n] = np.linalg.norm(size_dict[n][1]-size_dict[n][0], ord=2)\
    #                  * glia_dict[n]
    # # set up edge weights based on sum of node weights
    # for e in g.edges():
    #     e_weights[e] = weights[list(e)[0]] + weights[list(e)[1]]
    # nx.set_node_attributes(g, weights, 'weight')
    # nx.set_edge_attributes(g, e_weights, 'weights')
    # get neuron type connected component sizes
    g_neuron = g.copy()
    for n in g.nodes():
        if glia_dict[n] != 0:
            g_neuron.remove_node(n)
    neuron2ccsize_dict = create_ccsize_dict(g_neuron, size_dict)
    if np.all(np.array(list(neuron2ccsize_dict.values())) <=
              global_params.config['min_cc_size_ssv']):
        # no significant neuron SV: everything is glia
        if return_removed_nodes:
            return [], [list(g.nodes())]
        return []
    # get glia type connected component sizes
    g_glia = g.copy()
    for n in g.nodes():
        if glia_dict[n] == 0:
            g_glia.remove_node(n)
    glia2ccsize_dict = create_ccsize_dict(g_glia, size_dict)
    if np.all(np.array(list(glia2ccsize_dict.values())) <=
              global_params.config['min_cc_size_ssv']):
        # no significant glia SV: everything is neuron
        if return_removed_nodes:
            return [list(g.nodes())], []
        return [list(g.nodes())]
    tiny_glia_fragments = []
    for n in g_glia.nodes():
        if glia2ccsize_dict[n] < global_params.config['min_cc_size_ssv']:
            tiny_glia_fragments += [n]
    # create new neuron graph without sufficiently big glia connected components
    g_neuron = g.copy()
    for n in g.nodes():
        if glia_dict[n] != 0 and n not in tiny_glia_fragments:
            g_neuron.remove_node(n)
    # find orphaned neuron SV's and add them to glia graph
    neuron2ccsize_dict = create_ccsize_dict(g_neuron, size_dict)
    g_tmp = g_neuron.copy()
    for n in g_tmp.nodes():
        if neuron2ccsize_dict[n] < global_params.config['min_cc_size_ssv']:
            g_neuron.remove_node(n)
    # create new glia graph with remaining nodes
    # (as the complementary set of sufficiently big neuron connected components)
    g_glia = g.copy()
    for n in g_neuron.nodes():
        g_glia.remove_node(n)
    neuron_ccs = list(nx.connected_components(g_neuron))
    if return_removed_nodes:
        glia_ccs = list(nx.connected_components(g_glia))
        # the two partitions must cover the input graph exactly
        assert len(g_glia) + len(g_neuron) == len(g)
        return neuron_ccs, glia_ccs
    return neuron_ccs
def glia_path_length(glia_path, glia_dict, write_paths=None):
    """
    Get the path length of glia SV within glia_path. Assumes single connected
    glia component within this path. Uses the mesh property of each
    SegmentationObject to build a graph from all vertices to find shortest path
    through (or more precise: along the surface of) glia. Edges between non-glia
    vertices have negligible distance (0.0001) to ensure shortest path
    along non-glia surfaces.

    Args:
        glia_path: list of SegmentationObjects
        glia_dict: dict
            Dictionary which keys the SegmentationObjects in glia_path and returns
            their glia prediction
        write_paths: str or None
            Destination folder; when given, the shortest path is written there
            as a k.zip skeleton (the original docstring called this a bool).

    Returns: float
        Shortest path between neuron type nodes in nm
    """
    g = nx.Graph()
    col = {}
    curr_ind = 0
    if write_paths is not None:
        all_vert = np.zeros((0, 3))
    for so in glia_path:
        is_glia_sv = int(glia_dict[so] > 0)
        ind, vert = so.mesh
        # connect meshes of different SV, starts after first SV
        if curr_ind > 0:
            # build kd tree from vertices of SV before
            kd_tree = spatial.cKDTree(vert_resh)
            # get indices of vertices of SV before (= indices of graph nodes)
            ind_offset_before = curr_ind - len(vert_resh)
            # query vertices of current mesh to find close connects
            next_vert_resh = vert.reshape((-1, 3))
            dists, ixs = kd_tree.query(next_vert_resh, distance_upper_bound=500)
            for kk, ix in enumerate(ixs):
                if dists[kk] > 500:
                    continue
                # only distances along glia surfaces count towards the length
                if is_glia_sv:
                    edge_weight = eucl_dist(next_vert_resh[kk], vert_resh[ix])
                else:
                    edge_weight = 0.0001
                g.add_edge(curr_ind + kk, ind_offset_before + ix,
                           weights=edge_weight)
        vert_resh = vert.reshape((-1, 3))
        # save all vertices for writing shortest path skeleton
        if write_paths is not None:
            all_vert = np.concatenate([all_vert, vert_resh])
        # connect fragments of SV mesh
        kd_tree = spatial.cKDTree(vert_resh)
        dists, ixs = kd_tree.query(vert_resh, k=20, distance_upper_bound=500)
        for kk in range(len(ixs)):
            nn_ixs = ixs[kk]
            nn_dists = dists[kk]
            col[curr_ind + kk] = glia_dict[so]
            for curr_ix, curr_dist in zip(nn_ixs, nn_dists):
                col[curr_ind + curr_ix] = glia_dict[so]
                if is_glia_sv:
                    dist = curr_dist
                else:  # only take path through glia into account
                    dist = 0
                g.add_edge(kk + curr_ind, curr_ix + curr_ind, weights=dist)
        curr_ind += len(vert_resh)
    start_ix = 0  # choose any index of the first mesh
    end_ix = curr_ind - 1  # choose any index of the last mesh
    shortest_path_length = nx.dijkstra_path_length(g, start_ix, end_ix, weight="weights")
    if write_paths is not None:
        shortest_path = nx.dijkstra_path(g, start_ix, end_ix, weight="weights")
        anno = coordpath2anno([all_vert[ix] for ix in shortest_path])
        anno.setComment("{0:.4}".format(shortest_path_length))
        skel = Skeleton()
        skel.add_annotation(anno)
        # BUGFIX: the original format string "{{}/{0:.4}_..." raised
        # ValueError ("Single '}' encountered") and never used write_paths.
        skel.to_kzip("{0}/{1:.4}_vertpath.k.zip".format(write_paths, shortest_path_length))
    return shortest_path_length
def eucl_dist(a, b):
    """Euclidean (L2) distance between two equally shaped arrays."""
    difference = a - b
    return np.linalg.norm(difference)
def get_glia_paths(g, glia_dict, node2ccsize_dict, min_cc_size_neuron,
                   node2ccsize_dict_glia, min_cc_size_glia):
    """
    Currently not in use, Refactoring needed

    Find paths between neuron type SV graph nodes which contain glia nodes.

    NOTE(review): written against networkx 1.x -- `g.degree().items()` and
    subscripting the result of `nx.all_pairs_dijkstra_path` both fail on
    networkx 2.x (degree is a view; all_pairs_* returns an iterator).

    Args:
        g: nx.Graph
        glia_dict: node -> glia prediction (0 == neuron)
        node2ccsize_dict: neuron component size per node
        min_cc_size_neuron: minimum size of a neuron end-point component
        node2ccsize_dict_glia: glia component size per node
        min_cc_size_glia: minimum size of a significant glia component

    Returns:
        List of node paths (each a list of nodes) containing glia.
    """
    # end points: degree-1 neuron nodes in sufficiently large components
    end_nodes = []
    paths = nx.all_pairs_dijkstra_path(g, weight="weights")
    for n, d in g.degree().items():
        if d == 1 and glia_dict[n] == 0 and node2ccsize_dict[n] > min_cc_size_neuron:
            end_nodes.append(n)
    # find all nodes along these ways and store them as mandatory nodes
    glia_paths = []
    glia_svixs_in_paths = []
    for a, b in itertools.combinations(end_nodes, 2):
        glia_nodes = [n for n in paths[a][b] if glia_dict[n] != 0]
        if len(glia_nodes) == 0:
            continue
        sv_ccsizes = [node2ccsize_dict_glia[n] for n in glia_nodes]
        if np.max(sv_ccsizes) <= min_cc_size_glia:  # check minimum glia size
            continue
        sv_ixs = np.array([n.id for n in glia_nodes])
        glia_nodes_already_exist = False
        for el_ixs in glia_svixs_in_paths:
            if np.all(sv_ixs == el_ixs):
                glia_nodes_already_exist = True
                break
        if glia_nodes_already_exist:  # check if same glia path exists already
            continue
        glia_paths.append(paths[a][b])
        glia_svixs_in_paths.append(np.array([so.id for so in glia_nodes]))
    return glia_paths
def write_sopath2skeleton(so_path, dest_path, scaling=None, comment=None):
    """
    Writes very simple skeleton, each node represents the center of mass of a
    SV, and edges are created in list order.

    Args:
        so_path: list of SegmentationObject
        dest_path: str
            Output k.zip path.
        scaling: np.ndarray or tuple
            Voxel size; defaults to the dataset scaling from config.
        comment: str
            Optional annotation comment.
    """
    if scaling is None:
        scaling = np.array(global_params.config['scaling'])
    skel = Skeleton()
    anno = SkeletonAnnotation()
    anno.scaling = scaling
    rep_nodes = []
    for so in so_path:
        vert = so.mesh[1].reshape((-1, 3))
        com = np.mean(vert, axis=0)
        # snap the center of mass onto the closest actual mesh vertex
        kd_tree = spatial.cKDTree(vert)
        dist, nn_ix = kd_tree.query([com])
        # convert the physical coordinate back to voxel coordinates
        nn = vert[nn_ix[0]] / scaling
        n = SkeletonNode().from_scratch(anno, nn[0], nn[1], nn[2])
        anno.addNode(n)
        rep_nodes.append(n)
    # chain the representative nodes in list order
    for i in range(1, len(rep_nodes)):
        anno.addEdge(rep_nodes[i - 1], rep_nodes[i])
    if comment is not None:
        anno.setComment(comment)
    skel.add_annotation(anno)
    skel.to_kzip(dest_path)
def coordpath2anno(coords: np.ndarray, scaling: Optional[np.ndarray] = None) -> SkeletonAnnotation:
    """
    Creates skeleton from scaled coordinates, assume coords are in order for
    edge creation.

    Args:
        coords: np.array
            Physically scaled coordinates (nm).
        scaling: np.ndarray
            Voxel size; defaults to the dataset scaling from config.

    Returns: SkeletonAnnotation
    """
    if scaling is None:
        scaling = global_params.config['scaling']
    anno = SkeletonAnnotation()
    anno.scaling = scaling
    rep_nodes = []
    for c in coords:
        # convert physical coordinates back to voxel coordinates
        n = SkeletonNode().from_scratch(anno, c[0] / scaling[0], c[1] / scaling[1],
                                        c[2] / scaling[2])
        anno.addNode(n)
        rep_nodes.append(n)
    # connect consecutive coordinates
    for i in range(1, len(rep_nodes)):
        anno.addEdge(rep_nodes[i - 1], rep_nodes[i])
    return anno
def create_graph_from_coords(coords: np.ndarray, max_dist: float = 6000, force_single_cc: bool = True,
                             mst: bool = False) -> nx.Graph:
    """
    Generate skeleton from sample locations by adding edges between points with a maximum distance and then pruning
    the skeleton using MST. Nodes will have a 'position' attribute.

    Args:
        coords: Coordinates.
        max_dist: Add edges between two nodes that are within this distance.
        force_single_cc: Force that the tree generated from coords is a single connected component.
        mst: Compute the minimum spanning tree.

    Returns:
        Networkx graph. Edge between nodes (coord indices) using the ordering of coords, i.e. the
        edge (1, 2) connects coordinate coord[1] and coord[2].
    """
    g = nx.Graph()
    if len(coords) == 1:
        # degenerate case: a single node with a zero-weight self-loop
        # (no 'position' attribute is set on this path)
        g.add_node(0)
        g.add_weighted_edges_from([[0, 0, 0]])
        return g
    # pair up all coordinates within max_dist of each other
    kd_t = spatial.cKDTree(coords)
    pairs = kd_t.query_pairs(r=max_dist, output_type="ndarray")
    g.add_nodes_from([(ix, dict(position=coord)) for ix, coord in enumerate(coords)])
    # edge weight = Euclidean distance between the paired coordinates
    weights = np.linalg.norm(coords[pairs[:, 0]] - coords[pairs[:, 1]], axis=1)
    g.add_weighted_edges_from([[pairs[i][0], pairs[i][1], weights[i]] for i in range(len(pairs))])
    if force_single_cc:  # make sure its a connected component
        g = stitch_skel_nx(g)
    if mst:
        g = nx.minimum_spanning_tree(g)
    return g
def draw_glia_graph(G, dest_path, min_sv_size=0, ext_glia=None, iterations=150, seed=0,
                    glia_key="glia_probas", node_size_cap=np.inf, mcmp=None, pos=None,
                    glia_thresh=0.161):
    """
    Draw graph with nodes colored in red (glia) and blue depending on their
    class. Writes drawing to dest_path.

    Args:
        G: nx.Graph
        dest_path: str
        min_sv_size: int
            Nodes with a smaller size value are hidden.
        ext_glia: dict
            keys: node in G, values: number indicating class
        iterations: Spring-layout iterations.
        seed: int
            Default: 0; random seed for layout generation.
            NOTE(review): only forwarded to spring_layout; np.random is
            always seeded with 0 as before -- confirm intended.
        glia_key: str
        node_size_cap: int
        mcmp: color palette
        pos: Precomputed node positions.
        glia_thresh: float
            Glia probability threshold. BUGFIX: this used to be read from an
            undefined module global `glia_thresh` (NameError at call time);
            it is now a backward-compatible keyword parameter.
            TODO: confirm the default against the project configuration.

    Returns:
        The node positions used for drawing.
    """
    import matplotlib.pyplot as plt
    import seaborn as sns
    if mcmp is None:
        mcmp = sns.diverging_palette(250, 15, s=99, l=60, center="dark",
                                     as_cmap=True)
    np.random.seed(0)
    seg_objs = list(G.nodes())
    glianess, size = get_glianess_dict(seg_objs, glia_thresh, glia_key, 5,
                                       use_sv_volume=True)
    if ext_glia is not None:
        for n in G.nodes():
            glianess[n] = ext_glia[n.id]
    plt.figure()
    n_size = np.array([size[n] ** (1. / 3) for n in G.nodes()]).astype(
        np.float32)  # reduce cubic relation to a linear one
    # n_size = np.array([np.linalg.norm(size[n][1]-size[n][0]) for n in G.nodes()])
    if node_size_cap == "max":
        node_size_cap = np.max(n_size)
    n_size[n_size > node_size_cap] = node_size_cap
    col = np.array([glianess[n] for n in G.nodes()])
    col = col[n_size >= min_sv_size]
    # BUGFIX: this filter used `>` while col/n_size use `>=`, so nodes with
    # size exactly min_sv_size desynchronized nodelist from col/n_size.
    nodelist = list(np.array(list(G.nodes()))[n_size >= min_sv_size])
    n_size = n_size[n_size >= min_sv_size]
    n_size = n_size / np.max(n_size) * 25.
    if pos is None:
        pos = nx.spring_layout(G, weight="weight", iterations=iterations, random_state=seed)
    nx.draw(G, nodelist=nodelist, node_color=col, node_size=n_size,
            cmap=mcmp, width=0.15, pos=pos, linewidths=0)
    plt.savefig(dest_path)
    plt.close()
    return pos
def nxGraph2kzip(g, coords, kzip_path):
    """Write graph `g` with per-node physical coordinates `coords` (nm) as a
    skeleton annotation into a k.zip file.

    `coords` must be indexable by the node IDs of `g`.
    """
    import tqdm
    scaling = global_params.config['scaling']
    # convert physical coordinates to voxel coordinates
    coords = coords / scaling
    skel = Skeleton()
    anno = SkeletonAnnotation()
    anno.scaling = scaling
    node_mapping = {}
    pbar = tqdm.tqdm(total=len(coords) + len(g.edges()), leave=False)
    for v in g.nodes():
        c = coords[v]
        n = SkeletonNode().from_scratch(anno, c[0], c[1], c[2])
        node_mapping[v] = n
        anno.addNode(n)
        pbar.update(1)
    for e in g.edges():
        anno.addEdge(node_mapping[e[0]], node_mapping[e[1]])
        pbar.update(1)
    skel.add_annotation(anno)
    skel.to_kzip(kzip_path)
    pbar.close()
def svgraph2kzip(ssv: 'SuperSegmentationObject', kzip_path: str):
    """
    Writes the SV graph stored in `ssv.edgelist_path` to a kzip file.
    The representative coordinate of a SV is used as the corresponding node
    location.

    Args:
        ssv: Cell reconstruction object.
        kzip_path: Path to the output kzip file.
    """
    sv_graph = nx.read_edgelist(ssv.edgelist_path, nodetype=int)
    # representative voxel coordinate per supervoxel ID
    coords = {ix: ssv.get_seg_obj('sv', ix).rep_coord for ix in sv_graph.nodes}
    import tqdm
    skel = Skeleton()
    anno = SkeletonAnnotation()
    anno.scaling = ssv.scaling
    node_mapping = {}
    pbar = tqdm.tqdm(total=len(coords) + len(sv_graph.edges()), leave=False)
    for v in sv_graph.nodes:
        c = coords[v]
        n = SkeletonNode().from_scratch(anno, c[0], c[1], c[2])
        # store the supervoxel ID as the node comment for later lookup
        n.setComment(f'{v}')
        node_mapping[v] = n
        anno.addNode(n)
        pbar.update(1)
    for e in sv_graph.edges():
        anno.addEdge(node_mapping[e[0]], node_mapping[e[1]])
        pbar.update(1)
    skel.add_annotation(anno)
    skel.to_kzip(kzip_path)
    pbar.close()
def stitch_skel_nx(skel_nx: nx.Graph, n_jobs: int = 1) -> nx.Graph:
    """
    Stitch connected components within a graph by recursively adding edges between the closest components.

    NOTE(review): `cKDTree.query(..., n_jobs=...)` was renamed to `workers=`
    in scipy >= 1.6 -- confirm the pinned scipy version.

    Args:
        skel_nx: Networkx graph. Nodes require 'position' attribute.
        n_jobs: Number of jobs used for query of cKDTree.

    Returns:
        Single connected component graph (modified in place).
    """
    if skel_nx.number_of_nodes() == 0:
        return skel_nx
    no_of_seg = nx.number_connected_components(skel_nx)
    if no_of_seg == 1:
        return skel_nx
    skel_nx_nodes = np.array([skel_nx.nodes[ix]['position'] for ix in skel_nx.nodes()], dtype=np.int64)
    while no_of_seg != 1:
        # split nodes into the largest component and "the rest"
        rest_nodes = []
        rest_nodes_ixs = []
        list_of_comp = np.array([c for c in sorted(nx.connected_components(skel_nx), key=len, reverse=True)])
        for single_rest_graph in list_of_comp[1:]:
            rest_nodes += [skel_nx_nodes[int(ix)] for ix in single_rest_graph]
            rest_nodes_ixs += list(single_rest_graph)
        current_set_of_nodes = [skel_nx_nodes[int(ix)] for ix in list_of_comp[0]]
        current_set_of_nodes_ixs = list(list_of_comp[0])
        # find the closest pair of points between the two sets
        # (second positional arg of cKDTree is the leafsize)
        tree = spatial.cKDTree(rest_nodes, 1)
        thread_lengths, indices = tree.query(current_set_of_nodes, n_jobs=n_jobs)
        start_thread_index = np.argmin(thread_lengths)
        stop_thread_index = indices[start_thread_index]
        e1 = current_set_of_nodes_ixs[start_thread_index]
        e2 = rest_nodes_ixs[stop_thread_index]
        # bridge the two components with one edge
        skel_nx.add_edge(e1, e2)
        no_of_seg -= 1
    return skel_nx
| StructuralNeurobiologyLab/SyConn | syconn/proc/graphs.py | Python | gpl-2.0 | 26,039 |
import xmltodict
def parseData(data):
    """Parse an XML string into an ordered dict via xmltodict.

    Returns None when `data` is empty/whitespace-only; raises
    ``Exception('Invalid XML data', data)`` for malformed XML.
    """
    try:
        return xmltodict.parse(data)
    except Exception:
        # BUGFIX: the original used `len(...) is 0` (identity comparison,
        # an implementation detail of small-int caching) and a bare
        # `except:` that also swallowed KeyboardInterrupt/SystemExit.
        if len(data.split()) == 0:
            return None
        else:
            raise Exception('Invalid XML data', data)
| ColinKeigher/McAfeeWebGateway | mwg/parse.py | Python | gpl-2.0 | 236 |
"""
LAMP
"""
import numpy as np
tol = 1.e-6 # zero tolerance
def pdist(x):
    """
    Pairwise distance between pairs of objects.

    Vectorized with numpy broadcasting instead of the original O(n^2)
    Python double loop (resolves the old "find a fast function" TODO).

    Args:
        x: (n, d) array-like of n observations in d dimensions.

    Returns:
        (n, n) array of Euclidean distances; symmetric with zero diagonal.
    """
    x = np.asarray(x)
    # (n, 1, d) - (1, n, d) broadcasts to all pairwise difference vectors
    diff = x[:, np.newaxis, :] - x[np.newaxis, :, :]
    return np.sqrt(np.sum(diff ** 2, axis=-1))
def project(x, xs, ys):
    """
    LAMP projection: map each instance of `x` (dim d) into the visual space
    (dim p) using control points `xs` and their projections `ys`.

    Args:
        x: (ninst, d) data to project.
        xs: (k, d) sample (control) points.
        ys: (k, p) projections of the control points.

    Returns:
        (ninst, p) projected coordinates.
    """
    assert (type(x) is np.ndarray) and (type(xs) is np.ndarray) and (type(ys) is np.ndarray), \
        "*** ERROR (Force-Scheme): project input must be numpy.array type."
    ninst, dim = x.shape  # number of instances, data dimension
    k, a = xs.shape  # number of sample instances
    p = ys.shape[1]  # visual space dimension
    assert dim == a, "*** LAMP Error: x and xs dimensions must be egual."
    Y = np.zeros((ninst, p))
    for pt in range(ninst):
        # computes alphas (inverse squared distances to the control points)
        alpha = np.zeros(k)
        for i in range(k):
            # verify if the point to be projected is a control point
            # avoids division by zero
            if np.linalg.norm(xs[i] - x[pt]) < tol:
                alpha[i] = np.finfo(float).max
            else:
                alpha[i] = 1 / np.linalg.norm(xs[i] - x[pt])**2
        # computes x~ and y~, the alpha-weighted centroids (eq 3)
        xtilde = np.zeros(dim)
        ytilde = np.zeros(p)
        for i in range(k):
            xtilde += alpha[i] * xs[i]
            ytilde += alpha[i] * ys[i]
        xtilde /= np.sum(alpha)
        ytilde /= np.sum(alpha)
        A = np.zeros((k, dim))
        B = np.zeros((k, p))
        xhat = np.zeros((k, dim))
        yhat = np.zeros((k, p))
        # computation of x^ and y^, the centered/weighted systems (eq 6)
        for i in range(k):
            xhat[i] = xs[i] - xtilde
            yhat[i] = ys[i] - ytilde
            A[i] = np.sqrt(alpha[i]) * xhat[i]
            B[i] = np.sqrt(alpha[i]) * yhat[i]
        U, D, V = np.linalg.svd(np.dot(A.T, B)) # (eq 7)
        # VV is the matrix V filled with zeros (pads V from p x p to dim x p)
        VV = np.zeros((dim, p)) # size of U = dim, by SVD
        for i in range(p): # size of V = p, by SVD
            VV[i,range(p)] = V[i]
        M = np.dot(U, VV) # orthogonal mapping matrix (eq 7)
        Y[pt] = np.dot(x[pt] - xtilde, M) + ytilde # (eq 8)
    return Y
def plot(y, t):
    """Scatter-plot the 2-D embedding `y`, colored by the labels `t`."""
    import matplotlib.pyplot as mpl
    xs_axis, ys_axis = y.T[0], y.T[1]
    mpl.scatter(xs_axis, ys_axis, c=t)
    mpl.show()
def test():
    """Demo (Python 2): project the iris data set with LAMP.

    Loads iris.data from the working directory, projects sqrt(n) random
    control points with the force scheme, then LAMP-projects the rest and
    shows a scatter plot.
    """
    import time, sys, force
    print "Loading data set... ",
    sys.stdout.flush()
    data = np.loadtxt("iris.data", delimiter=",")
    print "Done."
    n, d = data.shape
    # number of control points: ceil(sqrt(n))
    k = int(np.ceil(np.sqrt(n)))
    x = data[:, range(d-1)]
    t = data[:, d-1]
    # pick k random control points
    sample_idx = np.random.permutation(n)
    sample_idx = sample_idx[range(k)]
    xs = x[sample_idx, :]
    # force
    start_time = time.time()
    print "Projecting samples... ",
    sys.stdout.flush()
    ys = force.project(xs)
    print "Done. Elapsed time:", time.time() - start_time, "s."
    # lamp
    start_time = time.time()
    print "Projecting... ",
    sys.stdout.flush()
    y = project(x, xs, ys)
    print "Done. Elapsed time:", time.time() - start_time, "s."
    plot(y, t)
# script entry point: run the iris demo
if __name__ == "__main__":
    print "Running test..."
    test()
| adrianolinux/lamp | lamp.py | Python | gpl-2.0 | 3,207 |
# -*- coding: utf-8 -*-
import os
from lutris import settings
from lutris.runners.runner import Runner
class mupen64plus(Runner):
    """Nintendo 64 emulator runner for lutris (wraps the mupen64plus binary)."""
    human_name = "Mupen64Plus"
    platform = "Nintendo 64"
    # single game option: path to the ROM image
    game_options = [{
        'option': 'main_file',
        'type': 'file',
        'label': 'ROM file',
        'help': ("The game data, commonly called a ROM image.")
    }]
    # user-tunable emulator flags, surfaced in the runner config UI
    runner_options = [
        {
            'option': 'fullscreen',
            'type': 'bool',
            'label': 'Fullscreen',
            'default': True
        },
        {
            'option': 'nogui',
            'type': 'bool',
            'label': 'Hide GUI',
            'default': True
        }
    ]
    # per-architecture runner archives downloaded at install time
    tarballs = {
        'i386': 'mupen64plus-bundle-linux32-2.0.tar.gz',
        'x64': 'mupen64plus-bundle-linux64-2.0-ubuntu.tar.gz',
    }
    @property
    def working_dir(self):
        # run from the emulator's own directory so it finds its plugins
        return os.path.join(settings.RUNNER_DIR, 'mupen64plus')
    def get_executable(self):
        return os.path.join(settings.RUNNER_DIR, 'mupen64plus/mupen64plus')
    def play(self):
        """Build the emulator command line.

        Returns {'command': [...]} on success or {'error': ..., 'file': ...}
        when the configured ROM file does not exist.
        """
        arguments = [self.get_executable()]
        if self.runner_config.get('nogui'):
            arguments.append('--nogui')
        if self.runner_config.get('fullscreen'):
            arguments.append('--fullscreen')
        else:
            arguments.append('--windowed')
        rom = self.game_config.get('main_file') or ''
        if not os.path.exists(rom):
            return {'error': 'FILE_NOT_FOUND', 'file': rom}
        # quote the ROM path so spaces survive shell interpretation
        arguments.append("\"%s\"" % rom)
        return {'command': arguments}
| malkavi/lutris | lutris/runners/mupen64plus.py | Python | gpl-3.0 | 1,602 |
def count_digit(d, n):
    """Return how many decimal digits of non-negative *n* equal digit *d*.

    Bug fix: the original looped ``while n % 10``, which terminates at the
    first zero digit and therefore undercounts (e.g. d=3, n=103 gave 1
    instead of counting all remaining digits).
    """
    count = 0
    while n:
        if n % 10 == d:
            count += 1
        n //= 10
    return count


if __name__ == "__main__":
    # Input: one line with the digit d and the number n.
    d, n = map(int, input().split())
    print(count_digit(d, n))
| lesina/labs2016 | contests_1sem/8/F.py | Python | gpl-3.0 | 127 |
"""
<This library provides a Python interface for the Telegram Bot API>
Copyright (C) <2015> <Jacopo De Luca>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
class Location(object):
    """A geographic point (longitude/latitude pair) on the map."""
    def __init__(self, longitude, latitude):
        """
        :param longitude: Longitude as defined by sender
        :type longitude: float
        :param latitude: Latitude as defined by sender
        :type latitude: float
        """
        self.longitude = longitude
        self.latitude = latitude
    @staticmethod
    def build_from_json(jlocation):
        """Create a Location from a JSON-parsed dict.

        :param jlocation: A dictionary with 'longitude' and 'latitude' keys
        :type jlocation: dict
        :rtype: Location
        """
        lon = jlocation['longitude']
        lat = jlocation['latitude']
        return Location(lon, lat)
| jacopodl/TbotPy | src/Object/Location.py | Python | gpl-3.0 | 1,452 |
import os
# Scratch directory for in-progress package downloads.
DOWNLOAD_TMP_DIR = '/tmp'
# Public key used to verify packages fetched from the cloud repository.
REPOSITORY_PUBLIC_KEY = '/root/keys/cloud_key.pub'
# Where downloaded packages are stored locally.
LOCAL_REPOSITORY_DIR = '/pkgs'
HTML_DIR = '/usr/share/mod-pacmanager/html/'
IHM_RESET_SCRIPT = '/root/reset.py'
# The repository address can be overridden by writing it to /root/repository;
# otherwise fall back to the default packages server.
if os.path.exists("/root/repository"):
    fh = open("/root/repository")
    REPOSITORY_ADDRESS = fh.read().strip()
    fh.close()
else:
    REPOSITORY_ADDRESS = 'http://packages.portalmod.com/api'
PORT = 8889
PACMAN_COMMAND = 'pacman'
def check_environment():
    """Create the download and local repository directories if missing."""
    missing = [d for d in (DOWNLOAD_TMP_DIR, LOCAL_REPOSITORY_DIR)
               if not os.path.exists(d)]
    for d in missing:
        os.mkdir(d)
| moddevices/mod-pacmanager | pacman/settings.py | Python | gpl-3.0 | 597 |
# https://github.com/Naish21/themostat
'''
* The MIT License (MIT)
*
* Copyright (c) 2016 Jorge Aranda Moro
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
'''
#This part is to connect to the WiFi
# Credentials of the access point to join (used by do_connect below).
# NOTE(review): credentials are hard-coded in source; consider moving them
# to a config file that is kept out of version control.
WIFISSID='koen'
WIFIPASS='/*Casa*/'
def do_connect():
    """Join the configured WiFi network, blocking until associated.

    Bug fix: the original did ``from network import WLAN`` but then
    referenced ``network.STA_IF``, raising NameError because the module
    name ``network`` was never bound.  Import the whole module instead.
    """
    import network
    sta_if = network.WLAN(network.STA_IF)
    if not sta_if.isconnected():
        print('connecting to network...')
        sta_if.active(True)
        sta_if.connect(WIFISSID, WIFIPASS)
        # Busy-wait until the interface reports an association.
        while not sta_if.isconnected():
            pass
    print('network config:', sta_if.ifconfig())
#---End Wifi Config---
from machine import Pin
# On-board LED on GPIO2, initialised off.
# NOTE(review): sub_cb below treats value(0) as "on", i.e. the LED appears
# to be wired active-low -- confirm against the hardware.
led = Pin(2, Pin.OUT, value=1)
#---MQTT Sending---
from time import sleep_ms
from ubinascii import hexlify
from machine import unique_id
#import socket
from umqtt import MQTTClient
# MQTT broker address and a client id derived from the chip's unique id.
SERVER = "192.168.31.16"
CLIENT_ID = hexlify(unique_id())
# Publish topics for temperature/humidity plus the alarm/command subscription.
# NOTE(review): "/scultivo/hum" looks like a typo for "/cultivo/hum" --
# confirm against the subscriber side before changing.
TOPIC1 = b"/cultivo/temp"
TOPIC2 = b"/scultivo/hum"
TOPIC3 = b"/cultivo/alarma"
def envioMQTT(server=SERVER, topic="/cultivo", dato=None):
    """Best-effort publish of *dato* on *topic*: any failure is swallowed."""
    try:
        client = MQTTClient(CLIENT_ID, server)
        client.connect()
        client.publish(topic, dato)
        sleep_ms(200)
        client.disconnect()
        #led.value(1)
    except Exception as e:
        # Deliberate best-effort behaviour: ignore connect/publish errors.
        pass
        #led.value(0)
# Last commanded LED state (0 = off, 1 = on); updated by sub_cb.
state = 0
def sub_cb(topic, msg):
    # MQTT subscription callback: switch the LED on b"on"/b"off" payloads.
    # value(0) is used for "on" -- the LED is presumably active-low
    # (confirm wiring).  Any other payload is ignored.
    global state
    print((topic, msg))
    if msg == b"on":
        led.value(0)
        state = 1
    elif msg == b"off":
        led.value(1)
        state = 0
def recepcionMQTT(server=SERVER, topic=TOPIC3):
    """Block until one MQTT message arrives on *topic*; dispatch it to sub_cb."""
    client = MQTTClient(CLIENT_ID, server)
    # Subscribed messages are delivered through the registered callback.
    client.set_callback(sub_cb)
    client.connect()
    client.subscribe(topic)
    print("Connected to %s, subscribed to %s topic" % (server, topic))
    try:
        client.wait_msg()
    finally:
        client.disconnect()
#---End MQTT Sending---
#---DHT22---
from dht import DHT22
ds = DHT22(Pin(4)) #DHT22 connected to GPIO4
def medirTemHum():
    """Sample the DHT22; return (temperature, humidity), or (-1, -1) on a read error."""
    try:
        ds.measure()
        #ed.value(1)
        return (ds.temperature(), ds.humidity())
    except Exception as e:
        # Sensor read failed -- report the sentinel pair instead of raising.
        #led.value(0)
        return (-1, -1)
#---End DHT22---
#---Main Program---
# Give the board time to finish booting / joining WiFi before the first read.
sleep_ms(10000)
while True:
    # Publish one temperature/humidity sample, then block on one incoming
    # command message (recepcionMQTT uses wait_msg) before sleeping again.
    (tem,hum) = medirTemHum()
    envioMQTT(SERVER,TOPIC1,str(tem))
    envioMQTT(SERVER,TOPIC2,str(hum))
    recepcionMQTT()
    sleep_ms(10000)
#---END Main Program---
| fandres/Monitor-heladas | Code/themostat-master/main.py | Python | gpl-3.0 | 3,439 |
# -*- coding:utf-8 -*-
import datetime
import xml.etree.ElementTree as et
import pony.orm as orm
import sys
import os
pjoin = os.path.join  # NOTE(review): unused alias -- kept for compatibility
__dir__ = os.path.abspath(os.path.dirname(__file__))
# Make sibling modules (server.py) importable regardless of the CWD.
sys.path.append(__dir__)
from server import *
# Seed data for an admin test user with all balances/points topped up.
dat = dict(
    code = 'concefly',
    last_login = datetime.datetime.now(),
    user_type = 'admin',
    is_active = True,
    date_joined = datetime.datetime.now(),
    balance = 10000,
    point_member = 10000,
    point_xzl = 10000,
    point_jhs = 10000,
    point_nlb = 10000,
    point_nlt = 10000
)
# Insert the user inside a single Pony ORM transaction.
with orm.db_session:
    User(**dat)
| concefly/indent_system | db_test.py | Python | gpl-3.0 | 598 |
#!/usr/bin/python
# Copyright (c) 2015 IBM
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_flavor_facts
short_description: Retrieve facts about one or more flavors
author: "David Shrewsbury (@Shrews)"
version_added: "2.1"
description:
- Retrieve facts about available OpenStack instance flavors. By default,
facts about ALL flavors are retrieved. Filters can be applied to get
facts for only matching flavors. For example, you can filter on the
amount of RAM available to the flavor, or the number of virtual CPUs
available to the flavor, or both. When specifying multiple filters,
*ALL* filters must match on a flavor before that flavor is returned as
a fact.
notes:
- This module creates a new top-level C(openstack_flavors) fact, which
contains a list of unsorted flavors.
requirements:
- "python >= 2.6"
- "openstacksdk"
options:
name:
description:
- A flavor name. Cannot be used with I(ram) or I(vcpus) or I(ephemeral).
ram:
description:
- "A string used for filtering flavors based on the amount of RAM
(in MB) desired. This string accepts the following special values:
'MIN' (return flavors with the minimum amount of RAM), and 'MAX'
(return flavors with the maximum amount of RAM)."
- "A specific amount of RAM may also be specified. Any flavors with this
exact amount of RAM will be returned."
- "A range of acceptable RAM may be given using a special syntax. Simply
prefix the amount of RAM with one of these acceptable range values:
'<', '>', '<=', '>='. These values represent less than, greater than,
less than or equal to, and greater than or equal to, respectively."
type: bool
default: 'no'
vcpus:
description:
- A string used for filtering flavors based on the number of virtual
CPUs desired. Format is the same as the I(ram) parameter.
type: bool
default: 'no'
limit:
description:
- Limits the number of flavors returned. All matching flavors are
returned by default.
ephemeral:
description:
- A string used for filtering flavors based on the amount of ephemeral
storage. Format is the same as the I(ram) parameter
type: bool
default: 'no'
version_added: "2.3"
availability_zone:
description:
- Ignored. Present for backwards compatibility
extends_documentation_fragment: openstack
'''
EXAMPLES = '''
# Gather facts about all available flavors
- os_flavor_facts:
cloud: mycloud
# Gather facts for the flavor named "xlarge-flavor"
- os_flavor_facts:
cloud: mycloud
name: "xlarge-flavor"
# Get all flavors that have exactly 512 MB of RAM.
- os_flavor_facts:
cloud: mycloud
ram: "512"
# Get all flavors that have 1024 MB or more of RAM.
- os_flavor_facts:
cloud: mycloud
ram: ">=1024"
# Get a single flavor that has the minimum amount of RAM. Using the 'limit'
# option will guarantee only a single flavor is returned.
- os_flavor_facts:
cloud: mycloud
ram: "MIN"
limit: 1
# Get all flavors with 1024 MB of RAM or more, AND exactly 2 virtual CPUs.
- os_flavor_facts:
cloud: mycloud
ram: ">=1024"
vcpus: "2"
# Get all flavors with 1024 MB of RAM or more, exactly 2 virtual CPUs, and
# less than 30gb of ephemeral storage.
- os_flavor_facts:
cloud: mycloud
ram: ">=1024"
vcpus: "2"
ephemeral: "<30"
'''
RETURN = '''
openstack_flavors:
description: Dictionary describing the flavors.
returned: On success.
type: complex
contains:
id:
description: Flavor ID.
returned: success
type: string
sample: "515256b8-7027-4d73-aa54-4e30a4a4a339"
name:
description: Flavor name.
returned: success
type: string
sample: "tiny"
disk:
description: Size of local disk, in GB.
returned: success
type: int
sample: 10
ephemeral:
description: Ephemeral space size, in GB.
returned: success
type: int
sample: 10
ram:
description: Amount of memory, in MB.
returned: success
type: int
sample: 1024
swap:
description: Swap space size, in MB.
returned: success
type: int
sample: 100
vcpus:
description: Number of virtual CPUs.
returned: success
type: int
sample: 2
is_public:
description: Make flavor accessible to the public.
returned: success
type: bool
sample: true
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def main():
    """Ansible entry point: gather OpenStack flavor facts per module params."""
    argument_spec = openstack_full_argument_spec(
        name=dict(required=False, default=None),
        ram=dict(required=False, default=None),
        vcpus=dict(required=False, default=None),
        limit=dict(required=False, default=None, type='int'),
        ephemeral=dict(required=False, default=None),
    )
    module_kwargs = openstack_module_kwargs(
        mutually_exclusive=[
            ['name', 'ram'],
            ['name', 'vcpus'],
            ['name', 'ephemeral']
        ]
    )
    module = AnsibleModule(argument_spec, **module_kwargs)
    params = module.params
    # Range-style filters only apply when no explicit flavor name is given.
    filters = {key: params[key]
               for key in ('vcpus', 'ram', 'ephemeral') if params[key]}
    sdk, cloud = openstack_cloud_from_module(module)
    try:
        if params['name']:
            flavors = cloud.search_flavors(filters={'name': params['name']})
        else:
            flavors = cloud.list_flavors()
            if filters:
                flavors = cloud.range_search(flavors, filters)
        limit = params['limit']
        if limit is not None:
            flavors = flavors[:limit]
        module.exit_json(changed=False,
                         ansible_facts=dict(openstack_flavors=flavors))
    except sdk.exceptions.OpenStackCloudException as e:
        module.fail_json(msg=str(e))


if __name__ == '__main__':
    main()
| mheap/ansible | lib/ansible/modules/cloud/openstack/os_flavor_facts.py | Python | gpl-3.0 | 6,809 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 TUBITAK/UEKAE
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
import glob
import gettext
__trans = gettext.translation('pisilinux', fallback=True)
_ = __trans.ugettext
# pisilinux Modules
import pisilinux.context as ctx
# ActionsAPI Modules
import pisilinux.actionsapi
# ActionsAPI Modules
from pisilinux.actionsapi import get
from pisilinux.actionsapi import cmaketools
from pisilinux.actionsapi import shelltools
basename = "qt4"
# Standard install locations, all rooted at the build prefix.
prefix = "/%s" % get.defaultprefixDIR()
libdir = "%s/lib" % prefix
bindir = "%s/bin" % prefix
datadir = "%s/share/%s" % (prefix, basename)
includedir = "%s/include" % prefix
docdir = "/%s/%s" % (get.docDIR(), basename)
# Qt-specific subtrees live under lib/qt4.
examplesdir = "%s/%s/examples" % (libdir, basename)
demosdir = "%s/%s/demos" % (libdir, basename)
importdir = "%s/%s/imports" % (libdir, basename)
plugindir = "%s/%s/plugins" % (libdir, basename)
translationdir = "%s/translations" % datadir
sysconfdir= "/etc"
# Path of the qmake binary used by configure().
qmake = "%s/qmake" % bindir
class ConfigureError(pisilinux.actionsapi.Error):
    """Raised when qmake project configuration fails.

    The message is also reported to the user interface immediately
    (via ctx.ui.error) when the exception is constructed.
    """
    def __init__(self, value=''):
        pisilinux.actionsapi.Error.__init__(self, value)
        self.value = value
        ctx.ui.error(value)
def configure(projectfile='', parameters='', installPrefix=prefix):
    """Run qmake to generate a Makefile for the project.

    projectfile   -- explicit .pro file; required when the tree contains
                     more than one, otherwise qmake finds it by itself.
    parameters    -- extra arguments appended to the qmake command line.
    installPrefix -- value passed as PREFIX to the generated Makefile.
    """
    if projectfile != '' and not shelltools.can_access_file(projectfile):
        raise ConfigureError(_("Project file '%s' not found.") % projectfile)
    pro_files = glob.glob("*.pro")
    if len(pro_files) > 1 and projectfile == '':
        raise ConfigureError(_("It seems there are more than one .pro file, you must specify one. (Possible .pro files: %s)") % ", ".join(pro_files))
    command = "%s -makefile %s PREFIX='%s' QMAKE_CFLAGS+='%s' QMAKE_CXXFLAGS+='%s' %s" % (
        qmake, projectfile, installPrefix, get.CFLAGS(), get.CXXFLAGS(), parameters)
    shelltools.system(command)
def make(parameters=''):
    """Run `make` for the qmake-configured project, forwarding *parameters*."""
    cmaketools.make(parameters)
def install(parameters='', argument='install'):
    """Run `make install` with INSTALL_ROOT pointed at the package install dir."""
    cmaketools.install('INSTALL_ROOT="%s" %s' % (get.installDIR(), parameters), argument)
| hknyldz/pisitools | pisilinux/pisilinux/actionsapi/qt4.py | Python | gpl-3.0 | 2,253 |
from __future__ import print_function
# Copyright (C) 2003 CAMP
# Please see the accompanying LICENSE file for further information.
"""A replacement for the ``time.clock()`` function.
From the clock man page::
Note that the time can wrap around. On a 32bit system
where CLOCKS_PER_SEC equals 1000000 this function will
return the same value approximately every 72 minutes.
The ``clock()`` function defined below tries to fix this problem.
However, if the ``clock()`` function is not called often enough (more
than 72 minutes between two calls), then there is no way of knowing
how many times the ``time.clock()`` function has wrapped arround! - in
this case a huge number is returned (1.0e100). This problem can be
avoided by calling the ``update()`` function at intervals smaller than
72 minutes."""
import sys
import time
import math
import functools
import numpy as np
import gpaw.mpi as mpi
# time.clock() wraps every 2**32 microseconds (~72 min where
# CLOCKS_PER_SEC == 1000000 on a 32-bit system -- see module docstring).
# NOTE(review): time.clock() was removed in Python 3.8; this module only
# works on older interpreters (time.process_time is the modern equivalent).
wrap = 1e-6 * 2**32
# Global variables:
c0 = time.clock()   # last sampled time.clock() value
t0 = time.time()    # wall time of the last sample
cputime = 0.0       # accumulated, wrap-corrected CPU time
trouble = False     # set when a wrap may have been missed (gap >= wrap)
def clock():
    """clock() -> floating point number
    Return the CPU time in seconds since the start of the process."""
    # Refresh the global accumulator first; if a wrap may have been missed
    # (more than ~72 min since the last call) the value is unrecoverable
    # and a huge sentinel (1.0e100) is returned instead.
    update()
    if trouble:
        return 1.0e100
    return cputime
def update():
    # Sample time.clock()/time.time() and fold at most one wrap of clock()
    # into the global `cputime` accumulator.  Must be called at intervals
    # shorter than `wrap` (~72 min) or the wrap count becomes unknowable.
    global trouble, t0, c0, cputime
    if trouble:
        return
    t = time.time()
    c = time.clock()
    # Too much wall time elapsed: we can no longer tell how many times
    # clock() wrapped -- give up permanently.
    if t - t0 >= wrap:
        trouble = True
        return
    dc = c - c0
    if dc < 0.0:
        # clock() wrapped exactly once since the last sample.
        dc += wrap
    cputime += dc
    t0 = t
    c0 = c
def function_timer(func, *args, **kwargs):
    """Call func(*args, **kwargs), print the elapsed wall time, return the result.

    The output stream is taken from the 'timeout' keyword argument
    (default: sys.stdout); it is removed before calling *func*.
    """
    stream = kwargs.pop('timeout', sys.stdout)
    start = time.time()
    result = func(*args, **kwargs)
    elapsed = time.time() - start
    print(elapsed, file=stream)
    return result
class Timer:
    """Hierarchical wall-clock timer.

    Timers are started and stopped in stack order; a running timer is
    keyed by the tuple of all names currently on the stack, which lets
    write() report both inclusive and exclusive times per nesting level.
    """
    def __init__(self, print_levels=1000):
        self.timers = {}        # (name, ...) tuple -> accumulated seconds
        self.t0 = time.time()   # creation time, used for the grand total
        self.running = []       # stack of currently running timer names
        self.print_levels = print_levels
    def print_info(self, calc):
        """Override to get to write info during calculator's initialize()."""
        pass
    def start(self, name):
        """Start a (possibly nested) timer called *name*."""
        names = tuple(self.running + [name])
        # Store -start_time; stop() adds the absolute stop time, leaving
        # the accumulated elapsed time in the dictionary.
        self.timers[names] = self.timers.get(names, 0.0) - time.time()
        self.running.append(name)
    def stop(self, name=None):
        """Stop *name* (default: the innermost running timer)."""
        if name is None:
            name = self.running[-1]
        names = tuple(self.running)
        running = self.running.pop()
        if name != running:
            raise RuntimeError('Must stop timers by stack order. '
                               'Requested stopping of %s but topmost is %s'
                               % (name, running))
        self.timers[names] += time.time()
    def __call__(self, name):
        """Context manager for timing a block of code.
        Example (t is a timer object)::
            with t('Add two numbers'):
                x = 2 + 2
            # same as this:
            t.start('Add two numbers')
            x = 2 + 2
            t.stop()
        """
        self.start(name)
        return self
    def __enter__(self):
        pass
    def __exit__(self, *args):
        self.stop()
    def get_time(self, *names):
        """Return the accumulated time for the timer path *names*."""
        return self.timers[names]
    def write(self, out=sys.stdout):
        """Write a formatted inclusive/exclusive timing report to *out*."""
        # Close any timers still running so the totals are complete.
        while self.running:
            self.stop()
        if len(self.timers) == 0:
            return
        t0 = time.time()
        tot = t0 - self.t0
        n = max([len(names[-1]) + len(names) for names in self.timers]) + 1
        line = '=' * (n + 26) + '\n'
        out.write(line)
        out.write('%-*s incl. excl.\n' % (n, 'Timing:'))
        out.write(line)
        tother = tot
        inclusive = self.timers.copy()
        exclusive = self.timers
        # Bug fix: on Python 3 dict.keys() returns a view that supports
        # neither .sort() nor .append(); build a sorted list instead.
        keys = sorted(exclusive.keys())
        for names in keys:
            t = exclusive[names]
            if len(names) > 1:
                # Subtract child time from the parent to get exclusive time.
                if len(names) < self.print_levels + 1:
                    exclusive[names[:-1]] -= t
            else:
                tother -= t
        exclusive[('Other',)] = tother
        inclusive[('Other',)] = tother
        keys.append(('Other',))
        for names in keys:
            t = exclusive[names]
            tinclusive = inclusive[names]
            r = t / tot
            p = 100 * r
            i = int(40 * r + 0.5)
            if i == 0:
                bar = '|'
            else:
                bar = '|%s|' % ('-' * (i - 1))
            level = len(names)
            if level > self.print_levels:
                continue
            name = (level - 1) * ' ' + names[-1] + ':'
            out.write('%-*s%9.3f %9.3f %5.1f%% %s\n' %
                      (n, name, tinclusive, t, p, bar))
        out.write(line)
        out.write('%-*s%9.3f %5.1f%%\n' % (n + 10, 'Total:', tot, 100.0))
        out.write(line)
        out.write('date: %s\n' % time.asctime())
    def add(self, timer):
        """Merge the accumulated times of another Timer into this one."""
        for name, t in timer.timers.items():
            self.timers[name] = self.timers.get(name, 0.0) + t
class timer:
    """Decorator for timing a method call.

    The decorated object must carry a Timer-like instance in a ``timer``
    attribute. Example::

        class A:
            def __init__(self):
                self.timer = Timer()

            @timer('Add two numbers')
            def add(self, x, y):
                return x + y
    """
    def __init__(self, name):
        self.name = name
    def __call__(self, method):
        @functools.wraps(method)
        def wrapper(obj, *args, **kwargs):
            obj.timer.start(self.name)
            result = method(obj, *args, **kwargs)
            try:
                obj.timer.stop()
            except IndexError:
                # Timer stack already unwound (e.g. report was written out).
                pass
            return result
        return wrapper
class NullTimer:
    """Compatible with Timer and StepTimer interfaces. Does nothing."""
    def __init__(self):
        pass
    def print_info(self, calc):
        pass
    def start(self, name):
        pass
    def stop(self, name=None):
        pass
    def get_time(self, name):
        # Always report zero: no timing is performed.
        return 0.0
    def write(self, out=sys.stdout):
        pass
    def write_now(self, mark=''):
        pass
    def add(self, timer):
        pass


# Shared no-op instance for callers that just need "a timer".
nulltimer = NullTimer()
class DebugTimer(Timer):
    """Timer that also logs every start/stop with MPI rank and timestamps."""
    def __init__(self, print_levels=1000, comm=mpi.world, txt=sys.stdout):
        Timer.__init__(self, print_levels)
        # Zero-pad the rank so log lines align across all ranks.
        ndigits = 1 + int(math.log10(comm.size))
        self.srank = '%0*d' % (ndigits, comm.rank)
        self.txt = txt
    def start(self, name):
        Timer.start(self, name)
        abstime = time.time()
        # timers[...] holds -start_time while running, so adding the current
        # absolute time yields the elapsed time so far.
        t = self.timers[tuple(self.running)] + abstime
        self.txt.write('T%s >> %15.8f %s (%7.5fs) started\n'
                       % (self.srank, abstime, name, t))
    def stop(self, name=None):
        # Resolve the defaulted name before Timer.stop() pops the stack.
        if name is None:
            name = self.running[-1]
        abstime = time.time()
        t = self.timers[tuple(self.running)] + abstime
        self.txt.write('T%s << %15.8f %s (%7.5fs) stopped\n'
                       % (self.srank, abstime, name, t))
        Timer.stop(self, name)
class ParallelTimer(DebugTimer):
    """Like DebugTimer but writes timings from all ranks.
    Each rank writes to timings.<rank>.txt. Also timings.metadata.txt
    will contain information about the parallelization layout. The idea
    is that the output from this timer can be used for plots and to
    determine bottlenecks in the parallelization.
    See the tool gpaw-plot-parallel-timings."""
    def __init__(self):
        # One log file per rank, zero-padded so names sort naturally.
        # NOTE(review): the file is opened here and never closed explicitly;
        # it stays open for the lifetime of the process.
        ndigits = len(str(mpi.world.size - 1))
        ranktxt = '%0*d' % (ndigits, mpi.world.rank)
        fname = 'timings.%s.txt' % ranktxt
        txt = open(fname, 'w')
        DebugTimer.__init__(self, comm=mpi.world, txt=txt)
    def print_info(self, calc):
        """Print information about parallelization into a file."""
        # All ranks open the file, but only rank 0 writes the gathered
        # (world, kpt, band, domain) rank tuples.
        fd = open('timings.metadata.txt', 'w')
        DebugTimer.print_info(self, calc)
        wfs = calc.wfs
        # We won't have to type a lot if everyone just sends all their numbers.
        myranks = np.array([wfs.world.rank, wfs.kd.comm.rank,
                            wfs.bd.comm.rank, wfs.gd.comm.rank])
        allranks = None
        if wfs.world.rank == 0:
            allranks = np.empty(wfs.world.size * 4, dtype=int)
        wfs.world.gather(myranks, 0, allranks)
        if wfs.world.rank == 0:
            for itsranks in allranks.reshape(-1, 4):
                fd.write('r=%d k=%d b=%d d=%d\n' % tuple(itsranks))
        fd.close()
class StepTimer(Timer):
    """Step timer to print out timing used in computation steps.
    Use it like this::
        from gpaw.utilities.timing import StepTimer
        st = StepTimer()
        ...
        st.write_now('step 1')
        ...
        st.write_now('step 2')
    The parameter write_as_master_only can be used to force the timer to
    print from processess that are not the mpi master process.
    """
    def __init__(self, out=sys.stdout, name=None, write_as_master_only=True):
        Timer.__init__(self)
        if name is None:
            # Default the label to the *caller's* function name; this relies
            # on being called directly (frame depth 1), so do not wrap this
            # constructor in another helper.
            name = '<%s>' % sys._getframe(1).f_code.co_name
        self.name = name
        self.out = out
        self.alwaysprint = not write_as_master_only
        # Internal rolling timer restarted after every write_now().
        self.now = 'temporary now'
        self.start(self.now)
    def write_now(self, mark=''):
        # Report time since construction/previous mark, then restart.
        self.stop(self.now)
        if self.alwaysprint or mpi.rank == 0:
            print(self.name, mark, self.get_time(self.now), file=self.out)
        self.out.flush()
        del self.timers[self.now]
        self.start(self.now)
class TAUTimer(Timer):
    """TAUTimer requires installation of the TAU Performance System
    http://www.cs.uoregon.edu/research/tau/home.php
    The TAU Python API will not output any data if there are any
    unmatched starts/stops in the code."""
    top_level = 'GPAW.calculator' # TAU needs top level timer
    merge = True # Requires TAU 2.19.2 or later
    def __init__(self):
        Timer.__init__(self)
        import pytau
        self.pytau = pytau
        self.tau_timers = {}  # timer name -> TAU profile timer object
        pytau.setNode(mpi.rank)
        self.tau_timers[self.top_level] = pytau.profileTimer(self.top_level)
        pytau.start(self.tau_timers[self.top_level])
    def start(self, name):
        Timer.start(self, name)
        self.tau_timers[name] = self.pytau.profileTimer(name)
        self.pytau.start(self.tau_timers[name])
    def stop(self, name=None):
        # Bug fix: resolve the defaulted name *before* Timer.stop() pops the
        # stack; previously stop() with name=None (e.g. via the inherited
        # __exit__) raised KeyError on self.tau_timers[None], leaving TAU
        # with unmatched starts/stops (which suppresses all output).
        if name is None:
            name = self.running[-1]
        Timer.stop(self, name)
        self.pytau.stop(self.tau_timers[name])
    def write(self, out=sys.stdout):
        Timer.write(self, out)
        if self.merge:
            self.pytau.dbMergeDump()
        else:
            self.pytau.stop(self.tau_timers[self.top_level])
class HPMTimer(Timer):
    """HPMTimer requires installation of the IBM BlueGene/P HPM
    middleware interface to the low-level UPC library. This will
    most likely only work at ANL's BlueGene/P. Must compile
    with GPAW_HPM macro in customize.py. Note that HPM_Init
    and HPM_Finalize are called in _gpaw.c and not in the Python
    interface. Timer must be called on all ranks in node, otherwise
    HPM will hang. Hence, we only call HPM_start/stop on a list
    subset of timers."""
    top_level = 'GPAW.calculator' # HPM needs top level timer
    compatible = ['Initialization', 'SCF-cycle']
    def __init__(self):
        Timer.__init__(self)
        from _gpaw import hpm_start, hpm_stop
        self.hpm_start = hpm_start
        self.hpm_stop = hpm_stop
        hpm_start(self.top_level)
    def start(self, name):
        Timer.start(self, name)
        if name in self.compatible:
            self.hpm_start(name)
    def stop(self, name=None):
        # Bug fix: resolve the defaulted name *before* Timer.stop() pops it;
        # previously stop() with name=None never matched `self.compatible`,
        # so hpm_stop was skipped and the HPM start/stop pairs unbalanced.
        if name is None:
            name = self.running[-1]
        Timer.stop(self, name)
        if name in self.compatible:
            self.hpm_stop(name)
    def write(self, out=sys.stdout):
        Timer.write(self, out)
        self.hpm_stop(self.top_level)
class CrayPAT_timer(Timer):
    """Interface to CrayPAT API. In addition to regular timers,
    the corresponding regions are profiled by CrayPAT. The gpaw-python has
    to be compiled under CrayPAT.
    """
    def __init__(self, print_levels=4):
        Timer.__init__(self, print_levels)
        from _gpaw import craypat_region_begin, craypat_region_end
        self.craypat_region_begin = craypat_region_begin
        self.craypat_region_end = craypat_region_end
        self.regions = {}       # timer name -> CrayPAT region id
        self.region_id = 5 # leave room for regions in C
    def start(self, name):
        Timer.start(self, name)
        if name in self.regions:
            id = self.regions[name]
        else:
            # First use of this name: allocate the next free region id.
            id = self.region_id
            self.regions[name] = id
            self.region_id += 1
        self.craypat_region_begin(id, name)
    def stop(self, name=None):
        # Bug fix: resolve the defaulted name *before* Timer.stop() pops it;
        # previously stop() with name=None raised KeyError on
        # self.regions[None], leaving the CrayPAT region open.
        if name is None:
            name = self.running[-1]
        Timer.stop(self, name)
        id = self.regions[name]
        self.craypat_region_end(id)
| robwarm/gpaw-symm | gpaw/utilities/timing.py | Python | gpl-3.0 | 12,883 |
#import coin
class IDatabase:
    """Abstract interface for coin storage backends.

    Concrete databases must override enter_coin().
    """
    def enter_coin(self, coin):
        """Store *coin* in the database.

        Raises NotImplementedError unless overridden by a subclass.
        """
        # Bug fix: the original signature lacked `self` (instance calls
        # received the instance as `coin`) and raised a generic
        # Exception('NotImplementedError') instead of the dedicated
        # NotImplementedError (which is still an Exception subclass,
        # so existing `except Exception` handlers keep working).
        raise NotImplementedError('enter_coin must be implemented by subclasses')
# -----------------------------------------------------------------------
# Copyright: 2010-2022, imec Vision Lab, University of Antwerp
# 2013-2022, CWI, Amsterdam
#
# Contact: astra@astra-toolbox.com
# Website: http://www.astra-toolbox.com/
#
# This file is part of the ASTRA Toolbox.
#
#
# The ASTRA Toolbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The ASTRA Toolbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the ASTRA Toolbox. If not, see <http://www.gnu.org/licenses/>.
#
# -----------------------------------------------------------------------
import astra
import numpy as np
# 256x256 reconstruction volume.
vol_geom = astra.create_vol_geom(256, 256)
# Parallel-beam geometry: detector pixel size 1.0, 384 detector pixels,
# 180 angles over [0, pi).
proj_geom = astra.create_proj_geom('parallel', 1.0, 384, np.linspace(0,np.pi,180,False))
# For CPU-based algorithms, a "projector" object specifies the projection
# model used. In this case, we use the "strip" model.
proj_id = astra.create_projector('strip', proj_geom, vol_geom)
# Create a sinogram from a phantom
import scipy.io
P = scipy.io.loadmat('phantom.mat')['phantom256']
sinogram_id, sinogram = astra.create_sino(P, proj_id)
import pylab
pylab.gray()
pylab.figure(1)
pylab.imshow(P)
pylab.figure(2)
pylab.imshow(sinogram)
# Create a data object for the reconstruction
rec_id = astra.data2d.create('-vol', vol_geom)
# Set up the parameters for a reconstruction algorithm using the CPU
# The main difference with the configuration of a GPU algorithm is the
# extra ProjectorId setting.
cfg = astra.astra_dict('SIRT')
cfg['ReconstructionDataId'] = rec_id
cfg['ProjectionDataId'] = sinogram_id
cfg['ProjectorId'] = proj_id
# Available algorithms:
# ART, SART, SIRT, CGLS, FBP
# Create the algorithm object from the configuration structure
alg_id = astra.algorithm.create(cfg)
# Run 20 iterations of the algorithm
# This will have a runtime in the order of 10 seconds.
astra.algorithm.run(alg_id, 20)
# Get the result
rec = astra.data2d.get(rec_id)
pylab.figure(3)
pylab.imshow(rec)
pylab.show()
# Clean up.
# The ids are opaque handles into ASTRA-managed memory; free them explicitly.
astra.algorithm.delete(alg_id)
astra.data2d.delete(rec_id)
astra.data2d.delete(sinogram_id)
astra.projector.delete(proj_id)
| astra-toolbox/astra-toolbox | samples/python/s004_cpu_reconstruction.py | Python | gpl-3.0 | 2,550 |
from matplotlib import pyplot as plt, colorbar
from lib2.VNATimeResolvedDispersiveMeasurement2D import *
class DispersiveRamseyFringes(VNATimeResolvedDispersiveMeasurement2D):
    """2D Ramsey-fringe measurement: sweeps Ramsey delay vs excitation frequency."""
    def __init__(self, name, sample_name, **devs_aliases_map):
        # q_z_awg is not used by this measurement; register it as absent.
        devs_aliases_map["q_z_awg"] = None
        super().__init__(name, sample_name, devs_aliases_map)
        self._measurement_result = \
            DispersiveRamseyFringesResult(name, sample_name)
        self._sequence_generator = IQPulseBuilder.build_dispersive_ramsey_sequences
    def set_fixed_parameters(self, pulse_sequence_parameters, **dev_params):
        super().set_fixed_parameters(pulse_sequence_parameters, **dev_params)
    def set_swept_parameters(self, ramsey_delays, excitation_freqs):
        # NOTE(review): the generator is set to (requested frequency + AWG IF),
        # presumably so the mixed-down drive lands at the requested frequency;
        # confirm the sign convention of the IQ calibration.
        q_if_frequency = self._q_awg.get_calibration() \
            .get_radiation_parameters()["if_frequency"]
        swept_pars = {"ramsey_delay": \
                          (self._output_pulse_sequence,
                           ramsey_delays),
                      "excitation_frequency":
                          (lambda x: self._exc_iqvg.set_frequency(x + q_if_frequency),
                           excitation_freqs)}
        super().set_swept_parameters(**swept_pars)
    def _output_pulse_sequence(self, ramsey_delay):
        # Called once per swept delay value; regenerates the pulse sequence.
        self._pulse_sequence_parameters["ramsey_delay"] = ramsey_delay
        super()._output_pulse_sequence()
class DispersiveRamseyFringesResult(VNATimeResolvedDispersiveMeasurement2DResult):
    """Result container for Ramsey-fringe 2D scans (delay vs excitation frequency)."""
    def _prepare_data_for_plot(self, data):
        # Convert to plot units: GHz for frequency, microseconds for delay.
        freqs_ghz = data["excitation_frequency"] / 1e9
        delays_us = data["ramsey_delay"] / 1e3
        return freqs_ghz, delays_us, data["data"]
    def _annotate_axes(self, axes):
        ylabel = "Ramsey delay [$\mu$s]"
        xlabel = "Excitation if_freq [GHz]"
        axes[0].set_ylabel(ylabel)
        axes[-2].set_ylabel(ylabel)
        axes[-1].set_xlabel(xlabel)
        axes[-2].set_xlabel(xlabel)
| vdrhtc/Measurement-automation | lib2/DispersiveRamseyFringes.py | Python | gpl-3.0 | 1,912 |
import src
import random
class Shocker(src.items.Item):
    """
    ingame item used as ressource to build bombs and stuff
    should have the habit to explode at inconvienent times
    """
    type = "Shocker"
    def __init__(self):
        """
        set up internal state
        """
        super().__init__(display="/\\")
    def apply(self, character):
        """
        Consume one crystal compressor from the character's inventory to add
        one electrical charge to the room containing this item.

        Parameters:
            character: the character trying to use the item
        """
        compressor = None
        for item in character.inventory:
            if isinstance(item, src.items.itemMap["CrystalCompressor"]):
                compressor = item
                break
        if not compressor:
            character.addMessage("no crystal compressor found in inventory")
            return
        room = self.container
        if not (room and isinstance(room, src.rooms.Room)):
            character.addMessage("no room found")
            return
        if not hasattr(room, "electricalCharges"):
            character.addMessage("this room can't be charged")
            return
        if room.electricalCharges >= room.maxElectricalCharges:
            character.addMessage("this room is fully charged")
            return
        room.electricalCharges += 1
        character.addMessage("you activate the shocker and increase the rooms charges to %s"%(room.electricalCharges,))
        character.inventory.remove(compressor)
src.items.addType(Shocker)
| MarxMustermann/OfMiceAndMechs | src/itemFolder/military/shocker.py | Python | gpl-3.0 | 1,603 |
'''
base tools
'''
# -*- coding: utf-8 -*-
import re
def is_ipv4(ip):
    """Return True if *ip* is a dotted-quad IPv4 address with octets 0-255."""
    octet = r'(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[0-9]{1,2})'
    pattern = r'^%s(\.%s){3}$' % (octet, octet)
    return re.match(pattern, ip) is not None
def is_domain(domain):
    """Return True if *domain* is a syntactically valid DNS name.

    Labels are 1-63 characters of [-a-zA-Z0-9] starting with an
    alphanumeric; at least two labels are required, and a single trailing
    dot (fully-qualified form) is accepted.
    """
    # Bug fix: the original pattern had no end anchor, so re.match()
    # accepted any string with a valid-looking domain *prefix*
    # (e.g. "example.com/path").  \Z anchors the match at end-of-string.
    pattern = r'[a-zA-Z0-9][-a-zA-Z0-9]{0,62}(\.[a-zA-Z0-9][-a-zA-Z0-9]{0,62})+\.?\Z'
    return re.match(pattern, domain) is not None
| allen1989127/WhereRU | org/sz/tools.py | Python | gpl-3.0 | 508 |
#!/usr/bin/env python
# encoding: utf-8
'''
pvaurora configuration file
'''
LATITUDE = 42.6     # site latitude, decimal degrees
LONGITUDE = 12.9    # site longitude, decimal degrees
# NOTE(review): placeholder credentials below -- replace before deployment.
API_KEY = "api_key_value"
SYSTEM_ID = -1      # placeholder value; presumably -1 means "not configured"
| yuroller/pvaurora | src/config-dist.py | Python | gpl-3.0 | 152 |
"""
/**
* Ossec Framework
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* @category Ossec
* @package Ossec
* @version $Id: Histogram.php,v 1.3 2008/03/03 15:12:18 dcid Exp $
* @author Chris Abernethy
* @copyright Copyright (c) 2007-2008, Daniel B. Cid <dcid@ossec.net>, All rights reserved.
* @license http://www.gnu.org/licenses/gpl-3.0.txt GNU Public License
*/
"""
##############################################################
# Copyright (C) 2015 Masashi Okumura All rights reserved.
##############################################################
import os,sys
import re
from flask import Flask, session, request, redirect, render_template, url_for
from flask import jsonify, make_response
from datetime import *
import time
import uuid
import hashlib
import ossec_conf
import os_lib_handle
import os_lib_agent
import os_lib_alerts
#import os_lib_syscheck
from ossec_categories import global_categories
from ossec_formats import log_categories
from .view import View
class Search(View):
def __init__(self, request):
super().__init__()
self.request = request
self.html = ""
self.contents= ""
self.is_post = False
if request.method == 'POST':
self.is_post = True
self._make_contents()
self._make_html()
def _make_contents(self):
# Starting handle
ossec_handle = os_lib_handle.os_handle_start(ossec_conf.ossec_dir)
# Iniitializing some variables
u_final_time = int(time.time())
#u_final_time = int(time.mktime(datetime.now().timetuple()))
u_init_time = int(u_final_time - ossec_conf.ossec_search_time) # 14400 = 3600 * 4
u_level = ossec_conf.ossec_search_level # 7
u_pattern = ""
u_rule = ""
u_srcip = ""
u_user = ""
u_location = ""
# masao added the folloings :
USER_final = 0
USER_init = 0
USER_level = ""
USER_pattern = None
LOCATION_pattern = None
USER_group = None
USER_log = None
USER_rule = None
USER_srcip = None
USER_user = None
USER_page = int(1)
USER_searchid = 0
USER_monitoring = 0
used_stored = 0
buffer = ""
# Getting search id
if self.is_post and ('searchid' in self.request.form):
str_searchid = self.request.form.get('searchid')
if re.search("[a-z0-9]+", str_searchid):
USER_searchid = str_searchid # It might be hex. dont use int().
is_rt_monitoring = False
# TODO : real time monitoring t.b. implemented.
rt_sk = ""
sv_sk = 'checked="checked"'
if self.is_post and ('monitoring' in self.request.form):
str_monitoring = self.request.form.get('monitoring')
if int(str_monitoring) == 1:
is_rt_monitoring = True
rt_sk = 'checked="checked"'
sv_sk = "";
# Cleaning up time
USER_final = u_final_time
USER_init = u_init_time
USER_monitoring = 1
# Cleaning up fields
# $_POST['search'] = "Search";
# unset($_POST['initdate']);
# unset($_POST['finaldate']);
# Deleting search
if USER_searchid != 0:
os_lib_alerts.os_cleanstored(USER_searchid)
# Refreshing every 90 seconds by default */
m_ossec_refresh_time = ossec_conf.ossec_refresh_time * 1000;
buffer += """\
<script language="javascript">
setTimeout("document.dosearch.submit()", %d);
</script>\n""" % m_ossec_refresh_time
# Reading user input -- being very careful parsing it
# Initial Date
datepattern = "^([0-9]{4})-([0-9]{2})-([0-9]{2}) ([0-9]{2}):([0-9]{2})$";
if is_rt_monitoring:
pass
elif self.is_post and ('initdate' in self.request.form):
str_initdate = self.request.form.get('initdate')
mobj = re.search(datepattern, str_initdate)
if mobj:
year = int(mobj.group(1))
month = int(mobj.group(2))
day = int(mobj.group(3))
hour = int(mobj.group(4))
minute = int(mobj.group(5))
USER_init = int(time.mktime((year, month, day, hour, minute, 0, 0, 0, -1)))
u_init_time = USER_init
# to check :
# print(datetime.fromtimestamp(u_init_time))
# Final Date
if is_rt_monitoring:
pass
elif self.is_post and ('finaldate' in self.request.form):
str_finaldate = self.request.form.get('finaldate')
mobj = re.search(datepattern, str_finaldate)
if mobj:
year = int(mobj.group(1))
month = int(mobj.group(2))
day = int(mobj.group(3))
hour = int(mobj.group(4))
minute = int(mobj.group(5))
USER_final = int(time.mktime((year, month, day, hour, minute, 0, 0, 0, -1)))
u_final_time = USER_final
# Level
if self.is_post and ('level' in self.request.form):
str_level = self.request.form.get('level')
if str_level and str_level.isdigit() and (int(str_level) > 0) and (int(str_level) < 16):
USER_level = str_level
u_level = str_level
# Page
if self.is_post and ('page' in self.request.form):
str_page = self.request.form.get('page')
if str_page and str_page.isdigit() and (int(str_page) > 0) and (int(str_page) <= 999):
USER_page = str_page
# Pattern
strpattern = "^[0-9a-zA-Z. _|^!\-()?]{1,128}$"
intpattern = "^[0-9]{1,8}$"
if self.is_post and ('strpattern' in self.request.form):
str_strpattern = self.request.form.get('strpattern')
if re.search(strpattern, str_strpattern):
USER_pattern = str_strpattern
u_pattern = USER_pattern
# Getting location
if self.is_post and ('locationpattern' in self.request.form):
lcpattern = "^[0-9a-zA-Z. _|^!>\/\\-]{1,156}$"
str_locationpattern = self.request.form.get('locationpattern')
if re.search(lcpattern, str_locationpattern):
LOCATION_pattern = str_locationpattern
u_location = LOCATION_pattern
# Group pattern
if self.is_post and ('grouppattern' in self.request.form):
str_grouppattern = self.request.form.get('grouppattern')
if str_grouppattern == "ALL":
USER_group = None
elif re.search(strpattern, str_grouppattern):
USER_group = str_grouppattern
pass
# Log pattern
if self.is_post and ('logpattern' in self.request.form):
str_logpattern = self.request.form.get('logpattern')
if str_logpattern == "ALL":
USER_log = None
elif re.search(strpattern, str_logpattern):
USER_log = str_logpattern
# Rule pattern
if self.is_post and ('rulepattern' in self.request.form):
str_rulepattern = self.request.form.get('rulepattern')
if re.search(strpattern, str_rulepattern):
USER_rule = str_rulepattern
u_rule = USER_rule
# Src ip pattern
if self.is_post and ('srcippattern' in self.request.form):
str_srcippattern = self.request.form.get('srcippattern')
if re.search(strpattern, str_srcippattern):
USER_srcip = str_srcippattern
u_srcip = USER_srcip
# User pattern
if self.is_post and ('userpattern' in self.request.form):
str_userpattern = self.request.form.get('userpattern')
if re.search(strpattern, str_userpattern):
USER_user = str_userpattern
u_user = USER_user
# Maximum number of alerts
if self.is_post and ('max_alerts_per_page' in self.request.form):
str_max_alerts_per_page = self.request.form.get('max_alerts_per_page')
if re.search(intpattern, str_max_alerts_per_page):
int_max_alerts_per_page = int (str_max_alerts_per_page)
if (int_max_alerts_per_page > 200) and (int_max_alerts_per_page < 10000):
ossec_conf.ossec_max_alerts_per_page = int_max_alerts_per_page
# Getting search id -- should be enough to avoid duplicates
if is_rt_monitoring: # 'get('search') is "Search"
m = hashlib.md5()
m.update(str(uuid.uuid4()).encode('UTF-8'))
USER_searchid = m.hexdigest()
USER_page = 1
elif self.is_post and ('search' in self.request.form):
str_search = self.request.form.get('search')
# ImmutableMultiDict([('initdate', '2015-07-21 15:00'), ('level', '3'), ('search', 'Search'), ('monitoring', '0'), ('finaldate', '2015-07-21 19:00'), ('searchid', '0')])
if str_search == "Search":
# Creating new search id
# (in php) $USER_searchid = md5(uniqid(rand(), true));
m = hashlib.md5()
m.update(str(uuid.uuid4()).encode('UTF-8'))
USER_searchid = m.hexdigest()
USER_page = 1
elif str_search == "<< First":
USER_page = 1
elif str_search == "< Prev":
if int(USER_page) > 1:
UESR_page = int(USER_page) - 1
elif str_search == "Next >":
USER_page = int(USER_page) + 1
elif str_search == "Last >>":
USER_page = 999
elif str_search == "":
pass
else:
buffer += "<b class='red'>Invalid search. </b><br />\n"
self.contents = buffer
return
# Printing current date
buffer += """<div class="smaller2">%s<br/>""" % datetime.now().strftime("%m/%d/%Y %H:%M:%S")
if USER_monitoring == 1:
buffer += """ -- Refreshing every %s secs</div><br />""" % ossec_conf.ossec_refresh_time
else:
buffer += "</div><br/>"
# Getting all agents
agent_list = os_lib_agent.os_getagents(ossec_handle)
buffer += "<h2>Alert search options:</h2>\n"
#################
### Search forms ###
#################
buffer += """\
<form name="dosearch" method="post" action="/search">
<table><tr valign="top">
<td><input type="radio" name="monitoring" value="0" checked="checked"/></td>
<td>From: <input type="text" name="initdate" id="i_date_a" size="17" value="%s" maxlength="16" class="formText" />
<img src="static/img/calendar.gif" id="i_trigger" title="Date selector" alt="Date selector" class="formText" /></td>
<td> To: <input type="text" name="finaldate" id="f_date_a" size="17" value="%s" maxlength="16" class="formText" />
<img src="static/img/calendar.gif" id="f_trigger" title="Date selector" alt="Date selector" class="formText" /></td>
</tr>
""" % (
datetime.fromtimestamp(u_init_time).strftime("%Y-%m-%d %H:%M"),
datetime.fromtimestamp(u_final_time).strftime("%Y-%m-%d %H:%M")
)
buffer += """<tr><td><input type="radio" name="monitoring" value="1" %s/></td>
<td>Real time monitoring</td></tr>
</table>
<br />
<table>
""" % rt_sk
# Minimum Level
buffer += """<tr><td>Minimum level:</td><td><select name="level" class="formText">"""
if int(u_level) == 1:
buffer += ' <option value="1" selected="selected">All</option>'
else:
buffer += ' <option value="1">All</option>'
for l_counter in range(15, 1, -1):
if l_counter == int(u_level):
buffer += ' <option value="%s" selected="selected">%s</option>' % (l_counter, l_counter)
else:
buffer += ' <option value="%s">%s</option>' % (l_counter, l_counter)
buffer += "</select>"
# Category
buffer += """</td><td>
Category: </td><td><select name="grouppattern" class="formText">"""
buffer += '<option value="ALL" class="bluez">All categories</option>'
for _cat_name, _cat in global_categories.items():
for cat_name, cat_val in _cat.items():
sl = ""
if USER_group == cat_val:
sl = ' selected="selected"'
if cat_name.find("(all)") != -1:
buffer += """<option class="bluez" %s value="%s">%s</option>""" % (sl, cat_val, cat_name)
else:
buffer += """<option value="%s" %s> %s</option>""" % (cat_val, sl, cat_name)
buffer += '</select>'
# Str pattern
buffer += """</td></tr><tr><td>
Pattern: </td><td><input type="text" name="strpattern" size="16"
value="%s" class="formText" /></td>""" % u_pattern
# Log formats
buffer += '<td>Log formats: </td><td><select name="logpattern" class="formText">'
buffer += '<option value="ALL" class="bluez">All log formats</option>'
for _cat_name, _cat in log_categories.items():
for cat_name, cat_val in _cat.items():
sl = ""
if USER_log == cat_val:
sl = ' selected="selected"'
if cat_name.find("(all)") != -1:
buffer += """<option class="bluez" %s value="%s">%s</option>"""% (sl, cat_val, cat_name)
else:
buffer += """<option value="%s" %s> %s</option>""" % (cat_val, sl, cat_name)
buffer += '</select>'
# Srcip pattern
buffer += """</td></tr><tr><td>
Srcip: </td><td>
<input type="text" name="srcippattern" size="16" class="formText"
value="%s"/> """ % u_srcip
# Rule pattern
buffer += """</td><td>
User: </td><td><input type="text" name="userpattern" size="8"
value="%s" class="formText" /></td></tr>""" % u_user
# Location
buffer += """<tr><td>
Location:</td><td>
<input type="text" name="locationpattern" size="16" class="formText"
value="%s"/> """ % u_location
# Rule pattern
buffer += """</td><td>
Rule id: </td><td><input type="text" name="rulepattern" size="8"
value="%s" class="formText"/>""" % u_rule
# Max alerts
buffer += """'</td></tr><tr><td>
Max Alerts:</td>
<td><input type="text" name="max_alerts_per_page" size="8" value="%s" class="formText" /></td></tr>
""" % ossec_conf.ossec_max_alerts_per_page
# Agent
# seems not implemented
# Final form
buffer += """\
<tr><td>
<input type="submit" name="search" value="Search" class="button" />
"""
buffer += """</td></tr></table>
<input type="hidden" name="searchid" value="%s" />
</form><br /> <br />""" % USER_searchid
# Java script for date
buffer += """\
<script type="text/javascript">
Calendar.setup({
button : "i_trigger",
inputField : "i_date_a",
ifFormat : "%Y-%m-%d %H:%M",
showsTime : true,
timeFormat : "24"
});
Calendar.setup({
button : "f_trigger",
inputField : "f_date_a",
ifFormat : "%Y-%m-%d %H:%M",
showsTime : true,
timeFormat : "24"
});
</script>
"""
buffer += "<h2>Results:</h2>\n"
if (not USER_init) or (not USER_final) or (not USER_level):
buffer += "<b>No search performed.</b><br/>\n"
self.contents = buffer
return
output_list = None
# Getting stored alerts
if is_rt_monitoring:
# Getting alerts
output_list = os_lib_alerts.os_searchalerts(ossec_handle,
USER_searchid,
USER_init,
USER_final,
ossec_conf.ossec_max_alerts_per_page,
USER_level,
USER_rule,
LOCATION_pattern,
USER_pattern,
USER_group,
USER_srcip,
USER_user,
USER_log)
elif self.is_post and ('search' in request.form):
str_search = self.request.form.get("search")
if str_search != "Search":
output_list = os_lib_alerts.os_getstoredalerts(ossec_handle, USER_searchid)
used_stored = 1
else: # Searchiing for new ones
# Getting alerts
output_list = os_lib_alerts.os_searchalerts(ossec_handle,
USER_searchid,
USER_init,
USER_final,
ossec_conf.ossec_max_alerts_per_page,
USER_level,
USER_rule,
LOCATION_pattern,
USER_pattern,
USER_group,
USER_srcip,
USER_user,
USER_log)
if (output_list is None) or (output_list[1] is None):
if used_stored == 1:
buffer += "<b class='red'>Nothing returned (search expired). </b><br />\n"
else:
buffer += "<b class='red'>Nothing returned. </b><br />\n"
self.contents = buffer
return
# Checking for no return
if not 'count' in output_list[0]:
buffer += "<b class='red'>Nothing returned. </b><br />\n"
self.contents = buffer
return
# Checking maximum page size
if int(USER_page) >= int(output_list[0]['pg']):
USER_page = output_list[0]['pg']
# Page 1 will become the latest and the latest, page 1
real_page = (output_list[0]['pg'] + 1) - USER_page
buffer += "<b>Total alerts found: </b>%s<br />" % output_list[0]['count']
if output_list[0]['pg'] > 1:
buffer += "<b>Output divided in </b>%s pages.<br/>" % output_list[0]['pg']
buffer += '<br /><form name="dopage" method="post" action="/search">'
buffer += """\
<input type="submit" name="search" value="<< First" class="button" class="formText" />
<input type="submit" name="search" value="< Prev" class="button" class="formText" />
Page <b>%s</b> (%s alerts)""" % (USER_page, output_list[0][real_page])
# Currently page
buffer += """\
<input type="hidden" name="initdate" value="%s" />
<input type="hidden" name="finaldate" value="%s" />
<input type="hidden" name="rulepattern" value="%s" />
<input type="hidden" name="srcippattern" value="%s" />
<input type="hidden" name="userpattern" value="'%s" />
<input type="hidden" name="locationpattern" value="%s" />
<input type="hidden" name="level" value="%s" />
<input type="hidden" name="page" value="%s" />
<input type="hidden" name="searchid" value="%s" />
<input type="hidden" name="monitoring" value="%s" />
<input type="hidden" name="max_alerts_per_page" value="%s" />
""" % (
datetime.fromtimestamp(u_init_time).strftime("%Y-%m-%d %H:%M"),
datetime.fromtimestamp(u_final_time).strftime("%Y-%m-%d %H:%M"),
u_rule, u_srcip, u_user, u_location, u_level, USER_page, USER_searchid, USER_monitoring, ossec_conf.ossec_max_alerts_per_page
)
if output_list[0]['pg'] > 1:
buffer += """\
<input type="submit" name="search" value="Next >" class="button" class="formText" />
<input type="submit" name="search" value="Last >>" class="button" class="formText" />
</form>
"""
# Checking if page exists
target = output_list[real_page]
target_file = os.environ['CCPRISM_HOME'] + target
print("real_page is %s" %real_page)
print("target_file is " + target_file)
print(output_list[0].keys())
if 'count' in output_list[0].keys():
print("count key exists.")
if 'pg' in output_list[0].keys():
print ('pg key exists')
if real_page in output_list[0].keys():
print("real_page key exists.")
if (not real_page in output_list[0].keys()) or (len(target) < 5) or (not os.path.exists(target_file)):
#if (not output_list[0][real_page]) or (len(target) < 5) or (not os.path.exists(target_file)):
print("heyheyhey")
buffer += "<b class='red'>Nothing returned (or search expired). (* 1)</b><br />\n"
self.contents = buffer
return
buffer += "<br/><br/>"
# Printing page
# TODO: There are functions for slurping file contents.
fobj = open(target_file, 'r')
target_buffer = fobj.read()
fobj.close()
buffer += target_buffer
self.contents = buffer
def _make_html(self):
self.html = """\
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
%s
</head>
<body>
<br/>
%s
<div id="container">
<div id="content_box">
<div id="content" class="pages">
<a name="top"></a>
<!-- BEGIN: content -->
%s
<!-- END: content -->
<br /><br />
<br /><br />
</div>
</div>
%s
</div>
</body>
</html>
""" % (View.HEAD, View.HEADER, self.contents, View.FOOTER)
pass
    def getHtml(self):
        """Return the complete page rendered by _make_html()."""
        return self.html
| classcat/cc-prism-hids | main/pylib/ccprism/search.py | Python | gpl-3.0 | 23,411 |
import discord
from discord.ext import commands
from .utils import checks
from __main__ import send_cmd_help, settings
from cogs.utils.dataIO import dataIO
import os
import re
import asyncio
class Antilink:
    """Blocks Discord invite links from users who don't have the permission 'Manage Messages'"""

    def __init__(self, bot):
        self.bot = bot
        self.location = 'data/antilink/settings.json'
        self.json = dataIO.load_json(self.location)
        # Matches discord.gg / discordapp.com/invite links, optionally
        # wrapped in <> (embed-suppressed form).
        self.regex = re.compile(r"<?(https?:\/\/)?(www\.)?(discord\.gg|discordapp\.com\/invite)\b([-a-zA-Z0-9/]*)>?")
        # discord.me vanity invites are matched separately.
        self.regex_discordme = re.compile(r"<?(https?:\/\/)?(www\.)?(discord\.me\/)\b([-a-zA-Z0-9/]*)>?")

    @commands.group(pass_context=True, no_pm=True)
    @checks.admin_or_permissions(administrator=True)
    async def antilinkset(self, ctx):
        """Manages the settings for antilink."""
        serverid = ctx.message.server.id
        if ctx.invoked_subcommand is None:
            await send_cmd_help(ctx)
        # Seed default settings the first time this server is seen; the
        # subcommands below persist them on change.
        if serverid not in self.json:
            self.json[serverid] = {'toggle': False, 'message': '', 'dm': False}

    @antilinkset.command(pass_context=True, no_pm=True)
    @checks.admin_or_permissions(administrator=True)
    async def toggle(self, ctx):
        """Enable/disables antilink in the server"""
        serverid = ctx.message.server.id
        if self.json[serverid]['toggle'] is True:
            self.json[serverid]['toggle'] = False
            await self.bot.say('Antilink is now disabled')
        elif self.json[serverid]['toggle'] is False:
            self.json[serverid]['toggle'] = True
            await self.bot.say('Antilink is now enabled')
        dataIO.save_json(self.location, self.json)

    @antilinkset.command(pass_context=True, no_pm=True)
    @checks.admin_or_permissions(administrator=True)
    async def message(self, ctx, *, text):
        """Set the message for when the user sends a illegal discord link"""
        serverid = ctx.message.server.id
        self.json[serverid]['message'] = text
        dataIO.save_json(self.location, self.json)
        await self.bot.say('Message is set')
        if self.json[serverid]['dm'] is False:
            await self.bot.say('Remember: Direct Messages on removal is disabled!\nEnable it with ``antilinkset toggledm``')

    @antilinkset.command(pass_context=True, no_pm=True)
    @checks.admin_or_permissions(administrator=True)
    async def toggledm(self, ctx):
        """Enable/disable DMing the set message to users on link removal"""
        serverid = ctx.message.server.id
        if self.json[serverid]['dm'] is False:
            self.json[serverid]['dm'] = True
            await self.bot.say('Enabled DMs on removal of invite links')
        elif self.json[serverid]['dm'] is True:
            self.json[serverid]['dm'] = False
            await self.bot.say('Disabled DMs on removal of invite links')
        dataIO.save_json(self.location, self.json)

    async def _new_message(self, message):
        """Finds the message and checks it for regex"""
        user = message.author
        if message.server is None:
            return
        if message.server.id in self.json:
            if self.json[message.server.id]['toggle'] is True:
                if self.regex.search(message.content) is not None or self.regex_discordme.search(message.content) is not None:
                    # Exempt the bot owner, admin/mod roles, and anyone who may
                    # manage messages in this channel.
                    roles = [r.name for r in user.roles]
                    bot_admin = settings.get_server_admin(message.server)
                    bot_mod = settings.get_server_mod(message.server)
                    if user.id == settings.owner:
                        return
                    elif bot_admin in roles:
                        return
                    elif bot_mod in roles:
                        return
                    elif user.permissions_in(message.channel).manage_messages is True:
                        return
                    else:
                        # BUGFIX: asyncio.sleep() returns a coroutine; the
                        # original never awaited it, so no delay ever happened.
                        await asyncio.sleep(0.5)
                        await self.bot.delete_message(message)
                        if self.json[message.server.id]['dm'] is True:
                            await self.bot.send_message(message.author, self.json[message.server.id]['message'])
def check_folder():
    """Create the cog's data directory on first load."""
    folder = 'data/antilink'
    if not os.path.exists(folder):
        os.makedirs(folder)
def check_file():
    """Create an empty settings file if it is missing or not valid JSON."""
    settings_path = 'data/antilink/settings.json'
    if not dataIO.is_valid_json(settings_path):
        dataIO.save_json(settings_path, {})
def setup(bot):
    """Red cog entry point: prepare storage and register the Antilink cog."""
    check_folder()
    check_file()
    cog = Antilink(bot)
    bot.add_cog(cog)
    # Hook the raw message stream so links are caught outside commands too.
    bot.add_listener(cog._new_message, 'on_message')
| Krissbro/LondonGaymers | antilink/antilink.py | Python | gpl-3.0 | 4,545 |
#!/usr/bin/env python3
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtGui import QPixmap, QMouseEvent
from PyQt5.QtWidgets import QToolButton
from PyQt5.QtCore import Qt, pyqtSignal
from models.components import *
class DraggableComponentButton(QToolButton):
    """Palette button for a component type that starts a drag on mouse press.

    Emits ``mousePress`` with the configured component type so the owning
    view can begin a drag operation.
    """

    mousePress = pyqtSignal(ComponentType, QMouseEvent, name='mousePress')

    def __init__(self, parent=None):
        QToolButton.__init__(self, parent)
        # Assigned by the owner after construction; identifies what this
        # button creates when dropped.
        self.componentType = None

    def mousePressEvent(self, event):
        # BUGFIX: the original set a plain Python attribute `self.checked`,
        # which does not touch the Qt checked state at all; use setChecked().
        self.setChecked(False)
        self.mousePress.emit(self.componentType, event)
from __future__ import print_function, unicode_literals
import sys
from resources.lib.kodiutils import params as decode
class Params:
    # Kodi-style plugin invocation, parsed once at import time:
    # sys.argv = [plugin_url, handle, url-encoded-query, resume-flag].
    handle = int(sys.argv[1]) if len(sys.argv) > 1 else -1
    orig_args = sys.argv[2] if len(sys.argv) > 2 else ''
    args = decode(sys.argv[2]) if len(sys.argv) > 2 else {}
    # argv[3] looks like "resume:true"/"resume:false"; [7:] strips the
    # 7-character "resume:" prefix.
    resume = sys.argv[3][7:] != 'false' if len(sys.argv) > 3 else False
    # NOTE(review): never assigned in this module; presumably set by callers.
    url = None
# Module-level singleton shared by the plugin code.
params = Params()
| bbaronSVK/plugin.video.stream-cinema | resources/lib/params.py | Python | gpl-3.0 | 420 |
# -*- coding: utf-8 -*-
# Copyright (C) Alex Urban (2019)
#
# This file is part of GWSumm.
#
# GWSumm is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWSumm is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWSumm. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for gwsumm.html.static
"""
__author__ = 'Alex Urban <alexander.urban@ligo.org>'
from collections import OrderedDict
from .. import static
# test simple utils
def test_get_css():
    """get_css returns an ordered mapping of stylesheet names to file paths."""
    css = static.get_css()
    assert isinstance(css, OrderedDict)
    # keys identify the bundled stylesheets, in load order
    assert list(css) == [
        'font-awesome',
        'font-awesome-solid',
        'gwbootstrap',
    ]
    # values point at the expected minified files
    basenames = [path.rsplit('/', 1)[-1] for path in css.values()]
    assert basenames == [
        'fontawesome.min.css',
        'solid.min.css',
        'gwbootstrap.min.css',
    ]
def test_get_js():
    """get_js returns an ordered mapping of script names to file paths."""
    js = static.get_js()
    assert isinstance(js, OrderedDict)
    # keys identify the bundled scripts, in load order
    assert list(js) == [
        'jquery',
        'jquery-ui',
        'moment',
        'bootstrap',
        'fancybox',
        'datepicker',
        'gwbootstrap',
    ]
    # values point at the expected minified files
    basenames = [path.rsplit('/', 1)[-1] for path in js.values()]
    assert basenames == [
        'jquery-3.5.1.min.js',
        'jquery-ui.min.js',
        'moment.min.js',
        'bootstrap.bundle.min.js',
        'jquery.fancybox.min.js',
        'bootstrap-datepicker.min.js',
        'gwbootstrap-extra.min.js',
    ]
| gwpy/gwsumm | gwsumm/html/tests/test_static.py | Python | gpl-3.0 | 1,947 |
import os
import sqlite3
from flask import Flask, request, session, g, redirect, url_for, abort, \
render_template, flash
# create makerlinks application
app = Flask(__name__)
app.config.from_object(__name__)
# Development defaults; SECRET_KEY/USERNAME/PASSWORD are dev-only
# placeholders and must be overridden in production.
app.config.update(dict(
    DATABASE = os.path.join(app.root_path, 'makerlinks-dev.db'),
    DEBUG = True,
    APP_NAME = "MakerLinks",
    SECRET_KEY = 'development key',
    USERNAME = 'admin',
    PASSWORD = 'default'
))
# Optional override: MAKERLINKS_SETTINGS may point at an external config file.
app.config.from_envvar('MAKERLINKS_SETTINGS', silent = True)
def connect_db():
    """Open a new SQLite connection with name-based row access."""
    conn = sqlite3.connect(app.config['DATABASE'])
    conn.row_factory = sqlite3.Row
    return conn
def get_db():
    """Return the per-app-context connection, opening it on first use."""
    db = getattr(g, 'sqlite_db', None)
    if db is None:
        db = g.sqlite_db = connect_db()
    return db
def init_db():
    """Create the schema from schema.sql inside an application context."""
    with app.app_context():
        db = get_db()
        with app.open_resource('schema.sql', mode='r') as schema:
            db.cursor().executescript(schema.read())
        db.commit()
@app.teardown_appcontext
def close_db(error):
    """Close the SQLite connection at the end of the app context, if open."""
    db = getattr(g, 'sqlite_db', None)
    if db is not None:
        db.close()
@app.route('/')
def show_links():
    """Render all submitted links, newest first."""
    cur = get_db().execute('select link, submitter from links order by id desc')
    return render_template('show_links.html', links=cur.fetchall())
@app.route('/add', methods=['POST'])
def add_link():
    """Store a POSTed link and redirect back to the listing."""
    db = get_db()
    # Parameterized query -- form values are never interpolated into SQL.
    db.execute('insert into links (link, submitter) values (?, ?)',
               (request.form['link'], request.form['submitter']))
    db.commit()
    flash('New link was successfully posted')
    return redirect(url_for('show_links'))
if __name__ == "__main__":
    # Create the schema on first run, then start the development server.
    if not os.path.exists(app.config['DATABASE']):
        init_db()
    app.run()
| MakerLabPC/makerlinks | makerlinks.py | Python | gpl-3.0 | 1,532 |
# coding=utf-8
import unittest
"""996. Number of Squareful Arrays
https://leetcode.com/problems/number-of-squareful-arrays/description/
Given an array `A` of non-negative integers, the array is _squareful_ if for
every pair of adjacent elements, their sum is a perfect square.
Return the number of permutations of A that are squareful. Two permutations
`A1` and `A2` differ if and only if there is some index `i` such that `A1[i]
!= A2[i]`.
**Example 1:**
**Input:** [1,17,8]
**Output:** 2
**Explanation:**
[1,8,17] and [17,8,1] are the valid permutations.
**Example 2:**
**Input:** [2,2,2]
**Output:** 1
**Note:**
1. `1 <= A.length <= 12`
2. `0 <= A[i] <= 1e9`
Similar Questions:
"""
class Solution(object):
    def numSquarefulPerms(self, A):
        """Count the distinct permutations of A in which every pair of
        adjacent elements sums to a perfect square.

        Backtracking over a sorted copy; duplicates are pruned by only
        allowing equal values to be placed in left-to-right order.

        :type A: List[int]
        :rtype: int
        """
        vals = sorted(A)
        n = len(vals)
        used = [False] * n
        count = [0]  # boxed so the nested function can mutate it

        def squareful(a, b):
            # Integer check with a +1 guard against float sqrt rounding.
            s = a + b
            r = int(s ** 0.5)
            return r * r == s or (r + 1) * (r + 1) == s

        def backtrack(prev, placed):
            if placed == n:
                count[0] += 1
                return
            for i in range(n):
                if used[i]:
                    continue
                # Skip duplicate values at the same depth to avoid
                # counting identical permutations more than once.
                if i > 0 and vals[i] == vals[i - 1] and not used[i - 1]:
                    continue
                if prev is not None and not squareful(prev, vals[i]):
                    continue
                used[i] = True
                backtrack(vals[i], placed + 1)
                used[i] = False

        backtrack(None, 0)
        return count[0]
def test(self):
    # NOTE(review): unused module-level stub that takes `self`; it looks like
    # it was meant to live inside a unittest.TestCase. Left as-is.
    pass
if __name__ == "__main__":
    # Discover and run any unittest cases defined in this module.
    unittest.main()
| openqt/algorithms | leetcode/python/lc996-number-of-squareful-arrays.py | Python | gpl-3.0 | 993 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
#pylint: disable=invalid-name
#pylint: disable=no-init
from __future__ import (absolute_import, division, print_function)
from mantid import config
import os
import systemtesting
import glob
EXPECTED_EXT = '.expected'
class ValidateInstrumentDefinitionFiles(systemtesting.MantidSystemTest):
    """Validate every instrument definition file (IDF) against the local
    IDF XML schema using the minixsv validator."""
    # Path to the IDF schema; filled in by runTest().
    xsdFile=''
    # Explicitly specify single file to test. If None, test all.
    theFileToTest=None #"MARI_Definition.xml"
    def skipTests(self):
        # Skip when the optional XML-validation libraries are not installed.
        try:
            from genxmlif import GenXmlIfError # noqa
            from minixsv import pyxsval # noqa
        except ImportError:
            return True
        return False
    def __getDataFileList__(self):
        """Return the full paths of all *Definition*.xml files in the
        configured instrument directory."""
        # get a list of directories to look in
        direc = config['instrumentDefinition.directory']
        print("Looking for instrument definition files in: %s" % direc)
        cwd = os.getcwd()
        os.chdir(direc)
        myFiles = glob.glob("*Definition*.xml")
        os.chdir(cwd)
        files = []
        for filename in myFiles:
            files.append(os.path.join(direc, filename))
        return files
    def runTest(self):
        """Main entry point for the test suite"""
        from minixsv import pyxsval
        # need to extend minixsv library to add method for that forces it to
        # validate against local schema when the xml file itself has
        # reference to schema online. The preference is to systemtest against
        # a local schema file to avoid this systemtest failing is
        # external url temporariliy not available. Secondary it also avoid
        # having to worry about proxies.
        #pylint: disable=too-few-public-methods
        class MyXsValidator(pyxsval.XsValidator):
            ########################################
            # force validation of XML input against local file
            #
            def validateXmlInputForceReadFile (self, xmlInputFile, inputTreeWrapper, xsdFile):
                xsdTreeWrapper = self.parse (xsdFile)
                xsdTreeWrapperList = []
                xsdTreeWrapperList.append(xsdTreeWrapper)
                self._validateXmlInput (xmlInputFile, inputTreeWrapper, xsdTreeWrapperList)
                for xsdTreeWrapper in xsdTreeWrapperList:
                    xsdTreeWrapper.unlink()
                return inputTreeWrapper
        def parseAndValidateXmlInputForceReadFile(inputFile, xsdFile=None, **kw):
            myXsValidator = MyXsValidator(**kw)
            # parse XML input file
            inputTreeWrapper = myXsValidator.parse (inputFile)
            # validate XML input file
            return myXsValidator.validateXmlInputForceReadFile (inputFile, inputTreeWrapper, xsdFile)
        direc = config['instrumentDefinition.directory']
        self.xsdFile = os.path.join(direc,'Schema/IDF/1.0/','IDFSchema.xsd')
        if self.theFileToTest is None:
            files = self.__getDataFileList__()
        else:
            files = [os.path.join(direc,self.theFileToTest)]
        # run the tests; collect failures rather than aborting on the first one
        failed = []
        for filename in files:
            try:
                print("----------------------------------------")
                print("Validating '%s'" % filename)
                parseAndValidateXmlInputForceReadFile(filename, xsdFile=self.xsdFile)
            except Exception as e:
                print("VALIDATION OF '%s' FAILED WITH ERROR:" % filename)
                print(e)
                failed.append(filename)
        # final say on whether or not it 'worked'
        print("----------------------------------------")
        if len(failed) != 0:
            print("SUMMARY OF FAILED FILES")
            for filename in failed:
                print(filename)
            raise RuntimeError("Failed Validation for %d of %d files"
                               % (len(failed), len(files)))
        else:
            print("Successfully Validated %d files" % len(files))
if __name__ == '__main__':
    # Allow running this validation directly, outside the system-test runner.
    valid = ValidateInstrumentDefinitionFiles()
    # validate specific file
    #valid.theFileToTest = "MARI_Definition.xml"
    valid.runTest()
| mganeva/mantid | Testing/SystemTests/tests/analysis/ValidateInstrumentDefinitionFiles.py | Python | gpl-3.0 | 4,371 |
#!/usr/bin/python2
'''
This example shows how to use the os chdir command.
Note that an exception is thrown if a directory name which cannot be
changed to is supplied (bad name, access problem and more).
'''
import os  # for chdir, system

# a wrong directory on purpose...
try:
    os.chdir('/tmpi')
except OSError:
    # BUGFIX: a bare 'except:' also swallowed KeyboardInterrupt/SystemExit;
    # chdir failures (missing directory, permissions) all raise OSError.
    print('yes, got an exception for a bad directory')
os.chdir('/tmp')
os.system('ls')
| nonZero/demos-python | src/examples/short/systems_programming/os_chdir.py | Python | gpl-3.0 | 399 |
from datetime import datetime
import discord
from discord.ext import commands
import inspect
import logging
import os
import re
import textwrap
import time
from .mod import CaseMessageNotFound, NoModLogAccess
from .utils import checks
from .utils.chat_formatting import pagify, box, warning, error, info, bold
from .utils.dataIO import dataIO
__version__ = '2.0.3'
try:
import tabulate
except Exception as e:
raise RuntimeError("You must run `pip3 install tabulate`.") from e
log = logging.getLogger('red.punish')
# Mod-log entry label used for timed mutes.
ACTION_STR = "Timed mute \N{HOURGLASS WITH FLOWING SAND} \N{SPEAKER WITH CANCELLATION STROKE}"
PURGE_MESSAGES = 1 # for cpunish
PATH = 'data/punish/'
JSON = PATH + 'settings.json'
DEFAULT_ROLE_NAME = 'Punished'
# Channel permission overwrites applied to the punish role: silence text
# (messages, TTS, reactions) and voice (speaking).
DEFAULT_TEXT_OVERWRITE = discord.PermissionOverwrite(send_messages=False, send_tts_messages=False, add_reactions=False)
DEFAULT_VOICE_OVERWRITE = discord.PermissionOverwrite(speak=False)
DEFAULT_TIMEOUT_OVERWRITE = discord.PermissionOverwrite(send_messages=True, read_messages=True)
DEFAULT_TIMEOUT = '30m'
DEFAULT_CASE_MIN_LENGTH = '30m' # only create modlog cases when length is longer than this
# (unit name variants, length in seconds), ordered largest-first; used both
# for parsing user input and for rendering durations.
UNIT_TABLE = (
    (('weeks', 'wks', 'w'), 60 * 60 * 24 * 7),
    (('days', 'dys', 'd'), 60 * 60 * 24),
    (('hours', 'hrs', 'h'), 60 * 60),
    (('minutes', 'mins', 'm'), 60),
    (('seconds', 'secs', 's'), 1),
)
class BadTimeExpr(Exception):
    """Raised when a user-supplied time expression cannot be parsed."""
    pass
def _find_unit(unit):
    """Return the (names, seconds) UNIT_TABLE entry matching *unit* as a prefix.

    Raises BadTimeExpr when no unit name starts with the given string.
    """
    for names, seconds in UNIT_TABLE:
        if any(name.startswith(unit) for name in names):
            return names, seconds
    raise BadTimeExpr("Invalid unit: %s" % unit)
def _parse_time(time):
    """Convert a human time expression (e.g. '1h 30m', '90') to whole seconds."""
    time = time.lower()
    if not time.isdigit():
        # Split into '<value><unit>' atoms, dropping separators (',', ';',
        # whitespace, 'and'), then sum each atom's contribution.
        pieces = re.split(r'\s*([\d.]+\s*[^\d\s,;]*)(?:[,;\s]|and)*', time)
        time = sum(_timespec_sec(piece) for piece in pieces if piece)
    return int(time)
def _timespec_sec(expr):
    """Return the seconds represented by one '<value>[<unit>]' atom.

    A missing unit means seconds.  Raises BadTimeExpr for malformed input.
    """
    parts = [piece for piece in re.split(r'([\d.]+)\s*([^\d\s]*)', expr) if piece]
    if len(parts) > 2:  # This shouldn't ever happen
        raise BadTimeExpr("invalid expression: '%s'" % expr)
    if len(parts) == 2:
        names, length = _find_unit(parts[1])
        if parts[0].count('.') > 1 or \
                not parts[0].replace('.', '').isdigit():
            raise BadTimeExpr("Not a number: '%s'" % parts[0])
    else:
        names, length = _find_unit('seconds')
    try:
        return float(parts[0]) * length
    except ValueError:
        raise BadTimeExpr("invalid value: '%s'" % parts[0])
def _generate_timespec(sec, short=False, micro=False):
    """
    Convert a duration in seconds to a human-readable string.

    :param sec: duration in seconds
    :param short: use abbreviated unit names, e.g. '2hrs'
    :param micro: use one-letter unit names joined without separators, e.g. '2h5m'
    :return: e.g. '1 hour, 10 minutes and 5 seconds'
    """
    timespec = []
    for names, length in UNIT_TABLE:
        n, sec = divmod(sec, length)
        if n:
            if micro:
                s = '%d%s' % (n, names[2])
            elif short:
                s = '%d%s' % (n, names[1])
            else:
                s = '%d %s' % (n, names[0])
            if n <= 1 and not micro:
                # Singular: drop the plural 's'. This must not apply in micro
                # mode, where the seconds unit letter is itself 's' — the old
                # code turned '1s' into '1'.
                s = s.rstrip('s')
            timespec.append(s)
    if not timespec:
        # Sub-second (or zero) durations used to raise IndexError below.
        return '0s' if micro else ('0secs' if short else '0 seconds')
    if len(timespec) > 1:
        if micro:
            return ''.join(timespec)
        segments = timespec[:-1], timespec[-1:]
        return ' and '.join(', '.join(x) for x in segments)
    return timespec[0]
def format_list(*items, join='and', delim=', '):
    """Join *items* into an English-style list, e.g. 'a, b and c'."""
    if not items:
        return ''
    if len(items) == 1:
        return items[0]
    head = delim.join(items[:-1])
    return '%s %s %s' % (head, join, items[-1])
def permissions_for_roles(channel, *roles):
    """
    Calculates the effective permissions for a role or combination of roles.
    Naturally, if no roles are given, the default role's permissions are used

    NOTE(review): mirrors discord.py 0.16's Member.permissions_in() logic and
    relies on the private channel._permission_overwrites attribute — verify on
    library upgrades.
    """
    default = channel.server.default_role
    base = discord.Permissions(default.permissions.value)
    # Apply all role values
    for role in roles:
        base.value |= role.permissions.value
    # Server-wide Administrator -> True for everything
    # Bypass all channel-specific overrides
    if base.administrator:
        return discord.Permissions.all()
    role_ids = set(map(lambda r: r.id, roles))
    denies = 0
    allows = 0
    # Apply channel specific role permission overwrites
    for overwrite in channel._permission_overwrites:
        # Handle default role first, if present
        if overwrite.id == default.id:
            base.handle_overwrite(allow=overwrite.allow, deny=overwrite.deny)
        if overwrite.type == 'role' and overwrite.id in role_ids:
            # Combine bits across all given roles; applied after the loop so
            # an allow from any role wins over a deny from another.
            denies |= overwrite.deny
            allows |= overwrite.allow
    base.handle_overwrite(allow=allows, deny=denies)
    # default channels can always be read
    if channel.is_default:
        base.read_messages = True
    # if you can't send a message in a channel then you can't have certain
    # permissions as well
    if not base.send_messages:
        base.send_tts_messages = False
        base.mention_everyone = False
        base.embed_links = False
        base.attach_files = False
    # if you can't read a channel then you have no permissions there
    if not base.read_messages:
        denied = discord.Permissions.all_channel()
        base.value &= ~denied.value
    # text channels do not have voice related permissions
    if channel.type is discord.ChannelType.text:
        denied = discord.Permissions.voice()
        base.value &= ~denied.value
    return base
def overwrite_from_dict(data):
    """Deserialize a {'allow': int, 'deny': int} dict into a PermissionOverwrite."""
    pair = (discord.Permissions(data.get('allow', 0)),
            discord.Permissions(data.get('deny', 0)))
    return discord.PermissionOverwrite.from_pair(*pair)
def overwrite_to_dict(overwrite):
    """Serialize a PermissionOverwrite into a JSON-friendly allow/deny dict."""
    allow, deny = overwrite.pair()
    return dict(allow=allow.value, deny=deny.value)
def format_permissions(permissions, include_null=False):
    """
    Render an iterable of (permission_name, value) pairs, one per line,
    marked with a check (True), no-entry sign (False) or — only when
    include_null is set — a radio button (unset/None).
    """
    rendered = []
    for name, value in sorted(permissions, key=lambda pair: pair[0]):
        if value is True:
            marker = "\N{WHITE HEAVY CHECK MARK}"
        elif value is False:
            marker = "\N{NO ENTRY SIGN}"
        elif include_null:
            marker = "\N{RADIO BUTTON}"
        else:
            continue  # unset entry and include_null is off: skip it
        pretty = name.replace('_', ' ').title().replace("Tts", "TTS")
        rendered.append('%s %s' % (marker, pretty))
    return '\n'.join(rendered) if rendered else "No permission entries."
def getmname(mid, server):
    """Return a readable name for member id *mid*, even if they left the server."""
    member = discord.utils.get(server.members, id=mid)
    return str(member) if member else '(absent user #%s)' % mid
class Punish:
"""
Put misbehaving users in timeout where they are unable to speak, read, or
do other things that can be denied using discord permissions. Includes
auto-setup and more.
"""
    def __init__(self, bot):
        self.bot = bot
        # Persisted state: {server_id: {member_id: entry, 'ROLE_ID': ..., ...}}
        self.json = compat_load(JSON)
        # Pending unpunish timers: {server_id: {member_id: TimerHandle}}
        self.handles = {}
        # Analytics is optional; never prevent the cog from loading over it.
        try:
            self.analytics = CogAnalytics(self)
        except Exception as error:
            self.bot.logger.exception(error)
            self.analytics = None
        # Re-apply punishments and schedule removals once the bot is ready.
        self.task = bot.loop.create_task(self.on_load())
    def __unload(self):
        """Called by Red on cog unload: stop the startup task, persist state."""
        self.task.cancel()
        self.save()
    def save(self):
        """Persist this cog's settings to disk."""
        dataIO.save_json(JSON, self.json)
def can_create_cases(self):
mod = self.bot.get_cog('Mod')
if not mod:
return False
sig = inspect.signature(mod.new_case)
return 'force_create' in sig.parameters
    @commands.group(pass_context=True, invoke_without_command=True, no_pm=True)
    @checks.mod_or_permissions(manage_messages=True)
    async def punish(self, ctx, user: discord.Member, duration: str = None, *, reason: str = None):
        # Top-level command group; a bare `punish <user> ...` forwards to the
        # `start` subcommand. (No docstring on purpose: it would become help text.)
        if ctx.invoked_subcommand:
            return
        elif user:
            await ctx.invoke(self.punish_start, user=user, duration=duration, reason=reason)
        else:
            await self.bot.send_cmd_help(ctx)
    @punish.command(pass_context=True, no_pm=True, name='start')
    @checks.mod_or_permissions(manage_messages=True)
    async def punish_start(self, ctx, user: discord.Member, duration: str = None, *, reason: str = None):
        """
        Puts a user into timeout for a specified time, with optional reason.
        Time specification is any combination of number with the units s,m,h,d,w.
        Example: !punish @idiot 1.1h10m Enough bitching already!
        """
        # The heavy lifting is shared with `punish cstart`.
        await self._punish_cmd_common(ctx, user, duration, reason)
    @punish.command(pass_context=True, no_pm=True, name='cstart')
    @checks.mod_or_permissions(manage_messages=True)
    async def punish_cstart(self, ctx, user: discord.Member, duration: str = None, *, reason: str = None):
        """
        Same as [p]punish start, but cleans up the target's last message.
        """
        # quiet=True: suppress the normal reply; it would be purged anyway.
        success = await self._punish_cmd_common(ctx, user, duration, reason, quiet=True)
        if not success:
            return
        # Match the invoking message plus the target's recent message(s).
        def check(m):
            return m.id == ctx.message.id or m.author == user
        try:
            await self.bot.purge_from(ctx.message.channel, limit=PURGE_MESSAGES + 1, check=check)
        except discord.errors.Forbidden:
            await self.bot.say("Punishment set, but I need permissions to manage messages to clean up.")
@punish.command(pass_context=True, no_pm=True, name='list')
@checks.mod_or_permissions(manage_messages=True)
async def punish_list(self, ctx):
"""
Shows a table of punished users with time, mod and reason.
Displays punished users, time remaining, responsible moderator and
the reason for punishment, if any.
"""
server = ctx.message.server
server_id = server.id
table = []
now = time.time()
headers = ['Member', 'Remaining', 'Moderator', 'Reason']
msg = ''
# Multiline cell/header support was added in 0.8.0
if tabulate.__version__ >= '0.8.0':
headers = [';\n'.join(headers[i::2]) for i in (0, 1)]
else:
msg += warning('Compact formatting is only supported with tabulate v0.8.0+ (currently v%s). '
'Please update it.\n\n' % tabulate.__version__)
for member_id, data in self.json.get(server_id, {}).items():
if not member_id.isdigit():
continue
member_name = getmname(member_id, server)
moderator = getmname(data['by'], server)
reason = data['reason']
until = data['until']
sort = until or float("inf")
remaining = _generate_timespec(until - now, short=True) if until else 'forever'
row = [member_name, remaining, moderator, reason or 'No reason set.']
if tabulate.__version__ >= '0.8.0':
row[-1] = textwrap.fill(row[-1], 35)
row = [';\n'.join(row[i::2]) for i in (0, 1)]
table.append((sort, row))
if not table:
await self.bot.say("No users are currently punished.")
return
table.sort()
msg += tabulate.tabulate([k[1] for k in table], headers, tablefmt="grid")
for page in pagify(msg):
await self.bot.say(box(page))
@punish.command(pass_context=True, no_pm=True, name='clean')
@checks.mod_or_permissions(manage_messages=True)
async def punish_clean(self, ctx, clean_pending: bool = False):
"""
Removes absent members from the punished list.
If run without an argument, it only removes members who are no longer
present but whose timer has expired. If the argument is 'yes', 1,
or another trueish value, it will also remove absent members whose
timers have yet to expire.
Use this option with care, as removing them will prevent the punished
role from being re-added if they rejoin before their timer expires.
"""
count = 0
now = time.time()
server = ctx.message.server
data = self.json.get(server.id, {})
for mid, mdata in data.copy().items():
if not mid.isdigit() or server.get_member(mid):
continue
elif clean_pending or ((mdata['until'] or 0) < now):
del(data[mid])
count += 1
await self.bot.say('Cleaned %i absent members from the list.' % count)
@punish.command(pass_context=True, no_pm=True, name='warn')
@checks.mod_or_permissions(manage_messages=True)
async def punish_warn(self, ctx, user: discord.Member, *, reason: str = None):
"""
Warns a user with boilerplate about the rules
"""
msg = ['Hey %s, ' % user.mention]
msg.append("you're doing something that might get you muted if you keep "
"doing it.")
if reason:
msg.append(" Specifically, %s." % reason)
msg.append("Be sure to review the server rules.")
await self.bot.say(' '.join(msg))
@punish.command(pass_context=True, no_pm=True, name='end', aliases=['remove'])
@checks.mod_or_permissions(manage_messages=True)
async def punish_end(self, ctx, user: discord.Member, *, reason: str = None):
"""
Removes punishment from a user before time has expired
This is the same as removing the role directly.
"""
role = await self.get_role(user.server, quiet=True)
sid = user.server.id
now = time.time()
data = self.json.get(sid, {}).get(user.id, {})
if role and role in user.roles:
msg = 'Punishment manually ended early by %s.' % ctx.message.author
original_start = data.get('start')
original_end = data.get('until')
remaining = original_end and (original_end - now)
if remaining:
msg += ' %s was left' % _generate_timespec(round(remaining))
if original_start:
msg += ' of the original %s.' % _generate_timespec(round(original_end - original_start))
else:
msg += '.'
if reason:
msg += '\n\nReason for ending early: ' + reason
if data.get('reason'):
msg += '\n\nOriginal reason was: ' + data['reason']
await self._unpunish(user, msg, update=True)
await self.bot.say(msg)
elif data: # This shouldn't happen, but just in case
now = time.time()
until = data.get('until')
remaining = until and _generate_timespec(round(until - now)) or 'forever'
data_fmt = '\n'.join([
"**Reason:** %s" % (data.get('reason') or 'no reason set'),
"**Time remaining:** %s" % remaining,
"**Moderator**: %s" % (user.server.get_member(data.get('by')) or 'Missing ID#%s' % data.get('by'))
])
self.json[sid].pop(user.id, None)
self.save()
await self.bot.say("That user doesn't have the %s role, but they still have a data entry. I removed it, "
"but in case it's needed, this is what was there:\n\n%s" % (role.name, data_fmt))
elif role:
await self.bot.say("That user doesn't have the %s role." % role.name)
else:
await self.bot.say("The punish role couldn't be found in this server.")
    @punish.command(pass_context=True, no_pm=True, name='reason')
    @checks.mod_or_permissions(manage_messages=True)
    async def punish_reason(self, ctx, user: discord.Member, *, reason: str = None):
        """
        Updates the reason for a punishment, including the modlog if a case exists.
        """
        server = ctx.message.server
        data = self.json.get(server.id, {}).get(user.id, {})
        if not data:
            await self.bot.say("That user doesn't have an active punishment entry. To update modlog "
                               "cases manually, use the `%sreason` command." % ctx.prefix)
            return
        data['reason'] = reason
        self.save()
        if reason:
            msg = 'Reason updated.'
        else:
            msg = 'Reason cleared'
        caseno = data.get('caseno')
        mod = self.bot.get_cog('Mod')
        if mod and caseno:
            moderator = ctx.message.author
            case_error = None
            try:
                # update_case does ownership checks; substitute the original
                # moderator when the author can't edit the case themselves.
                if moderator.id != data.get('by') and not mod.is_admin_or_superior(moderator):
                    moderator = server.get_member(data.get('by')) or server.me # fallback gracefully
                await mod.update_case(server, case=caseno, reason=reason, mod=moderator)
            except CaseMessageNotFound:
                case_error = 'the case message could not be found'
            except NoModLogAccess:
                case_error = 'I do not have access to the modlog channel'
            except Exception:
                # NOTE(review): other update_case failures are silently
                # swallowed here; consider logging them.
                pass
            if case_error:
                msg += '\n\n' + warning('There was an error updating the modlog case: %s.' % case_error)
        await self.bot.say(msg)
    @commands.group(pass_context=True, invoke_without_command=True, no_pm=True)
    @checks.admin_or_permissions(administrator=True)
    async def punishset(self, ctx):
        # Settings command group; bare invocation just shows subcommand help.
        if ctx.invoked_subcommand is None:
            await self.bot.send_cmd_help(ctx)
@punishset.command(pass_context=True, no_pm=True, name='setup')
async def punishset_setup(self, ctx):
"""
(Re)configures the punish role and channel overrides
"""
server = ctx.message.server
default_name = DEFAULT_ROLE_NAME
role_id = self.json.get(server.id, {}).get('ROLE_ID')
if role_id:
role = discord.utils.get(server.roles, id=role_id)
else:
role = discord.utils.get(server.roles, name=default_name)
perms = server.me.server_permissions
if not perms.manage_roles and perms.manage_channels:
await self.bot.say("I need the Manage Roles and Manage Channels permissions for that command to work.")
return
if not role:
msg = "The %s role doesn't exist; Creating it now... " % default_name
msgobj = await self.bot.say(msg)
perms = discord.Permissions.none()
role = await self.bot.create_role(server, name=default_name, permissions=perms)
else:
msgobj = await self.bot.say('%s role exists... ' % role.name)
if role.position != (server.me.top_role.position - 1):
if role < server.me.top_role:
msgobj = await self.bot.edit_message(msgobj, msgobj.content + 'moving role to higher position... ')
await self.bot.move_role(server, role, server.me.top_role.position - 1)
else:
await self.bot.edit_message(msgobj, msgobj.content + 'role is too high to manage.'
' Please move it to below my highest role.')
return
msgobj = await self.bot.edit_message(msgobj, msgobj.content + '(re)configuring channels... ')
for channel in server.channels:
await self.setup_channel(channel, role)
await self.bot.edit_message(msgobj, msgobj.content + 'done.')
if role and role.id != role_id:
if server.id not in self.json:
self.json[server.id] = {}
self.json[server.id]['ROLE_ID'] = role.id
self.save()
    @punishset.command(pass_context=True, no_pm=True, name='channel')
    async def punishset_channel(self, ctx, channel: discord.Channel = None):
        """
        Sets or shows the punishment "timeout" channel.
        This channel has special settings to allow punished users to discuss their
        infraction(s) with moderators.
        If there is a role deny on the channel for the punish role, it is
        automatically set to allow. If the default permissions don't allow the
        punished role to see or speak in it, an overwrite is created to allow
        them to do so.
        """
        server = ctx.message.server
        current = self.json.get(server.id, {}).get('CHANNEL_ID')
        current = current and server.get_channel(current)
        if channel is None:
            # Display mode: report the current setting only.
            if not current:
                await self.bot.say("No timeout channel has been set.")
            else:
                await self.bot.say("The timeout channel is currently %s." % current.mention)
        else:
            if server.id not in self.json:
                self.json[server.id] = {}
            elif current == channel:
                await self.bot.say("The timeout channel is already %s. If you need to repair its permissions, use "
                                   "`%spunishset setup`." % (current.mention, ctx.prefix))
                return
            self.json[server.id]['CHANNEL_ID'] = channel.id
            self.save()
            role = await self.get_role(server, create=True)
            update_msg = '{} to the %s role' % role
            grants = []
            denies = []
            # Determine which timeout defaults the channel doesn't already
            # satisfy, and build an overwrite covering just those.
            perms = permissions_for_roles(channel, role)
            overwrite = channel.overwrites_for(role) or discord.PermissionOverwrite()
            for perm, value in DEFAULT_TIMEOUT_OVERWRITE:
                if value is None:
                    continue
                if getattr(perms, perm) != value:
                    setattr(overwrite, perm, value)
                    name = perm.replace('_', ' ').title().replace("Tts", "TTS")
                    if value:
                        grants.append(name)
                    else:
                        denies.append(name)
            # Any changes made? Apply them.
            if grants or denies:
                grants = grants and ('grant ' + format_list(*grants))
                denies = denies and ('deny ' + format_list(*denies))
                to_join = [x for x in (grants, denies) if x]
                update_msg = update_msg.format(format_list(*to_join))
            if current and current.id != channel.id:
                # Restore default punish-role permissions in the old channel.
                if current.permissions_for(server.me).manage_roles:
                    msg = info("Resetting permissions in the old channel (%s) to the default...")
                else:
                    msg = error("I don't have permissions to reset permissions in the old channel (%s)")
                await self.bot.say(msg % current.mention)
                await self.setup_channel(current, role)
            if channel.permissions_for(server.me).manage_roles:
                await self.bot.say(info('Updating permissions in %s to %s...' % (channel.mention, update_msg)))
                await self.bot.edit_channel_permissions(channel, role, overwrite)
            else:
                await self.bot.say(error("I don't have permissions to %s." % update_msg))
            await self.bot.say("Timeout channel set to %s." % channel.mention)
    @punishset.command(pass_context=True, no_pm=True, name='clear-channel')
    async def punishset_clear_channel(self, ctx):
        """
        Clears the timeout channel and resets its permissions
        """
        server = ctx.message.server
        current = self.json.get(server.id, {}).get('CHANNEL_ID')
        current = current and server.get_channel(current)
        if current:
            msg = None
            self.json[server.id]['CHANNEL_ID'] = None
            self.save()
            if current.permissions_for(server.me).manage_roles:
                # Re-run channel setup so the old timeout channel gets the
                # normal text/voice overwrite again.
                role = await self.get_role(server, quiet=True)
                await self.setup_channel(current, role)
                msg = ' and its permissions reset'
            else:
                msg = ", but I don't have permissions to reset its permissions."
            await self.bot.say("Timeout channel has been cleared%s." % msg)
        else:
            await self.bot.say("No timeout channel has been set yet.")
@punishset.command(pass_context=True, allow_dm=False, name='case-min')
async def punishset_case_min(self, ctx, *, timespec: str = None):
"""
Set/disable or display the minimum punishment case duration
If the punishment duration is less than this value, a case will not be created.
Specify 'disable' to turn off case creation altogether.
"""
server = ctx.message.server
current = self.json[server.id].get('CASE_MIN_LENGTH', _parse_time(DEFAULT_CASE_MIN_LENGTH))
if not timespec:
if current:
await self.bot.say('Punishments longer than %s will create cases.' % _generate_timespec(current))
else:
await self.bot.say("Punishment case creation is disabled.")
else:
if timespec.strip('\'"').lower() == 'disable':
value = None
else:
try:
value = _parse_time(timespec)
except BadTimeExpr as e:
await self.bot.say(error(e.args[0]))
return
if server.id not in self.json:
self.json[server.id] = {}
self.json[server.id]['CASE_MIN_LENGTH'] = value
self.save()
    @punishset.command(pass_context=True, no_pm=True, name='overrides')
    async def punishset_overrides(self, ctx, *, channel: discord.Channel = None):
        """
        Copy or display the punish role overrides
        If a channel is specified, the allow/deny settings for it are saved
        and applied to new channels when they are created. To apply the new
        settings to existing channels, use [p]punishset setup.
        An important caveat: voice channel and text channel overrides are
        configured separately! To set the overrides for a channel type,
        specify the name of or mention a channel of that type.
        """
        server = ctx.message.server
        settings = self.json.get(server.id, {})
        role = await self.get_role(server, quiet=True)
        timeout_channel_id = settings.get('CHANNEL_ID')
        confirm_msg = None
        if not role:
            await self.bot.say(error("Punish role has not been created yet. Run `%spunishset setup` first."
                                     % ctx.prefix))
            return
        if channel:
            overwrite = channel.overwrites_for(role)
            # Copying from the timeout channel or from a channel with no
            # explicit overrides is probably a mistake; ask first.
            if channel.id == timeout_channel_id:
                confirm_msg = "Are you sure you want to copy overrides from the timeout channel?"
            elif overwrite is None:
                overwrite = discord.PermissionOverwrite()
                confirm_msg = "Are you sure you want to copy blank (no permissions set) overrides?"
            if channel.type is discord.ChannelType.text:
                key = 'text'
            elif channel.type is discord.ChannelType.voice:
                key = 'voice'
            else:
                await self.bot.say(error("Unknown channel type!"))
                return
            if confirm_msg:
                await self.bot.say(warning(confirm_msg + '(reply `yes` within 30s to confirm)'))
                reply = await self.bot.wait_for_message(channel=ctx.message.channel, author=ctx.message.author,
                                                        timeout=30)
                if reply is None:
                    await self.bot.say('Timed out waiting for a response.')
                    return
                elif reply.content.strip(' `"\'').lower() != 'yes':
                    await self.bot.say('Commmand cancelled.')
                    return
            self.json[server.id][key.upper() + '_OVERWRITE'] = overwrite_to_dict(overwrite)
            self.save()
            await self.bot.say("{} channel overrides set to:\n".format(key.title()) +
                               format_permissions(overwrite) +
                               "\n\nRun `%spunishset setup` to apply them to all channels." % ctx.prefix)
        else:
            # No channel given: display the stored (or default) overrides.
            msg = []
            for key, default in [('text', DEFAULT_TEXT_OVERWRITE), ('voice', DEFAULT_VOICE_OVERWRITE)]:
                data = settings.get(key.upper() + '_OVERWRITE')
                title = '%s permission overrides:' % key.title()
                if not data:
                    data = overwrite_to_dict(default)
                    title = title[:-1] + ' (defaults):'
                msg.append(bold(title) + '\n' + format_permissions(overwrite_from_dict(data)))
            await self.bot.say('\n\n'.join(msg))
    @punishset.command(pass_context=True, no_pm=True, name='reset-overrides')
    async def punishset_reset_overrides(self, ctx, channel_type: str = 'both'):
        """
        Resets the punish role overrides for text, voice or both (default)
        This command exists in case you want to restore the default settings
        for newly created channels.
        """
        # NOTE(review): if no settings exist for this server yet, `settings`
        # is a fresh dict and the pops below are harmless no-ops.
        settings = self.json.get(ctx.message.server.id, {})
        channel_type = channel_type.strip('`"\' ').lower()
        msg = []
        for key, default in [('text', DEFAULT_TEXT_OVERWRITE), ('voice', DEFAULT_VOICE_OVERWRITE)]:
            if channel_type not in ['both', key]:
                continue
            settings.pop(key.upper() + '_OVERWRITE', None)
            title = '%s permission overrides reset to:' % key.title()
            msg.append(bold(title) + '\n' + format_permissions(default))
        if not msg:
            # Neither 'text', 'voice' nor 'both' was given.
            await self.bot.say("Invalid channel type. Use `text`, `voice`, or `both` (the default, if not specified)")
            return
        msg.append("Run `%spunishset setup` to apply them to all channels." % ctx.prefix)
        self.save()
        await self.bot.say('\n\n'.join(msg))
async def get_role(self, server, quiet=False, create=False):
default_name = DEFAULT_ROLE_NAME
role_id = self.json.get(server.id, {}).get('ROLE_ID')
if role_id:
role = discord.utils.get(server.roles, id=role_id)
else:
role = discord.utils.get(server.roles, name=default_name)
if create and not role:
perms = server.me.server_permissions
if not perms.manage_roles and perms.manage_channels:
await self.bot.say("The Manage Roles and Manage Channels permissions are required to use this command.")
return
else:
msg = "The %s role doesn't exist; Creating it now..." % default_name
if not quiet:
msgobj = await self.bot.reply(msg)
log.debug('Creating punish role in %s' % server.name)
perms = discord.Permissions.none()
role = await self.bot.create_role(server, name=default_name, permissions=perms)
await self.bot.move_role(server, role, server.me.top_role.position - 1)
if not quiet:
msgobj = await self.bot.edit_message(msgobj, msgobj.content + 'configuring channels... ')
for channel in server.channels:
await self.setup_channel(channel, role)
if not quiet:
await self.bot.edit_message(msgobj, msgobj.content + 'done.')
if role and role.id != role_id:
if server.id not in self.json:
self.json[server.id] = {}
self.json[server.id]['ROLE_ID'] = role.id
self.save()
return role
    # Legacy command stubs: deprecation shims for the old top-level names.
    @commands.command(pass_context=True, no_pm=True)
    async def legacy_lspunish(self, ctx):
        # Replaced by `punish list`.
        await self.bot.say("This command is deprecated; use `%spunish list` instead.\n\n"
                           "This notice will be removed in a future release." % ctx.prefix)
    @commands.command(pass_context=True, no_pm=True)
    async def legacy_cpunish(self, ctx):
        # Replaced by `punish cstart`.
        await self.bot.say("This command is deprecated; use `%spunish cstart <member> [duration] [reason ...]` "
                           "instead.\n\nThis notice will be removed in a future release." % ctx.prefix)
    @commands.command(pass_context=True, no_pm=True, name='punish-clean')
    async def legacy_punish_clean(self, ctx):
        # Replaced by `punish clean`.
        await self.bot.say("This command is deprecated; use `%spunish clean` instead.\n\n"
                           "This notice will be removed in a future release." % ctx.prefix)
    @commands.command(pass_context=True, no_pm=True)
    async def legacy_pwarn(self, ctx):
        # Replaced by `punish warn`.
        await self.bot.say("This command is deprecated; use `%spunish warn` instead.\n\n"
                           "This notice will be removed in a future release." % ctx.prefix)
    @commands.command(pass_context=True, no_pm=True)
    async def legacy_fixpunish(self, ctx):
        # Replaced by `punishset setup`.
        await self.bot.say("This command is deprecated; use `%spunishset setup` instead.\n\n"
                           "This notice will be removed in a future release." % ctx.prefix)
    async def setup_channel(self, channel, role):
        """
        Apply the appropriate punish-role overwrite to a single channel.
        The timeout channel, voice channels and text channels each use their
        own (configurable) overwrite, falling back to the module defaults.
        """
        settings = self.json.get(channel.server.id, {})
        timeout_channel_id = settings.get('CHANNEL_ID')
        if channel.id == timeout_channel_id:
            # maybe this will be used later:
            # config = settings.get('TIMEOUT_OVERWRITE')
            config = None
            defaults = DEFAULT_TIMEOUT_OVERWRITE
        elif channel.type is discord.ChannelType.voice:
            config = settings.get('VOICE_OVERWRITE')
            defaults = DEFAULT_VOICE_OVERWRITE
        else:
            config = settings.get('TEXT_OVERWRITE')
            defaults = DEFAULT_TEXT_OVERWRITE
        if config:
            perms = overwrite_from_dict(config)
        else:
            perms = defaults
        await self.bot.edit_channel_permissions(channel, role, overwrite=perms)
    async def on_load(self):
        """
        Startup task: re-apply punish roles, schedule pending removals, and
        prune servers the bot is no longer a member of.
        """
        await self.bot.wait_until_ready()
        for serverid, members in self.json.copy().items():
            server = self.bot.get_server(serverid)
            # Bot is no longer in the server
            if not server:
                del(self.json[serverid])
                continue
            me = server.me
            role = await self.get_role(server, quiet=True, create=True)
            if not role:
                log.error("Needed to create punish role in %s, but couldn't."
                          % server.name)
                continue
            for member_id, data in members.copy().items():
                if not member_id.isdigit():
                    # Per-server config keys (ROLE_ID etc.), not member entries.
                    continue
                until = data['until']
                if until:
                    duration = until - time.time()
                member = server.get_member(member_id)
                if until and duration < 0:
                    # Punishment expired while the bot was offline.
                    if member:
                        reason = 'Punishment removal overdue, maybe bot was offline. '
                        if self.json[server.id][member_id]['reason']:
                            reason += self.json[server.id][member_id]['reason']
                        await self._unpunish(member, reason)
                    else:  # member disappeared
                        del(self.json[server.id][member_id])
                elif member and role not in member.roles:
                    # Role was removed while offline (or member rejoined).
                    if role >= me.top_role:
                        log.error("Needed to re-add punish role to %s in %s, "
                                  "but couldn't." % (member, server.name))
                        continue
                    await self.bot.add_roles(member, role)
                    if until:
                        self.schedule_unpunish(duration, member)
        self.save()
    async def _punish_cmd_common(self, ctx, member, duration, reason, quiet=False):
        """
        Shared implementation for `punish start` and `punish cstart`.

        Applies the punish role, records (or refreshes) the punishment entry,
        server-mutes the member in voice if the overwrite calls for it, and
        creates/updates a modlog case when the Mod cog supports it.
        Returns True on success; False/None when the punishment was not set.
        """
        server = ctx.message.server
        using_default = False
        updating_case = False
        case_error = None
        mod = self.bot.get_cog('Mod')
        if server.id not in self.json:
            self.json[server.id] = {}
        current = self.json[server.id].get(member.id, {})
        reason = reason or current.get('reason') # don't clear if not given
        hierarchy_allowed = ctx.message.author.top_role > member.top_role
        case_min_length = self.json[server.id].get('CASE_MIN_LENGTH', _parse_time(DEFAULT_CASE_MIN_LENGTH))
        if mod:
            # Defer to the Mod cog's (configurable) hierarchy rules if loaded.
            hierarchy_allowed = mod.is_allowed_by_hierarchy(server, ctx.message.author, member)
        if not hierarchy_allowed:
            await self.bot.say('Permission denied due to role hierarchy.')
            return
        if duration and duration.lower() in ['forever', 'inf', 'infinite']:
            # duration/until of None mark an indefinite punishment.
            duration = None
        else:
            if not duration:
                using_default = True
                duration = DEFAULT_TIMEOUT
            try:
                duration = _parse_time(duration)
                if duration < 1:
                    await self.bot.say("Duration must be 1 second or longer.")
                    return False
            except BadTimeExpr as e:
                await self.bot.say("Error parsing duration: %s." % e.args)
                return False
        role = await self.get_role(server, quiet=quiet, create=True)
        if role is None:
            return
        if role >= server.me.top_role:
            await self.bot.say('The %s role is too high for me to manage.' % role)
            return
        # Call time() after getting the role due to potential creation delay
        now = time.time()
        until = (now + duration + 0.5) if duration else None
        if mod and (case_min_length is not None) and self.can_create_cases() and ((duration is None)
                or duration >= case_min_length):
            mod_until = until and datetime.utcfromtimestamp(until)
            try:
                if current:
                    # Existing punishment: update its case instead of opening
                    # a new one.
                    case_number = current.get('caseno')
                    moderator = ctx.message.author
                    updating_case = True
                    # update_case does ownership checks, we need to cheat them in case the
                    # command author doesn't qualify to edit a case
                    if moderator.id != current.get('by') and not mod.is_admin_or_superior(moderator):
                        moderator = server.get_member(current.get('by')) or server.me # fallback gracefully
                    await mod.update_case(server, case=case_number, reason=reason, mod=moderator,
                                          until=mod_until and mod_until.timestamp() or False)
                else:
                    case_number = await mod.new_case(server, action=ACTION_STR, mod=ctx.message.author,
                                                     user=member, reason=reason, until=mod_until,
                                                     force_create=True)
            except Exception as e:
                # Reported to the invoker below; never fail the punishment.
                case_error = e
        else:
            case_number = None
        subject = 'the %s role' % role.name
        # Pick a message describing what actually changed.
        if member.id in self.json[server.id]:
            if role in member.roles:
                msg = '{0} already had the {1.name} role; resetting their timer.'
            else:
                msg = '{0} is missing the {1.name} role for some reason. I added it and reset their timer.'
        elif role in member.roles:
            msg = '{0} already had the {1.name} role, but had no timer; setting it now.'
        else:
            msg = 'Applied the {1.name} role to {0}.'
            subject = 'it'
        msg = msg.format(member, role)
        if duration:
            timespec = _generate_timespec(duration)
            if using_default:
                timespec += ' (the default)'
            msg += ' I will remove %s in %s.' % (subject, timespec)
        if (case_min_length is not None) and not self.can_create_cases() and ((duration is None)
                or duration >= case_min_length):
            # A case would have been warranted but the Mod cog can't do it.
            if mod:
                msg += '\n\n' + warning('If you can, please update the bot so I can create modlog cases.')
            else:
                pass # msg += '\n\nI cannot create modlog cases if the `mod` cog is not loaded.'
        elif case_error:
            # Translate known case errors into readable text; ignore others.
            if isinstance(case_error, CaseMessageNotFound):
                case_error = 'the case message could not be found'
            elif isinstance(case_error, NoModLogAccess):
                case_error = 'I do not have access to the modlog channel'
            else:
                case_error = None
            if case_error:
                verb = 'updating' if updating_case else 'creating'
                msg += '\n\n' + warning('There was an error %s the modlog case: %s.' % (verb, case_error))
        elif case_number:
            verb = 'updated' if updating_case else 'created'
            msg += ' I also %s case #%i in the modlog.' % (verb, case_number)
        voice_overwrite = self.json[server.id].get('VOICE_OVERWRITE')
        if voice_overwrite:
            voice_overwrite = overwrite_from_dict(voice_overwrite)
        else:
            voice_overwrite = DEFAULT_VOICE_OVERWRITE
        # If the voice overwrite blocks speaking/connecting, also server-mute;
        # 'unmute' records whether to undo that on release.
        overwrite_denies_speak = (voice_overwrite.speak is False) or (voice_overwrite.connect is False)
        self.json[server.id][member.id] = {
            'start' : current.get('start') or now, # don't override start time if updating
            'until' : until,
            'by' : current.get('by') or ctx.message.author.id, # don't override original moderator
            'reason' : reason,
            'unmute' : overwrite_denies_speak and not member.voice.mute,
            'caseno' : case_number
        }
        await self.bot.add_roles(member, role)
        if member.voice_channel and overwrite_denies_speak:
            await self.bot.server_voice_state(member, mute=True)
        self.save()
        # schedule callback for role removal
        if duration:
            self.schedule_unpunish(duration, member, reason)
        if not quiet:
            await self.bot.say(msg)
        return True
    # Functions related to unpunishing
    def _create_unpunish_task(self, member, reason):
        # loop.call_later can't schedule a coroutine directly; wrap it in a task.
        return self.bot.loop.create_task(self._unpunish(member, reason))
    def schedule_unpunish(self, delay, member, reason=None):
        """
        Schedules role removal, canceling and removing existing tasks if present
        """
        sid = member.server.id
        if sid not in self.handles:
            self.handles[sid] = {}
        if member.id in self.handles[sid]:
            # Replace any existing timer for this member.
            self.handles[sid][member.id].cancel()
        handle = self.bot.loop.call_later(delay, self._create_unpunish_task, member, reason)
        self.handles[sid][member.id] = handle
    async def _unpunish(self, member, reason=None, remove_role=True, update=False, moderator=None):
        """
        Remove punish role, delete record and task handle

        :param remove_role: False when the role was already removed manually
        :param update: also update the modlog case, if one exists
        :param moderator: who ended the punishment (for the modlog case)
        """
        server = member.server
        role = await self.get_role(server, quiet=True)
        if role:
            data = self.json.get(member.server.id, {})
            member_data = data.get(member.id, {})
            caseno = member_data.get('caseno')
            mod = self.bot.get_cog('Mod')
            # Has to be done first to prevent triggering listeners
            self._unpunish_data(member)
            if remove_role:
                await self.bot.remove_roles(member, role)
            if update and caseno and mod:
                until = member_data.get('until') or False
                if until:
                    until = datetime.utcfromtimestamp(until).timestamp()
                # Only pass a moderator who is allowed to edit the case;
                # otherwise fall back to the original moderator or the bot.
                if moderator and moderator.id != member_data.get('by') and not mod.is_admin_or_superior(moderator):
                    moderator = None
                # fallback gracefully
                moderator = moderator or server.get_member(member_data.get('by')) or server.me
                try:
                    await mod.update_case(server, case=caseno, reason=reason, mod=moderator, until=until)
                except Exception:
                    # Best-effort modlog update; never block the unpunish.
                    pass
            if member_data.get('unmute', False):
                # Undo the server-mute we applied, or queue the unmute for
                # when they next join voice (see on_voice_state_update).
                if member.voice_channel:
                    await self.bot.server_voice_state(member, mute=False)
                else:
                    if 'PENDING_UNMUTE' not in data:
                        data['PENDING_UNMUTE'] = []
                    unmute_list = data['PENDING_UNMUTE']
                    if member.id not in unmute_list:
                        unmute_list.append(member.id)
                    self.save()
            msg = 'Your punishment in %s has ended.' % member.server.name
            if reason:
                msg += "\nReason: %s" % reason
            await self.bot.send_message(member, msg)
            return member_data
def _unpunish_data(self, member):
"""Removes punish data entry and cancels any present callback"""
sid = member.server.id
if member.id in self.json.get(sid, {}):
del(self.json[member.server.id][member.id])
self.save()
if sid in self.handles and member.id in self.handles[sid]:
self.handles[sid][member.id].cancel()
del(self.handles[member.server.id][member.id])
# Listeners
async def on_channel_create(self, channel):
"""Run when new channels are created and set up role permissions"""
if channel.is_private:
return
role = await self.get_role(channel.server, quiet=True)
if not role:
return
await self.setup_channel(channel, role)
    async def on_member_update(self, before, after):
        """Remove scheduled unpunish when manually removed"""
        sid = before.server.id
        data = self.json.get(sid, {})
        member_data = data.get(before.id)
        if member_data is None:
            return
        role = await self.get_role(before.server, quiet=True)
        # Role present before but gone after -> a moderator removed it by hand,
        # so end the punishment without touching roles again.
        if role and role in before.roles and role not in after.roles:
            msg = 'Punishment manually ended early by a moderator/admin.'
            if member_data['reason']:
                msg += '\nReason was: ' + member_data['reason']
            await self._unpunish(after, msg, remove_role=False, update=True)
async def on_member_join(self, member):
"""Restore punishment if punished user leaves/rejoins"""
sid = member.server.id
role = await self.get_role(member.server, quiet=True)
data = self.json.get(sid, {}).get(member.id)
if not role or data is None:
return
duration = data['until'] - time.time()
if duration > 0:
await self.bot.add_roles(member, role)
reason = 'Punishment re-added on rejoin. '
if data['reason']:
reason += data['reason']
if member.id not in self.handles[sid]:
self.schedule_unpunish(duration, member, reason)
    async def on_voice_state_update(self, before, after):
        # Keep server mutes consistent with punish state when a member's
        # voice state changes.
        data = self.json.get(before.server.id, {})
        member_data = data.get(before.id, {})
        unmute_list = data.get('PENDING_UNMUTE', [])
        if not after.voice_channel:
            return
        if member_data and not after.voice.mute:
            # Still punished but unmuted: (re)apply the server mute.
            await self.bot.server_voice_state(after, mute=True)
        elif before.id in unmute_list:
            # Punishment ended while they were out of voice; unmute now and
            # clear every queued entry for this member.
            await self.bot.server_voice_state(after, mute=False)
            while before.id in unmute_list:
                unmute_list.remove(before.id)
            self.save()
    async def on_command(self, command, ctx):
        # Record usage of this cog's commands in the (optional) analytics hook.
        if ctx.cog is self and self.analytics:
            self.analytics.command(ctx)
def compat_load(path):
    """Load punishment data from *path*, normalising legacy record fields."""
    data = dataIO.load_json(path)
    for punishments in data.values():
        for user, record in punishments.items():
            if not user.isdigit():
                continue
            # read Kownlin json: older files used 'givenby' for the issuer.
            issuer = record.pop('givenby', None) or record.pop('by', None)
            record['by'] = issuer
            record['until'] = record.pop('until', None)
            record['reason'] = record.pop('reason', None)
    return data
def check_folder():
    # Ensure the cog's data directory exists before any file access.
    if not os.path.exists(PATH):
        log.debug('Creating folder: data/punish')
        os.makedirs(PATH)
def check_file():
    # Create an empty settings file if it is missing or corrupt.
    if not dataIO.is_valid_json(JSON):
        print('Creating empty %s' % JSON)
        dataIO.save_json(JSON, {})
def setup(bot):
    # Standard Red cog entry point: prepare data storage, then register cog.
    check_folder()
    check_file()
    bot.add_cog(Punish(bot))
| Injabie3/Red-DiscordBot | cogs/punish.py | Python | gpl-3.0 | 48,408 |
#!/usr/bin/env python
# ROS node for the Neato Robot Vacuum
# Copyright (c) 2010 University at Albany. All right reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the University at Albany nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL VANADIUM LABS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
ROS node for Neato XV-11 Robot Vacuum.
"""
__author__ = "ferguson@cs.albany.edu (Michael Ferguson)"
import roslib; roslib.load_manifest("neato_node")
import rospy
from math import sin,cos
from sensor_msgs.msg import LaserScan
from geometry_msgs.msg import Quaternion
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
from tf.broadcaster import TransformBroadcaster
from neato_node.msg import NeatoDropSensor
from neato_driver import xv11, BASE_WIDTH, MAX_SPEED
class NeatoNode:
    """ROS driver node for the Neato XV-11: publishes laser scans, odometry
    and drop-sensor readings, and drives the motors from cmd_vel."""

    def __init__(self):
        """ Start up connection to the Neato Robot. """
        rospy.init_node('neato')

        self.port = rospy.get_param('~port', "/dev/ttyACM0")
        rospy.loginfo("Using port: %s"%(self.port))

        self.robot = xv11(self.port)

        rospy.Subscriber("cmd_vel", Twist, self.cmdVelCb)
        self.scanPub = rospy.Publisher('base_scan', LaserScan)
        self.odomPub = rospy.Publisher('odom',Odometry)
        self.dropPub = rospy.Publisher('neato_drop',NeatoDropSensor)
        self.odomBroadcaster = TransformBroadcaster()

        # Target wheel speeds [left, right] in mm/s, updated by cmdVelCb.
        self.cmd_vel = [0,0]

    def spin(self):
        """Main loop: poll the robot, integrate odometry, publish topics."""
        encoders = [0,0]

        self.x = 0                  # position in xy plane
        self.y = 0
        self.th = 0
        then = rospy.Time.now()

        # things that don't ever change
        scan_link = rospy.get_param('~frame_id','base_laser_link')
        scan = LaserScan(header=rospy.Header(frame_id=scan_link))
        scan.angle_min = 0
        scan.angle_max = 6.26
        scan.angle_increment = 0.017437326
        scan.range_min = 0.020
        scan.range_max = 5.0
        odom = Odometry(header=rospy.Header(frame_id="odom"), child_frame_id='base_link')

        # main loop of driver
        r = rospy.Rate(5)
        self.robot.requestScan()
        while not rospy.is_shutdown():
            # prepare laser scan
            scan.header.stamp = rospy.Time.now()
            #self.robot.requestScan()
            scan.ranges = self.robot.getScanRanges()

            # get motor encoder values
            left, right = self.robot.getMotors()

            # get analog sensors
            self.robot.getAnalogSensors()

            # get drop sensors
            left_drop = self.robot.state["LeftDropInMM"]
            right_drop = self.robot.state["RightDropInMM"]

            # send updated movement commands
            self.robot.setMotors(self.cmd_vel[0], self.cmd_vel[1], max(abs(self.cmd_vel[0]),abs(self.cmd_vel[1])))

            # ask for the next scan while we finish processing stuff
            self.robot.requestScan()

            # now update position information
            # NOTE(review): assumes dt > 0; loop is paced by rospy.Rate(5) --
            # confirm timestamps cannot repeat.
            dt = (scan.header.stamp - then).to_sec()
            then = scan.header.stamp

            # Encoders report millimetres; convert deltas to metres.
            d_left = (left - encoders[0])/1000.0
            d_right = (right - encoders[1])/1000.0
            encoders = [left, right]

            # Differential-drive kinematics: arc length and heading change.
            dx = (d_left+d_right)/2
            dth = (d_right-d_left)/(BASE_WIDTH/1000.0)

            x = cos(dth)*dx
            y = -sin(dth)*dx
            self.x += cos(self.th)*x - sin(self.th)*y
            self.y += sin(self.th)*x + cos(self.th)*y
            self.th += dth

            # prepare tf from base_link to odom (yaw-only quaternion)
            quaternion = Quaternion()
            quaternion.z = sin(self.th/2.0)
            quaternion.w = cos(self.th/2.0)

            # prepare odometry
            odom.header.stamp = rospy.Time.now()
            odom.pose.pose.position.x = self.x
            odom.pose.pose.position.y = self.y
            odom.pose.pose.position.z = 0
            odom.pose.pose.orientation = quaternion
            odom.twist.twist.linear.x = dx/dt
            odom.twist.twist.angular.z = dth/dt

            # prepare drop
            drop = NeatoDropSensor()
            drop.header.stamp = rospy.Time.now()
            drop.left = left_drop
            drop.right = right_drop

            # publish everything
            self.odomBroadcaster.sendTransform( (self.x, self.y, 0), (quaternion.x, quaternion.y, quaternion.z, quaternion.w),
                then, "base_link", "odom" )
            self.scanPub.publish(scan)
            self.odomPub.publish(odom)
            self.dropPub.publish(drop)

            # wait, then do it again
            r.sleep()

        # shut down
        self.robot.setLDS("off")
        self.robot.setTestMode("off")

    def cmdVelCb(self,req):
        # Convert a Twist (m/s, rad/s) into per-wheel speeds in mm/s.
        x = req.linear.x * 1000
        th = req.angular.z * (BASE_WIDTH/2)
        k = max(abs(x-th),abs(x+th))
        # sending commands higher than max speed will fail
        if k > MAX_SPEED:
            x = x*MAX_SPEED/k; th = th*MAX_SPEED/k
        self.cmd_vel = [ int(x-th) , int(x+th) ]
if __name__ == "__main__":
    # Instantiate the driver and run its main loop until ROS shutdown.
    robot = NeatoNode()
    robot.spin()
| rbtying/Companion-Cube | neato_node/nodes/neato.py | Python | gpl-3.0 | 6,453 |
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function

# Bug fix: the abstract base classes moved to collections.abc in Python 3.3
# and importing them from `collections` stopped working in Python 3.10.
try:
    from collections.abc import Mapping
except ImportError:  # Python 2
    from collections import Mapping
import inspect
import os
import re
import select
import signal
import sys
import threading
import traceback
if sys.version_info < (3,):
from SimpleXMLRPCServer import SimpleXMLRPCServer
from StringIO import StringIO
from xmlrpclib import Binary, ServerProxy
PY2, PY3 = True, False
else:
from io import StringIO
from xmlrpc.client import Binary, ServerProxy
from xmlrpc.server import SimpleXMLRPCServer
PY2, PY3 = False, True
unicode = str
long = int
__all__ = ['RobotRemoteServer', 'stop_remote_server', 'test_remote_server']
__version__ = 'devel'
# Control characters (minus TAB, LF, CR) that XML 1.0 cannot carry; results
# containing them are shipped as xmlrpc Binary instead.
BINARY = re.compile('[\x00-\x08\x0B\x0C\x0E-\x1F]')
# High bytes; on Python 2 a byte string containing these is treated as binary.
NON_ASCII = re.compile('[\x80-\xff]')
class RobotRemoteServer(object):
    """XML-RPC server exposing a Python test library to Robot Framework."""

    def __init__(self, library, host='127.0.0.1', port=8270, port_file=None,
                 allow_stop='DEPRECATED', serve=True, allow_remote_stop=True):
        """Configure and start-up remote server.

        :param library:     Test library instance or module to host.
        :param host:        Address to listen. Use ``'0.0.0.0'`` to listen
                            to all available interfaces.
        :param port:        Port to listen. Use ``0`` to select a free port
                            automatically. Can be given as an integer or as
                            a string.
        :param port_file:   File to write the port that is used. ``None`` means
                            no such file is written. Port file is created after
                            the server is started and removed automatically
                            after it has stopped.
        :param allow_stop:  DEPRECATED since version 1.1. Use
                            ``allow_remote_stop`` instead.
        :param serve:       If ``True``, start the server automatically and
                            wait for it to be stopped.
        :param allow_remote_stop:  Allow/disallow stopping the server using
                            ``Stop Remote Server`` keyword and
                            ``stop_remote_server`` XML-RPC method.
        """
        self._library = RemoteLibraryFactory(library)
        self._server = StoppableXMLRPCServer(host, int(port))
        self._register_functions(self._server)
        self._port_file = port_file
        # Honour the deprecated allow_stop argument only when explicitly set.
        self._allow_remote_stop = allow_remote_stop \
            if allow_stop == 'DEPRECATED' else allow_stop
        if serve:
            self.serve()

    def _register_functions(self, server):
        # The five methods below form the remote-library XML-RPC interface.
        server.register_function(self.get_keyword_names)
        server.register_function(self.run_keyword)
        server.register_function(self.get_keyword_arguments)
        server.register_function(self.get_keyword_documentation)
        server.register_function(self.stop_remote_server)

    @property
    def server_address(self):
        """Server address as a tuple ``(host, port)``."""
        return self._server.server_address

    @property
    def server_port(self):
        """Server port as an integer.

        If the initial given port is 0, also this property returns 0 until
        the server is activated.
        """
        return self._server.server_address[1]

    def activate(self):
        """Bind port and activate the server but do not yet start serving.

        :return Port number that the server is going to use. This is the
                actual port to use, even if the initially given port is 0.
        """
        return self._server.activate()

    def serve(self, log=True):
        """Start the server and wait for it to be stopped.

        :param log: When ``True``, print messages about start and stop to
                    the console.

        Automatically activates the server if it is not activated already.

        If this method is executed in the main thread, automatically registers
        signals SIGINT, SIGTERM and SIGHUP to stop the server.

        Using this method requires using ``serve=False`` when initializing the
        server. Using ``serve=True`` is equal to first using ``serve=False``
        and then calling this method.

        In addition to signals, the server can be stopped with the ``Stop
        Remote Server`` keyword and the ``stop_remote_serve`` XML-RPC method,
        unless they are disabled when the server is initialized. If this method
        is executed in a thread, then it is also possible to stop the server
        using the :meth:`stop` method.
        """
        self._server.activate()
        self._announce_start(log, self._port_file)
        with SignalHandler(self.stop):
            self._server.serve()
        self._announce_stop(log, self._port_file)

    def _announce_start(self, log, port_file):
        self._log('started', log)
        if port_file:
            with open(port_file, 'w') as pf:
                pf.write(str(self.server_port))

    def _announce_stop(self, log, port_file):
        self._log('stopped', log)
        if port_file and os.path.exists(port_file):
            os.remove(port_file)

    def _log(self, action, log=True, warn=False):
        if log:
            address = '%s:%s' % self.server_address
            if warn:
                print('*WARN*', end=' ')
            print('Robot Framework remote server at %s %s.' % (address, action))

    def stop(self):
        """Stop server."""
        self._server.stop()

    # Exposed XML-RPC methods. Should they be moved to own class?

    def stop_remote_server(self, log=True):
        # Refuses (and warns) when remote stopping has been disabled.
        if not self._allow_remote_stop:
            self._log('does not allow stopping', log, warn=True)
            return False
        self.stop()
        return True

    def get_keyword_names(self):
        return self._library.get_keyword_names() + ['stop_remote_server']

    def run_keyword(self, name, args, kwargs=None):
        # 'stop_remote_server' is served by this class, not the library.
        if name == 'stop_remote_server':
            return KeywordRunner(self.stop_remote_server).run_keyword(args, kwargs)
        return self._library.run_keyword(name, args, kwargs)

    def get_keyword_arguments(self, name):
        if name == 'stop_remote_server':
            return []
        return self._library.get_keyword_arguments(name)

    def get_keyword_documentation(self, name):
        if name == 'stop_remote_server':
            return ('Stop the remote server unless stopping is disabled.\n\n'
                    'Return ``True/False`` depending was server stopped or not.')
        return self._library.get_keyword_documentation(name)

    def get_keyword_tags(self, name):
        if name == 'stop_remote_server':
            return []
        return self._library.get_keyword_tags(name)
class StoppableXMLRPCServer(SimpleXMLRPCServer):
    """XML-RPC server that binds lazily and can be shut down from a request
    handler or another thread."""

    allow_reuse_address = True  # permit quick restarts on the same port

    def __init__(self, host, port):
        # bind_and_activate=False: defer binding until activate() so callers
        # can control when the port is taken.
        SimpleXMLRPCServer.__init__(self, (host, port), logRequests=False,
                                    bind_and_activate=False)
        self._activated = False
        self._stopper_thread = None

    def activate(self):
        """Bind and activate once; return the actual port in use."""
        if not self._activated:
            self.server_bind()
            self.server_activate()
            self._activated = True
        return self.server_address[1]

    def serve(self):
        """Serve until shut down, then close the socket and join the stopper."""
        self.activate()
        try:
            self.serve_forever()
        except select.error:
            # Signals seem to cause this error with Python 2.6.
            if sys.version_info[:2] > (2, 6):
                raise
        self.server_close()
        if self._stopper_thread:
            self._stopper_thread.join()
            self._stopper_thread = None

    def stop(self):
        # shutdown() blocks until serve_forever() returns, so it must run in
        # a separate daemon thread to be callable from a request handler.
        self._stopper_thread = threading.Thread(target=self.shutdown)
        self._stopper_thread.daemon = True
        self._stopper_thread.start()
class SignalHandler(object):
    """Context manager routing SIGINT/SIGTERM/SIGHUP to *handler*.

    Original handlers are restored on exit. Outside the main thread signal
    registration is impossible, so registration stops silently.
    """

    _NAMES = ('SIGINT', 'SIGTERM', 'SIGHUP')

    def __init__(self, handler):
        self._handler = lambda signum, frame: handler()
        self._original = {}

    def __enter__(self):
        for name in self._NAMES:
            signum = getattr(signal, name, None)
            if signum is None:  # e.g. SIGHUP does not exist on Windows
                continue
            try:
                previous = signal.signal(signum, self._handler)
            except ValueError:  # Not in main thread
                return
            self._original[name] = previous

    def __exit__(self, *exc_info):
        while self._original:
            name, previous = self._original.popitem()
            signal.signal(getattr(signal, name), previous)
def RemoteLibraryFactory(library):
    """Wrap *library* in the static/hybrid/dynamic adapter matching its API."""
    if inspect.ismodule(library):
        return StaticRemoteLibrary(library)
    get_keyword_names = dynamic_method(library, 'get_keyword_names')
    run_keyword = dynamic_method(library, 'run_keyword')
    if get_keyword_names and run_keyword:
        return DynamicRemoteLibrary(library, get_keyword_names, run_keyword)
    if get_keyword_names:
        return HybridRemoteLibrary(library, get_keyword_names)
    return StaticRemoteLibrary(library)
def dynamic_method(library, underscore_name):
    """Return the library method named *underscore_name* or its camelCase
    twin (underscore spelling wins); ``None`` when neither exists."""
    words = underscore_name.split('_')
    camelcase_name = words[0] + ''.join(word.title() for word in words[1:])
    for candidate in (underscore_name, camelcase_name):
        attr = getattr(library, candidate, None)
        if attr is not None and is_function_or_method(attr):
            return attr
    return None


def is_function_or_method(item):
    """True for plain functions and (bound/unbound) methods only."""
    return inspect.isfunction(item) or inspect.ismethod(item)
class StaticRemoteLibrary(object):
    """Expose the public functions/methods of a module or instance as
    keywords.

    A ``robot_name`` attribute on a function (set by Robot's ``@keyword``
    decorator) overrides the exposed keyword name; the mapping back to the
    real attribute name is kept in ``_robot_name_index``.
    """

    def __init__(self, library):
        self._library = library
        self._names, self._robot_name_index = self._get_keyword_names(library)

    def _get_keyword_names(self, library):
        names = []
        robot_name_index = {}
        for name, kw in inspect.getmembers(library):
            if is_function_or_method(kw):
                if getattr(kw, 'robot_name', None):
                    # Custom keyword name declared with @keyword.
                    names.append(kw.robot_name)
                    robot_name_index[kw.robot_name] = name
                elif name[0] != '_':
                    names.append(name)
        return names, robot_name_index

    def get_keyword_names(self):
        return self._names

    def run_keyword(self, name, args, kwargs=None):
        kw = self._get_keyword(name)
        return KeywordRunner(kw).run_keyword(args, kwargs)

    def _get_keyword(self, name):
        # Map a custom @keyword name back to the real attribute name.
        if name in self._robot_name_index:
            name = self._robot_name_index[name]
        return getattr(self._library, name)

    def get_keyword_arguments(self, name):
        """Return the keyword's argument spec as Robot-style strings."""
        if name == '__init__':
            # Bug fix: this condition used to be ``__name__ == '__init__'``,
            # which compares the *module* name and is never true, so the
            # guard was dead code and '__init__' fell through to getargspec
            # below (crashing e.g. for module-based libraries).
            return []
        kw = self._get_keyword(name)
        args, varargs, kwargs, defaults = self._getargspec(kw)
        if inspect.ismethod(kw):
            args = args[1:]  # drop 'self'
        if defaults:
            args, names = args[:-len(defaults)], args[-len(defaults):]
            args += ['%s=%s' % (n, d) for n, d in zip(names, defaults)]
        if varargs:
            args.append('*%s' % varargs)
        if kwargs:
            args.append('**%s' % kwargs)
        return args

    @staticmethod
    def _getargspec(kw):
        # inspect.getargspec was removed in Python 3.11; fall back to
        # getfullargspec and return the same four legacy fields.
        try:
            return inspect.getargspec(kw)
        except AttributeError:
            spec = inspect.getfullargspec(kw)
            return spec.args, spec.varargs, spec.varkw, spec.defaults

    def get_keyword_documentation(self, name):
        if name == '__intro__':
            source = self._library
        elif name == '__init__':
            source = self._get_init(self._library)
        else:
            source = self._get_keyword(name)
        return inspect.getdoc(source) or ''

    def _get_init(self, library):
        if inspect.ismodule(library):
            return None
        init = getattr(library, '__init__', None)
        return init if self._is_valid_init(init) else None

    def _is_valid_init(self, init):
        if not init:
            return False
        # https://bitbucket.org/pypy/pypy/issues/2462/
        if 'PyPy' in sys.version:
            if PY2:
                return init.__func__ is not object.__init__.__func__
            return init is not object.__init__
        return is_function_or_method(init)

    def get_keyword_tags(self, name):
        keyword = self._get_keyword(name)
        return getattr(keyword, 'robot_tags', [])
class HybridRemoteLibrary(StaticRemoteLibrary):
    """Static library whose keyword discovery is delegated to the library's
    own ``get_keyword_names`` (the "hybrid" library API)."""

    def __init__(self, library, get_keyword_names):
        StaticRemoteLibrary.__init__(self, library)
        # Shadow the inherited method with the library's own implementation.
        self.get_keyword_names = get_keyword_names
class DynamicRemoteLibrary(HybridRemoteLibrary):
    """Library implementing the full dynamic API (``run_keyword`` and,
    optionally, the get_keyword_* introspection methods)."""

    def __init__(self, library, get_keyword_names, run_keyword):
        HybridRemoteLibrary.__init__(self, library, get_keyword_names)
        self._run_keyword = run_keyword
        self._supports_kwargs = self._get_kwargs_support(run_keyword)
        # Optional dynamic-API methods; None when the library lacks them.
        self._get_keyword_arguments \
            = dynamic_method(library, 'get_keyword_arguments')
        self._get_keyword_documentation \
            = dynamic_method(library, 'get_keyword_documentation')
        self._get_keyword_tags \
            = dynamic_method(library, 'get_keyword_tags')

    def _get_kwargs_support(self, run_keyword):
        # NOTE(review): inspect.getargspec was removed in Python 3.11;
        # running there requires switching to getfullargspec.
        spec = inspect.getargspec(run_keyword)
        return len(spec.args) > 3    # self, name, args, kwargs=None

    def run_keyword(self, name, args, kwargs=None):
        # Pass kwargs through only when given, mirroring the dynamic API.
        args = [name, args, kwargs] if kwargs else [name, args]
        return KeywordRunner(self._run_keyword).run_keyword(args)

    def get_keyword_arguments(self, name):
        if self._get_keyword_arguments:
            return self._get_keyword_arguments(name)
        if self._supports_kwargs:
            return ['*varargs', '**kwargs']
        return ['*varargs']

    def get_keyword_documentation(self, name):
        if self._get_keyword_documentation:
            return self._get_keyword_documentation(name)
        return ''

    def get_keyword_tags(self, name):
        if self._get_keyword_tags:
            return self._get_keyword_tags(name)
        return []
class KeywordRunner(object):
    """Execute one keyword call and package the outcome for XML-RPC."""

    def __init__(self, keyword):
        self._keyword = keyword

    def run_keyword(self, args, kwargs=None):
        # Unwrap xmlrpc Binary payloads before calling the real keyword.
        args = self._handle_binary(args)
        kwargs = self._handle_binary(kwargs or {})
        result = KeywordResult()
        with StandardStreamInterceptor() as interceptor:
            try:
                return_value = self._keyword(*args, **kwargs)
            except Exception:
                result.set_error(*sys.exc_info())
            else:
                try:
                    result.set_return(return_value)
                except Exception:
                    # The return value itself could not be serialised.
                    result.set_error(*sys.exc_info()[:2])
                else:
                    result.set_status('PASS')
        result.set_output(interceptor.output)
        return result.data

    def _handle_binary(self, arg):
        # No need to compare against other iterables or mappings because we
        # only get actual lists and dicts over XML-RPC. Binary cannot be
        # a dictionary key either.
        if isinstance(arg, list):
            return [self._handle_binary(item) for item in arg]
        if isinstance(arg, dict):
            return dict((key, self._handle_binary(arg[key])) for key in arg)
        if isinstance(arg, Binary):
            return arg.data
        return arg
class StandardStreamInterceptor(object):
    """Capture stdout/stderr during a keyword call.

    Capture starts already at construction time; the combined, Robot-style
    output is available from ``output`` after the ``with`` block exits.
    """

    _LEVEL_PREFIXES = ('*TRACE*', '*DEBUG*', '*INFO*', '*HTML*',
                       '*WARN*', '*ERROR*')

    def __init__(self):
        self.output = ''
        self.origout = sys.stdout
        self.origerr = sys.stderr
        sys.stdout, sys.stderr = StringIO(), StringIO()

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        captured_out = sys.stdout.getvalue()
        captured_err = sys.stderr.getvalue()
        buffers = (sys.stdout, sys.stderr)
        sys.stdout = self.origout
        sys.stderr = self.origerr
        for buf in buffers:
            buf.close()
        if captured_out and captured_err:
            # Give unlevelled stderr text a level, and keep it on its own line.
            if not captured_err.startswith(self._LEVEL_PREFIXES):
                captured_err = '*INFO* %s' % captured_err
            if not captured_out.endswith('\n'):
                captured_out += '\n'
        self.output = captured_out + captured_err
class KeywordResult(object):
    """Accumulate the status/return/error/output dict sent back over XML-RPC."""

    # For these, the exception name adds no information and is suppressed.
    _generic_exceptions = (AssertionError, RuntimeError, Exception)

    def __init__(self):
        self.data = {'status': 'FAIL'}

    def set_error(self, exc_type, exc_value, exc_tb=None):
        self.data['error'] = self._get_message(exc_type, exc_value)
        if exc_tb:
            self.data['traceback'] = self._get_traceback(exc_tb)
        continuable = self._get_error_attribute(exc_value, 'CONTINUE')
        if continuable:
            self.data['continuable'] = continuable
        fatal = self._get_error_attribute(exc_value, 'EXIT')
        if fatal:
            self.data['fatal'] = fatal

    def _get_message(self, exc_type, exc_value):
        name = exc_type.__name__
        message = self._get_message_from_exception(exc_value)
        if not message:
            return name
        if exc_type in self._generic_exceptions \
                or getattr(exc_value, 'ROBOT_SUPPRESS_NAME', False):
            return message
        return '%s: %s' % (name, message)

    def _get_message_from_exception(self, value):
        # UnicodeError occurs if message contains non-ASCII bytes
        try:
            msg = unicode(value)
        except UnicodeError:
            msg = ' '.join(self._str(a, handle_binary=False) for a in value.args)
        return self._handle_binary_result(msg)

    def _get_traceback(self, exc_tb):
        # Latest entry originates from this module so it can be removed
        entries = traceback.extract_tb(exc_tb)[1:]
        trace = ''.join(traceback.format_list(entries))
        return 'Traceback (most recent call last):\n' + trace

    def _get_error_attribute(self, exc_value, name):
        return bool(getattr(exc_value, 'ROBOT_%s_ON_FAILURE' % name, False))

    def set_return(self, value):
        value = self._handle_return_value(value)
        if value != '':
            self.data['return'] = value

    def _handle_return_value(self, ret):
        # Convert the keyword's return value into XML-RPC transportable form.
        if isinstance(ret, (str, unicode, bytes)):
            return self._handle_binary_result(ret)
        if isinstance(ret, (int, long, float)):
            return ret
        if isinstance(ret, Mapping):
            return dict((self._str(key), self._handle_return_value(value))
                        for key, value in ret.items())
        try:
            return [self._handle_return_value(item) for item in ret]
        except TypeError:
            # Not iterable: fall back to its string representation.
            return self._str(ret)

    def _handle_binary_result(self, result):
        if not self._contains_binary(result):
            return result
        if not isinstance(result, bytes):
            try:
                result = result.encode('ASCII')
            except UnicodeError:
                raise ValueError("Cannot represent %r as binary." % result)
        # With IronPython Binary cannot be sent if it contains "real" bytes.
        if sys.platform == 'cli':
            result = str(result)
        return Binary(result)

    def _contains_binary(self, result):
        if PY3:
            return isinstance(result, bytes) or BINARY.search(result)
        return (isinstance(result, bytes) and NON_ASCII.search(result) or
                BINARY.search(result))

    def _str(self, item, handle_binary=True):
        if item is None:
            return ''
        if not isinstance(item, (str, unicode, bytes)):
            item = unicode(item)
        if handle_binary:
            item = self._handle_binary_result(item)
        return item

    def set_status(self, status):
        self.data['status'] = status

    def set_output(self, output):
        if output:
            self.data['output'] = self._handle_binary_result(output)
def test_remote_server(uri, log=True):
"""Test is remote server running.
:param uri: Server address.
:param log: Log status message or not.
:return ``True`` if server is running, ``False`` otherwise.
"""
logger = print if log else lambda message: None
try:
ServerProxy(uri).get_keyword_names()
except Exception:
logger('No remote server running at %s.' % uri)
return False
logger('Remote server running at %s.' % uri)
return True
def stop_remote_server(uri, log=True):
    """Stop remote server unless server has disabled stopping.

    :param uri:  Server address.
    :param log:  Log status message or not.
    :return      ``True`` if server was stopped or it was not running in
                 the first place, ``False`` otherwise.
    """
    logger = print if log else lambda message: None
    if not test_remote_server(uri, log=False):
        logger('No remote server running at %s.' % uri)
        return True
    logger('Stopping remote server at %s.' % uri)
    stopped = ServerProxy(uri).stop_remote_server()
    if not stopped:
        logger('Stopping not allowed!')
        return False
    return True
if __name__ == '__main__':

    def parse_args(script, *args):
        # CLI: `script {test|stop} [uri]`; uri defaults to the standard
        # remote-server port on localhost, scheme defaults to http.
        actions = {'stop': stop_remote_server, 'test': test_remote_server}
        if not (0 < len(args) < 3) or args[0] not in actions:
            sys.exit('Usage: %s {test|stop} [uri]' % os.path.basename(script))
        uri = args[1] if len(args) == 2 else 'http://127.0.0.1:8270'
        if '://' not in uri:
            uri = 'http://' + uri
        return actions[args[0]], uri

    action, uri = parse_args(*sys.argv)
    success = action(uri)
    # Exit status 0 on success so the command is usable from shell scripts.
    sys.exit(0 if success else 1)
| priyesingh/rijenpy | libs/robotremoteserver.py | Python | gpl-3.0 | 21,857 |
#!/usr/bin/python3
#
# Copyright (C) 2019 by Compassion International. All rights reserved.
# License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>.
# This is free software: you are free to change and redistribute it.
# There is NO WARRANTY, to the extent permitted by law.
"""
ETL North Carolina voter registration files
1. Read state-wide tab-delimited file
2. Narrow down the columns
3. Output all counties to a single CSV file
Source data available here:
https://www.ncsbe.gov/data-stats/other-election-related-data
http://dl.ncsbe.gov/data/ncvoter_Statewide.zip
"""
import glob
import sys
import pandas as pd
def groupby(df, col):
    """Print the per-group non-null count of *col* grouped by itself."""
    counts = df.groupby(col)[[col]].count()
    print(counts)
def go():
    """The main loop"""
    if len(sys.argv) != 3:
        print('As arguments pass (1) the state-wide NC voter registration file and (2) the output .csv filename.')
        print('Example: python3 %s ncvoter_Statewide.txt nc.csv' % sys.argv[0])
        sys.exit(1)
    src_path, dst_path = sys.argv[1], sys.argv[2]
    # Keep only the demographic/status columns of interest.
    wanted_columns = [3, 9, 10, 11, 12, 25, 26, 28]
    print('Reading tab-delimited file: %s' % src_path)
    voters = pd.read_csv(src_path, sep='\t', usecols=wanted_columns)
    print('Row count: {:,}'.format(voters.shape[0]))
    for column in ('status_cd', 'race_code', 'ethnic_code', 'gender_code'):
        groupby(voters, column)
    print('Writing to CSV file: %s' % dst_path)
    voters.to_csv(dst_path, index=False)


go()
| az0/entity-metadata | code/etl_voter_north_carolina.py | Python | gpl-3.0 | 1,464 |
#!/usr/bin/env python3
'''
@author Michele Tomaiuolo - http://www.ce.unipr.it/people/tomamic
@license This software is free - http://www.gnu.org/licenses/gpl.html
'''
WEEK_DAYS = 7
MAX_WEEKS = 6

# Day-of-week index (0-6) of the month's first day, and the month's length.
first = int(input("first (0-6)? "))
length = int(input("length (28-31)? "))

# Variant 1: one flat loop over all 42 calendar cells.
for i in range(WEEK_DAYS * MAX_WEEKS):
    day = i + 1 - first
    if 0 < day <= length:
        # Bug fix: this line was `print(f"{:3}".format(day), end="")`, a
        # SyntaxError (f-string placeholder without an expression).
        print(f"{day:3}", end="")
    else:
        print("   ", end="")
    if i % WEEK_DAYS == WEEK_DAYS - 1:
        print()
print()

# Variant 2: nested loops, one row per week.
for y in range(MAX_WEEKS):
    for x in range(WEEK_DAYS):
        day = y * WEEK_DAYS + x + 1 - first
        if 0 < day <= length:
            print("{:3}".format(day), end='')
        else:
            print("   ", end='')
    print()
print()

# Variant 3: transposed layout, one row per weekday.
for y in range(WEEK_DAYS):
    for x in range(MAX_WEEKS):
        day = y + x * WEEK_DAYS + 1 - first
        if 0 < day <= length:
            print("{:3}".format(day), end='')
        else:
            print("   ", end='')
    print()
print()
| tomamic/fondinfo | exercises/e3_2012_7_calendar.py | Python | gpl-3.0 | 1,002 |
#!/usr/bin/env python
"""
Purpose : Extract next sequence number of auto-scaled instance and set new tag to self instance. Script will be running from new instance.
will take input from command line instead of from json file
Future Plan :
will associate instance to a role based IAM profile
Usage :
python ec2-autoscale-instance-modify.py -a <your aws access_key> -s <aws secret key> -g <auto scale group that used in cloudformation file> -r <region> -n <min_server_number> -c <customer> -t <uat/plab/prod> -p <appname> -d <domainname ie example.net>
"""
__author__ = "kama maiti"
__copyright__ = "Copyright 2016, AWS autoscaled instance tag modification project"
__credits__ = ["kamal maiti"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "kamal maiti"
__email__ = "kamal.maiti@gmail.com"
__status__ = "production/Non-production"
import re
import argparse
import boto.ec2.autoscale
from boto.ec2 import EC2Connection
import shlex, subprocess
# Module-level placeholders; the real values are parsed from the command
# line inside main().
akey = ""
skey = ""
ag = ""
rg = ""
min_num = ""
def find_server_number(str):
    """Return all non-overlapping three-digit runs found in *str*.

    Returns an empty list when there is no match. (The parameter name
    shadows the builtin ``str``; kept for interface compatibility.)
    """
    # Both branches of the original if/else returned the same list, so the
    # findall() result can simply be returned directly.
    return re.findall(r'\d\d\d', str)
def main():
arg_parser = argparse.ArgumentParser(description='Read autoscale instance')
arg_parser.add_argument('-a', dest='akey',help='Provide AWS_ACCESS_KEY')
arg_parser.add_argument('-s', dest='skey',help='Provide AWS_SECRET_ACCESS_KEY')
arg_parser.add_argument('-g', dest='ag',help='Provide User provided autoscale group name')
arg_parser.add_argument('-r', dest='rg',help='Provide region name')
arg_parser.add_argument('-n', dest='min_num',help='Minimum Server name')
arg_parser.add_argument('-c', dest='customer',help='Name of the customer in short')
arg_parser.add_argument('-t', dest='servertype',help='Type of the server ie prod or uat or plab')
arg_parser.add_argument('-p', dest='purpose',help='Purpose of the Server')
arg_parser.add_argument('-d', dest='domain',help='Domain name that will be appended to server name')
args = arg_parser.parse_args()
#print(args)
access_key = args.akey
secret_key = args.skey
region = args.rg
group_name = str(args.ag)
min_server_num = int(args.min_num)
customer = str(args.customer)
servertype = str(args.servertype)
purpose = str(args.purpose)
domain = str(args.domain)
#created two objects below. One for autocale connection and another for ec2 instance
as_conn = boto.ec2.autoscale.connect_to_region(region, aws_access_key_id=access_key, aws_secret_access_key=secret_key)
ec2_conn = boto.ec2.connect_to_region(region, aws_access_key_id=access_key, aws_secret_access_key=secret_key)
try:
groups = as_conn.get_all_groups()
all_groups = [group.name for group in groups]
for g in all_groups:
if group_name in g: #searching autocaling group that we are concerned with. Note all autoscalling group name should be unique
FOUND_GROUP = g #FOUND_GROUP will save exact AG name. Note that exact AG name is not same as user provided name. It'll check if group_name is subset of g
FOUND_GROUP_WITH_DES = as_conn.get_all_groups(names=[FOUND_GROUP])[0]
instance_ids = [i.instance_id for i in FOUND_GROUP_WITH_DES.instances]
#reservations = ec2_conn.get_all_instances(instance_ids)
instances = ec2_conn.get_only_instances(instance_ids)
#instances = [i for r in reservations for i in r.instances]
lNameTag = []
#collect all tags of all instances and sort Name tags and save them in list.
for i,j in enumerate(instances):
a = instances[i].tags
lNameTag.append(a['Name'])
#process each instances and take their server number in one list
lServerNum = []
if not lNameTag: #checking if list is empty or not. If empty then this is first instance whose server num will be min_server_num
next_number = min_server_num
else:
for server in lNameTag: #iterating each value of "Name" tag
if not find_server_number(server): #if method find_server_number returns null list
next_number = min_server_num
else:
val = find_server_number(server) #got value like [u'101']. Below comand will remove [],' and u
actual_num=str(val).strip('[]').strip('u').strip('\'')
lServerNum.append(int(actual_num)) #actual_num is string, need to convert to int
if not lServerNum: #check if list of server number is blank or not
next_number = min_server_num
else:
maximum_number = max(lServerNum) #used max function to find out maximum number in the list
next_number = maximum_number + 1
#Now we need to save this next_number in a file so that we can collect it and send to other commands.
with open('/tmp/serverno','w') as fd: #created a file and save the number as string. Then read it and used later
fd.write(str(next_number))
with open('/tmp/serverno','r') as fd:
num=fd.read()
#Will modify tag of current instance. Let's build a new tag.
delm = "-" #Delimeter that will be used to join multiple string
seq = ( customer, servertype, purpose, num, domain) #created a tuple
new_tag = delm.join(seq) #joined tuple strings
with open('/tmp/nodename','w') as fd:
fd.write(str(new_tag))
#will extract current instance ID using curl. ie curl http://169.254.169.254/latest/meta-data/instance-id
#
cmd = 'curl http://169.254.169.254/latest/meta-data/instance-id'
#shlex is simple lexical analyser for splitting a large string into tokens
args = shlex.split(cmd) #args will have value like : ['curl', 'http://169.254.169.254/latest/meta-data/instance-id']
output,error = subprocess.Popen(args,stdout = subprocess.PIPE, stderr= subprocess.PIPE).communicate() #out and error are saved in variable. communicate will execute comamnd
#o="i-fd96291f" #used for testing
cur_instance_reservation = ec2_conn.get_all_instances(instance_ids=output)
cur_instance = cur_instance_reservation[0].instances[0]
cur_instance.add_tag('Name', new_tag)
finally:
as_conn.close()
ec2_conn.close()
# Script entry point: tag the current autoscaled EC2 instance (see main() above).
if __name__ == '__main__':
    main()
| kmaiti/AWSAutoScalingWithF5andCloudFormation | aws-autoscale-ec2-instance-modify.py | Python | gpl-3.0 | 6,954 |
# cls_plan_BDI.py
import datetime
class Plan_BDI(object):
    """
    class for handling various plans for AIKIF using
    Belief | Desires | Intentions
    """
    def __init__(self, name, dependency):
        self.name = name
        self.id = 1
        self.dependency = dependency
        self.plan_version = "v0.10"
        self.success = False
        # Human-readable creation timestamp, e.g. "09:41AM 12-March-2015".
        self.start_date = datetime.datetime.now().strftime("%I:%M%p %d-%B-%Y")
        self.resources = []     # [name, type] pairs, see add_resource()
        self.constraint = []    # [name, type, value] triples, see add_constraint()
        self.beliefs = Beliefs(self)
        self.desires = Desires(self)
        self.intentions = Intentions(self)

    def __str__(self):
        """Render the plan in the same 'type : text' layout used on disk."""
        res = "---== Plan ==---- \n"
        res += "name : " + self.name + "\n"
        res += "version : " + self.plan_version + "\n"
        for i in self.beliefs.list():
            res += "belief : " + i + "\n"
        for i in self.desires.list():
            res += "desire : " + i + "\n"
        for i in self.intentions.list():
            res += "intention : " + i + "\n"
        return res

    def get_name(self):
        """Return the plan name."""
        return self.name

    def generate_plan(self):
        """
        Main logic in class which generates a plan
        """
        print("generating plan... TODO")

    def load_plan(self, fname):
        """ read the list of thoughts from a text file """
        with open(fname, "r") as f:
            for line in f:
                if line != '':
                    tpe, txt = self.parse_plan_from_string(line)
                    if tpe == 'name':
                        self.name = txt
                    elif tpe == 'version':
                        self.plan_version = txt
                    elif tpe == 'belief':
                        self.beliefs.add(txt)
                    elif tpe == 'desire':
                        self.desires.add(txt)
                    elif tpe == 'intention':
                        self.intentions.add(txt)

    def save_plan(self, fname):
        """Write the plan to fname in the format load_plan() reads back."""
        with open(fname, "w") as f:
            f.write("# AIKIF Plan specification \n")
            f.write("name :" + self.name + "\n")
            f.write("version :" + self.plan_version + "\n")
            for txt in self.beliefs.list():
                f.write("belief :" + txt + "\n")
            for txt in self.desires.list():
                f.write("desire :" + txt + "\n")
            for txt in self.intentions.list():
                f.write("intention :" + txt + "\n")

    def parse_plan_from_string(self, line):
        """
        Split one plan-file line into a (type, text) pair.
        Comment lines (starting with '#'), blank lines and lines without
        a ':' separator all yield ('', '').  The previous version indexed
        parts[1] unconditionally, so a bare newline raised IndexError,
        and it split on every ':', truncating values containing colons
        (e.g. timestamps); splitting once fixes the round-trip.
        """
        tpe = ''
        txt = ''
        if line != '':
            if line[0:1] != '#':
                # Split on the first ':' only so the text may contain colons.
                parts = line.split(":", 1)
                if len(parts) == 2:
                    tpe = parts[0].strip()
                    txt = parts[1].strip()
        return tpe, txt

    def add_resource(self, name, tpe):
        """
        add a resource available for the plan. These are text strings
        of real world objects mapped to an ontology key or programs
        from the toolbox section (can also be external programs)
        """
        self.resources.append([name, tpe])

    def add_constraint(self, name, tpe, val):
        """
        adds a constraint for the plan
        """
        self.constraint.append([name, tpe, val])
class Thoughts(object):
    """Shared container behind Beliefs, Desires and Intentions so the
    three groups of plan items can be managed uniformly."""
    def __init__(self, thought_type):
        # `thought_type` labels console output, e.g. 'belief' -> 'belief0 = ...'
        self._thoughts = []
        self._type = thought_type

    def __str__(self):
        return '\n'.join([' -- Thoughts --'] + self._thoughts) + '\n'

    def add(self, name):
        """Append one thought (a plain string) to the collection."""
        self._thoughts.append(name)

    def list(self, print_console=False):
        """Return a copy of the thoughts; optionally echo them to stdout."""
        if print_console is True:
            for idx, item in enumerate(self._thoughts):
                print('%s%d = %s' % (self._type, idx, item))
        return list(self._thoughts)
class Beliefs(Thoughts):
    """Collection of 'belief' statements belonging to one Plan_BDI."""
    def __init__(self, parent_plan):
        # Back-reference to the owning plan (not used by Thoughts itself).
        self.parent_plan = parent_plan
        super(Beliefs, self).__init__('belief')
class Desires(Thoughts):
    """Collection of 'desire' statements belonging to one Plan_BDI."""
    def __init__(self, parent_plan):
        # Back-reference to the owning plan (not used by Thoughts itself).
        self.parent_plan = parent_plan
        super(Desires, self).__init__('desire')
class Intentions(Thoughts):
    """Collection of 'intention' statements belonging to one Plan_BDI."""
    def __init__(self, parent_plan):
        # Back-reference to the owning plan (not used by Thoughts itself).
        self.parent_plan = parent_plan
        super(Intentions, self).__init__('intention')
def TEST():
    """Ad-hoc smoke test: build a small plan and print its string form."""
    myplan = Plan_BDI('new plan', '')
    for n in range(3):
        myplan.beliefs.add('belief' + str(n))
    for n in range(2):
        myplan.desires.add('desire' + str(n))
    myplan.intentions.add('intention0')
    # list() is side-effect free here (print_console defaults to False).
    myplan.beliefs.list()
    myplan.desires.list()
    myplan.intentions.list()
    print(str(myplan))
# Run the smoke test when executed directly.
if __name__ == '__main__':
    TEST()
| acutesoftware/AIKIF | aikif/lib/cls_plan_BDI.py | Python | gpl-3.0 | 5,209 |
# Draw graph x-axios is the number of nodes in the network.
import re
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter # useful for `logit` scale
from mininet.log import setLogLevel, output, info
# Read the experimental result data file in a specfied directory and generate a list of data file name.
def sortDataFile(dataFilePath, dataFileDir):
    """
    Return the statistic-result file names found under
    <dataFilePath>/data/<dataFileDir>, sorted by the node count embedded
    in each name (the integer between '-' and '.', e.g.
    'statResult-15.dat' -> 15).
    """
    all_names = os.listdir("%s/data/%s" % (dataFilePath, dataFileDir))
    # Keep only the statistic result files.  (The previous version popped
    # items while iterating the same list with a separately incremented
    # index, which skipped entries and removed the wrong file names.)
    dataFileList = [name for name in all_names if "statResult" in name]

    def node_number(fileName):
        # Extract the integer between the first '-' and the following '.'.
        fileName = fileName.strip()
        try:
            start = fileName.index("-") + len("-")
            end = fileName.index(".", start)
            return int(fileName[start:end].strip())
        except ValueError:
            # Malformed names sort first; the original bubble sort silently
            # ignored parse errors with a bare except.
            return -1

    dataFileList.sort(key=node_number)
    return dataFileList
# Read a data file and convert to two dimession List.
def readFileData(dataFilePath, dataFileDir, dataFileName):
    """
    Read one space-delimited data file and return its contents as a list
    of rows, each row being the list of field strings of one line.

    The previous version assembled fields character-by-character and never
    closed the file handle; for the single-space-delimited stat files this
    split is equivalent, and `with` guarantees the handle is released.
    """
    dataLine = []
    fullPath = "%s/data/%s/%s" % (dataFilePath, dataFileDir, dataFileName)
    with open(fullPath, "r") as data_file:
        for line in data_file:
            # Strip the line terminator only; empty fields from repeated
            # spaces are preserved, matching the original behaviour.
            dataLine.append(line.rstrip('\r\n').split(' '))
    return dataLine
# Sort two dimession List
def listSort(dataList, sortCol):
    """
    Sort dataList in place by the float value of column sortCol and
    return the same list.

    sortCol: the specified column used for sorting.

    The previous version was an O(n^2) bubble sort whose bare
    `except: pass` could abort a pass half-way and leave the list partly
    sorted; if any row's sort column cannot be parsed we now leave the
    list unchanged instead (still silent, but consistent).
    """
    try:
        dataList.sort(key=lambda row: float(str(row[sortCol]).strip()))
    except (ValueError, TypeError, IndexError):
        pass
    return dataList
#Calculate average statistic result in one experiment.
def aveStatResult(dataFilePath, dataFileDir, dataFileName, aveCol):
    "aveCol: the specified colume used for calculating a average value"
    # Caculate average value according to a specified column
    # Read data file and generate a list
    dataList = readFileData(dataFilePath, dataFileDir, dataFileName)
    sortDataList = listSort(dataList, aveCol)
    # One entry per distinct value of column aveCol (group key):
    conNodes = []
    aveConNumOutInt = []
    aveDelay = []
    aveNumOutInt = []
    aveIntPLR = []
    aveDataPLR = []
    avePLR = []
    i = 0
    # Walk the sorted rows; consecutive rows sharing the same aveCol value
    # form one group whose metric columns are averaged.
    while i < len(sortDataList):
        # Fixed column layout of the stat file -- presumably:
        # 2=sent interests, 3=delay, 4=total interests, 8=interest PLR,
        # 9=data PLR, 10=overall PLR.  TODO confirm against the writer.
        conNumOutInt = float(sortDataList[i][2].strip())
        Delay = float(sortDataList[i][3].strip())
        numOutInt = float(sortDataList[i][4].strip())
        IntPLR = float(sortDataList[i][8].strip())
        DataPLR = float(sortDataList[i][9].strip())
        PLR = float(sortDataList[i][10].strip())
        tmp = sortDataList[i][aveCol].strip()
        j = i+1
        n = 1
        flag = True
        # Accumulate the rest of the group (rows with the same key).
        # NOTE(review): tmp was stripped but sortDataList[j][aveCol] is
        # compared unstripped -- rows with stray whitespace split groups.
        while (j < len(sortDataList)) and flag:
            if sortDataList[j][aveCol] == tmp:
                n = n + 1
                conNumOutInt = conNumOutInt + float(sortDataList[j][2].strip())
                Delay = Delay + float(sortDataList[j][3].strip())
                numOutInt = numOutInt + float(sortDataList[j][4].strip())
                IntPLR = IntPLR + float(sortDataList[j][8].strip())
                DataPLR = DataPLR + float(sortDataList[j][9].strip())
                PLR = PLR + float(sortDataList[j][10].strip())
                j = j+1
            else:
                flag = False
        # Next group starts where this one ended.
        i = j
        conNodes.append(int(tmp))
        aveConNumOutInt.append(conNumOutInt/n)
        aveDelay.append(Delay/n)
        aveNumOutInt.append(numOutInt/n)
        aveIntPLR.append(IntPLR/n)
        aveDataPLR.append(DataPLR/n)
        avePLR.append(PLR/n)
    return conNodes, aveConNumOutInt, aveDelay, aveNumOutInt, aveIntPLR, aveDataPLR, avePLR
# randomly generate color
# randomly generate color
# NOTE(review): appears unused in this file (drawStatGraph uses a fixed
# `colors` list) -- confirm before removing.
def get_cmap(n, name='hsv'):
    '''Returns a function that maps each index in 0, 1, ..., n-1 to a distinct
    RGB color; the keyword argument name must be a standard mpl colormap name.'''
    return plt.cm.get_cmap(name, n)
# Draw statistical graph
# Draw statistical graph
def drawStatGraph(dataFilePath, dataFileDir, aveCol):
    """Draw four subplots (interest count, delay, total interests, interest
    PLR) vs network size, one curve per consumer count.  aveCol is the
    grouping column passed through to aveStatResult()."""
    # setting a style to use
    plt.style.use('fivethirtyeight')
    # create a figure
    fig = plt.figure()
    # define subplots and their positions in figure
    # NOTE(review): `axisbg` was removed in matplotlib 2.x (use `facecolor`);
    # this code targets the older matplotlib bundled with mininet.
    plt1 = fig.add_subplot(221, axisbg='white')
    plt2 = fig.add_subplot(222, axisbg='white')
    plt3 = fig.add_subplot(223, axisbg='white')
    plt4 = fig.add_subplot(224, axisbg='white')
    # plotting the line 1 points
    # Fixed axis ranges: x is the node count (5..35), y per metric.
    plt1.axis([5, 35, 4, 10])
    plt2.axis([5, 35, 0, 15])
    plt3.axis([5, 35, 0, 200])
    plt4.axis([5, 35, 0, 2.5])
    dataFileList = sortDataFile (dataFilePath, dataFileDir)
    colors = ["b","g","r","y","black"]
    nodes = [10, 15, 20, 25,30]
    # open all data files for experiment, average data according the number of consumer
    # n is times (here n=5 because {1,3,5,7,9} ) for opening these files.
    # extact one colume data in all experimental result and draw one cave line according to average value in the number of consumer
    n = 0
    while n < 5:
        col = n
        # generate data for drawing cave lines
        conNum = []
        aveCNOI = [] #sent interest packets before
        aveDLY = [] # delay
        aveNOI = [] # the total number of interest packets trasmitted on network
        aveIPLR = [] # Packet loss rate of interest packet
        for dataFileName in dataFileList:
            # calculate average value according to the number of consumers in statResult-xx.dat
            # the returned results are saved in some List.
            conNodes, aveConNumOutInt, aveDelay, aveNumOutInt, aveIntPLR, aveDataPLR, avePLR = \
                aveStatResult(dataFilePath, dataFileDir, dataFileName, aveCol)
            startChar = dataFileName.index("-") + len("-")
            endChar = dataFileName.index(".", startChar)
            nodeNumber = dataFileName[startChar : endChar]
            # extract the data of the specified colume.
            # NOTE(review): conNum is initialised as a list but overwritten
            # with a scalar each iteration -- only the last file's value is
            # used for the legend label below; confirm that is intended.
            conNum = conNodes[col]
            aveCNOI.append(aveConNumOutInt[col])
            aveDLY.append(aveDelay[col])
            aveNOI.append(aveNumOutInt[col])
            aveIPLR.append(aveIntPLR[col])
        # draw one cave line according the specified number of consumers, for example 1,3,5,7, or 9
        labelChar = "CN=" + str(conNum)
        # NOTE(review): int((n+1)*5)/5 - 1 equals n only under Python 2
        # integer division; under Python 3 it yields a float and
        # colors[colorN] raises TypeError.  Python-2-only code.
        colorN = int((n+1)*5)/5 - 1
        color = colors[colorN]
        plt1.plot(nodes, aveCNOI, color=color, linestyle='solid', label = labelChar,marker='s',markerfacecolor=color, markersize=10)
        plt2.plot(nodes, aveDLY, color=color, linestyle='solid', label = labelChar,marker='s', markerfacecolor=color, markersize=10)
        plt3.plot(nodes, aveNOI, color=color, linestyle='solid',label=labelChar, marker='s', markerfacecolor=color, markersize=10)
        plt4.plot(nodes, aveIPLR, color=color, linestyle='solid', label = labelChar,marker='s', markerfacecolor=color, markersize=10)
        n = n+1
    plt1.set_title('The Number of Interest Packet')
    plt2.set_title('Average Delay')
    plt3.set_title('The Total Number of Interest Packet')
    plt4.set_title('Packet Loss Rate of Interest Packet')
    #plt1.xlabel('nodes')
    #plt1.ylabel('ISR')
    #plt1.title('Average Delay')
    plt1.legend(loc='upper left')
    plt2.legend(loc='upper right')
    plt3.legend(loc='upper left')
    plt4.legend(loc='upper left')
    plt.show()
if __name__ == '__main__':
    setLogLevel('info')
    # Directory of this script; data files live under <here>/data/<experiment>.
    dataFilePath = os.path.abspath(os.path.dirname(sys.argv[0]))
    # drawStatGraph(dataFilePath, 'oppo', 0)
    drawStatGraph(dataFilePath, 'bread', 0)
| iamxg/minindn-wifi | ndnwifi/averesultgraph-consumer-bread.py | Python | gpl-3.0 | 8,800 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# kate: space-indent on; indent-width 4; mixedindent off; indent-mode python;
# Component definitions for the "roundcube" meta-package group.  Each dict
# describes one selectable flavour: `packages` are installed for it,
# `depends` pulls in other flavours from this list, and `mainpackage`
# marks the default choice.
roundcube = [
    {'name':'common',
     'mainpackage':True,
     'shortdesc':'Installs the latest version of roundcube',
     'description':'',
     'packages':['roundcube-plugins', 'roundcube-plugins-extra', 'roundcube-amacube']
     },
    {'name':'mysql',
     'shortdesc':'Installs roundcube using MySQL database',
     'description':'',
     'depends':['common'],
     'packages':['roundcube-mysql']
     },
    {'name':'pgsql',
     'shortdesc':'Installs roundcube using PostgreSQL database',
     'description':'',
     'depends':['common'],
     'packages':['roundcube-pgsql']
     },
    {'name':'sqlite3',
     'shortdesc':'Installs roundcube using SQLite3 database',
     'description':'',
     'depends':['common'],
     'packages':['roundcube-sqlite3']
     },
    # 'none' removes the group: no packages and no conflict suppression.
    {'name':'none',
     'shortdesc':'Uninstalls all versions of roundcube',
     'description':'',
     'packages':[],
     'noconflicts':[]
     },
    ]
| aroth-arsoft/arsoft-meta-packages | grp_roundcube.py | Python | gpl-3.0 | 1,044 |
from selenium import webdriver
from django.test import LiveServerTestCase, TestCase
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
import datetime
from planner.models import Participation, Event, Occurrence, EventType, Role
from django.contrib.auth.models import User
import pytz
import time
tz = pytz.timezone("Europe/Stockholm")
def event(date):
    """Fixture helper: create a 'TestEvent' service at *date* with two
    participants (fixture user pk=2 attending, pk=3 undecided)."""
    e = Event.objects.create(title="TestEvent", event_type=EventType.objects.get(name="Gudstjänst"))
    # NOTE(review): `event` is assigned on the instance before save();
    # presumably the occurrence FK on Event -- confirm against the model.
    e.event = Occurrence.objects.create(start_time = tz.localize(date))
    Participation.objects.create(user = User.objects.get(pk=2), event = e, attending = "true", role = Role.objects.get(name = "Mötesledare"))
    Participation.objects.create(user = User.objects.get(pk=3), event = e, attending = "null", role = Role.objects.get(name = "Textläsare"))
    e.save()
def login(browser):
    """Submit the admin credentials through the login form."""
    fields = (('id_username', 'admin'), ('id_password', '1234'))
    for element_id, text in fields:
        browser.find_element_by_id(element_id).send_keys(text)
    browser.find_element_by_id('id_submit').click()
class BasicTest(StaticLiveServerTestCase):
    """Browser-level smoke tests (Selenium/Firefox) against the live server."""
    fixtures = ["fixture1.json"]

    def setUp(self):
        # Fresh browser per test; implicit wait gives elements time to appear.
        self.browser = webdriver.Firefox()
        self.browser.implicitly_wait(3)

    def tearDown(self):
        self.browser.quit()

    def test_login(self):
        """Admin can log in and sees the main menu entries."""
        self.browser.get(self.live_server_url)
        assert "Planering" in self.browser.title
        login(self.browser)
        menu = self.browser.find_element_by_id('main-menu').text
        assert 'Nytt evenemang' in menu
        assert 'Tabellvy' in menu

    def test_event_is_displayed(self):
        """An event created 17 days ahead shows up in the table view."""
        event(datetime.datetime.now() + datetime.timedelta(days = 17))
        self.browser.get(self.live_server_url)
        login(self.browser)
        t = self.browser.find_element_by_id("table-scroll").text
        time.sleep(10)
        print(t)
        # NOTE(review): event() creates the title "TestEvent", but this
        # asserts 'Testevenemang' -- looks like a mismatch; confirm.
        assert 'Testevenemang' in t
if __name__ == '__main__':
unittest.main() | danieka/churchplanner | planner/functional_tests.py | Python | gpl-3.0 | 1,831 |
from collections import deque
import re
class Formula(object):
    """
    Formula allows translation from a prefix-notation expression in a string
    to a complex number.  This is eventually to be replaced with a cython,
    c++, or openCL implementation as I'm sure the performance of this class
    is pretty horrible.
    """
    # Bare real-number token, e.g. "2" or "3.5".  Anchored at both ends:
    # the previous unanchored pattern also matched the prefix of complex
    # literals such as "3+4i", which then crashed in float().
    _NUM_RE = re.compile(r'^[\d.]+$')
    # Complex literal: real part, sign, imaginary magnitude, trailing 'i'.
    _COMPLEX_RE = re.compile(r'^([\d.]+)([+\-])([\d.]+)i$')

    def __init__(self, formulaString='+ ** z 2 c'):
        self.formulaString = formulaString
        self.functions = None

    def compile(self, c, z):
        """
        Compile:
        This method takes in the prefix statement and evaluates it for
        given values z and c, returning the (complex) result.
        """
        form = deque(self.formulaString.split(' '))
        return self.parse(form, c, z)

    def parse(self, queuestring, c, z=0.0):
        """Recursively consume tokens from the deque and evaluate them.

        Raises ValueError on an unrecognised token (the previous version
        returned the EOFError *class* instead of signalling an error).
        """
        value = queuestring.popleft()
        if (value == '+'):
            return self.parse(queuestring, c, z) + self.parse(queuestring, c, z)
        elif (value == '-'):
            return self.parse(queuestring, c, z) - self.parse(queuestring, c, z)
        elif (value == '*'):
            return self.parse(queuestring, c, z) * self.parse(queuestring, c, z)
        elif (value == '/'):
            return self.parse(queuestring, c, z) / self.parse(queuestring, c, z)
        elif (value == '^' or value == '**'):
            return self.parse(queuestring, c, z) ** self.parse(queuestring, c, z)
        elif (value == 'mod' or value == '%'):
            # NOTE: '%' is undefined for complex operands under Python 3.
            return self.parse(queuestring, c, z) % self.parse(queuestring, c, z)
        elif (value == 'rpart'):
            return complex(self.parse(queuestring, c, z)).real
        elif (value == 'ipart'):
            return complex(self.parse(queuestring, c, z)).imag
        elif (value == 'z'):
            return z
        elif (value == 'c'):
            return c
        elif self._NUM_RE.match(value):
            return float(value)
        else:
            m = self._COMPLEX_RE.match(value)
            if m:
                # The previous code lost the sign (split on [+-]) and
                # called float() with two arguments -- a TypeError.
                real, sign, imag = m.groups()
                imag_val = float(imag)
                if sign == '-':
                    imag_val = -imag_val
                return complex(float(real), imag_val)
            raise ValueError('unrecognised token in formula: %r' % (value,))
from recon.core.module import BaseModule
import codecs
import os
class Module(BaseModule):
    """Reporting module: dump one column of one table to a text file."""
    meta = {
        'name': 'List Creator',
        'author': 'Tim Tomes (@LaNMaSteR53)',
        'description': 'Creates a file containing a list of records from the database.',
        'options': (
            ('table', 'hosts', True, 'source table of data for the list'),
            ('column', 'ip_address', True, 'source column of data for the list'),
            ('unique', True, True, 'only return unique items from the dataset'),
            ('nulls', False, True, 'include nulls in the dataset'),
            ('filename', os.path.join(BaseModule.workspace, 'list.txt'), True, 'path and filename for output'),
        ),
    }

    def module_run(self):
        filename = self.options['filename']
        with codecs.open(filename, 'wb', encoding='utf-8') as outfile:
            # handle the source of information for the report
            column = self.options['column']
            table = self.options['table']
            # Optional filters: skip NULLs and/or deduplicate the column.
            nulls = ' WHERE "%s" IS NOT NULL' % (column) if not self.options['nulls'] else ''
            unique = 'DISTINCT ' if self.options['unique'] else ''
            values = (unique, column, table, nulls)
            # NOTE: table/column names are interpolated directly into the
            # SQL; they come from operator-set options, not external input.
            query = 'SELECT %s"%s" FROM "%s"%s ORDER BY 1' % values
            rows = self.query(query)
            # Write one item per line (NULLs become empty lines) and echo
            # each to the console.
            for row in [x[0] for x in rows]:
                row = row if row else ''
                outfile.write('%s\n' % (row))
                print(row)
        self.output('%d items added to \'%s\'.' % (len(rows), filename))
| praetorian-inc/pentestly | modules/reporting/list.py | Python | gpl-3.0 | 1,569 |
"""
Jowa Header, Code # 4
Battery Voltage
"""
### INCLUDES ###
from common import JOWA_BATT_THRESHOLD, LOW_BATTERY
### CONSTANTS ###
## Battery Voltage Units and Variables ##
# Battery voltage calculation notes:
# Vbat = ((R5+R4)/R5)*Vch3 = (2*R/R)*Vch3 =
# 2*Vch3 = 2*(ADC/ADCmax)*Vref
# Where:
# Vref, ADC Voltage Reference (Currently 2.5V)
# ADC, Current value of ADC (read DB value for that)
# ADCmax, Max value of ADC => 0xFFFFFF = 16777215
# Battery-voltage unit definition.  `formula` converts the raw ADC count
# to volts (see the calculation notes above); `adc_ref` and `adc_max`
# are substituted by the consumer of this header definition.
VOLTAGE = {
    'name': 'voltage',
    'formula': '2*adc_ref*(self/adc_max)',
    'measuring_units': 'V',
    'min_value': 0,
    'max_value': 5,
    # Alarm fires when the voltage drops below the shared threshold.
    'min_alarm': JOWA_BATT_THRESHOLD,
    'min_alarm_message': LOW_BATTERY,
}

## Headers Instance ##
HEADER = {
    'name': 'Battery',
    'groups': {
        'unit_list': [VOLTAGE]
    },
    'diagnostics': True
}
| Barmaley13/BA-Software | headers/jowa_4.py | Python | gpl-3.0 | 813 |
# 132. Palindrome Partitioning II QuestionEditorial Solution My Submissions
# Total Accepted: 58982
# Total Submissions: 256833
# Difficulty: Hard
# Given a string s, partition s such that every substring of the partition is a palindrome.
#
# Return the minimum cuts needed for a palindrome partitioning of s.
#
# For example, given s = "aab",
# Return 1 since the palindrome partitioning ["aa","b"] could be produced using 1 cut.
class Solution(object):
    def minCut(self, s):
        """
        Return the minimum number of cuts needed to partition s into
        palindromic substrings (LeetCode 132).

        Expand-around-center DP: dp[k] is the minimum number of cuts for
        the prefix s[:k+1].  Whenever s[left..right] is a palindrome,
        dp[right] can be dp[left-1] + 1 (or 0 if the palindrome starts at
        index 0).  O(n^2) time, O(n) space.

        The previous version left debug print() calls in the hot loops
        and used Python-2-only xrange.

        :type s: str
        :rtype: int
        """
        n = len(s)
        if n <= 1:
            return 0
        dp = [n + 1] * n
        for center in range(n):
            # Odd-length palindromes centered at `center`.
            radius = 0
            while center - radius >= 0 and center + radius < n \
                    and s[center - radius] == s[center + radius]:
                left = center - radius - 1
                dp[center + radius] = min(dp[center + radius], dp[left] + 1) if left >= 0 else 0
                radius += 1
            # Even-length palindromes centered between center and center+1.
            radius = 0
            while center - radius >= 0 and center + radius + 1 < n \
                    and s[center - radius] == s[center + radius + 1]:
                left = center - radius - 1
                dp[center + radius + 1] = min(dp[center + radius + 1], dp[left] + 1) if left >= 0 else 0
                radius += 1
        return dp[-1]
# Ad-hoc manual check (expected output: 1 for "aab" -> ["aa", "b"]).
if __name__ == "__main__":
    s = "aab"
    print(s)
    print(Solution().minCut(s))
| shawncaojob/LC | QUESTIONS/132_palindrome_partitioning_ii.py | Python | gpl-3.0 | 1,362 |
from ipctest import IpcTest
from gi.repository import i3ipc
import pytest
@pytest.mark.skip(reason='TODO')
class TestGetConfig(IpcTest):
    """GET_CONFIG reply should echo the config file i3 was started with."""
    def test_get_config(self, i3):
        # `i3` is the connection fixture provided by IpcTest.
        config = i3.get_config()
        assert isinstance(config, i3ipc.ConfigReply)
        with open('test/i3.config') as f:
            assert config.config == f.read()
| acrisci/i3ipc-glib | test/test_get_config.py | Python | gpl-3.0 | 347 |
from distutils.core import setup
import py2exe
setup(console=['newsputnik.py']) | maxikov/tatmon | trash/setup.py | Python | gpl-3.0 | 83 |
from django import forms
from poi_manager.models import Poi, PoiCategory
from mptt.forms import TreeNodeChoiceField
class PoiCategoryForm(forms.ModelForm):
    """Create/edit form for PoiCategory with an MPTT parent selector."""
    cat_name = forms.CharField(max_length=128, help_text="Please enter the category name.")
    # Root categories have no parent, hence required=False.
    parent = TreeNodeChoiceField(queryset=PoiCategory.objects.all(), required=False)

    class Meta:
        model = PoiCategory
        fields = ('cat_name', 'parent',)
class PoiForm(forms.ModelForm):
    """Create/edit form for a point of interest.

    NOTE(review): the help_text says "title of the page" but the field is
    the POI name -- probably copied from a tutorial; confirm wording.
    """
    name = forms.CharField(max_length=128, help_text="Please enter the title of the page.")
    # Ground floor (0) by default; blank allowed.
    floor_num = forms.IntegerField(initial=0, required=False)
    category = TreeNodeChoiceField(queryset=PoiCategory.objects.all())

    class Meta:
        model = Poi
        fields = ('name', 'floor_num', 'category',)
| indrz/indrz | indrz/poi_manager/forms.py | Python | gpl-3.0 | 769 |
#!/usr/bin/env python
#
# Copyright 2010 Communications Engineering Lab, KIT
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks, fft
import specest_gendpss
import specest_swig
## Estimates PSD using Thomson's multitaper method
# @param[in] N: Length of the FFT
# @param[in] NW: Time Bandwidth Product usually is of value 2, 2.5, 3.0, 3.5, or 4
# @param[in] K: Numbers of Tapers to use. K should be smaller than 2*NW
# @param[in] weighting: Which type of weighting to use for the eigenspectra. Choices can be 'unity','eigenvalues' or adaptive
class mtm(gr.hier_block2):
    """ Estimates PSD using Thomson's multitaper method.

    Python 2 / GNU Radio 3.7 era code (xrange, `raise E, msg` syntax).
    Stream of complex samples in; float PSD vectors of length N out.
    """
    def __init__(self, N=512 , NW=3 , K=5, weighting='adaptive', fftshift=False):
        gr.hier_block2.__init__(self, "mtm",
                                gr.io_signature(1, 1, gr.sizeof_gr_complex),
                                gr.io_signature(1, 1, gr.sizeof_float*N))
        self.check_parameters(N, NW, K)
        # Chop the sample stream into FFT-length vectors.
        self.s2v = blocks.stream_to_vector(gr.sizeof_gr_complex, N)
        self.connect(self, self.s2v)
        # One eigenspectrum branch per DPSS taper.
        dpss = specest_gendpss.gendpss(N=N, NW=NW, K=K)
        self.mtm = [eigenspectrum(dpss.dpssarray[i], fftshift) for i in xrange(K)]
        if weighting == 'adaptive':
            # Adaptive weighting is implemented in the C++/SWIG block.
            self.sum = specest_swig.adaptiveweighting_vff(N, dpss.lambdas)
            self.connect_mtm(K)
            self.connect(self.sum, self)
        elif weighting == 'unity':
            # Plain average of the K eigenspectra.
            self.sum = blocks.add_ff(N)
            self.divide = blocks.multiply_const_vff([1./K]*N)
            self.connect_mtm(K)
            self.connect(self.sum, self.divide, self)
        elif weighting == 'eigenvalues':
            # Weight each eigenspectrum by its eigenvalue, then normalise
            # by the eigenvalue sum.
            self.eigvalmulti = []
            self.lambdasum = 0
            for i in xrange(K):
                self.eigvalmulti.append(blocks.multiply_const_vff([dpss.lambdas[i]]*N))
                self.lambdasum += dpss.lambdas[i]
            self.divide = blocks.multiply_const_vff([1./self.lambdasum]*N)
            self.sum = blocks.add_ff(N)
            self.connect_mtm(K)
            self.connect(self.sum, self.divide, self)
        else:
            raise ValueError, 'weighting-type should be: adaptive, unity or eigenvalues'

    def connect_mtm(self, K):
        """ Connects up all the eigenspectrum calculators. """
        for i in xrange(K):
            self.connect(self.s2v, self.mtm[i])
            self.connect(self.mtm[i], (self.sum, i))

    ## Checks the validity of parameters
    # @param[in] N: Length of the FFT
    # @param[in] NW: Time Bandwidth Product
    # @param[in] K: Numbers of Tapers to used
    def check_parameters(self, N, NW, K):
        """ Checks the validity of parameters. """
        if NW < 1: raise ValueError, 'NW must be greater than or equal to 1'
        if K < 2: raise ValueError, 'K must be greater than or equal to 2'
        if (N % 1): raise TypeError, 'N has to be an integer'
        if N < 1: raise ValueError, 'N has to be greater than 1'
## Computes the eigenspectra for the multitaper spectrum estimator:
# data ----> multiplication dpss ----> FFT ----> square ----> output eigenspectrum
# @param[in] dpss: the dpss used as a data taper
class eigenspectrum(gr.hier_block2):
    """ Computes the eigenspectra for the multitaper spectrum estimator:
    data --> multiplication dpss --> FFT --> mag-square --> output eigenspectrum

    The DPSS taper is applied as the FFT window, so no separate multiply
    block is needed.
    """
    def __init__(self, dpss, fftshift=False):
        gr.hier_block2.__init__(self, "eigenspectrum",
                                gr.io_signature(1, 1, gr.sizeof_gr_complex*len(dpss)),
                                gr.io_signature(1, 1, gr.sizeof_float*len(dpss)))
        # The taper itself serves as the window of the vector FFT.
        self.window = dpss
        self.fft = fft.fft_vcc(len(dpss), True, self.window, fftshift)
        self.c2mag = blocks.complex_to_mag_squared(len(dpss))
        self.connect(self, self.fft, self.c2mag, self)
| robertwatsonbath/gr-specest-3.7 | python/specest_mtm.py | Python | gpl-3.0 | 4,472 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Tool to recreate all tables/procedures in the database.
"""
import argparse
import copy
import re
from os import path
from src.sqllist import re_sub
try:
import monetdb.sql as db
try:
import monetdb.exceptions as me
except ImportError:
# Older version
import monetdb.monetdb_exceptions as me
HAS_MONET = True
except ImportError:
HAS_MONET = False
try:
import psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
HAS_POSTGRESQL = True
except ImportError:
HAS_POSTGRESQL = False
import subprocess
class Recreator(object):
    """
    Tool to recreate all tables/procedures in the database.

    Works against either MonetDB or PostgreSQL; the SQL sources live in
    the project's sql/ directory and are lightly rewritten for PostgreSQL.
    """
    # all procedures to be recreated
    PROCEDURES = ['fill_temp_assoc_kind']
    # list of views to be recreated
    VIEWS = ['v_catalog_info']
    # list of tables to be recreated (creation order; dropped in reverse)
    TABLES = ['frequencybands', 'datasets', 'runs', 'images',
              'extractedsources', 'assocxtrsources', 'detections',
              'runningcatalog', 'runningcatalog_fluxes',
              'temp_associations', 'image_stats']

    def __init__(self, database="test", use_monet=True):
        self.monet = use_monet
        if use_monet:
            db_port = 50000
        db_autocommit = True
        db_host = "localhost"
        db_dbase = database
        self.database = database
        db_user = "monetdb"
        db_passwd = "monetdb"
        if use_monet:
            # self.conn is a MonetDB connection (has .cursor()/.execute()).
            self.conn = db.connect(hostname=db_host, database=db_dbase,
                                   username=db_user, password=db_passwd,
                                   port=db_port,
                                   autocommit=db_autocommit)
        else:
            # For PostgreSQL, self.conn is an autocommitting *cursor*.
            connect = psycopg2.connect(host=db_host, user=db_user,
                                       password=db_passwd, database=db_dbase)
            connect.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
            self.conn = connect.cursor()

    def get_table_exists(self, tab_name):
        """
        Check if the table exists in the database.
        """
        # NOTE: tab_name is interpolated into the SQL; values come from
        # the hard-coded TABLES list, not external input.
        if self.monet:
            cur = self.conn.cursor()
            cur.execute(
                "select count(*) from sys.tables where name = '%s';"
                % tab_name)
        else:
            cur = self.conn
            cur.execute(
                "select count(*) from pg_tables where tablename ='%s';"
                % tab_name)
        data = cur.fetchone()
        return data[0] == 1

    def drop_table(self, tab_name):
        """
        Drop table if it exists.
        """
        if self.get_table_exists(tab_name):
            if self.monet:
                self.conn.execute("drop table %s;" % tab_name)
            else:
                # PostgreSQL also drops the table's sequence and dependents.
                self.conn.execute("drop table %s cascade;" % tab_name)
                self.conn.execute(
                    "drop sequence if exists seq_%s cascade;" % tab_name)
            print('Table %s dropped' % tab_name)

    # For MonetDB-PostgreSQL convertion.
    # (regex, replacement) pairs applied to the MonetDB SQL sources.
    PG_SUBSTITUTOR = [
        (r'next value for "(.*?)"', r"nextval('\1'::regclass)"),
        #(r'^create sequence (.*?)$', ''),
        (r'as integer', ''),
        (r' double ', ' double precision '),
        (r'current_timestamp\(\)', 'current_timestamp'),
    ]

    def refactor_lines(self, sql_lines):
        """
        Prepare SQL code for MonetDB/PostgreSQL.
        Remove all comments, make necessary substitutions.
        """
        # Strip /* ... */ block comments and -- line comments first.
        sql_lines = re_sub(r'/\*.*?\*/', '', sql_lines, flags=re.DOTALL)
        sql_lines = re_sub(r'--.*$', '', sql_lines, flags=re.MULTILINE)
        if not self.monet:
            # Has to apply substitutions for PostgreSQL.
            for from_, to_ in self.PG_SUBSTITUTOR:
                sql_lines = re_sub(from_, to_, sql_lines,
                                   flags=re.MULTILINE | re.IGNORECASE)
        return sql_lines

    def create_table(self, tab_name):
        """
        Create a table with a given name.
        """
        self.run_sql_file("sql/tables/create.table.%s.sql" % tab_name)
        print("Table %s recreated" % tab_name)

    def create_view(self, view_name):
        """
        Create a view with a given name.
        """
        self.run_sql_file("sql/create.view.%s.sql" % view_name)
        print("View %s recreated" % view_name)

    def create_procedure(self, tab_name):
        """
        Create a procedure with a given name.
        Procedure SQL is located in the project files:
        sql/pg/create.procedure.NAME.sql (PostrgeSQL) or
        sql/create.procedure.NAME.sql (MonetDB).
        """
        if self.monet:
            sql_file = open("sql/create.procedure.%s.sql" % tab_name, 'r')
        else:
            sql_file = open("sql/pg/create.procedure.%s.sql" % tab_name, 'r')
        sql_lines = ''.join(sql_file.readlines())
        sql_lines = self.refactor_lines(sql_lines)
        #print sql_lines
        self.conn.execute(sql_lines)
        print("Procedure %s recreated" % tab_name)

    def run_sql_file(self, filename):
        """
        Execute SQL from file (with proper substitutions for psql.
        """
        sql_file = open(filename, 'r')
        sql_lines = ''.join(sql_file.readlines())
        sql_lines = self.refactor_lines(sql_lines)
        self.conn.execute(sql_lines)

    def reload_frequencies(self):
        """
        Load frequencies tables from file "sql/tables/freq.dat".
        Use bulk load from MonetDB/PostgreSQL.
        """
        if self.monet:
            self.conn.execute("copy into frequencybands from '%s';" %
                              path.realpath('sql/tables/freq.dat'))
        else:
            # psycopg2 cursors cannot stream COPY FROM STDIN here, so the
            # data is piped through the psql command-line client instead.
            sp = subprocess.Popen(['psql', '-U', 'monetdb',
                                   '-d', self.database, '-c',
                                   "copy frequencybands " \
                                   "from stdin delimiter '|'" \
                                   " null 'null';"],
                                  stdout=subprocess.PIPE,
                                  stdin=subprocess.PIPE)
            for line in open('sql/tables/freq.dat', 'r').readlines():
                sp.stdin.write(line)
            sp.communicate()
        print('Frequencies loaded')

    def run_set(self, aset, subroutine):
        # Apply `subroutine` to every item (e.g. drop_table over TABLES).
        for item in aset:
            subroutine(item)

    def run(self):
        """Drop and recreate all tables, procedures and views, then reload
        the frequency data.  Returns 0 on success."""
        # Collect the "object does not exist" error types available for
        # the installed drivers so the drop statements can ignore them.
        error_set = []
        if HAS_MONET:
            error_set.append(me.OperationalError)
        if HAS_POSTGRESQL:
            error_set.append(psycopg2.ProgrammingError)
        error_set = tuple(error_set)
        try:
            for procedure in self.PROCEDURES:
                if self.monet:
                    try:
                        self.conn.execute("drop procedure %s;" %
                                          procedure)
                        print("drop procedure %s;" % procedure)
                    except error_set:
                        pass
            for view in self.VIEWS:
                try:
                    self.conn.execute("drop view %s;" % view)
                except error_set:
                    pass
                print("drop view %s;" % view)
            # Drop in reverse creation order so FK dependencies resolve.
            drop_tables = copy.copy(self.TABLES)
            drop_tables.reverse()
            print('=' * 20)
            self.run_set(drop_tables, self.drop_table)
            print('=' * 20)
            self.run_set(self.TABLES, self.create_table)
            if not self.monet:
                self.run_sql_file('sql/pg/indices.sql')
                print('Indices recreated')
                self.run_sql_file('sql/pg/pg_comments.sql')
                print('Comments added')
            print('=' * 20)
            self.run_set(self.PROCEDURES, self.create_procedure)
            self.run_set(self.VIEWS, self.create_view)
            self.reload_frequencies()
        # NOTE(review): if monetdb.sql is not installed (HAS_MONET False),
        # the name `db` is undefined and this clause itself raises
        # NameError; also `raise exc` loses the original traceback in
        # Python 2 -- plain `raise` would preserve it.  Confirm intent.
        except db.Error as exc:
            raise exc
        self.conn.close()
        return 0
if __name__ == '__main__':
    # Command-line entry point: parse options and rebuild the database.
    # WARNING: this destroys all data in the target database.
    parser = argparse.ArgumentParser(description="""
    ***Database recreator.
    ***Created by A. Mints (2012).
    *WARNING!!!* Clears all data from the database.""",
    formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-D', '--database', type=str, default='test',
                        help='Database to recreate')
    parser.add_argument('-M', '--monetdb', action="store_true",
                        default=False,
                        help='Use MonetDB instead of PostgreSQL')
    args = parser.parse_args()
    recr = Recreator(use_monet=args.monetdb, database=args.database)
    recr.run()
| kernsuite-debian/lofar | CEP/GSM/bremen/recreate_tables.py | Python | gpl-3.0 | 8,597 |
# -*-Python-*-
################################################################################
#
# File: frontend.py
# RCS: $Header: $
# Description: frontend:
# responsibility:
# init backend
# init processors
# handle two query types:
# 1) metadata
# response: metadata from backend and processors
# 2) informational
# response: proccess(proc(query))(backend(info(query)))
# Author: Staal Vinterbo
# Created: Wed May 8 16:28:56 2013
# Modified: Sun Jun 23 14:31:31 2013 (Staal Vinterbo) staal@mats
# Language: Python
# Package: N/A
# Status: Experimental
#
# (c) Copyright 2013, Staal Vinterbo, all rights reserved.
#
# frontend.py is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# frontend.py is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with frontend.py; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
################################################################################
from backend import init_backend, query_backend
def init_frontend(database, processors, reinit=False):
    """Build the frontend state: backend handle, processors and merged metadata.

    Returns a dict {'backend': ..., 'processors': ..., 'meta': ...} where
    'meta' is the backend metadata extended with each processor's metadata.

    NOTE(review): the reinit=True branch references the local name `backend`
    before assignment and calls reinit_backend(), which is never imported,
    so it always fails and is reported as 'Could not initialize backend' --
    confirm the intended behaviour before relying on reinit.
    """
    if len(processors) == 0:
        raise Exception('Failed to initialize frontend: no processors given.')
    try:
        if reinit:
            backend = reinit_backend(backend)
        else:
            backend = init_backend(database)
    except Exception as e:
        raise Exception('Could not initialize backend: ' + str(e))
    # Expose only each processor's metadata to clients.
    pdict = {}
    for (k,v) in processors.items():
        pdict[k] = v['meta']
    meta = dict(backend['meta'])
    meta['processors'] = pdict
    return {'backend' : backend, 'processors' : processors, 'meta' : meta}
def handle_query(frontend, eps, query):
    """Answer an informational query against the frontend.

    frontend -- dict produced by init_frontend
    eps      -- privacy risk budget; must be positive
    query    -- ((dataset_name, selection, projection),
                 (processor_name, parameters))

    Returns the processor output; raises Exception with a human-readable
    message on any failure.
    """
    if eps <= 0:
        raise Exception('Privacy risk must be positive.')
    try:
        (ddesc, proc) = query
        (pname, parms) = proc
        (dname, sel, pro) = ddesc
    except Exception:
        raise Exception('Malformed data query.')
    # Check that the data set exists and that the processor is allowed on it.
    datasets = frontend['backend']['meta']['datasets']
    if dname not in datasets:
        raise Exception('Requested data set not available.')
    if pname not in datasets[dname]['processors']:
        raise Exception('Requested information not appropriate for data set.')
    try:
        proc = frontend['processors'][pname]
    except Exception as e:
        raise Exception('Could not find query type: ' + str(e))
    try:
        # A processor may rewrite the selection/projection before the data
        # query runs.  Bug fix: dict.has_key() was removed in Python 3 --
        # use the `in` operator instead.
        if 'query_edit' in proc:
            parms += [('orig_query', {'predicate' :sel, 'attributes' : pro})]
            (sel, pro) = proc['query_edit'](sel, pro)
            ddesc = (dname, sel, pro)
    except Exception as e:
        raise Exception('Query edit failed: ' + str(e))
    try:
        res = query_backend(frontend['backend'], ddesc)
    except Exception as e:
        raise Exception('Data query failed: ' + str(e))
    try:
        pres = proc['f'](eps, parms, res)
    except Exception as e:
        raise Exception('Information processing failed: ' + str(e))
    return pres
| laats/dpdq | src/qp/frontend.py | Python | gpl-3.0 | 3,696 |
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
from uitest.framework import UITestCase
import time
class tdf92611(UITestCase):
    """Regression test: opening and closing the bibliography window."""

    def test_launch_and_close_bibliography(self):
        self.ui_test.create_doc_in_start_center("writer")
        # Fire each UNO command, pausing briefly so the window settles.
        for command in (".uno:BibliographyComponent", ".uno:CloseWin"):
            self.xUITest.executeCommand(command)
            time.sleep(2)
        self.ui_test.close_doc()
# vim: set shiftwidth=4 softtabstop=4 expandtab:
| beppec56/core | uitest/writer_tests/tdf92611.py | Python | gpl-3.0 | 647 |
"""djangochat URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
# Route the admin interface explicitly; everything else is delegated to
# the chatdemo app's own URLconf.
urlpatterns = [
    url(r'^', include('chatdemo.urls')),
    url(r'^admin/', admin.site.urls),
]
| ploggingdev/djangochat | djangochat/urls.py | Python | gpl-3.0 | 817 |
# -*- coding: utf-8 -*-
import io
import pytest
import re
from collections import namedtuple
from unittest import mock
from toot import console, User, App, http
from toot.exceptions import ConsoleError
from tests.utils import MockResponse
# Shared fixtures: a fake registered app and logged-in user passed to
# console.run_command in every test below.
app = App('habunek.com', 'https://habunek.com', 'foo', 'bar')
user = User('habunek.com', 'ivan@habunek.com', 'xxx')
# Stand-in for uuid.uuid4() return values; only the .hex attribute is used.
MockUuid = namedtuple("MockUuid", ["hex"])
def uncolorize(text):
    """Strip ANSI escape (color) sequences from *text*."""
    ansi_sequence = re.compile(r'\x1b[^m]*m')
    return ansi_sequence.sub('', text)
def test_print_usage(capsys):
    """The usage screen mentions the program name and tagline."""
    console.print_usage()
    captured = capsys.readouterr()
    assert "toot - a Mastodon CLI client" in captured.out
@mock.patch('uuid.uuid4')
@mock.patch('toot.http.post')
def test_post_defaults(mock_post, mock_uuid, capsys):
    """Posting with no options uses public visibility, no media, and sends
    the generated UUID hex as the Idempotency-Key header."""
    mock_uuid.return_value = MockUuid("rock-on")
    mock_post.return_value = MockResponse({
        'url': 'https://habunek.com/@ihabunek/1234567890'
    })
    console.run_command(app, user, 'post', ['Hello world'])
    mock_post.assert_called_once_with(app, user, '/api/v1/statuses', {
        'status': 'Hello world',
        'visibility': 'public',
        'media_ids[]': [],
        'sensitive': "false",
        'spoiler_text': None,
        'in_reply_to_id': None,
        'language': None,
        'scheduled_at': None,
    }, headers={"Idempotency-Key": "rock-on"})
    out, err = capsys.readouterr()
    assert 'Toot posted' in out
    assert 'https://habunek.com/@ihabunek/1234567890' in out
    assert not err
@mock.patch('uuid.uuid4')
@mock.patch('toot.http.post')
def test_post_with_options(mock_post, mock_uuid, capsys):
    """Every supported command-line option is forwarded in the payload."""
    mock_uuid.return_value = MockUuid("up-the-irons")
    args = [
        'Hello world',
        '--visibility', 'unlisted',
        '--sensitive',
        '--spoiler-text', 'Spoiler!',
        '--reply-to', '123a',
        '--language', 'hrv',
    ]
    mock_post.return_value = MockResponse({
        'url': 'https://habunek.com/@ihabunek/1234567890'
    })
    console.run_command(app, user, 'post', args)
    mock_post.assert_called_once_with(app, user, '/api/v1/statuses', {
        'status': 'Hello world',
        'media_ids[]': [],
        'visibility': 'unlisted',
        'sensitive': "true",
        'spoiler_text': "Spoiler!",
        'in_reply_to_id': '123a',
        'language': 'hrv',
        'scheduled_at': None,
    }, headers={"Idempotency-Key": "up-the-irons"})
    out, err = capsys.readouterr()
    assert 'Toot posted' in out
    assert 'https://habunek.com/@ihabunek/1234567890' in out
    assert not err
def test_post_invalid_visibility(capsys):
    """An unknown visibility value makes argparse abort with an error."""
    with pytest.raises(SystemExit):
        console.run_command(app, user, 'post',
                            ['Hello world', '--visibility', 'foo'])
    _, err = capsys.readouterr()
    assert "invalid visibility value: 'foo'" in err
def test_post_invalid_media(capsys):
    """A non-existent media file makes argparse abort with an error."""
    with pytest.raises(SystemExit):
        console.run_command(app, user, 'post',
                            ['Hello world', '--media', 'does_not_exist.jpg'])
    _, err = capsys.readouterr()
    assert "can't open 'does_not_exist.jpg'" in err
@mock.patch('toot.http.delete')
def test_delete(mock_delete, capsys):
    """Deleting a status hits the right endpoint and reports success."""
    console.run_command(app, user, 'delete', ['12321'])
    mock_delete.assert_called_once_with(app, user, '/api/v1/statuses/12321')
    captured = capsys.readouterr()
    assert 'Status deleted' in captured.out
    assert not captured.err
@mock.patch('toot.http.get')
def test_timeline(mock_get, monkeypatch, capsys):
    """A plain home-timeline status renders author, date, wrapped content
    and the status ID."""
    mock_get.return_value = MockResponse([{
        'id': '111111111111111111',
        'account': {
            'display_name': 'Frank Zappa 🎸',
            'acct': 'fz'
        },
        'created_at': '2017-04-12T15:53:18.174Z',
        'content': "<p>The computer can't tell you the emotional story. It can give you the exact mathematical design, but what's missing is the eyebrows.</p>",
        'reblog': None,
        'in_reply_to_id': None,
        'media_attachments': [],
    }])
    console.run_command(app, user, 'timeline', ['--once'])
    mock_get.assert_called_once_with(app, user, '/api/v1/timelines/home?limit=10', None)
    out, err = capsys.readouterr()
    lines = out.split("\n")
    assert "Frank Zappa 🎸" in lines[1]
    assert "@fz" in lines[1]
    assert "2017-04-12 15:53" in lines[1]
    assert (
        "The computer can't tell you the emotional story. It can give you the "
        "exact mathematical design, but\nwhat's missing is the eyebrows." in out)
    assert "111111111111111111" in lines[-3]
    assert err == ""
@mock.patch('toot.http.get')
def test_timeline_with_re(mock_get, monkeypatch, capsys):
    """A reblogged status shows the rebloggee's content plus a reblog marker."""
    mock_get.return_value = MockResponse([{
        'id': '111111111111111111',
        'created_at': '2017-04-12T15:53:18.174Z',
        'account': {
            'display_name': 'Frank Zappa',
            'acct': 'fz'
        },
        'reblog': {
            'account': {
                'display_name': 'Johnny Cash',
                'acct': 'jc'
            },
            'content': "<p>The computer can't tell you the emotional story. It can give you the exact mathematical design, but what's missing is the eyebrows.</p>",
            'media_attachments': [],
        },
        'in_reply_to_id': '111111111111111110',
        'media_attachments': [],
    }])
    console.run_command(app, user, 'timeline', ['--once'])
    mock_get.assert_called_once_with(app, user, '/api/v1/timelines/home?limit=10', None)
    out, err = capsys.readouterr()
    lines = out.split("\n")
    assert "Frank Zappa" in lines[1]
    assert "@fz" in lines[1]
    assert "2017-04-12 15:53" in lines[1]
    assert (
        "The computer can't tell you the emotional story. It can give you the "
        "exact mathematical design, but\nwhat's missing is the eyebrows." in out)
    assert "111111111111111111" in lines[-3]
    assert "↻ Reblogged @jc" in lines[-3]
    assert err == ""
@mock.patch('toot.http.get')
def test_thread(mock_get, monkeypatch, capsys):
    """`thread` fetches the status and its context, then prints ancestors,
    the status itself and descendants in chronological order."""
    mock_get.side_effect = [
        MockResponse({
            'id': '111111111111111111',
            'account': {
                'display_name': 'Frank Zappa',
                'acct': 'fz'
            },
            'created_at': '2017-04-12T15:53:18.174Z',
            'content': "my response in the middle",
            'reblog': None,
            'in_reply_to_id': '111111111111111110',
            'media_attachments': [],
        }),
        MockResponse({
            'ancestors': [{
                'id': '111111111111111110',
                'account': {
                    'display_name': 'Frank Zappa',
                    'acct': 'fz'
                },
                'created_at': '2017-04-12T15:53:18.174Z',
                'content': "original content",
                'media_attachments': [],
                'reblog': None,
                'in_reply_to_id': None}],
            'descendants': [{
                'id': '111111111111111112',
                'account': {
                    'display_name': 'Frank Zappa',
                    'acct': 'fz'
                },
                'created_at': '2017-04-12T15:53:18.174Z',
                'content': "response message",
                'media_attachments': [],
                'reblog': None,
                'in_reply_to_id': '111111111111111111'}],
        }),
    ]
    console.run_command(app, user, 'thread', ['111111111111111111'])
    calls = [
        mock.call(app, user, '/api/v1/statuses/111111111111111111'),
        mock.call(app, user, '/api/v1/statuses/111111111111111111/context'),
    ]
    mock_get.assert_has_calls(calls, any_order=False)
    out, err = capsys.readouterr()
    assert not err
    # Display order
    assert out.index('original content') < out.index('my response in the middle')
    assert out.index('my response in the middle') < out.index('response message')
    assert "original content" in out
    assert "my response in the middle" in out
    assert "response message" in out
    assert "Frank Zappa" in out
    assert "@fz" in out
    assert "111111111111111111" in out
    assert "In reply to" in out
@mock.patch('toot.http.get')
def test_reblogged_by(mock_get, monkeypatch, capsys):
    """`reblogged_by` lists each booster's display name and account."""
    mock_get.return_value = MockResponse([{
        'display_name': 'Terry Bozzio',
        'acct': 'bozzio@drummers.social',
    }, {
        'display_name': 'Dweezil',
        'acct': 'dweezil@zappafamily.social',
    }])
    console.run_command(app, user, 'reblogged_by', ['111111111111111111'])
    calls = [
        mock.call(app, user, '/api/v1/statuses/111111111111111111/reblogged_by'),
    ]
    mock_get.assert_has_calls(calls, any_order=False)
    out, err = capsys.readouterr()
    # Display order
    expected = "\n".join([
        "Terry Bozzio",
        " @bozzio@drummers.social",
        "Dweezil",
        " @dweezil@zappafamily.social",
        "",
    ])
    assert out == expected
@mock.patch('toot.http.post')
def test_upload(mock_post, capsys):
    """Uploading a file posts it to /api/v1/media as an open file object
    and prints an upload progress message."""
    mock_post.return_value = MockResponse({
        'id': 123,
        'url': 'https://bigfish.software/123/456',
        'preview_url': 'https://bigfish.software/789/012',
        'text_url': 'https://bigfish.software/345/678',
        'type': 'image',
    })
    console.run_command(app, user, 'upload', [__file__])
    # Bug fix: the original line was a bare comparison expression
    # (`mock_post.call_count == 1`) which asserted nothing.
    assert mock_post.call_count == 1
    args, kwargs = mock_post.call_args
    assert args == (app, user, '/api/v1/media')
    assert isinstance(kwargs['files']['file'], io.BufferedReader)
    out, err = capsys.readouterr()
    assert "Uploading media" in out
    assert __file__ in out
@mock.patch('toot.http.get')
def test_search(mock_get, capsys):
    """`search` queries /api/v2/search and prints hashtag and account hits."""
    mock_get.return_value = MockResponse({
        'hashtags': [
            {
                'history': [],
                'name': 'foo',
                'url': 'https://mastodon.social/tags/foo'
            },
            {
                'history': [],
                'name': 'bar',
                'url': 'https://mastodon.social/tags/bar'
            },
            {
                'history': [],
                'name': 'baz',
                'url': 'https://mastodon.social/tags/baz'
            },
        ],
        'accounts': [{
            'acct': 'thequeen',
            'display_name': 'Freddy Mercury'
        }, {
            'acct': 'thequeen@other.instance',
            'display_name': 'Mercury Freddy'
        }],
        'statuses': [],
    })
    console.run_command(app, user, 'search', ['freddy'])
    mock_get.assert_called_once_with(app, user, '/api/v2/search', {
        'q': 'freddy',
        'resolve': False,
    })
    out, err = capsys.readouterr()
    assert "Hashtags:\n#foo, #bar, #baz" in out
    assert "Accounts:" in out
    assert "@thequeen Freddy Mercury" in out
    assert "@thequeen@other.instance Mercury Freddy" in out
@mock.patch('toot.http.post')
@mock.patch('toot.http.get')
def test_follow(mock_get, mock_post, capsys):
    """Following picks the exact account match from search results."""
    search_results = [
        {'id': 123, 'acct': 'blixa@other.acc'},
        {'id': 321, 'acct': 'blixa'},
    ]
    mock_get.return_value = MockResponse(search_results)
    mock_post.return_value = MockResponse()
    console.run_command(app, user, 'follow', ['blixa'])
    mock_get.assert_called_once_with(app, user, '/api/v1/accounts/search', {'q': 'blixa'})
    mock_post.assert_called_once_with(app, user, '/api/v1/accounts/321/follow')
    assert "You are now following blixa" in capsys.readouterr()[0]
@mock.patch('toot.http.get')
def test_follow_not_found(mock_get, capsys):
    """An empty search response raises a ConsoleError."""
    mock_get.return_value = MockResponse()
    with pytest.raises(ConsoleError) as exc_info:
        console.run_command(app, user, 'follow', ['blixa'])
    mock_get.assert_called_once_with(app, user, '/api/v1/accounts/search', {'q': 'blixa'})
    assert str(exc_info.value) == "Account not found"
@mock.patch('toot.http.post')
@mock.patch('toot.http.get')
def test_unfollow(mock_get, mock_post, capsys):
    """Unfollowing picks the exact account match from search results."""
    search_results = [
        {'id': 123, 'acct': 'blixa@other.acc'},
        {'id': 321, 'acct': 'blixa'},
    ]
    mock_get.return_value = MockResponse(search_results)
    mock_post.return_value = MockResponse()
    console.run_command(app, user, 'unfollow', ['blixa'])
    mock_get.assert_called_once_with(app, user, '/api/v1/accounts/search', {'q': 'blixa'})
    mock_post.assert_called_once_with(app, user, '/api/v1/accounts/321/unfollow')
    assert "You are no longer following blixa" in capsys.readouterr()[0]
@mock.patch('toot.http.get')
def test_unfollow_not_found(mock_get, capsys):
    """An empty search response raises a ConsoleError."""
    mock_get.return_value = MockResponse([])
    with pytest.raises(ConsoleError) as exc_info:
        console.run_command(app, user, 'unfollow', ['blixa'])
    mock_get.assert_called_once_with(app, user, '/api/v1/accounts/search', {'q': 'blixa'})
    assert str(exc_info.value) == "Account not found"
@mock.patch('toot.http.get')
def test_whoami(mock_get, capsys):
    """`whoami` verifies credentials and prints the account summary."""
    mock_get.return_value = MockResponse({
        'acct': 'ihabunek',
        'avatar': 'https://files.mastodon.social/accounts/avatars/000/046/103/original/6a1304e135cac514.jpg?1491312434',
        'avatar_static': 'https://files.mastodon.social/accounts/avatars/000/046/103/original/6a1304e135cac514.jpg?1491312434',
        'created_at': '2017-04-04T13:23:09.777Z',
        'display_name': 'Ivan Habunek',
        'followers_count': 5,
        'following_count': 9,
        'header': '/headers/original/missing.png',
        'header_static': '/headers/original/missing.png',
        'id': 46103,
        'locked': False,
        'note': 'A developer.',
        'statuses_count': 19,
        'url': 'https://mastodon.social/@ihabunek',
        'username': 'ihabunek'
    })
    console.run_command(app, user, 'whoami', [])
    mock_get.assert_called_once_with(app, user, '/api/v1/accounts/verify_credentials')
    out, err = capsys.readouterr()
    out = uncolorize(out)
    assert "@ihabunek Ivan Habunek" in out
    assert "A developer." in out
    assert "https://mastodon.social/@ihabunek" in out
    assert "ID: 46103" in out
    assert "Since: 2017-04-04 @ 13:23:09" in out
    assert "Followers: 5" in out
    assert "Following: 9" in out
    assert "Statuses: 19" in out
@mock.patch('toot.http.get')
def test_notifications(mock_get, capsys):
    """All four notification types (follow, mention, reblog, favourite)
    render with separators in the exact expected layout."""
    mock_get.return_value = MockResponse([{
        'id': '1',
        'type': 'follow',
        'created_at': '2019-02-16T07:01:20.714Z',
        'account': {
            'display_name': 'Frank Zappa',
            'acct': 'frank@zappa.social',
        },
    }, {
        'id': '2',
        'type': 'mention',
        'created_at': '2017-01-12T12:12:12.0Z',
        'account': {
            'display_name': 'Dweezil Zappa',
            'acct': 'dweezil@zappa.social',
        },
        'status': {
            'id': '111111111111111111',
            'account': {
                'display_name': 'Dweezil Zappa',
                'acct': 'dweezil@zappa.social',
            },
            'created_at': '2017-04-12T15:53:18.174Z',
            'content': "<p>We still have fans in 2017 @fan123</p>",
            'reblog': None,
            'in_reply_to_id': None,
            'media_attachments': [],
        },
    }, {
        'id': '3',
        'type': 'reblog',
        'created_at': '1983-11-03T03:03:03.333Z',
        'account': {
            'display_name': 'Terry Bozzio',
            'acct': 'terry@bozzio.social',
        },
        'status': {
            'id': '1234',
            'account': {
                'display_name': 'Zappa Fan',
                'acct': 'fan123@zappa-fans.social'
            },
            'created_at': '1983-11-04T15:53:18.174Z',
            'content': "<p>The Black Page, a masterpiece</p>",
            'reblog': None,
            'in_reply_to_id': None,
            'media_attachments': [],
        },
    }, {
        'id': '4',
        'type': 'favourite',
        'created_at': '1983-12-13T01:02:03.444Z',
        'account': {
            'display_name': 'Zappa Old Fan',
            'acct': 'fan9@zappa-fans.social',
        },
        'status': {
            'id': '1234',
            'account': {
                'display_name': 'Zappa Fan',
                'acct': 'fan123@zappa-fans.social'
            },
            'created_at': '1983-11-04T15:53:18.174Z',
            'content': "<p>The Black Page, a masterpiece</p>",
            'reblog': None,
            'in_reply_to_id': None,
            'media_attachments': [],
        },
    }])
    console.run_command(app, user, 'notifications', [])
    mock_get.assert_called_once_with(app, user, '/api/v1/notifications', {'exclude_types[]': [], 'limit': 20})
    out, err = capsys.readouterr()
    out = uncolorize(out)
    width = 100
    assert not err
    assert out == "\n".join([
        "─" * width,
        "Frank Zappa @frank@zappa.social now follows you",
        "─" * width,
        "Dweezil Zappa @dweezil@zappa.social mentioned you in",
        "Dweezil Zappa @dweezil@zappa.social 2017-04-12 15:53",
        "",
        "We still have fans in 2017 @fan123",
        "",
        "ID 111111111111111111 ",
        "─" * width,
        "Terry Bozzio @terry@bozzio.social reblogged your status",
        "Zappa Fan @fan123@zappa-fans.social 1983-11-04 15:53",
        "",
        "The Black Page, a masterpiece",
        "",
        "ID 1234 ",
        "─" * width,
        "Zappa Old Fan @fan9@zappa-fans.social favourited your status",
        "Zappa Fan @fan123@zappa-fans.social 1983-11-04 15:53",
        "",
        "The Black Page, a masterpiece",
        "",
        "ID 1234 ",
        "─" * width,
        "",
    ])
@mock.patch('toot.http.get')
def test_notifications_empty(mock_get, capsys):
    """With no notifications the command prints a placeholder message."""
    mock_get.return_value = MockResponse([])
    console.run_command(app, user, 'notifications', [])
    mock_get.assert_called_once_with(app, user, '/api/v1/notifications', {'exclude_types[]': [], 'limit': 20})
    captured = capsys.readouterr()
    assert not captured.err
    assert uncolorize(captured.out) == "No notification\n"
@mock.patch('toot.http.post')
def test_notifications_clear(mock_post, capsys):
    """--clear posts to the clear endpoint and prints a confirmation."""
    console.run_command(app, user, 'notifications', ['--clear'])
    captured = capsys.readouterr()
    mock_post.assert_called_once_with(app, user, '/api/v1/notifications/clear')
    assert not captured.err
    assert uncolorize(captured.out) == 'Cleared notifications\n'
def u(user_id, access_token="abc"):
    """Build a config-file user entry from a "username@instance" string."""
    username, instance = user_id.split("@")
    return dict(
        instance=instance,
        username=username,
        access_token=access_token,
    )
@mock.patch('toot.config.save_config')
@mock.patch('toot.config.load_config')
def test_logout(mock_load, mock_save, capsys):
    """Logging out the active user removes it from the saved config and
    clears active_user."""
    mock_load.return_value = {
        "users": {
            "king@gizzard.social": u("king@gizzard.social"),
            "lizard@wizard.social": u("lizard@wizard.social"),
        },
        "active_user": "king@gizzard.social",
    }
    console.run_command(app, user, "logout", ["king@gizzard.social"])
    mock_save.assert_called_once_with({
        'users': {
            'lizard@wizard.social': u("lizard@wizard.social")
        },
        'active_user': None
    })
    out, err = capsys.readouterr()
    assert "✓ User king@gizzard.social logged out" in out
@mock.patch('toot.config.save_config')
@mock.patch('toot.config.load_config')
def test_activate(mock_load, mock_save, capsys):
    """Activating a user keeps both entries and updates active_user."""
    mock_load.return_value = {
        "users": {
            "king@gizzard.social": u("king@gizzard.social"),
            "lizard@wizard.social": u("lizard@wizard.social"),
        },
        "active_user": "king@gizzard.social",
    }
    console.run_command(app, user, "activate", ["lizard@wizard.social"])
    mock_save.assert_called_once_with({
        'users': {
            "king@gizzard.social": u("king@gizzard.social"),
            'lizard@wizard.social': u("lizard@wizard.social")
        },
        'active_user': "lizard@wizard.social"
    })
    out, err = capsys.readouterr()
    assert "✓ User lizard@wizard.social active" in out
| ihabunek/toot | tests/test_console.py | Python | gpl-3.0 | 20,144 |
import json
from pimlico.datatypes.corpora.json import JsonDocumentType
from pimlico.cli.browser.tools.formatter import DocumentBrowserFormatter
class JsonFormatter(DocumentBrowserFormatter):
    """Document-browser formatter that pretty-prints JSON documents."""
    DATATYPE = JsonDocumentType()
    def format_document(self, doc):
        """Return the document's parsed JSON data, indented for display."""
        return json.dumps(doc.data, indent=4)
| markgw/pimlico | src/python/pimlico/datatypes/corpora/formatters/json.py | Python | gpl-3.0 | 313 |
import sys
import os
import numpy as np
sys.path.append(os.path.join(os.getcwd(), ".."))
from run_utils import run_kmc, parse_input
from ParameterJuggler import ParameterSet
def main():
    # Parse the command line into a run controller, output path, app
    # binary, config file and worker count.
    controller, path, app, cfg, n_procs = parse_input(sys.argv)
    # Sweep alpha over [0.5, 2] in 16 steps; the regex locates the value
    # to rewrite in the config file.
    alpha_values = ParameterSet(cfg, "alpha\s*=\s*(.*)\;")
    alpha_values.initialize_set(np.linspace(0.5, 2, 16))
    # Fixed confining-surface height and diffusion setting for this study.
    heights = ParameterSet(cfg, "confiningSurfaceHeight\s*=\s*(.*)\;")
    heights.initialize_set([20.])
    diffusions = ParameterSet(cfg, "diffuse\s*=\s*(.*)\;")
    diffusions.initialize_set([3])
    controller.register_parameter_set(alpha_values)
    controller.register_parameter_set(heights)
    controller.register_parameter_set(diffusions)
    # 20 repeats per parameter combination, shuffled across n_procs workers.
    controller.set_repeats(20)
    controller.run(run_kmc, path, app, cfg, ask=False, n_procs=n_procs, shuffle=True)
if __name__ == "__main__":
    main()
| jorgehog/Deux-kMC | scripts/autocorr/run.py | Python | gpl-3.0 | 881 |
__author__ = 'student'
import turtle
import math
turtle.shape('turtle')  # draw the cursor as a turtle icon
def duga(r):
    """Draw a half circle as 20 chords of length r, turning 9° after each."""
    for _ in range(20):
        turtle.forward(r)
        turtle.left(9)
def krug(r):
    """Draw a full circle as 72 chords of length r, turning 5° after each."""
    for _ in range(72):
        turtle.forward(r)
        turtle.left(5)
turtle.penup()
turtle.forward(200) # move to the head's starting point
turtle.left(90)
turtle.pendown()
turtle.pencolor('black')
turtle.fillcolor('yellow')
turtle.begin_fill()
# Head: yellow circle of radius 200 (chord length = circumference / 72).
krug(2*math.pi*200/72)
turtle.end_fill()
turtle.penup()
turtle.goto(-75, 75) # left eye
turtle.pendown()
turtle.pencolor('black')
turtle.fillcolor('blue')
turtle.begin_fill()
krug(2)
turtle.end_fill()
turtle.penup()
turtle.goto(100, 75) # right eye
turtle.pendown()
turtle.pencolor('black')
turtle.fillcolor('blue')
turtle.begin_fill()
krug(2)
turtle.end_fill()
turtle.penup()
turtle.goto(-10, 50) # nose: a thick vertical stroke
turtle.left(180)
turtle.pendown()
turtle.pencolor('black')
turtle.width(20)
turtle.forward(50)
turtle.penup()
turtle.goto(-65, -35) # mouth: a red half-circle arc
turtle.pendown()
turtle.pencolor('red')
turtle.width(10)
duga(10)
import unittest
from aquarius.Aquarius import Aquarius
class ConsoleTestBase(unittest.TestCase):
    """Shared helpers for console-output test cases."""
    def initialise_app_mock(self):
        # Construct the application under test with all collaborators unset.
        self.app = Aquarius(None, None, None)
    def assert_called(self, method):
        # Convenience wrapper: assert that a mocked method was invoked.
        self.assertTrue(method.called)
from __future__ import absolute_import
from django.utils.translation import ugettext_lazy as _
from common.utils import encapsulate
from documents.models import Document
from documents.permissions import (PERMISSION_DOCUMENT_NEW_VERSION,
PERMISSION_DOCUMENT_CREATE)
from navigation.api import register_links, register_model_list_columns
from project_setup.api import register_setup
from .staging import StagingFile
from .models import (WebForm, StagingFolder, SourceTransformation,
WatchFolder)
from .widgets import staging_file_thumbnail
from .permissions import (PERMISSION_SOURCES_SETUP_VIEW,
PERMISSION_SOURCES_SETUP_EDIT, PERMISSION_SOURCES_SETUP_DELETE,
PERMISSION_SOURCES_SETUP_CREATE)
# --- Link declarations ------------------------------------------------------
# Each dict describes one navigation entry: label, target view, optional view
# args, icon, and the permissions required to display it.
document_create_multiple = {'text': _(u'upload new documents'), 'view': 'document_create_multiple', 'famfam': 'page_add', 'permissions': [PERMISSION_DOCUMENT_CREATE], 'children_view_regex': [r'upload_interactive']}
document_create_siblings = {'text': _(u'clone metadata'), 'view': 'document_create_siblings', 'args': 'object.id', 'famfam': 'page_copy', 'permissions': [PERMISSION_DOCUMENT_CREATE]}
staging_file_preview = {'text': _(u'preview'), 'class': 'fancybox-noscaling', 'view': 'staging_file_preview', 'args': ['source.source_type', 'source.pk', 'object.id'], 'famfam': 'zoom', 'permissions': [PERMISSION_DOCUMENT_NEW_VERSION, PERMISSION_DOCUMENT_CREATE]}
staging_file_delete = {'text': _(u'delete'), 'view': 'staging_file_delete', 'args': ['source.source_type', 'source.pk', 'object.id'], 'famfam': 'delete', 'keep_query': True, 'permissions': [PERMISSION_DOCUMENT_NEW_VERSION, PERMISSION_DOCUMENT_CREATE]}
# Setup-area links for managing document sources (web forms, staging and
# watch folders) and their transformations.
setup_sources = {'text': _(u'sources'), 'view': 'setup_web_form_list', 'famfam': 'application_form', 'icon': 'application_form.png', 'children_classes': [WebForm], 'permissions': [PERMISSION_SOURCES_SETUP_VIEW], 'children_view_regex': [r'setup_web_form', r'setup_staging_folder', r'setup_source_']}
setup_web_form_list = {'text': _(u'web forms'), 'view': 'setup_web_form_list', 'famfam': 'application_form', 'icon': 'application_form.png', 'children_classes': [WebForm], 'permissions': [PERMISSION_SOURCES_SETUP_VIEW]}
setup_staging_folder_list = {'text': _(u'staging folders'), 'view': 'setup_staging_folder_list', 'famfam': 'folder_camera', 'children_classes': [StagingFolder], 'permissions': [PERMISSION_SOURCES_SETUP_VIEW]}
setup_watch_folder_list = {'text': _(u'watch folders'), 'view': 'setup_watch_folder_list', 'famfam': 'folder_magnify', 'children_classes': [WatchFolder], 'permissions': [PERMISSION_SOURCES_SETUP_VIEW]}
setup_source_edit = {'text': _(u'edit'), 'view': 'setup_source_edit', 'args': ['source.source_type', 'source.pk'], 'famfam': 'application_form_edit', 'permissions': [PERMISSION_SOURCES_SETUP_EDIT]}
setup_source_delete = {'text': _(u'delete'), 'view': 'setup_source_delete', 'args': ['source.source_type', 'source.pk'], 'famfam': 'application_form_delete', 'permissions': [PERMISSION_SOURCES_SETUP_DELETE]}
setup_source_create = {'text': _(u'add new source'), 'view': 'setup_source_create', 'args': 'source_type', 'famfam': 'application_form_add', 'permissions': [PERMISSION_SOURCES_SETUP_CREATE]}
setup_source_transformation_list = {'text': _(u'transformations'), 'view': 'setup_source_transformation_list', 'args': ['source.source_type', 'source.pk'], 'famfam': 'shape_move_front', 'permissions': [PERMISSION_SOURCES_SETUP_EDIT]}
setup_source_transformation_create = {'text': _(u'add transformation'), 'view': 'setup_source_transformation_create', 'args': ['source.source_type', 'source.pk'], 'famfam': 'shape_square_add', 'permissions': [PERMISSION_SOURCES_SETUP_EDIT]}
setup_source_transformation_edit = {'text': _(u'edit'), 'view': 'setup_source_transformation_edit', 'args': 'transformation.pk', 'famfam': 'shape_square_edit', 'permissions': [PERMISSION_SOURCES_SETUP_EDIT]}
setup_source_transformation_delete = {'text': _(u'delete'), 'view': 'setup_source_transformation_delete', 'args': 'transformation.pk', 'famfam': 'shape_square_delete', 'permissions': [PERMISSION_SOURCES_SETUP_EDIT]}
source_list = {'text': _(u'Document sources'), 'view': 'setup_web_form_list', 'famfam': 'page_add', 'children_url_regex': [r'sources/setup'], 'permissions': [PERMISSION_SOURCES_SETUP_VIEW]}
upload_version = {'text': _(u'upload new version'), 'view': 'upload_version', 'args': 'object.pk', 'famfam': 'page_add', 'permissions': [PERMISSION_DOCUMENT_NEW_VERSION]}
# --- Link registration ------------------------------------------------------
# Attach the links declared above to their models and named views.
register_links(StagingFile, [staging_file_delete])
register_links(SourceTransformation, [setup_source_transformation_edit, setup_source_transformation_delete])
#register_links(['setup_web_form_list', 'setup_staging_folder_list', 'setup_watch_folder_list', 'setup_source_create'], [setup_web_form_list, setup_staging_folder_list, setup_watch_folder_list], menu_name='form_header')
register_links(['setup_web_form_list', 'setup_staging_folder_list', 'setup_watch_folder_list', 'setup_source_create'], [setup_web_form_list, setup_staging_folder_list], menu_name='form_header')
#register_links(WebForm, [setup_web_form_list, setup_staging_folder_list, setup_watch_folder_list], menu_name='form_header')
register_links(WebForm, [setup_web_form_list, setup_staging_folder_list], menu_name='form_header')
register_links(WebForm, [setup_source_transformation_list, setup_source_edit, setup_source_delete])
register_links(['setup_web_form_list', 'setup_staging_folder_list', 'setup_watch_folder_list', 'setup_source_edit', 'setup_source_delete', 'setup_source_create'], [setup_sources, setup_source_create], menu_name='sidebar')
#register_links(StagingFolder, [setup_web_form_list, setup_staging_folder_list, setup_watch_folder_list], menu_name='form_header')
register_links(StagingFolder, [setup_web_form_list, setup_staging_folder_list], menu_name='form_header')
register_links(StagingFolder, [setup_source_transformation_list, setup_source_edit, setup_source_delete])
register_links(WatchFolder, [setup_web_form_list, setup_staging_folder_list, setup_watch_folder_list], menu_name='form_header')
register_links(WatchFolder, [setup_source_transformation_list, setup_source_edit, setup_source_delete])
# Document version
register_links(['document_version_list', 'upload_version', 'document_version_revert'], [upload_version], menu_name='sidebar')
register_links(['setup_source_transformation_create', 'setup_source_transformation_edit', 'setup_source_transformation_delete', 'setup_source_transformation_list'], [setup_source_transformation_create], menu_name='sidebar')
source_views = ['setup_web_form_list', 'setup_staging_folder_list', 'setup_watch_folder_list', 'setup_source_edit', 'setup_source_delete', 'setup_source_create', 'setup_source_transformation_list', 'setup_source_transformation_edit', 'setup_source_transformation_delete', 'setup_source_transformation_create']
# Show a thumbnail column for staging files in list views.
register_model_list_columns(StagingFile, [
    {'name':_(u'thumbnail'), 'attribute':
        encapsulate(lambda x: staging_file_thumbnail(x))
    },
])
register_setup(setup_sources)
register_links(['document_list_recent', 'document_list', 'document_create', 'document_create_multiple', 'upload_interactive', 'staging_file_delete'], [document_create_multiple], menu_name='secondary_menu')
register_links(Document, [document_create_siblings])
| rosarior/mayan | apps/sources/__init__.py | Python | gpl-3.0 | 7,263 |
# Package version string.
VERSION = "0.12beta4"
# Human-readable codename accompanying VERSION.
VERSION_NAME = "Anderssen"
| btrent/knave | pychess/__init__.py | Python | gpl-3.0 | 49 |
#!/usr/bin/env python
# -*- coding: iso-8859-15 -*-
"""
Raspymc is a multimedia centre exposed via a http server built with bottlepy
Copyright (C) 2013 Giancarlo Fringuello
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os, inspect, ConfigParser, pickle
from utils import *
from logger import*
from track_obj import *
# Directory the server was started from (first entry of sys.path).
CNF_SERVER_PATH = sys.path[0]
# NOTE(review): this "" value is dead — it is unconditionally overwritten two
# lines below, and again by get_folder_path() from config.ini.
CNF_FOLDER_PATH = ""
# Pickled playlist and INI configuration, both under <server>/config/.
CNF_PLAYLIST_PATH = CNF_SERVER_PATH + "/config/playlist.pkl"
CNF_FOLDER_PATH = CNF_SERVER_PATH + "/config/"
CNF_CONFIG_FILE = CNF_FOLDER_PATH + "config.ini"
#
# Loads the saved playlist from file
def get_playlist():
    """Return the playlist previously saved with store_playlist().

    Returns an empty list when the pickle file is missing or unreadable
    (first run, or a corrupted file), so callers always receive a list.
    """
    log(LOG_INFO, inspect.currentframe().f_lineno, "conf_manager.py::load_playlist()")
    l_playlist = []
    try:
        with open(CNF_PLAYLIST_PATH, 'rb') as l_input:
            l_playlist = pickle.load(l_input)
    except Exception:
        # Narrowed from a bare "except:": the best-effort fallback is kept,
        # but SystemExit/KeyboardInterrupt are no longer swallowed.
        log(LOG_WARNING, inspect.currentframe().f_lineno, "conf_manager.py::load_playlist()", "unexisting playlist file: " + CNF_PLAYLIST_PATH)
    return l_playlist
def store_playlist(p_list):
    """Persist *p_list* to CNF_PLAYLIST_PATH as a pickle.

    Failures (e.g. an unwritable config directory) are logged and ignored so
    saving the playlist can never crash the caller.
    """
    log(LOG_INFO, inspect.currentframe().f_lineno, "conf_manager.py::store_playlist()")
    try:
        with open(CNF_PLAYLIST_PATH, 'wb') as l_output:
            pickle.dump(p_list, l_output, pickle.HIGHEST_PROTOCOL)
    except Exception:
        # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
        # still propagate; all real I/O and pickling errors are still caught.
        log(LOG_WARNING, inspect.currentframe().f_lineno, "conf_manager.py::store_playlist()", "unexisting playlist file: " + CNF_PLAYLIST_PATH)
#
# Loads the configuration from file
def get_folder_path():
    """Return the media folder path, resolving it from config.ini.

    Creates the config directory and regenerates a clean config.ini
    (defaulting the folder to the user's home directory) whenever the file
    is missing, unreadable, or lacks the expected [PATH] section/option.
    """
    log(LOG_INFO, inspect.currentframe().f_lineno, "conf_manager.py::load_configuration()")
    global CNF_FOLDER_PATH
    global CNF_PLAYLIST_PATH
    # NOTE(review): SERVER_PATH is declared global but never assigned in this
    # module (only CNF_SERVER_PATH exists) — confirm the intended name.
    global SERVER_PATH
    l_config_parser = ConfigParser.ConfigParser()
    # When True, the config file is rewritten from scratch further below.
    l_clean_configuration = False
    if not os.path.isdir(CNF_FOLDER_PATH): # if config directory does not exist, create it
        os.makedirs(CNF_FOLDER_PATH)
        log(LOG_WARNING, inspect.currentframe().f_lineno, "conf_manager.py::load_configuration()", CNF_FOLDER_PATH + " did not exist, it has been created")
    if os.path.isfile(CNF_CONFIG_FILE):
        try:
            l_config_parser.read(CNF_CONFIG_FILE)
            if l_config_parser.has_section("PATH"):
                if l_config_parser.has_option("PATH", "CNF_FOLDER_PATH"):
                    CNF_FOLDER_PATH = l_config_parser.get("PATH","CNF_FOLDER_PATH")
                else:
                    # Section present but the option is missing: regenerate.
                    l_clean_configuration = True
            else:
                # if section does not exist
                l_clean_configuration = True
                log(LOG_VERBOSE, inspect.currentframe().f_lineno, "conf_manager.py::load_configuration()", "unable to load CNF_FOLDER_PATH, using home as default, new config.ini will be generated.")
        except:
            # if unable to read file (e.g. file damaged)
            l_clean_configuration = True
            log(LOG_WARNING, inspect.currentframe().f_lineno, "conf_manager.py::load_configuration()", "exception: unable to load CNF_FOLDER_PATH from " + CNF_CONFIG_FILE + ", using home path as default, new config.ini will be generated.")
    else:
        l_clean_configuration = True
        log(LOG_WARNING, inspect.currentframe().f_lineno, "conf_manager.py::load_configuration()", "no configuration file found, new config.ini will be generated.")
    if l_clean_configuration:
        # cleanup config file
        for l_section in l_config_parser.sections():
            l_config_parser.remove_section(l_section)
        l_config_parser.add_section("PATH")
        l_config_parser.set("PATH", "CNF_FOLDER_PATH", os.path.expanduser("~"))
        l_config_parser.write(file(CNF_CONFIG_FILE, 'w'))
    if "" == CNF_FOLDER_PATH:
        # Last-resort fallback: the user's home directory.
        CNF_FOLDER_PATH = os.path.expanduser("~")
    log(LOG_VERBOSE, inspect.currentframe().f_lineno, "conf_manager.py::load_configuration()", "CNF_FOLDER_PATH = " + CNF_FOLDER_PATH)
    log(LOG_VERBOSE, inspect.currentframe().f_lineno, "conf_manager.py::load_configuration()", "CNF_PLAYLIST_PATH = " + CNF_PLAYLIST_PATH)
    log(LOG_VERBOSE, inspect.currentframe().f_lineno, "conf_manager.py::load_configuration()", "CNF_SERVER_PATH = " + CNF_SERVER_PATH)
    return CNF_FOLDER_PATH
def get_server_path():
    """Return the directory the server was started from.

    Fixed: the previous body returned the undefined name SERVER_PATH, which
    raised NameError on every call; the module-level constant is
    CNF_SERVER_PATH (sys.path[0]).
    """
    return CNF_SERVER_PATH
def get_playlist_path():
return CNF_PLAYLIST_PATH | GiancarloF/raspymc_server | core/conf_manager.py | Python | gpl-3.0 | 4,528 |
# -*- coding: utf-8 -*-
"""Version of the calculator."""
# Single source of truth for the package version string.
version = '1.0.0'
| Ryszard-Ps/rsr-calculator | rsr_calculator/version.py | Python | gpl-3.0 | 77 |
import itertools as it
from operator import attrgetter
from flask import Markup, render_template, Blueprint, redirect, url_for, flash, abort, request
from sqlalchemy import desc
from .. import Execution, Stage, Task, TaskStatus
from ..job.JobManager import JobManager
from . import filters
from ..graph.draw import draw_task_graph, draw_stage_graph
def gen_bprint(cosmos_app):
    """Build and return the Flask blueprint serving the Cosmos web UI.

    ``cosmos_app`` provides the SQLAlchemy ``session`` and the
    ``get_submit_args`` callable; every view function defined below closes
    over them.
    """
    session = cosmos_app.session
    def get_execution(id):
        # Convenience lookup; .one() raises if the id does not exist.
        return session.query(Execution).filter_by(id=id).one()
    bprint = Blueprint('cosmos', __name__, template_folder='templates', static_folder='static',
                       static_url_path='/cosmos/static')
    filters.add_filters(bprint)
    @bprint.route('/execution/delete/<int:id>')
    def execution_delete(id):
        # Deletes the execution AND its files on disk, then returns home.
        e = get_execution(id)
        e.delete(delete_files=True)
        flash('Deleted %s' % e)
        return redirect(url_for('cosmos.index'))
    @bprint.route('/')
    def index():
        # Landing page: all executions, newest first.
        executions = session.query(Execution).order_by(desc(Execution.created_on)).all()
        session.expire_all()
        return render_template('cosmos/index.html', executions=executions)
    # NOTE(review): '/' is routed twice (index above and home below); the
    # second registration appears redundant — confirm before removing.
    @bprint.route('/')
    def home():
        return index()
    @bprint.route('/execution/<name>/')
    # @bprint.route('/execution/<int:id>/')
    def execution(name):
        execution = session.query(Execution).filter_by(name=name).one()
        return render_template('cosmos/execution.html', execution=execution)
    @bprint.route('/execution/<execution_name>/<stage_name>/')
    def stage(execution_name, stage_name):
        # Shows one stage plus live DRM status for its submitted tasks.
        ex = session.query(Execution).filter_by(name=execution_name).one()
        stage = session.query(Stage).filter_by(execution_id=ex.id, name=stage_name).one()
        if stage is None:
            return abort(404)
        submitted = filter(lambda t: t.status == TaskStatus.submitted, stage.tasks)
        jm = JobManager(cosmos_app.get_submit_args)
        f = attrgetter('drm')
        drm_statuses = {}
        # Group submitted tasks by their DRM and query each DRM once.
        for drm, tasks in it.groupby(sorted(submitted, key=f), f):
            drm_statuses.update(jm.drms[drm].drm_statuses(list(tasks)))
        return render_template('cosmos/stage.html', stage=stage, drm_statuses=drm_statuses)
    # x=filter(lambda t: t.status == TaskStatus.submitted, stage.tasks))
    @bprint.route('/execution/<int:ex_id>/stage/<stage_name>/delete/')
    def stage_delete(ex_id, stage_name):
        # Removes the stage record only (delete_files=False keeps output).
        s = session.query(Stage).filter(Stage.execution_id == ex_id, Stage.name == stage_name).one()
        flash('Deleted %s' % s)
        ex_url = s.execution.url
        s.delete(delete_files=False)
        return redirect(ex_url)
    # @bprint.route('/task/<int:id>/')
    # def task(id):
    #     task = session.query(Task).get(id)
    #     if task is None:
    #         return abort(404)
    #     return redirect(url_for('cosmos.task_friendly', ex_name=task.execution.name, stage_name=task.stage.name, task_id=task.id))
    # @bprint.route('/execution/<ex_name>/<stage_name>/task/')
    # def task(ex_name, stage_name):
    #     # resource_usage = [(category, field, getattr(task, field), profile_help[field]) for category, fields in
    #     # task.profile_fields for field in fields]
    #     assert request.method == 'GET'
    #     tags = request.args
    #     ex = session.query(Execution).filter_by(name=ex_name).one()
    #     stage = session.query(Stage).filter_by(execution=ex, name=stage_name).one()
    #     task = session.query(Task).filter_by(stage=stage, tags=tags).one()
    #     if task is None:
    #         return abort(404)
    #     resource_usage = [(field, getattr(task, field)) for field in task.profile_fields]
    #     return render_template('cosmos/task.html', task=task, resource_usage=resource_usage)
    @bprint.route('/execution/<ex_name>/<stage_name>/task/<task_id>')
    def task(ex_name, stage_name, task_id):
        # resource_usage = [(category, field, getattr(task, field), profile_help[field]) for category, fields in
        # task.profile_fields for field in fields]
        task = session.query(Task).get(task_id)
        if task is None:
            return abort(404)
        resource_usage = [(field, getattr(task, field)) for field in task.profile_fields]
        return render_template('cosmos/task.html', task=task, resource_usage=resource_usage)
    @bprint.route('/execution/<int:id>/taskgraph/<type>/')
    def taskgraph(id, type):
        # Renders the task or stage DAG as inline SVG when pygraphviz exists.
        from ..graph.draw import pygraphviz_available
        ex = get_execution(id)
        if pygraphviz_available:
            if type == 'task':
                svg = Markup(draw_task_graph(ex.task_graph(), url=True))
            else:
                svg = Markup(draw_stage_graph(ex.stage_graph(), url=True))
        else:
            svg = 'Pygraphviz not installed, cannot visualize. (Usually: apt-get install graphviz && pip install pygraphviz)'
        return render_template('cosmos/taskgraph.html', execution=ex, type=type,
                               svg=svg)
    # @bprint.route('/execution/<int:id>/taskgraph/svg/<type>/')
    # def taskgraph_svg(id, type, ):
    #     e = get_execution(id)
    #
    #     if type == 'task':
    #         return send_file(io.BytesIO(taskgraph_.tasks_to_image(e.tasks)), mimetype='image/svg+xml')
    #     else:
    #         return send_file(io.BytesIO(stages_to_image(e.stages)), mimetype='image/svg+xml')
    #
    return bprint
# Human-readable help text for each task profiling field, keyed by the field
# name used on Task objects. Displayed in the web UI next to resource usage.
profile_help = dict(
    # time
    system_time='Amount of time that this process has been scheduled in kernel mode',
    user_time='Amount of time that this process has been scheduled in user mode. This includes guest time, guest_time (time spent running a virtual CPU, see below), so that applications that are not aware of the guest time field do not lose that time from their calculations',
    cpu_time='system_time + user_time',
    wall_time='Elapsed real (wall clock) time used by the process.',
    percent_cpu='(cpu_time / wall_time) * 100',
    # memory
    avg_rss_mem='Average resident set size (Kb)',
    max_rss_mem='Maximum resident set size (Kb)',
    single_proc_max_peak_rss='Maximum single process rss used (Kb)',
    avg_virtual_mem='Average virtual memory used (Kb)',
    max_virtual_mem='Maximum virtual memory used (Kb)',
    single_proc_max_peak_virtual_mem='Maximum single process virtual memory used (Kb)',
    major_page_faults='The number of major faults the process has made which have required loading a memory page from disk',
    minor_page_faults='The number of minor faults the process has made which have not required loading a memory page from disk',
    avg_data_mem='Average size of data segments (Kb)',
    max_data_mem='Maximum size of data segments (Kb)',
    avg_lib_mem='Average library memory size (Kb)',
    max_lib_mem='Maximum library memory size (Kb)',
    avg_locked_mem='Average locked memory size (Kb)',
    max_locked_mem='Maximum locked memory size (Kb)',
    avg_num_threads='Average number of threads',
    max_num_threads='Maximum number of threads',
    avg_pte_mem='Average page table entries size (Kb)',
    max_pte_mem='Maximum page table entries size (Kb)',
    # io
    nonvoluntary_context_switches='Number of non voluntary context switches',
    voluntary_context_switches='Number of voluntary context switches',
    block_io_delays='Aggregated block I/O delays',
    avg_fdsize='Average number of file descriptor slots allocated',
    max_fdsize='Maximum number of file descriptor slots allocated',
    # misc
    num_polls='Number of times the resource usage statistics were polled from /proc',
    # Fixed typo in user-facing text: "descendnt" -> "descendant".
    names='Names of all descendant processes (there is always a python process for the profile_working.py script)',
    num_processes='Total number of descendant processes that were spawned',
    pids='Pids of all the descendant processes',
    exit_status='Exit status of the primary process being profiled',
    SC_CLK_TCK='sysconf(_SC_CLK_TCK), an operating system variable that is usually equal to 100, or centiseconds',
)
| yassineS/COSMOS-2.0 | cosmos/web/views.py | Python | gpl-3.0 | 8,041 |
# Problem metadata scraped from projecteuler.net (title, URL, statement).
# NOTE(review): variable glyphs (e.g. the "n" in "partition of n") appear to
# have been lost when the statement was scraped from HTML, and
# __problem_url___ has a trailing triple underscore — confirm before renaming.
__problem_title__ = "Maximum Integer Partition Product"
__problem_url___ = "https://projecteuler.net/problem=374"
__problem_description__ = "An integer partition of a number is a way of writing as a sum of " \
                          "positive integers. Partitions that differ only in the order of their " \
                          "summands are considered the same. A partition of into is a partition " \
                          "of in which every part occurs at most once. The partitions of 5 into " \
                          "distinct parts are: 5, 4+1 and 3+2. Let f( ) be the maximum product " \
                          "of the parts of any such partition of into distinct parts and let m( " \
                          ") be the number of elements of any such partition of with that " \
                          "product. So f(5)=6 and m(5)=2. For =10 the partition with the largest " \
                          "product is 10=2+3+5, which gives f(10)=30 and m(10)=3. And their " \
                          "product, f(10)·m(10) = 30·3 = 90 It can be verified that ∑f( )·m( ) " \
                          "for 1 ≤ ≤ 100 = 1683550844462. Find ∑f( )·m( ) for 1 ≤ ≤ 10 . Give " \
                          "your answer modulo 982451653, the 50 millionth prime."
import timeit
class Solution():
    """Placeholder solver for this Project Euler problem."""
    @staticmethod
    def solution1():
        # Not implemented yet; yields None exactly like a bare ``pass`` body.
        return None
    @staticmethod
    def time_solutions():
        """Print how long each solution method takes to run once."""
        setup = 'from __main__ import Solution'
        elapsed = timeit.timeit('Solution.solution1()', setup=setup, number=1)
        print('Solution 1:', elapsed)
if __name__ == '__main__':
    # Script entry point: print the (currently stub) answer, then timings.
    s = Solution()
    print(s.solution1())
    s.time_solutions()
| jrichte43/ProjectEuler | Problem-0374/solutions.py | Python | gpl-3.0 | 1,665 |
# vim: ts=4:sw=4:expandtab
# BleachBit
# Copyright (C) 2008-2017 Andrew Ziem
# https://www.bleachbit.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Check local CleanerML files as a security measure
"""
from __future__ import absolute_import, print_function
from bleachbit import _, _p
import bleachbit
from bleachbit.CleanerML import list_cleanerml_files
from bleachbit.Options import options
import hashlib
import logging
import os
import random
import sys
logger = logging.getLogger(__name__)
# Result codes from RecognizeCleanerML.__recognized(): the file's salted hash
# matches the remembered one (KNOWN), differs from it (CHANGED), or no hash
# has been remembered yet (NEW).
KNOWN = 1
CHANGED = 2
NEW = 3
def cleaner_change_dialog(changes, parent):
    """Present a dialog regarding the change of cleaner definitions

    changes: list of [pathname, status, hash] entries (see
        RecognizeCleanerML.__scan).
    parent: GTK parent window for the modal dialog.
    Calls sys.exit(0) if the user chooses Quit instead of OK.
    """
    def toggled(cell, path, model):
        """Callback for clicking the checkbox"""
        __iter = model.get_iter_from_string(path)
        value = not model.get_value(__iter, 0)
        model.set(__iter, 0, value)
    import pygtk
    pygtk.require('2.0')
    import gtk
    dialog = gtk.Dialog(title=_("Security warning"),
                        parent=parent,
                        flags=gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT)
    dialog.set_default_size(600, 500)
    # create warning
    warnbox = gtk.HBox()
    image = gtk.Image()
    image.set_from_stock(gtk.STOCK_DIALOG_WARNING, gtk.ICON_SIZE_DIALOG)
    warnbox.pack_start(image, False)
    # TRANSLATORS: Cleaner definitions are XML data files that define
    # which files will be cleaned.
    label = gtk.Label(
        _("These cleaner definitions are new or have changed. Malicious definitions can damage your system. If you do not trust these changes, delete the files or quit."))
    label.set_line_wrap(True)
    warnbox.pack_start(label, True)
    dialog.vbox.pack_start(warnbox, False)
    # create tree view
    import gobject
    liststore = gtk.ListStore(gobject.TYPE_BOOLEAN, gobject.TYPE_STRING)
    treeview = gtk.TreeView(model=liststore)
    renderer0 = gtk.CellRendererToggle()
    renderer0.set_property('activatable', True)
    renderer0.connect('toggled', toggled, liststore)
    # TRANSLATORS: This is the column label (header) in the tree view for the
    # security dialog
    treeview.append_column(
        gtk.TreeViewColumn(_p('column_label', 'Delete'), renderer0, active=0))
    renderer1 = gtk.CellRendererText()
    # TRANSLATORS: This is the column label (header) in the tree view for the
    # security dialog
    treeview.append_column(
        gtk.TreeViewColumn(_p('column_label', 'Filename'), renderer1, text=1))
    # populate tree view
    for change in changes:
        liststore.append([False, change[0]])
    # populate dialog with widgets
    scrolled_window = gtk.ScrolledWindow()
    scrolled_window.add_with_viewport(treeview)
    dialog.vbox.pack_start(scrolled_window)
    dialog.add_button(gtk.STOCK_OK, gtk.RESPONSE_ACCEPT)
    dialog.add_button(gtk.STOCK_QUIT, gtk.RESPONSE_CLOSE)
    # run dialog
    dialog.show_all()
    while True:
        # Quit button (or closing the dialog) aborts the whole application.
        if gtk.RESPONSE_ACCEPT != dialog.run():
            sys.exit(0)
        # Collect the paths whose "Delete" checkbox was ticked.
        delete = []
        for row in liststore:
            b = row[0]
            path = row[1]
            if b:
                delete.append(path)
        if 0 == len(delete):
            # no files selected to delete
            break
        import GuiBasic
        if not GuiBasic.delete_confirmation_dialog(parent, mention_preview=False):
            # confirmation not accepted, so do not delete files
            continue
        for path in delete:
            logger.info("deleting unrecognized CleanerML '%s'", path)
            os.remove(path)
        break
    dialog.destroy()
def hashdigest(string):
    """Return the SHA-512 hex digest of *string*."""
    # hashlib requires Python 2.5
    digest = hashlib.sha512(string)
    return digest.hexdigest()
class RecognizeCleanerML:
    """Check local CleanerML files as a security measure

    On construction, scans the local CleanerML files; any new or changed
    file is shown to the user via cleaner_change_dialog() before its hash
    is remembered.
    """
    def __init__(self, parent_window=None):
        self.parent_window = parent_window
        try:
            self.salt = options.get('hashsalt')
        except bleachbit.NoOptionError:
            # First run: create a random per-installation salt and store it,
            # so hashes recorded here cannot be precomputed elsewhere.
            self.salt = hashdigest(str(random.random()))
            options.set('hashsalt', self.salt)
        self.__scan()
    def __recognized(self, pathname):
        """Is pathname recognized?

        Returns a (status, hash) tuple where status is KNOWN, CHANGED or
        NEW and hash is the salted digest of the file's current contents.
        """
        with open(pathname) as f:
            body = f.read()
        new_hash = hashdigest(self.salt + body)
        try:
            known_hash = options.get_hashpath(pathname)
        except bleachbit.NoOptionError:
            # No hash on record for this path yet.
            return NEW, new_hash
        if new_hash == known_hash:
            return KNOWN, new_hash
        return CHANGED, new_hash
    def __scan(self):
        """Look for files and act accordingly"""
        changes = []
        for pathname in sorted(list_cleanerml_files(local_only=True)):
            pathname = os.path.abspath(pathname)
            (status, myhash) = self.__recognized(pathname)
            if NEW == status or CHANGED == status:
                changes.append([pathname, status, myhash])
        if len(changes) > 0:
            # Ask the user; the dialog may delete files or exit the app.
            cleaner_change_dialog(changes, self.parent_window)
            for change in changes:
                pathname = change[0]
                myhash = change[2]
                logger.info("remembering CleanerML file '%s'", pathname)
                # Only remember files the user did not delete in the dialog.
                if os.path.exists(pathname):
                    options.set_hashpath(pathname, myhash)
| brahmastra2016/bleachbit | bleachbit/RecognizeCleanerML.py | Python | gpl-3.0 | 5,942 |
# coding: utf8
# qtplayer.py
# 10/1/2014 jichi
__all__ = 'HiddenPlayer',
from PySide.QtCore import QUrl
from sakurakit.skdebug import dprint
class _HiddenPlayer:
    """Private implementation for HiddenPlayer.

    Lazily creates a zero-sized QWebView on first access so no widget (or
    QtWebKit import) exists until playback is actually requested.
    """
    def __init__(self, parent):
        self.parent = parent # QWidget
        self._webView = None # QWebView
    @property
    def webView(self):
        # Create the hidden web view on first use.
        if not self._webView:
            dprint("create web view")
            from PySide.QtWebKit import QWebView
            self._webView = QWebView(self.parent)
            update_web_settings(self._webView.settings())
            self._webView.resize(0, 0) # zero size
        return self._webView
    def setParent(self, value):
        """Re-parent both this object and the web view (if created)."""
        self.parent = value
        if self._webView:
            self._webView.setParent(value)
    def stop(self):
        # No-op when the web view was never created.
        if self._webView:
            self._webView.stop()
class HiddenPlayer(object):
    """Public facade over _HiddenPlayer: plays a URL in an invisible web view."""
    def __init__(self, parent=None):
        self.__d = _HiddenPlayer(parent)
    def parentWidget(self): return self.__d.parent
    def setParentWidget(self, value): self.__d.setParent(value)
    def webView(self): return self.__d.webView
    def stop(self):
        """Stop any in-progress load/playback."""
        self.__d.stop()
    def play(self, url, **kwargs):
        """
        @param url str or QUrl
        Extra keyword arguments are appended to the URL as percent-encoded
        query items before loading it in the hidden web view.
        """
        if not isinstance(url, QUrl):
            url = QUrl(url)
        for k,v in kwargs.iteritems():
            #url.addQueryItem(k, v)
            if not isinstance(v, basestring):
                # Coerce non-string values (ints, etc.) before encoding.
                v = "%s" % v
            url.addEncodedQueryItem(k, QUrl.toPercentEncoding(v))
        self.__d.webView.load(url)
def update_web_settings(settings=None):
    """
    @param settings QWebSettings or None
    Apply this module's preferred QtWebKit settings (plugins/Java on,
    images off, small page cache) to *settings*, or to the global
    QWebSettings when None is given.
    """
    from PySide.QtWebKit import QWebSettings
    ws = settings or QWebSettings.globalSettings()
    ws.setAttribute(QWebSettings.PluginsEnabled, True)
    ws.setAttribute(QWebSettings.JavaEnabled, True)
    ws.setAttribute(QWebSettings.DnsPrefetchEnabled, True) # better performance
    ws.setAttribute(QWebSettings.AutoLoadImages, False) # do NOT load images
    #ws.setAttribute(QWebSettings.JavascriptCanOpenWindows, True)
    #ws.setAttribute(QWebSettings.JavascriptCanAccessClipboard, True)
    #ws.setAttribute(QWebSettings.DeveloperExtrasEnabled, True)
    #ws.setAttribute(QWebSettings.OfflineStorageDatabaseEnabled, True)
    #ws.setAttribute(QWebSettings.OfflineWebApplicationCacheEnabled, True)
    #ws.setAttribute(QWebSettings.LocalStorageEnabled, True)
    #ws.setAttribute(QWebSettings.LocalContentCanAccessRemoteUrls, True)
    #ws.setAttribute(QWebSettings.ZoomTextOnly, False)
    #ws.setDefaultTextEncoding("SHIFT-JIS")
    #ws.setDefaultTextEncoding("EUC-JP")
    #ws.setLocalStoragePath(G_PATH_CACHES)
    #QWebSettings.setIconDatabasePath(G_PATH_CACHES)
    #QWebSettings.setOfflineStoragePath(G_PATH_CACHES)
    #QWebSettings.setOfflineWebApplicationCachePath(G_PATH_CACHES)
    # See: http://webkit.org/blog/427/webkit-page-cache-i-the-basics/
    ws.setMaximumPagesInCache(10) # do not cache lots of pages
# EOF
| Dangetsu/vnr | Frameworks/Sakura/py/libs/qtbrowser/qtplayer.py | Python | gpl-3.0 | 2,795 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# repolist.py file is part of slpkg.
# Copyright 2014-2021 Dimitris Zlatanidis <d.zlatanidis@gmail.com>
# All rights reserved.
# Slpkg is a user-friendly package manager for Slackware installations
# https://gitlab.com/dslackw/slpkg
# Slpkg is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from slpkg.messages import Msg
from slpkg.repositories import Repo
from slpkg.__metadata__ import MetaData as _meta_
class RepoList:
"""List of repositories
"""
def __init__(self):
self.meta = _meta_
self.green = self.meta.color["GREEN"]
self.red = self.meta.color["RED"]
self.grey = self.meta.color["GREY"]
self.endc = self.meta.color["ENDC"]
self.msg = Msg()
self.all_repos = Repo().default_repository()
self.all_repos["slack"] = Repo().slack()
self.all_repos.update(Repo().custom_repository())
def repos(self):
"""View or enabled or disabled repositories
"""
def_cnt, cus_cnt = 0, 0
self.msg.template(78)
print("{0}{1}{2}{3}{4}{5}{6}".format(
"| Repo id", " " * 2,
"Repo URL", " " * 44,
"Default", " " * 3,
"Status"))
self.msg.template(78)
for repo_id, repo_URL in sorted(self.all_repos.items()):
status, COLOR = "disabled", self.red
default = "yes"
if len(repo_URL) > 49:
repo_URL = repo_URL[:48] + "~"
if repo_id in self.meta.repositories:
def_cnt += 1
status, COLOR = "enabled", self.green
if repo_id not in self.meta.default_repositories:
cus_cnt += 1
default = "no"
print(" {0}{1}{2}{3}{4}{5}{6}{7:>8}{8}".format(
repo_id, " " * (9 - len(repo_id)),
repo_URL, " " * (52 - len(repo_URL)),
default, " " * (8 - len(default)),
COLOR, status, self.endc))
print("\nRepositories summary")
print("=" * 79)
print(f"{self.grey}{def_cnt}/{len(self.all_repos)} enabled default "
f"repositories and {cus_cnt} custom.")
print("Edit the file '/etc/slpkg/repositories.conf' for enable "
"and disable default\nrepositories or run 'slpkg "
f"repo-enable' command.{self.endc}")
| dslackw/slpkg | slpkg/repolist.py | Python | gpl-3.0 | 2,957 |
from bs4 import BeautifulSoup
import requests, re
# Ask how many puzzles to fetch; refuse out-of-range input.
n = int(input('How many sudoku\'s do you want to download (between 1 and 10)? '))
if n < 1 or n > 10:
    # Fixed: the original called die(), an undefined name (NameError).
    raise SystemExit('Please enter a number between 1 and 10.')
# Each GET of this URL returns a fresh puzzle page (level=4).
url = 'http://show.websudoku.com/?level=4'
for i in range(n):
    page = requests.get(url)
    page.raise_for_status()
    rawPage = page.text
    # The page embeds a numeric puzzle id on the 21st line of its HTML.
    sudokuid = int(re.search(r'\d+', rawPage.split('\n')[20]).group())
    soup = BeautifulSoup(rawPage, 'html.parser')
    # Cells with class 's0' carry given digits; 'd0' cells are blanks (0).
    sudokuTable = soup.findAll(True, {'class': ['s0', 'd0']})
    # Flatten the 81 cells once, then slice into 9 rows (the original
    # rebuilt the full 81-cell list for every row).
    cells = [int(item['value']) if item.get('class')[0] == 's0' else 0 for item in sudokuTable]
    sudoku = [cells[row:row + 9] for row in range(0, 81, 9)]
    filename = 'sudokus/sudoku_%i.txt' % sudokuid
    # "with" guarantees the file is flushed and closed (it used to leak).
    with open(filename, 'w') as sudokufile:
        for line in sudoku:
            sudokufile.write(str(line).replace(',', ' ').replace('[', '').replace(']', ' ') + '\n')
input('Done!') | cawc/sudokusolve | grabsudoku.py | Python | gpl-3.0 | 804 |
# -*- coding: utf-8 -*-
# Roastero, released under GPLv3
import os
import json
import time
import functools
from PyQt5 import QtGui
from PyQt5 import QtCore
from PyQt5 import QtWidgets
from openroast import tools
from openroast.views import customqtwidgets
from openroast import utils as utils
class RecipeEditor(QtWidgets.QDialog):
def __init__(self, recipeLocation=None):
super(RecipeEditor, self).__init__()
# Define main window for the application.
self.setWindowTitle('Openroast')
self.setMinimumSize(800, 600)
self.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.create_ui()
self.recipe = {}
self.recipe["steps"] = [{'fanSpeed': 5, 'targetTemp': 150,
'sectionTime': 0}]
if recipeLocation:
self.load_recipe_file(recipeLocation)
self.preload_recipe_information()
else:
self.preload_recipe_steps(self.recipeSteps)
def create_ui(self):
"""A method used to create the basic ui for the Recipe Editor Window"""
# Create main layout for window.
self.layout = QtWidgets.QGridLayout(self)
self.layout.setRowStretch(1, 3)
# Create input fields.
self.create_input_fields()
self.layout.addLayout(self.inputFieldLayout, 0, 0, 1, 2)
# Create big edit boxes.
self.create_big_edit_boxes()
self.layout.addLayout(self.bigEditLayout, 1, 0, 1, 2)
# Create Bottom Buttons.
self.create_bottom_buttons()
self.layout.addLayout(self.bottomButtonLayout, 2, 0, 1, 2)
def create_input_fields(self):
"""Creates all of the UI components for the top of the Recipe Editor
Window."""
# Create layout for section.
self.inputFieldLayout = QtWidgets.QGridLayout()
# Create labels for fields.
recipeNameLabel = QtWidgets.QLabel("Recipe Name: ")
recipeCreatorLabel = QtWidgets.QLabel("Created by: ")
recipeRoastTypeLabel = QtWidgets.QLabel("Roast Type: ")
beanRegionLabel = QtWidgets.QLabel("Bean Region: ")
beanCountryLabel = QtWidgets.QLabel("Bean Country: ")
beanLinkLabel = QtWidgets.QLabel("Bean Link: ")
beanStoreLabel = QtWidgets.QLabel("Bean Store Name: ")
# Create input fields.
self.recipeName = QtWidgets.QLineEdit()
self.recipeCreator = QtWidgets.QLineEdit()
self.recipeRoastType = QtWidgets.QLineEdit()
self.beanRegion = QtWidgets.QLineEdit()
self.beanCountry = QtWidgets.QLineEdit()
self.beanLink = QtWidgets.QLineEdit()
self.beanStore = QtWidgets.QLineEdit()
# Remove focus from input boxes.
self.recipeName.setAttribute(QtCore.Qt.WA_MacShowFocusRect, 0)
self.recipeCreator.setAttribute(QtCore.Qt.WA_MacShowFocusRect, 0)
self.recipeRoastType.setAttribute(QtCore.Qt.WA_MacShowFocusRect, 0)
self.beanRegion.setAttribute(QtCore.Qt.WA_MacShowFocusRect, 0)
self.beanCountry.setAttribute(QtCore.Qt.WA_MacShowFocusRect, 0)
self.beanLink.setAttribute(QtCore.Qt.WA_MacShowFocusRect, 0)
self.beanStore.setAttribute(QtCore.Qt.WA_MacShowFocusRect, 0)
# Add objects to the inputFieldLayout
self.inputFieldLayout.addWidget(recipeNameLabel, 0, 0)
self.inputFieldLayout.addWidget(self.recipeName, 0, 1)
self.inputFieldLayout.addWidget(recipeCreatorLabel, 1, 0)
self.inputFieldLayout.addWidget(self.recipeCreator, 1, 1)
self.inputFieldLayout.addWidget(recipeRoastTypeLabel, 2, 0)
self.inputFieldLayout.addWidget(self.recipeRoastType, 2, 1)
self.inputFieldLayout.addWidget(beanRegionLabel, 3, 0)
self.inputFieldLayout.addWidget(self.beanRegion, 3, 1)
self.inputFieldLayout.addWidget(beanCountryLabel, 4, 0)
self.inputFieldLayout.addWidget(self.beanCountry, 4, 1)
self.inputFieldLayout.addWidget(beanLinkLabel, 5, 0)
self.inputFieldLayout.addWidget(self.beanLink, 5, 1)
self.inputFieldLayout.addWidget(beanStoreLabel, 6, 0)
self.inputFieldLayout.addWidget(self.beanStore, 6, 1)
def create_big_edit_boxes(self):
"""Creates the Bottom section of the Recipe Editor Window. This method
creates the Description box and calls another method to make the
recipe steps table."""
# Create big edit box layout.
self.bigEditLayout = QtWidgets.QGridLayout()
# Create labels for the edit boxes.
recipeDescriptionBoxLabel = QtWidgets.QLabel("Description: ")
recipeStepsLabel = QtWidgets.QLabel("Steps: ")
# Create widgets.
self.recipeDescriptionBox = QtWidgets.QTextEdit()
self.recipeSteps = self.create_steps_spreadsheet()
# Add widgets to layout.
self.bigEditLayout.addWidget(recipeDescriptionBoxLabel, 0, 0)
self.bigEditLayout.addWidget(self.recipeDescriptionBox, 1, 0)
self.bigEditLayout.addWidget(recipeStepsLabel, 0, 1)
self.bigEditLayout.addWidget(self.recipeSteps, 1, 1)
def create_bottom_buttons(self):
"""Creates the button panel on the bottom of the Recipe Editor
Window."""
# Set bottom button layout.
self.bottomButtonLayout = QtWidgets.QHBoxLayout()
self.bottomButtonLayout.setSpacing(0)
# Create buttons.
self.saveButton = QtWidgets.QPushButton("SAVE")
self.closeButton = QtWidgets.QPushButton("CLOSE")
# Assign object names to the buttons.
self.saveButton.setObjectName("smallButton")
self.saveButton.clicked.connect(self.save_recipe)
self.closeButton.setObjectName("smallButton")
self.closeButton.clicked.connect(self.close_edit_window)
# Create Spacer.
self.spacer = QtWidgets.QWidget()
self.spacer.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
# Add widgets to the layout.
self.bottomButtonLayout.addWidget(self.spacer)
self.bottomButtonLayout.addWidget(self.closeButton)
self.bottomButtonLayout.addWidget(self.saveButton)
def create_steps_spreadsheet(self):
"""Creates Recipe Steps table. It does not populate the table in this
method."""
recipeStepsTable = QtWidgets.QTableWidget()
recipeStepsTable.setShowGrid(False)
recipeStepsTable.setAlternatingRowColors(True)
recipeStepsTable.setCornerButtonEnabled(False)
recipeStepsTable.horizontalHeader().setSectionResizeMode(1)
recipeStepsTable.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)
# Steps spreadsheet
recipeStepsTable.setColumnCount(4)
recipeStepsTable.setHorizontalHeaderLabels(["Temperature",
"Fan Speed", "Section Time", "Modify"])
return recipeStepsTable
def close_edit_window(self):
"""Method used to close the Recipe Editor Window."""
self.close()
def preload_recipe_steps(self, recipeStepsTable):
"""Method that just calls load_recipe_steps() with a table specified and
uses the pre-existing loaded recipe steps in the object."""
steps = self.recipe["steps"]
self.load_recipe_steps(recipeStepsTable, steps)
    def load_recipe_steps(self, recipeStepsTable, steps):
        """Append one table row per recipe step to *recipeStepsTable*.

        Each row gets four cell widgets: a temperature combo box, a
        fan-speed combo box, a mm:ss time editor, and a panel of
        up/down/delete/insert buttons bound to this row's index.

        recipeStepsTable -- QTableWidget built by create_steps_spreadsheet()
        steps -- list of step dicts with keys "fanSpeed", "sectionTime" and
                 either "targetTemp" (int) or "cooling"

        NOTE(review): rows are inserted at the bottom, but the cell widgets
        are placed at index ``row`` counted from 0 — correct only when the
        table starts empty (which all current callers ensure). TODO confirm.
        """
        # Create spreadsheet choices
        fanSpeedChoices = [str(x) for x in range(1,10)]
        # "Cooling" plus every temperature from 150 to 550 in steps of 10.
        targetTempChoices = ["Cooling"] + [str(x) for x in range(150, 551, 10)]
        # loop through recipe and load each step
        for row in range(len(steps)):
            recipeStepsTable.insertRow(recipeStepsTable.rowCount())
            # Temperature Value
            sectionTempWidget = customqtwidgets.ComboBoxNoWheel()
            sectionTempWidget.setObjectName("recipeEditCombo")
            sectionTempWidget.addItems(targetTempChoices)
            # Separator between "Cooling" and the numeric temperatures;
            # it shifts all numeric indexes up by one (hence the +1/+2 below).
            sectionTempWidget.insertSeparator(1)
            if 'targetTemp' in steps[row]:
                # NOTE(review): sectionTemp is assigned in both branches but
                # never read afterwards — appears to be dead code.
                sectionTemp = steps[row]["targetTemp"]
                # Accommodate for temperature not fitting in 10 increment list
                if str(steps[row]["targetTemp"]) in targetTempChoices:
                    sectionTempWidget.setCurrentIndex(
                        targetTempChoices.index(
                            str(steps[row]["targetTemp"]))+1)
                else:
                    # Round down to the nearest 10 and insert the exact value
                    # right after that rounded entry, then select it.
                    roundedNumber = steps[row]["targetTemp"] - (steps[row]["targetTemp"] % 10)
                    sectionTempWidget.insertItem(targetTempChoices.index(str(roundedNumber))+2, str(steps[row]["targetTemp"]))
                    sectionTempWidget.setCurrentIndex(targetTempChoices.index(str(roundedNumber))+2)
            elif 'cooling' in steps[row]:
                sectionTemp = "Cooling"
                sectionTempWidget.setCurrentIndex(targetTempChoices.index("Cooling"))
            # Time Value
            sectionTimeWidget = customqtwidgets.TimeEditNoWheel()
            sectionTimeWidget.setObjectName("recipeEditTime")
            sectionTimeWidget.setAttribute(QtCore.Qt.WA_MacShowFocusRect, 0)
            sectionTimeWidget.setDisplayFormat("mm:ss")
            # Set QTimeEdit to the right time from recipe
            # (sectionTime is stored as integer seconds; format as mm:ss).
            sectionTimeStr = time.strftime("%M:%S", time.gmtime(steps[row]["sectionTime"]))
            sectionTime = QtCore.QTime().fromString(sectionTimeStr, "mm:ss")
            sectionTimeWidget.setTime(sectionTime)
            # Fan Speed Value
            sectionFanSpeedWidget = customqtwidgets.ComboBoxNoWheel()
            sectionFanSpeedWidget.setObjectName("recipeEditCombo")
            sectionFanSpeedWidget.addItems(fanSpeedChoices)
            sectionFanSpeedWidget.setCurrentIndex(fanSpeedChoices.index(str(steps[row]["fanSpeed"])))
            # Modify Row field: each button captures this row's index via
            # functools.partial so the handlers know which step to act on.
            upArrow = QtWidgets.QPushButton()
            upArrow.setObjectName("upArrow")
            #upArrow.setIcon(QtGui.QIcon('static/images/upSmall.png'))
            upArrow.setIcon(
                QtGui.QIcon(
                    utils.get_resource_filename(
                        'static/images/upSmall.png'
                    )
                )
            )
            upArrow.clicked.connect(functools.partial(self.move_recipe_step_up, row))
            downArrow = QtWidgets.QPushButton()
            downArrow.setObjectName("downArrow")
            #downArrow.setIcon(QtGui.QIcon('static/images/downSmall.png'))
            downArrow.setIcon(
                QtGui.QIcon(
                    utils.get_resource_filename(
                        'static/images/downSmall.png'
                    )
                )
            )
            downArrow.clicked.connect(functools.partial(self.move_recipe_step_down, row))
            deleteRow = QtWidgets.QPushButton()
            # deleteRow.setIcon(QtGui.QIcon('static/images/delete.png'))
            deleteRow.setIcon(
                QtGui.QIcon(
                    utils.get_resource_filename(
                        'static/images/delete.png'
                    )
                )
            )
            deleteRow.setObjectName("deleteRow")
            deleteRow.clicked.connect(functools.partial(self.delete_recipe_step, row))
            insertRow = QtWidgets.QPushButton()
            # insertRow.setIcon(QtGui.QIcon('static/images/plus.png'))
            insertRow.setIcon(
                QtGui.QIcon(
                    utils.get_resource_filename(
                        'static/images/plus.png'
                    )
                )
            )
            insertRow.setObjectName("insertRow")
            insertRow.clicked.connect(functools.partial(self.insert_recipe_step, row))
            # Create a grid layout to add all the widgets to
            modifyRowWidgetLayout = QtWidgets.QHBoxLayout()
            modifyRowWidgetLayout.setSpacing(0)
            modifyRowWidgetLayout.setContentsMargins(0,0,0,0)
            modifyRowWidgetLayout.addWidget(upArrow)
            modifyRowWidgetLayout.addWidget(downArrow)
            modifyRowWidgetLayout.addWidget(deleteRow)
            modifyRowWidgetLayout.addWidget(insertRow)
            # Assign Layout to a QWidget to add to a single column
            modifyRowWidget = QtWidgets.QWidget()
            modifyRowWidget.setObjectName("buttonTable")
            modifyRowWidget.setLayout(modifyRowWidgetLayout)
            # Add widgets
            recipeStepsTable.setCellWidget(row, 0, sectionTempWidget)
            recipeStepsTable.setCellWidget(row, 1, sectionFanSpeedWidget)
            recipeStepsTable.setCellWidget(row, 2, sectionTimeWidget)
            recipeStepsTable.setCellWidget(row, 3, modifyRowWidget)
def load_recipe_file(self, recipeFile):
"""Takes a file location and opens that file. It then loads the contents
which should be JSON and makes a python dictionary from the contents.
The python dictionary is created as self.recipe."""
# Load recipe file
recipeFileHandler = open(recipeFile)
self.recipe = json.load(recipeFileHandler)
self.recipe["file"] = recipeFile
recipeFileHandler.close()
    def preload_recipe_information(self):
        """Prefill every form field of the editor from ``self.recipe``.

        NOTE(review): assumes the recipe dict contains all of the keys read
        below (as produced by save_recipe()); a partial recipe would raise
        KeyError — TODO confirm whether recipes are always complete.
        """
        self.recipeName.setText(self.recipe["roastName"])
        self.recipeCreator.setText(self.recipe["creator"])
        self.recipeRoastType.setText(self.recipe["roastDescription"]["roastType"])
        self.beanRegion.setText(self.recipe["bean"]["region"])
        self.beanCountry.setText(self.recipe["bean"]["country"])
        self.beanLink.setText(self.recipe["bean"]["source"]["link"])
        self.beanStore.setText(self.recipe["bean"]["source"]["reseller"])
        self.recipeDescriptionBox.setText(self.recipe["roastDescription"]["description"])
        # Steps are rendered into the table by the dedicated helper.
        self.preload_recipe_steps(self.recipeSteps)
def move_recipe_step_up(self, row):
"""This method will take a row and swap it the row above it."""
if row != 0:
steps = self.get_current_table_values()
newSteps = steps
# Swap the steps
newSteps[row], newSteps[row-1] = newSteps[row-1], newSteps[row]
# Rebuild table with new steps
self.rebuild_recipe_steps_table(newSteps)
def move_recipe_step_down(self, row):
"""This method will take a row and swap it the row below it."""
if row != self.recipeSteps.rowCount()-1:
steps = self.get_current_table_values()
newSteps = steps
# Swap the steps
newSteps[row], newSteps[row+1] = newSteps[row+1], newSteps[row]
# Rebuild table with new steps
self.rebuild_recipe_steps_table(newSteps)
def delete_recipe_step(self, row):
"""This method will take a row delete it."""
steps = self.get_current_table_values()
newSteps = steps
# Delete step
newSteps.pop(row)
# Rebuild table with new steps
self.rebuild_recipe_steps_table(newSteps)
def insert_recipe_step(self, row):
"""Inserts a row below the specified row wit generic values."""
steps = self.get_current_table_values()
newSteps = steps
# insert step
newSteps.insert(row+1, {'fanSpeed': 5, 'targetTemp': 150, 'sectionTime': 0})
# Rebuild table with new steps
self.rebuild_recipe_steps_table(newSteps)
    def get_current_table_values(self):
        """Read the steps table back into a list of step dicts.

        Returns a list (one entry per table row) of dicts with keys
        "sectionTime" (seconds), "fanSpeed" (int) and either
        "cooling": True or "targetTemp" (int), mirroring the on-disk
        recipe format consumed by load_recipe_steps().
        """
        recipeSteps = []
        for row in range(0, self.recipeSteps.rowCount()):
            currentRow = {}
            # Column 2 holds the QTimeEdit; convert its mm:ss to seconds.
            currentRow["sectionTime"] = QtCore.QTime(0, 0, 0).secsTo(self.recipeSteps.cellWidget(row, 2).time())
            currentRow["fanSpeed"] = int(self.recipeSteps.cellWidget(row, 1).currentText())
            # Get Temperature or cooling
            if self.recipeSteps.cellWidget(row, 0).currentText() == "Cooling":
                currentRow["cooling"] = True
            else:
                currentRow["targetTemp"] = int(self.recipeSteps.cellWidget(row, 0).currentText())
            recipeSteps.append(currentRow)
        # Return copied rows
        return recipeSteps
def rebuild_recipe_steps_table(self, newSteps):
"""Used to reload all the rows in the recipe steps table with new steps.
"""
# Alert user if they try to delete all the steps
if len(newSteps) < 1:
alert = QtWidgets.QMessageBox()
alert.setWindowTitle('openroast')
alert.setStyleSheet(self.style)
alert.setText("You must have atleast one step!")
alert.exec_()
else:
# Delete all the current rows
while self.recipeSteps.rowCount() > 0:
self.recipeSteps.removeRow(0)
# Add the new step sequence
self.load_recipe_steps(self.recipeSteps, newSteps)
def save_recipe(self):
"""Pulls in all of the information in the window and creates a new
recipe file with the specified contents."""
# Determine Recipe File Name
if "file" in self.recipe:
filePath = self.recipe["file"]
else:
filePath = os.path.expanduser('~/Documents/Openroast/Recipes/My Recipes/') + tools.format_filename(self.recipeName.text()) + ".json"
# TODO: Account for existing file with same name
# Create Dictionary with all the new recipe information
self.newRecipe = {}
self.newRecipe["roastName"] = self.recipeName.text()
self.newRecipe["steps"] = self.get_current_table_values()
self.newRecipe["roastDescription"] = {}
self.newRecipe["roastDescription"]["roastType"] = self.recipeRoastType.text()
self.newRecipe["roastDescription"]["description"] = self.recipeDescriptionBox.toPlainText()
self.newRecipe["creator"] = self.recipeCreator.text()
self.newRecipe["bean"] = {}
self.newRecipe["bean"]["region"] = self.beanRegion.text()
self.newRecipe["bean"]["country"] = self.beanCountry.text()
self.newRecipe["bean"]["source"] = {}
self.newRecipe["bean"]["source"]["reseller"] = self.beanStore.text()
self.newRecipe["bean"]["source"]["link"] = self.beanLink.text()
self.newRecipe["totalTime"] = 0
for step in self.newRecipe["steps"]:
self.newRecipe["totalTime"] += step["sectionTime"]
# Write the recipe to a file
jsonObject = json.dumps(self.newRecipe, indent=4)
# will need to create dir if it doesn't exist
# note that this should never happen because this folder is created
# at OpenroastApp.__init__() time.
if not os.path.exists(os.path.dirname(filePath)):
try:
os.makedirs(os.path.dirname(filePath))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
file = open(filePath, 'w')
file.write(jsonObject)
file.close()
| Roastero/Openroast | openroast/views/recipeeditorwindow.py | Python | gpl-3.0 | 19,366 |
"""
input: a loaded image;
output: [[x,y],[width,height]] of the detected mouth area
"""
import cv
def findmouth(img):
    """Locate the mouth region of the largest face in *img*.

    Returns the raw OpenCV 1.x detection tuple of the selected mouth,
    shaped ((x, y, width, height), neighbours), or the sentinel integer 2
    when either no face or no plausible mouth was detected.

    NOTE(review): uses the legacy ``cv`` (OpenCV 1.x) API and reloads both
    Haar cascades from disk on every call — presumably acceptable for this
    project's batch usage; TODO confirm.
    """
    # INITIALIZE: loading the classifiers
    haarFace = cv.Load('haarcascade_frontalface_default.xml')
    haarMouth = cv.Load('haarcascade_mouth.xml')
    # running the classifiers
    storage = cv.CreateMemStorage()
    detectedFace = cv.HaarDetectObjects(img, haarFace, storage)
    detectedMouth = cv.HaarDetectObjects(img, haarMouth, storage)
    # FACE: find the largest detected face as detected face
    maxFaceSize = 0
    maxFace = 0
    if detectedFace:
        for face in detectedFace: # face: [0][0]: x; [0][1]: y; [0][2]: width; [0][3]: height
            if face[0][3]* face[0][2] > maxFaceSize:
                maxFaceSize = face[0][3]* face[0][2]
                maxFace = face
    if maxFace == 0: # did not detect face
        return 2
    def mouth_in_lower_face(mouth,face):
        # if the mouth is in the lower 2/5 of the face
        # and the lower edge of mouth is above that of the face
        # and the horizontal center of the mouth is the center of the face
        if (mouth[0][1] > face[0][1] + face[0][3] * 3 / float(5)
            and mouth[0][1] + mouth[0][3] < face[0][1] + face[0][3]
            and abs((mouth[0][0] + mouth[0][2] / float(2))
                    - (face[0][0] + face[0][2] / float(2))) < face[0][2] / float(10)):
            return True
        else:
            return False
    # FILTER MOUTH: keep only candidates geometrically consistent with
    # being the mouth of the chosen (largest) face.
    filteredMouth = []
    if detectedMouth:
        for mouth in detectedMouth:
            if mouth_in_lower_face(mouth,maxFace):
                filteredMouth.append(mouth)
    # Among the surviving candidates, pick the largest by area.
    maxMouthSize = 0
    for mouth in filteredMouth:
        if mouth[0][3]* mouth[0][2] > maxMouthSize:
            maxMouthSize = mouth[0][3]* mouth[0][2]
            maxMouth = mouth
    try:
        return maxMouth
    except UnboundLocalError:
        # No candidate survived filtering: same sentinel as "no face".
        return 2
| divija96/Emotion-Detection | code/mouthdetection.py | Python | gpl-3.0 | 1,760 |
import json
import requests
from transliterate import translit
_eng_chars = "~!@#$%^&qwertyuiop[]asdfghjkl;'zxcvbnm,./QWERTYUIOP{}ASDFGHJKL:\"|ZXCVBNM<>?"
_rus_chars = "ё!\"№;%:?йцукенгшщзхъфывапролджэячсмитьбю.ЙЦУКЕНГШЩЗХЪФЫВАПРОЛДЖЭ/ЯЧСМИТЬБЮ,"
_trans_table = dict(zip(_eng_chars, _rus_chars))
def _fix_layout(s):
return "".join([_trans_table.get(c, c) for c in s])
def es_predict(es_url, keywords):
    """Yield up to 5 query suggestions from an Elasticsearch index.

    Each keyword is expanded with its wrong-keyboard-layout variant
    (_fix_layout) and its transliteration into Russian, and the union is
    sent as a simple_query_string OR/PREFIX search against *es_url*.

    Generator: yields the "query" field of each hit's _source.

    NOTE(review): the POST is issued without a timeout, so a hung ES node
    blocks the caller indefinitely — consider requests' ``timeout`` kwarg.
    """
    query = set(
        keywords +
        [_fix_layout(word) for word in keywords] +
        [translit(word, "ru") for word in keywords]
    )
    post_data = json.dumps({
        "size": 5,
        "query": {
            "simple_query_string": {
                # Set iteration order is arbitrary; ES treats the terms as
                # alternatives, so ordering does not affect the results.
                "query": "|".join(query),
                "flags": "OR|PREFIX"
            }
        }
    })
    response = requests.post(es_url + "/_search", data=post_data).json()
    if "hits" in response and "hits" in response["hits"]:
        for it in response["hits"]["hits"]:
            if "_source" in it and "query" in it["_source"]:
                yield it["_source"]["query"]
| bashkirtsevich/autocode | text_preprocessing/es_predict.py | Python | gpl-3.0 | 1,121 |
import sys
def main():
    """Entry point of this smoke-test script: print the constant 1."""
    print(1)


if __name__ == '__main__':
    main()
#!/usr/bin/python
AGO_SCHEDULER_VERSION = '0.0.1'
############################################
"""
Basic class for device and device group schedule
"""
__author__ = "Joakim Lindbom"
__copyright__ = "Copyright 2017, Joakim Lindbom"
__date__ = "2017-01-27"
__credits__ = ["Joakim Lindbom", "The ago control team"]
__license__ = "GPL Public License Version 3"
__maintainer__ = "Joakim Lindbom"
__email__ = 'Joakim.Lindbom@gmail.com'
__status__ = "Experimental"
__version__ = AGO_SCHEDULER_VERSION
############################################
import time
from datetime import date, datetime
import sys
import json
all_days = {"mo", "tu", "we", "th", "fr", "sa", "su"}
class Scheduler:
    """Top-level holder for a parsed schedule file: a Rules collection and
    a Schedules collection, plus the host application's logger."""
    def __init__(self, app):
        self.rules = None
        self.schedules = []
        self.log = None
        self.app = app
        try:
            self.log = app.log
        except AttributeError:
            #We seem to be in test mode, need a local logger
            self.log = llog()
    def parseJSON(self, filename):
        """Load *filename* (JSON) and build the Rules and Schedules
        collections from its "rules" and "items" sections.

        Rules are parsed first so that schedule actions can resolve their
        rule uuids against them.
        """
        with open(filename) as schedule_file:
            schedule = json.load(schedule_file)
        self.log.info("JSON file: {}".format(schedule))
        if "rules" in schedule:
            self.rules = Rules(schedule["rules"])
        if "items" in schedule:
            self.schedules = Schedules(schedule["items"], self.rules)
    def new_day(self, weekday):
        """ Load the schedules for the new day
        E.g. called when it's 00:00
        """
        # Delegates to the Schedules.weekday property setter, which
        # refreshes the active-schedule list when the day changes.
        self.schedules.weekday = weekday
class Schedules:
    """Every Schedule parsed from the "items" JSON section; tracks the
    current weekday and keeps ``self.activities`` holding the schedules
    active on that day."""
    def __init__(self, jsonstr, rules):
        self.schedules = []
        self.activities = []
        # Bug fix: the original did ``self.weekday = None``, which routed
        # through the property setter below; None is not in all_days, so
        # constructing Schedules always raised ValueError. Initialise the
        # backing field directly instead.
        self._weekday = None
        for element in jsonstr:
            item = Schedule(element, rules)
            self.schedules.append(item)

    def find(self, uuid):
        # NOTE(review): looks copy-pasted from Rules.find() --
        # ``self.rules`` is never set on this class, so calling this
        # raises AttributeError; left unchanged pending clarification.
        rule = None
        for r in self.rules:
            if r.uuid == uuid:
                rule = r
        return rule

    @property
    def weekday(self):
        """Currently active weekday ("mo".."su"); None until first set."""
        print("getter of weekday called")
        return self._weekday

    @weekday.setter
    def weekday(self, day):
        print("setter of weekday called")
        if day not in all_days:
            raise ValueError
        if self._weekday != day:
            # The day changed: refresh the list of active schedules.
            self.new_day(day)
        self._weekday = day

    def new_day(self, weekday):
        """Rebuild self.activities with the schedules active on *weekday*.

        NOTE(review): Schedule stores its day lists per-action inside its
        ``schedules`` dict and has no ``days`` attribute, so this lookup
        looks like it would raise AttributeError -- TODO confirm.
        """
        self.activities = []
        for s in self.schedules:
            if weekday in s.days:
                # found a day to include
                self.activities.append(s)
        print(self.activities)
        print(" ")
class Schedule:
    """One scheduled item (a device, scenario or group) plus its list of
    timed actions, parsed from a JSON "items" entry."""
    def __init__(self, jsonstr, rules=None):
        self.device = None
        self.scenario = None
        self.group = None
        if "device" in jsonstr:
            self.device = jsonstr["device"]
        if "scenario" in jsonstr:
            self.scenario = jsonstr["scenario"]
        if "group-uuid" in jsonstr:
            # Bug fix: the original tested for "group-uuid" but read
            # jsonstr["group"], raising KeyError whenever only the
            # "group-uuid" key was present. Prefer "group", fall back to
            # "group-uuid". TODO confirm which key the JSON actually uses.
            self.group = jsonstr.get("group", jsonstr["group-uuid"])
        self.enabled = jsonstr["enabled"]
        self.schedules = {}  # seq (1-based) -> parsed action dict
        seq = 0
        for a in jsonstr["actions"]:
            seq += 1
            x = {"action": a["action"],  # On/Off/Run etc
                 "time": a["time"],
                 "enabled": a["enabled"]}
            if "days" in a:
                # Expand the day-group shorthands to explicit day lists.
                if a["days"] == "weekdays":
                    x["days"] = ["mo", "tu", "we", "th", "fr"]
                elif a["days"] == "weekends":
                    x["days"] = ["sa", "su"]
                elif a["days"] == "all":
                    x["days"] = ["mo", "tu", "we", "th", "fr", "sa", "su"]
                else:
                    x["days"] = a["days"]
            # Optional per-action attributes are copied through as-is.
            for key in ("level", "tolevel", "endtime", "seq"):
                if key in a:
                    x[key] = a[key]
            if "rule" in a:
                # Keep the uuid and resolve it to the Rule object.
                x["rule-uuid"] = a["rule"]
                x["rule"] = rules.find(a["rule"])
            self.schedules[seq] = x

    def __str__(self):
        s = "Schedule: "
        if self.device is not None:
            s += "Device {}".format(self.device)
        if self.scenario is not None:
            s += "Scenario {}".format(self.scenario)
        if self.group is not None:
            s += "Group {}".format(self.group)
        # Typo fix: was "Enaled".
        s += "Enabled" if self.enabled else "Disabled"
        s += "# schedules: {}".format(len(self.schedules))
        return s
class Rules:
    """Collection of Rule objects parsed from the "rules" JSON section."""
    def __init__(self, jsonstr):
        self.rules = [Rule(element) for element in jsonstr]

    def find(self, uuid):
        """Return the Rule matching *uuid*, or None when absent.

        When several rules share a uuid, the last one wins (matching the
        original linear scan that kept overwriting its result)."""
        for candidate in reversed(self.rules):
            if candidate.uuid == uuid:
                return candidate
        return None
class Rule:
    """A single named rule: a uuid plus a sequence of condition checks
    that must all pass for execute() to return True."""
    def __init__(self, jsonstr):
        self.name = jsonstr["name"]
        self.uuid = jsonstr["uuid"]
        self.rules = {}  # seq (1-based) -> condition dict
        seq = 0
        for r in jsonstr["rules"]:
            seq += 1
            self.rules[seq] = {"type": r["type"],
                               "variable": r["variable"],
                               "operator": r["operator"],
                               "value": r["value"]}

    def __str__(self):
        """Return a string representing the content of the Rule object."""
        # Bug fix: the original formatted self.type, an attribute that is
        # never set, so printing a Rule raised AttributeError.
        return "name={}, uuid={}, # rules: {} ".format(
            self.name, self.uuid, len(self.rules))

    def execute(self):
        """Evaluate the conditions: False on the first failing check,
        True when every check passes (AND semantics).

        Bug fix: the original returned True right after the first
        "variable check" condition, silently ignoring the rest.
        """
        for seq, r in self.rules.items():  # items() works on Py2 and Py3
            if r["type"] == "variable check":
                # NOTE(review): variable values are stubbed pending
                # inventory integration; an unknown variable name leaves
                # ``vv`` unbound (NameError) -- TODO confirm intent.
                if r["variable"] == "HouseMode":
                    vv = "At home"  # TODO: Get variable from inventory
                if r["variable"] == "test":
                    vv = "True"
                if r["operator"] == 'eq':
                    if vv != r["value"]:
                        return False
                if r["operator"] == 'lt':
                    if vv >= r["value"]:
                        return False
        return True

    def addlog(self, log):
        self.log = log
class Days:
    """Placeholder for day handling (currently unused -- TODO confirm
    whether it can be removed or is reserved for future use)."""
    def __init__(self):
        pass
class llog:
    """Minimal fallback logger that mirrors the host application's logger
    API by printing level-prefixed messages to stdout."""
    def __init__(self):
        pass

    def _emit(self, level, msg):
        # Shared formatter: "<LEVEL>: <message>", exactly as the original
        # per-method print statements produced.
        print("%s: %s" % (level, msg))

    def info(self, msg):
        self._emit("INFO", msg)

    def trace(self, msg):
        self._emit("TRACE", msg)

    def debug(self, msg):
        self._emit("DEBUG", msg)

    def error(self, msg):
        self._emit("ERROR", msg)
# """
# This is the interface that allows for creating nested lists.
# You should not implement it, or speculate about its implementation
# """
#class NestedInteger:
# def __init__(self, value=None):
# """
# If value is not specified, initializes an empty list.
# Otherwise initializes a single integer equal to value.
# """
#
# def isInteger(self):
# """
# @return True if this NestedInteger holds a single integer, rather than a nested list.
# :rtype bool
# """
#
# def add(self, elem):
# """
# Set this NestedInteger to hold a nested list and adds a nested integer elem to it.
# :rtype void
# """
#
# def setInteger(self, value):
# """
# Set this NestedInteger to hold a single integer equal to value.
# :rtype void
# """
#
# def getInteger(self):
# """
# @return the single integer that this NestedInteger holds, if it holds a single integer
# Return None if this NestedInteger holds a nested list
# :rtype int
# """
#
# def getList(self):
# """
# @return the nested list that this NestedInteger holds, if it holds a nested list
# Return None if this NestedInteger holds a single integer
# :rtype List[NestedInteger]
# """
class Solution:
    def depthSum(self, nestedList, d=1):
        """Return the sum of all integers in *nestedList*, each weighted
        by its nesting depth (the top level has depth *d*, default 1).

        :type nestedList: List[NestedInteger]
        :rtype: int
        """
        if not nestedList:
            return 0
        # Integers contribute value * depth; sublists recurse one deeper.
        return sum(
            item.getInteger() * d if item.isInteger()
            else self.depthSum(item.getList(), d + 1)
            for item in nestedList
        )
#7/5/2014
import Image
from pytesser import *
import telnetlib
import base64
import StringIO
def main():
    """Solve the 'rot300' CTF challenge end to end.

    Connects to the challenge server, receives a base64-encoded captcha
    image, reconstructs a readable image by blending two rotations and
    bleaching a colour key, OCRs the text with pytesser, and sends the
    answer back. Python 2 script (print statements, StringIO).
    """
    tn = telnetlib.Telnet("41.231.53.40",9090)
    # First line from the server is the base64-encoded image.
    ret = tn.read_until("\n")
    print ret
    base64_str = ret.strip()
    decode = base64.b64decode(base64_str)
    buff = StringIO.StringIO()
    buff.write(decode)
    buff.seek(0)
    img = Image.open(buff)
    #construct new image
    # Blend a 90-degree rotation with its further 180-degree rotation so
    # the two halves of the scrambled text overlap into readable glyphs.
    img = img.rotate(90)
    img2 = img.rotate(180)
    new_img = Image.blend(img,img2,0.5)
    new_img = new_img.convert("RGBA")
    pixels = new_img.load()
    width, height = new_img.size
    for x in range(width):
        for y in range(height):
            r, g, b, a = pixels[x, y]
            #print r,g,b
            # Bleach the (g=255, b=127) colour key to white so it does
            # not confuse the OCR pass.
            if g==255 and b==127:
                pixels[x,y] = (255,255,255,a)
    new_img.save("newnew.png")
    text = image_file_to_string("newnew.png")#read text from the image
    #new_img.show()
    # The answer is on the second OCR line; strip all whitespace from it.
    text = text.split("\n")[1]
    text = "".join(text.split())
    #print text,
    ret = tn.read_until("Answer:")
    print ret,
    print text
    tn.write(text + "\n")
    ret = tn.read_all()
    print ret
if __name__ == "__main__":
    main()
| SeMorgana/ctf | pwnium2014/rot300/rot300.py | Python | gpl-3.0 | 1,187 |
'''
Plot queue occupancy over time
'''
from helper import *
import plot_defaults
# Command-line interface for the queue-occupancy plotter.
parser = argparse.ArgumentParser()
parser.add_argument('--files', '-f',
                    help="Queue timeseries output to one plot",
                    required=True,
                    action="store",
                    nargs='+',
                    dest="files")
parser.add_argument('--maxy',
                    help="Max mbps on y-axis..",
                    type=int,
                    default=1000,
                    action="store",
                    dest="maxy")
parser.add_argument('--miny',
                    help="Min mbps on y-axis..",
                    type=int,
                    default=0,
                    action="store",
                    dest="miny")
parser.add_argument('--legend', '-l',
                    help="Legend to use if there are multiple plots. File names used as default.",
                    action="store",
                    nargs="+",
                    default=None,
                    dest="legend")
parser.add_argument('--out', '-o',
                    help="Output png file for the plot.",
                    default=None, # Will show the plot
                    dest="out")
parser.add_argument('-s', '--summarise',
                    help="Summarise the time series plot (boxplot). First 10 and last 10 values are ignored.",
                    default=False,
                    dest="summarise",
                    action="store_true")
parser.add_argument('--cdf',
                    help="Plot CDF of queue timeseries (first 10 and last 10 values are ignored)",
                    default=False,
                    dest="cdf",
                    action="store_true")
parser.add_argument('--labels',
                    help="Labels for x-axis if summarising; defaults to file names",
                    required=False,
                    default=[],
                    nargs="+",
                    dest="labels")

args = parser.parse_args()

# Fall back to the file names when labels/legend were not supplied.
# Bug fix: --labels defaults to [] (not None), so the original
# "is None" test never fired and the summary x-axis came out unlabelled.
if not args.labels:
    args.labels = args.files

if args.legend is None:
    args.legend = args.files
# Accumulates the per-file series when --summarise or --cdf is requested.
to_plot = []


def get_style(i):
    """Matplotlib style kwargs for series *i*: solid red for the first
    series, dash-dotted black for every other one."""
    return {'color': 'red'} if i == 0 else {'color': 'black', 'ls': '-.'}
# Load each timeseries: column 0 is the timestamp (rebased to 0), column 1
# the queue length in packets. Python 2 script: map() returns lists here.
# NOTE(review): an empty input file would make xaxis[0] raise IndexError.
for i, f in enumerate(args.files):
    data = read_list(f)
    xaxis = map(float, col(0, data))
    start_time = xaxis[0]
    xaxis = map(lambda x: x - start_time, xaxis)
    qlens = map(float, col(1, data))
    if args.summarise or args.cdf:
        # Drop the first/last 10 samples (warm-up and tear-down noise).
        to_plot.append(qlens[10:-10])
    else:
        plt.plot(xaxis, qlens, label=args.legend[i], lw=2, **get_style(i))
plt.title("Queue sizes")
plt.ylabel("Packets")
plt.grid(True)

#yaxis = range(0, 1101, 50)
#ylabels = map(lambda y: str(y) if y%100==0 else '', yaxis)
#plt.yticks(yaxis, ylabels)
#plt.ylim((0,1100))
plt.ylim((args.miny,args.maxy))

if args.summarise:
    # One box per input file, annotated with its 99th percentile.
    plt.xlabel("Link Rates")
    plt.boxplot(to_plot)
    xaxis = range(1, 1+len(args.files))
    plt.xticks(xaxis, args.labels)
    for x in xaxis:
        y = pc99(to_plot[x-1])
        print x, y
        if x == 1:
            s = '99pc: %d' % y
            offset = (-20,20)
        else:
            s = str(y)
            offset = (-10, 20)
        plt.annotate(s, (x,y+1), xycoords='data',
                xytext=offset, textcoords='offset points',
                arrowprops=dict(arrowstyle="->"))
elif args.cdf:
    # Empirical CDF of queue occupancy for each file.
    for i,data in enumerate(to_plot):
        xs, ys = cdf(map(int, data))
        plt.plot(xs, ys, label=args.legend[i], lw=2, **get_style(i))
    plt.ylabel("Fraction")
    plt.xlabel("Packets")
    plt.ylim((0, 1.0))
    plt.legend(args.legend, loc="upper left")
    plt.title("")
else:
    # Plain timeseries plot.
    plt.xlabel("Seconds")
    if args.legend:
        plt.legend(args.legend, loc="upper left")
    else:
        plt.legend(args.files)

# Save to file when --out was given, otherwise open an interactive window.
if args.out:
    plt.savefig(args.out)
else:
    plt.show()
| XianliangJ/collections | ShrewAttack/util/plot_queue.py | Python | gpl-3.0 | 3,877 |
#!/usr/bin/env python
# File created on 10 Mar 2010
from __future__ import division
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2011-2013, The PICRUSt Project"
__credits__ = ["Greg Caporaso","Morgan Langille"]
__license__ = "GPL"
__version__ = "1.1.0"
__maintainer__ = "Greg Caporaso"
__email__ = "gregcaporaso@gmail.com"
__status__ = "Development"
#from qiime.util import make_option
#from qiime.util import parse_command_line_parameters
from cogent.util.option_parsing import parse_command_line_parameters, make_option
from subprocess import Popen
from os import makedirs, chmod, getenv, remove
from os.path import exists
from shutil import rmtree
from stat import S_IRWXU
from picrust.parallel import grouper
from math import ceil
#from qiime.util import load_qiime_config
#qiime_config = load_qiime_config()
# Command-line interface definition consumed by
# parse_command_line_parameters() in main() below.
script_info = {}
script_info['brief_description'] = "Starts multiple jobs in parallel on multicore or multiprocessor systems."
script_info['script_description'] = "This script is designed to start multiple jobs in parallel on systems with no queueing system, for example a multiple processor or multiple core laptop/desktop machine. This also serves as an example 'cluster_jobs' which users can use a template to define scripts to start parallel jobs in their environment."
script_info['script_usage'] = [\
 ("Example",\
 "Start each command listed in test_jobs.txt in parallel. The run id for these jobs will be RUNID. ",\
 "%prog -ms test_jobs.txt RUNID")]
script_info['output_description']= "No output is created."
script_info['required_options'] = []
script_info['optional_options'] = [\
 make_option('-m','--make_jobs',action='store_true',\
 help='make the job files [default: %default]'),\
 make_option('-s','--submit_jobs',action='store_true',\
 help='submit the job files [default: %default]'),\
 make_option('-d','--delay',action='store',type='int',default=0,
 help='Number of seconds to pause between launching each job [default: %default]'),
 make_option('-n','--num_jobs',action='store',type='int',\
 help='Number of jobs to group commands into. [default: %default]',\
 default=4)\
]
script_info['version'] = __version__
script_info['disallow_positional_arguments'] = False
def write_job_files(output_dir, commands, run_id, num_jobs=4):
    """Split *commands* across at most *num_jobs* executable shell scripts.

    Scripts are written to <output_dir>/jobs/ and named <run_id><i>.
    Returns (job_fps, paths_to_remove): the script paths, and the paths to
    delete after the run -- the whole jobs dir if this call created it,
    otherwise just the individual scripts.
    """
    jobs_dir = '%s/jobs/' % output_dir
    job_fps = []
    if not exists(jobs_dir):
        try:
            makedirs(jobs_dir)
        except OSError as e:
            # Modernised from the Python-2-only "except OSError, e" /
            # "raise OSError, msg" spellings; the message is unchanged.
            raise OSError("Error creating jobs directory in working dir: %s" % output_dir +
                          " (specified in qiime_config). Do you have write access?. " +
                          "Original error message follows:\n%s" % str(e))
        paths_to_remove = [jobs_dir]
    else:
        # The jobs dir already existed: point paths_to_remove at job_fps so
        # the caller only removes the scripts written below.
        paths_to_remove = job_fps
    # Some clusters require wrapping every command in "/bin/bash ... exit",
    # which breaks when the commands run as parallel shell scripts; those
    # subcommands are stripped here (see the original bmf/bmf2 note).
    ignored_subcommands = {}.fromkeys(['/bin/bash', 'exit'])
    # Number of commands to put in each job script.
    num_commands_per_job = int(ceil(len(commands) / float(num_jobs)))
    for i, command_group in enumerate(grouper(commands, num_commands_per_job, '')):
        job_fp = '%s/%s%d' % (jobs_dir, run_id, i)
        # Context manager: the handle is closed even if a write fails.
        with open(job_fp, 'w') as f:
            for command in command_group:
                f.write('\n'.join([subcommand
                                   for subcommand in command.split(';')
                                   if subcommand.strip() not in ignored_subcommands]))
        chmod(job_fp, S_IRWXU)
        job_fps.append(job_fp)
    return job_fps, paths_to_remove
def run_commands(output_dir,commands,run_id,submit_jobs,keep_temp,num_jobs=4):
    """Write *commands* into parallel job scripts and optionally launch them.

    output_dir -- directory whose jobs/ subfolder receives the scripts
    commands -- list of shell command strings (may be ';'-joined)
    run_id -- prefix used for the generated script names
    submit_jobs -- when True, each script is started via /bin/sh (Popen,
        fire-and-forget: the processes are not waited on)
    keep_temp -- when False, the generated scripts (or the whole jobs dir)
        are removed again after launching
    num_jobs -- maximum number of scripts/parallel jobs to create
    """
    # Popen is not a big fan of how we join commands with semi-colons,
    # so each command is written to a shell script which is then called
    # by Popen
    job_fps, paths_to_remove = write_job_files(output_dir,commands,run_id,num_jobs=num_jobs)
    # Call the jobs
    if submit_jobs:
        for job_fp in job_fps:
            Popen(['/bin/sh', job_fp])
    # clean up the shell scripts that were created
    if not keep_temp:
        for p in paths_to_remove:
            try:
                # p is file
                remove(p)
            except OSError:
                # p is directory
                rmtree(p)
    return
def main():
    """CLI entry point: read a commands file (first positional argument)
    and start its commands as parallel jobs tagged with the run id
    (second positional argument). -m writes the job files; -s also
    launches them (and requires -m for backwards compatibility)."""
    option_parser, opts, args =\
        parse_command_line_parameters(**script_info)
    if opts.submit_jobs and not opts.make_jobs:
        option_parser.error('Must pass -m if passing -s. (Sorry about this, '+\
            'it\'s for backwards-compatibility.)')
    min_args = 2
    if len(args) < min_args:
        option_parser.error('Exactly two arguments are required.')
    output_dir = './'
    # Job scripts are always kept (keep_temp=True); each line of the input
    # file becomes one command.
    run_commands(output_dir,open(args[0]).readlines(),args[1],\
        submit_jobs=opts.submit_jobs,\
        keep_temp=True,num_jobs=opts.num_jobs)
if __name__ == "__main__":
    main()
| zaneveld/picrust | scripts/start_parallel_jobs.py | Python | gpl-3.0 | 5,341 |
import numpy as np
from astropy.coordinates import Angle, Distance
from astropy import units as u
from .angles2Plane import gal_theta
def vdm_2001_dep_dist_kpc(rho, phi, glx_theta, glx_incl, D_0):
    """Deprojected distance (kpc) following van der Marel & Cioni (2001).

    The point at projected polar coordinates (rho, phi) is assumed to lie
    exactly on the inclined galactic disk (z' = 0 in their Eq. 7); the
    plane is fixed by the rotation angles (glx_theta, glx_incl) and the
    distance D_0 to its centre. The returned value is the distance from
    the point to the plane centre, as a plain float/array (``.value``).

    rho, phi, glx_theta, glx_incl -- Angle-like objects (expose .radian)
    D_0 -- Distance-like object (kpc)
    """
    # Eq. (8) of vdM & Cioni (2001): distance ratio D/D_0 for a point on
    # the disk plane.
    sin_term = np.sin(phi.radian - glx_theta.radian)
    denom = 0.5 * ((1 - sin_term) * np.cos(glx_incl.radian - rho.radian) +
                   (1 + sin_term) * np.cos(glx_incl.radian + rho.radian))
    dist_ratio = np.cos(glx_incl.radian) / denom
    # Cosine law between the two lines of sight (to the point, length
    # D = dist_ratio * D_0, and to the centre, length D_0) separated by
    # the angular distance rho. Equivalent to building the (x', y', z')
    # in-plane coordinates of Eq. (7) and taking the Euclidean distance.
    dep_dist = D_0 * np.sqrt(1. + dist_ratio ** 2 -
                             2 * dist_ratio * np.cos(rho.radian))
    return dep_dist.value
def get_deproj_dist(glx_PA, glx_incl, glx_dist, rho, phi):
    """
    Computes deprojected galactocentric distance between cluster and the
    center of the MC in kpc.

    Based on: https://gist.github.com/jonathansick/9399842

    Parameters
    ----------
    glx_PA : :class:`astropy.coordinates.Angle`
        Position angle of the galaxy disk.
    glx_incl : :class:`astropy.coordinates.Angle`
        Inclination angle of the galaxy disk.
    glx_dist : :class:`astropy.coordinates.Distance`
        Distance to galaxy.
    rho :
        Projected angular distance from cluster to center of galaxy
        (Angle-like).
    phi :
        Position angle of the cluster (West to East, Angle-like).

    Returns
    -------
    dist_kpc : float or numpy array
        Galactocentric distance(s) in kpc for the coordinate point(s).
        (Note: a plain numeric value, not a Distance object -- the helper
        returns ``.value``.)
    """
    # Obtain 'theta' position angle for the galaxy.
    theta = gal_theta(glx_PA)
    # Distance to galaxy in kpc.
    D_0 = Distance(glx_dist.kpc, unit=u.kpc)
    # Deprojected distance in kpc.
    dist_kpc = vdm_2001_dep_dist_kpc(rho, phi, theta, glx_incl, D_0)
    return dist_kpc
def main(rho, phi, inc_lst, pa_lst, gal_dist):
    """
    Calculate deprojected distances for all clusters in this galaxy,
    for all inclination and position angles defined.

    These values depend on the coordinates of the clusters (rho, phi), the
    rotation angles that define each inclined plane (inc_lst, pa_lst), and
    the distance (gal_dist) and center coordinates of the galaxy.

    Returns a 2-D nested list indexed as [inclination][position angle],
    each cell holding the per-cluster deprojected distances in kpc.

    NOTE(review): the result is indexed dep_dist_i_PA_vals[i][j] with i
    iterating inc_lst and j iterating pa_lst, but the list is created with
    the outer length from pa_lst and inner from inc_lst -- only safe when
    both lists have the same length. TODO confirm.
    """
    # Create empty list with correct shape.
    dep_dist_i_PA_vals = [[[] for _ in inc_lst] for _ in pa_lst]

    for i, inc in enumerate(inc_lst):
        for j, pa in enumerate(pa_lst):
            # Assign 'degrees' units before passing.
            inc, pa = Angle(inc, unit=u.degree), Angle(pa, unit=u.degree)

            # Obtain deprojected distances for all the clusters, in kpc,
            # using the values of inclination and position angles passed.
            dep_dist_kpc = get_deproj_dist(pa, inc, gal_dist, rho, phi)

            # Store deprojected distance values.
            dep_dist_i_PA_vals[i][j] = dep_dist_kpc

    return dep_dist_i_PA_vals
| Gabriel-p/mcs_rot_angles | modules/i_PA_DeprjDist.py | Python | gpl-3.0 | 4,382 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2022 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
import io
import sys
import tempfile
import time
import os
import traceback
from pathlib import Path
import gitlab
import requests
from .functions import (setLocalPath, showCommitDialog, logInPavlovia,
noGitWarning)
from psychopy.localization import _translate
from psychopy.projects import pavlovia
from psychopy import logging
from psychopy.app.pavlovia_ui import sync, functions
import wx
from wx.lib import scrolledpanel as scrlpanel
from .. import utils
from ...projects.pavlovia import PavloviaProject
try:
import wx.lib.agw.hyperlink as wxhl # 4.0+
except ImportError:
import wx.lib.hyperlink as wxhl # <3.0.2
_starred = u"\u2605"
_unstarred = u"\u2606"
class ProjectEditor(wx.Dialog):
    """Dialog for creating a new Pavlovia project, or for editing the
    metadata (name, group/owner, description, tags, visibility, local
    folder) of an existing one.

    On success, ``self.project`` holds the created/updated
    :class:`pavlovia.PavloviaProject` and the dialog ends with ``wx.ID_OK``.
    """

    def __init__(self, parent=None, id=wx.ID_ANY, project=None, localRoot="",
                 *args, **kwargs):
        wx.Dialog.__init__(self, parent, id,
                           *args, **kwargs)
        panel = wx.Panel(self, wx.ID_ANY, style=wx.TAB_TRAVERSAL)
        # when a project is successfully created these will be populated
        if hasattr(parent, 'filename'):
            self.filename = parent.filename
        else:
            self.filename = None
        self.project = project  # type: pavlovia.PavloviaProject
        self.projInfo = None
        self.parent = parent

        if project:
            # edit existing project
            self.isNew = False
            if project.localRoot and not localRoot:
                localRoot = project.localRoot
        else:
            self.isNew = True

        # create the controls
        nameLabel = wx.StaticText(panel, -1, _translate("Name:"))
        self.nameBox = wx.TextCtrl(panel, -1, size=(400, -1))
        # Path can contain only letters, digits, '_', '-' and '.'.
        # Cannot start with '-', end in '.git' or end in '.atom']
        pavSession = pavlovia.getCurrentSession()

        # The project must be owned by the logged-in user or one of their
        # groups; a missing user is a programming error at this point.
        try:
            username = pavSession.user.username
        except AttributeError as e:
            raise pavlovia.NoUserError("{}: Tried to create project with no user logged in.".format(e))

        gpChoices = [username]
        gpChoices.extend(pavSession.listUserGroups())
        groupLabel = wx.StaticText(panel, -1, _translate("Group/owner:"))
        self.groupBox = wx.Choice(panel, -1, size=(400, -1),
                                  choices=gpChoices)

        descrLabel = wx.StaticText(panel, -1, _translate("Description:"))
        self.descrBox = wx.TextCtrl(panel, -1, size=(400, 200),
                                    style=wx.TE_MULTILINE | wx.SUNKEN_BORDER)

        localLabel = wx.StaticText(panel, -1, _translate("Local folder:"))
        self.localBox = wx.TextCtrl(panel, -1, size=(400, -1),
                                    value=localRoot)
        self.btnLocalBrowse = wx.Button(panel, wx.ID_ANY, _translate("Browse..."))
        self.btnLocalBrowse.Bind(wx.EVT_BUTTON, self.onBrowseLocal)
        localPathSizer = wx.BoxSizer(wx.HORIZONTAL)
        localPathSizer.Add(self.localBox)
        localPathSizer.Add(self.btnLocalBrowse)

        tagsLabel = wx.StaticText(panel, -1,
                                  _translate("Tags (comma separated):"))
        self.tagsBox = wx.TextCtrl(panel, -1, size=(400, 100),
                                   value="PsychoPy, Builder, Coder",
                                   style=wx.TE_MULTILINE | wx.SUNKEN_BORDER)
        publicLabel = wx.StaticText(panel, -1, _translate("Public:"))
        self.publicBox = wx.CheckBox(panel, -1)

        # buttons
        if self.isNew:
            buttonMsg = _translate("Create project on Pavlovia")
        else:
            buttonMsg = _translate("Submit changes to Pavlovia")
        updateBtn = wx.Button(panel, -1, buttonMsg)
        updateBtn.Bind(wx.EVT_BUTTON, self.submitChanges)
        cancelBtn = wx.Button(panel, -1, _translate("Cancel"))
        cancelBtn.Bind(wx.EVT_BUTTON, self.onCancel)
        btnSizer = wx.BoxSizer(wx.HORIZONTAL)
        # Platform-conventional button order (OK on the left on Windows).
        if sys.platform == "win32":
            btns = [updateBtn, cancelBtn]
        else:
            btns = [cancelBtn, updateBtn]
        btnSizer.AddMany(btns)

        # do layout
        fieldsSizer = wx.FlexGridSizer(cols=2, rows=6, vgap=5, hgap=5)
        fieldsSizer.AddMany([(nameLabel, 0, wx.ALIGN_RIGHT), self.nameBox,
                             (groupLabel, 0, wx.ALIGN_RIGHT), self.groupBox,
                             (localLabel, 0, wx.ALIGN_RIGHT), localPathSizer,
                             (descrLabel, 0, wx.ALIGN_RIGHT), self.descrBox,
                             (tagsLabel, 0, wx.ALIGN_RIGHT), self.tagsBox,
                             (publicLabel, 0, wx.ALIGN_RIGHT), self.publicBox])
        border = wx.BoxSizer(wx.VERTICAL)
        border.Add(fieldsSizer, 0, wx.ALL, 5)
        border.Add(btnSizer, 0, wx.ALIGN_RIGHT | wx.ALL, 5)
        panel.SetSizerAndFit(border)
        self.Fit()

    def onCancel(self, evt=None):
        """Close the dialog without saving anything."""
        self.EndModal(wx.ID_CANCEL)

    def submitChanges(self, evt=None):
        """Create the project (new) or push metadata changes (existing),
        then end the dialog with ``wx.ID_OK``.

        Requires a logged-in Pavlovia session; prompts for login if needed
        and silently returns when login fails.
        """
        session = pavlovia.getCurrentSession()
        if not session.user:
            user = logInPavlovia(parent=self.parent)  # NOTE(review): return value unused
        if not session.user:
            return
        # get current values
        name = self.nameBox.GetValue()
        namespace = self.groupBox.GetStringSelection()
        descr = self.descrBox.GetValue()
        visibility = self.publicBox.GetValue()
        # tags need splitting and then
        tagsList = self.tagsBox.GetValue().split(',')
        tags = [thisTag.strip() for thisTag in tagsList]
        localRoot = self.localBox.GetValue()
        if not localRoot:
            localRoot = setLocalPath(self.parent, project=None, path="")

        # then create/update
        if self.isNew:
            project = session.createProject(name=name,
                                            description=descr,
                                            tags=tags,
                                            visibility=visibility,
                                            localRoot=localRoot,
                                            namespace=namespace)
            self.project = project
            self.project._newRemote = True
        else:  # we're changing metadata of an existing project. Don't sync
            self.project.pavlovia.name = name
            self.project.pavlovia.description = descr
            self.project.tags = tags
            self.project.visibility = visibility
            self.project.localRoot = localRoot
            self.project.save()  # pushes changed metadata to gitlab
            self.project._newRemote = False

        self.EndModal(wx.ID_OK)
        pavlovia.knownProjects.save()
        self.project.getRepo(forceRefresh=True)
        self.parent.project = self.project

    def onBrowseLocal(self, evt=None):
        """Let the user pick the local folder; update the text box and,
        when editing an existing project, its ``localRoot``."""
        newPath = setLocalPath(self, path=self.filename)
        if newPath:
            self.localBox.SetLabel(newPath)
            self.Layout()
            if self.project:
                self.project.localRoot = newPath
        self.Raise()
class DetailsPanel(wx.Panel):
    """Panel displaying the details of one Pavlovia project (icon, title,
    author, link, star/fork counts, local root, description, visibility,
    status and keywords) with buttons to create, sync, download, fork and
    star.

    Assigning the ``project`` property (re)populates every control;
    assigning ``None`` blanks and disables them.
    """

    class StarBtn(wx.Button):
        """Toggle button showing a filled/empty star for the current
        user's 'starred' state of the project."""

        def __init__(self, parent, iconCache, value=False):
            wx.Button.__init__(self, parent, label=_translate("Star"))
            # Setup icons
            self.icons = {
                True: iconCache.getBitmap(name="starred", size=16),
                False: iconCache.getBitmap(name="unstarred", size=16),
            }
            self.SetBitmapDisabled(self.icons[False])  # Always appear empty when disabled
            # Set start value
            self.value = value

        @property
        def value(self):
            # Current starred state (bool).
            return self._value

        @value.setter
        def value(self, value):
            # Store value
            self._value = bool(value)
            # Change icon (normal, hover and focus states all match)
            self.SetBitmap(self.icons[self._value])
            self.SetBitmapCurrent(self.icons[self._value])
            self.SetBitmapFocus(self.icons[self._value])

        def toggle(self):
            """Flip the starred state (and therefore the icon)."""
            self.value = (not self.value)

    def __init__(self, parent, project=None,
                 size=(650, 550),
                 style=wx.NO_BORDER):

        wx.Panel.__init__(self, parent, -1,
                          size=size,
                          style=style)
        self.SetBackgroundColour("white")
        iconCache = parent.app.iconCache
        # Setup sizer
        self.contentBox = wx.BoxSizer()
        self.SetSizer(self.contentBox)
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.contentBox.Add(self.sizer, proportion=1, border=12, flag=wx.ALL | wx.EXPAND)
        # Head sizer
        self.headSizer = wx.BoxSizer(wx.HORIZONTAL)
        self.sizer.Add(self.headSizer, border=0, flag=wx.EXPAND)
        # Icon
        self.icon = utils.ImageCtrl(self, bitmap=wx.Bitmap(), size=(128, 128))
        self.icon.SetBackgroundColour("#f2f2f2")
        self.icon.Bind(wx.EVT_FILEPICKER_CHANGED, self.updateProject)
        self.headSizer.Add(self.icon, border=6, flag=wx.ALL)
        self.icon.SetToolTip(_translate(
            "An image to represent this project, this helps it stand out when browsing on Pavlovia."
        ))
        # Title sizer
        self.titleSizer = wx.BoxSizer(wx.VERTICAL)
        self.headSizer.Add(self.titleSizer, proportion=1, flag=wx.EXPAND)
        # Title
        self.title = wx.TextCtrl(self,
                                 size=(-1, 30 if sys.platform == 'darwin' else -1),
                                 value="")
        self.title.Bind(wx.EVT_KILL_FOCUS, self.updateProject)
        self.title.SetFont(
            wx.Font(24, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD)
        )
        self.titleSizer.Add(self.title, border=6, flag=wx.ALL | wx.EXPAND)
        self.title.SetToolTip(_translate(
            "Title of the project. Unlike the project name, this isn't used as a filename anywhere; so you can "
            "add spaces, apostrophes and emojis to your heart's content! 🦕✨"
        ))
        # Author
        self.author = wx.StaticText(self, size=(-1, -1), label="by ---")
        self.titleSizer.Add(self.author, border=6, flag=wx.LEFT | wx.RIGHT)
        # Pavlovia link
        self.link = wxhl.HyperLinkCtrl(self, -1,
                                       label="https://pavlovia.org/",
                                       URL="https://pavlovia.org/",
                                       )
        self.link.SetBackgroundColour("white")
        self.titleSizer.Add(self.link, border=6, flag=wx.EXPAND | wx.LEFT | wx.RIGHT | wx.BOTTOM)
        self.link.SetToolTip(_translate(
            "Click to view the project in Pavlovia."
        ))
        # Button sizer
        self.btnSizer = wx.BoxSizer(wx.HORIZONTAL)
        self.titleSizer.Add(self.btnSizer, flag=wx.EXPAND)
        # Star button
        self.starLbl = wx.StaticText(self, label="-")
        self.btnSizer.Add(self.starLbl, border=6, flag=wx.LEFT | wx.TOP | wx.BOTTOM | wx.ALIGN_CENTER_VERTICAL)
        self.starBtn = self.StarBtn(self, iconCache=iconCache)
        self.starBtn.Bind(wx.EVT_BUTTON, self.star)
        self.btnSizer.Add(self.starBtn, border=6, flag=wx.ALL | wx.EXPAND)
        self.starBtn.SetToolTip(_translate(
            "'Star' this project to get back to it easily. Projects you've starred will appear first in your searches "
            "and projects with more stars in total will appear higher in everyone's searches."
        ))
        # Fork button
        self.forkLbl = wx.StaticText(self, label="-")
        self.btnSizer.Add(self.forkLbl, border=6, flag=wx.LEFT | wx.TOP | wx.BOTTOM | wx.ALIGN_CENTER_VERTICAL)
        self.forkBtn = wx.Button(self, label=_translate("Fork"))
        self.forkBtn.SetBitmap(iconCache.getBitmap(name="fork", size=16))
        self.forkBtn.Bind(wx.EVT_BUTTON, self.fork)
        self.btnSizer.Add(self.forkBtn, border=6, flag=wx.ALL | wx.EXPAND)
        self.forkBtn.SetToolTip(_translate(
            "Create a copy of this project on your own Pavlovia account so that you can make changes without affecting "
            "the original project."
        ))
        # Create button
        self.createBtn = wx.Button(self, label=_translate("Create"))
        self.createBtn.SetBitmap(iconCache.getBitmap(name="plus", size=16))
        self.createBtn.Bind(wx.EVT_BUTTON, self.create)
        self.btnSizer.Add(self.createBtn, border=6, flag=wx.RIGHT | wx.TOP | wx.BOTTOM | wx.ALIGN_CENTER_VERTICAL)
        self.createBtn.SetToolTip(_translate(
            "Create a Pavlovia project for the current experiment."
        ))
        # Sync button
        self.syncBtn = wx.Button(self, label=_translate("Sync"))
        self.syncBtn.SetBitmap(iconCache.getBitmap(name="view-refresh", size=16))
        self.syncBtn.Bind(wx.EVT_BUTTON, self.sync)
        self.btnSizer.Add(self.syncBtn, border=6, flag=wx.ALL | wx.EXPAND)
        self.syncBtn.SetToolTip(_translate(
            "Synchronise this project's local files with their online counterparts. This will 'pull' changes from "
            "Pavlovia and 'push' changes from your local files."
        ))
        # Get button
        self.downloadBtn = wx.Button(self, label=_translate("Download"))
        self.downloadBtn.SetBitmap(iconCache.getBitmap(name="download", size=16))
        self.downloadBtn.Bind(wx.EVT_BUTTON, self.sync)
        self.btnSizer.Add(self.downloadBtn, border=6, flag=wx.ALL | wx.EXPAND)
        self.downloadBtn.SetToolTip(_translate(
            "'Clone' this project, creating local copies of all its files and tracking any changes you make so that "
            "they can be applied when you next 'sync' the project."
        ))
        # Sync label
        self.syncLbl = wx.StaticText(self, size=(-1, -1), label="---")
        self.btnSizer.Add(self.syncLbl, border=6, flag=wx.RIGHT | wx.TOP | wx.BOTTOM | wx.ALIGN_CENTER_VERTICAL)
        self.syncLbl.SetToolTip(_translate(
            "Last synced at..."
        ))
        self.btnSizer.AddStretchSpacer(1)
        # Sep
        self.sizer.Add(wx.StaticLine(self, -1), border=6, flag=wx.EXPAND | wx.ALL)
        # Local root
        self.rootSizer = wx.BoxSizer(wx.HORIZONTAL)
        self.sizer.Add(self.rootSizer, flag=wx.EXPAND)
        self.localRootLabel = wx.StaticText(self, label="Local root:")
        self.rootSizer.Add(self.localRootLabel, border=6, flag=wx.ALIGN_CENTER_VERTICAL | wx.ALL)
        self.localRoot = utils.FileCtrl(self, dlgtype="dir")
        self.localRoot.Bind(wx.EVT_FILEPICKER_CHANGED, self.updateProject)
        self.rootSizer.Add(self.localRoot, proportion=1, border=6, flag=wx.EXPAND | wx.LEFT | wx.RIGHT | wx.BOTTOM)
        self.localRoot.SetToolTip(_translate(
            "Folder in which local files are stored for this project. Changes to files in this folder will be tracked "
            "and applied to the project when you 'sync', so make sure the only files in this folder are relevant!"
        ))
        # Sep
        self.sizer.Add(wx.StaticLine(self, -1), border=6, flag=wx.EXPAND | wx.ALL)
        # Description
        self.description = wx.TextCtrl(self, size=(-1, -1), value="", style=wx.TE_MULTILINE)
        self.description.Bind(wx.EVT_KILL_FOCUS, self.updateProject)
        self.sizer.Add(self.description, proportion=1, border=6, flag=wx.ALL | wx.EXPAND)
        self.description.SetToolTip(_translate(
            "Description of the project to be shown on Pavlovia. Note: This is different than a README file!"
        ))
        # Sep
        self.sizer.Add(wx.StaticLine(self, -1), border=6, flag=wx.EXPAND | wx.ALL)
        # Visibility
        self.visSizer = wx.BoxSizer(wx.HORIZONTAL)
        self.sizer.Add(self.visSizer, flag=wx.EXPAND)
        self.visLbl = wx.StaticText(self, label=_translate("Visibility:"))
        self.visSizer.Add(self.visLbl, border=6, flag=wx.ALIGN_CENTER_VERTICAL | wx.ALL)
        self.visibility = wx.Choice(self, choices=["Private", "Public"])
        self.visibility.Bind(wx.EVT_CHOICE, self.updateProject)
        self.visSizer.Add(self.visibility, proportion=1, border=6, flag=wx.EXPAND | wx.ALL)
        self.visibility.SetToolTip(_translate(
            "Visibility of the current project; whether its visible only to its creator (Private) or to any user "
            "(Public)."
        ))
        # Status
        self.statusSizer = wx.BoxSizer(wx.HORIZONTAL)
        self.sizer.Add(self.statusSizer, flag=wx.EXPAND)
        self.statusLbl = wx.StaticText(self, label=_translate("Status:"))
        self.statusSizer.Add(self.statusLbl, border=6, flag=wx.ALIGN_CENTER_VERTICAL | wx.ALL)
        self.status = wx.Choice(self, choices=["Running", "Piloting", "Inactive"])
        self.status.Bind(wx.EVT_CHOICE, self.updateProject)
        self.statusSizer.Add(self.status, proportion=1, border=6, flag=wx.EXPAND | wx.ALL)
        self.status.SetToolTip(_translate(
            "Project status; whether it can be run to collect data (Running), run by its creator without saving "
            "data (Piloting) or cannot be run (Inactive)."
        ))
        # Tags
        self.tagSizer = wx.BoxSizer(wx.HORIZONTAL)
        self.sizer.Add(self.tagSizer, flag=wx.EXPAND)
        self.tagLbl = wx.StaticText(self, label=_translate("Keywords:"))
        self.tagSizer.Add(self.tagLbl, border=6, flag=wx.ALIGN_CENTER_VERTICAL | wx.ALL)
        self.tags = utils.ButtonArray(self, orient=wx.HORIZONTAL, items=[], itemAlias=_translate("tag"))
        self.tags.Bind(wx.EVT_LIST_INSERT_ITEM, self.updateProject)
        self.tags.Bind(wx.EVT_LIST_DELETE_ITEM, self.updateProject)
        self.tagSizer.Add(self.tags, proportion=1, border=6, flag=wx.EXPAND | wx.ALL)
        self.tags.SetToolTip(_translate(
            "Keywords associated with this project, helping others to find it. For example, if your experiment is "
            "useful to psychophysicists, you may want to add the keyword 'psychophysics'."
        ))
        # Populate
        if project is not None:
            project.refresh()
        self.project = project

    @property
    def project(self):
        # Currently displayed PavloviaProject (or None).
        return self._project

    @project.setter
    def project(self, project):
        """Store the project and (re)populate every control from it.

        ``None`` blanks out and disables the panel; otherwise each widget
        is filled from the project's info and enabled according to
        ``project.editable`` / login state.
        """
        self._project = project
        # Populate fields
        if project is None:
            # Icon
            self.icon.SetBitmap(wx.Bitmap())
            self.icon.SetBackgroundColour("#f2f2f2")
            self.icon.Disable()
            # Title
            self.title.SetValue("")
            self.title.Disable()
            # Author
            self.author.SetLabel("by --- on ---")
            self.author.Disable()
            # Link
            self.link.SetLabel("---/---")
            self.link.SetURL("https://pavlovia.org/")
            self.link.Disable()
            # Star button
            self.starBtn.Disable()
            self.starBtn.value = False
            # Star label
            self.starLbl.SetLabel("-")
            self.starLbl.Disable()
            # Fork button
            self.forkBtn.Disable()
            # Fork label
            self.forkLbl.SetLabel("-")
            self.forkLbl.Disable()
            # Create button (only offered when there is no project yet and
            # a user is logged in)
            self.createBtn.Show()
            self.createBtn.Enable(bool(self.session.user))
            # Sync button
            self.syncBtn.Hide()
            # Get button
            self.downloadBtn.Hide()
            # Sync label
            self.syncLbl.SetLabel("---")
            self.syncLbl.Disable()
            # Local root
            self.localRootLabel.Disable()
            wx.TextCtrl.SetValue(self.localRoot, "")  # use base method to avoid callback
            self.localRoot.Disable()
            # Description
            self.description.SetValue("")
            self.description.Disable()
            # Visibility
            self.visibility.SetSelection(wx.NOT_FOUND)
            self.visibility.Disable()
            # Status
            self.status.SetSelection(wx.NOT_FOUND)
            self.status.Disable()
            # Tags
            self.tags.clear()
            self.tags.Disable()
        else:
            # Refresh project to make sure it has info
            if not hasattr(project, "_info"):
                project.refresh()
            # Icon
            # NOTE(review): the check uses camelCase 'avatarUrl'
            # (project.info) but the fetch uses snake_case
            # project['avatar_url'] -- confirm both keys resolve on
            # PavloviaProject.
            if 'avatarUrl' in project.info:
                try:
                    content = requests.get(project['avatar_url']).content
                    icon = wx.Bitmap(wx.Image(io.BytesIO(content)))
                except requests.exceptions.MissingSchema:
                    # avatar URL was relative/blank; fall back to no icon
                    icon = wx.Bitmap()
            else:
                icon = wx.Bitmap()
            self.icon.SetBitmap(icon)
            self.icon.SetBackgroundColour("#f2f2f2")
            self.icon.Enable(project.editable)
            # Title
            self.title.SetValue(project['name'])
            self.title.Enable(project.editable)
            # Author
            self.author.SetLabel(f"by {project['path_with_namespace'].split('/')[0]} on {project['created_at']:%d %B %Y}")
            self.author.Enable()
            # Link
            self.link.SetLabel(project['path_with_namespace'])
            self.link.SetURL("https://pavlovia.org/" + project['path_with_namespace'])
            self.link.Enable()
            # Star button (needs a logged-in user to star)
            self.starBtn.value = project.starred
            self.starBtn.Enable(bool(project.session.user))
            # Star label
            self.starLbl.SetLabel(str(project['star_count']))
            self.starLbl.Enable()
            # Fork button (can't fork your own project)
            self.forkBtn.Enable(bool(project.session.user) and not project.owned)
            # Fork label
            self.forkLbl.SetLabel(str(project['forks_count']))
            self.forkLbl.Enable()
            # Create button
            self.createBtn.Hide()
            # Sync button (shown once a local root exists, or read-only)
            self.syncBtn.Show(bool(project.localRoot) or (not project.editable))
            self.syncBtn.Enable(project.editable)
            # Get button (shown while there is no local copy yet)
            self.downloadBtn.Show(not bool(project.localRoot) and project.editable)
            self.downloadBtn.Enable(project.editable)
            # Sync label
            self.syncLbl.SetLabel(f"{project['last_activity_at']:%d %B %Y, %I:%M%p}")
            self.syncLbl.Show(bool(project.localRoot) or (not project.editable))
            self.syncLbl.Enable(project.editable)
            # Local root
            wx.TextCtrl.SetValue(self.localRoot, project.localRoot or "")  # use base method to avoid callback
            self.localRootLabel.Enable(project.editable)
            self.localRoot.Enable(project.editable)
            # Description
            self.description.SetValue(project['description'])
            self.description.Enable(project.editable)
            # Visibility
            self.visibility.SetStringSelection(project['visibility'])
            self.visibility.Enable(project.editable)
            # Status
            self.status.SetStringSelection(str(project['status2']).title())
            self.status.Enable(project.editable)
            # Tags
            self.tags.items = project['keywords']
            self.tags.Enable(project.editable)
        # Layout
        self.Layout()

    @property
    def session(self):
        """Current Pavlovia session (cached on first access)."""
        # Cache session if not cached
        if not hasattr(self, "_session"):
            self._session = pavlovia.getCurrentSession()
        # Return cached session
        return self._session

    def create(self, evt=None):
        """
        Create a new project
        """
        dlg = sync.CreateDlg(self, user=self.session.user)
        dlg.ShowModal()
        self.project = dlg.project

    def sync(self, evt=None):
        """Sync (or first download) the project; prompts for a local
        folder when none is set yet."""
        # If not synced locally, choose a folder
        if not self.localRoot.GetValue():
            self.localRoot.browse()
            # If cancelled, return
            if not self.localRoot.GetValue():
                return
            self.project.localRoot = self.localRoot.GetValue()
        # Enable ctrl now that there is a local root
        self.localRoot.Enable()
        self.localRootLabel.Enable()
        # Show sync dlg (does sync)
        dlg = sync.SyncDialog(self, self.project)
        dlg.sync()
        functions.showCommitDialog(self, self.project, initMsg="", infoStream=dlg.status)
        # Update project
        self.project.refresh()
        # Update last sync date & show
        self.syncLbl.SetLabel(f"{self.project['last_activity_at']:%d %B %Y, %I:%M%p}")
        self.syncLbl.Show()
        self.syncLbl.Enable()
        # Switch buttons to show Sync rather than Download/Create
        self.createBtn.Hide()
        self.downloadBtn.Hide()
        self.syncBtn.Show()
        self.syncBtn.Enable()

    def fork(self, evt=None):
        """Fork the project to the current user's account; offers to open
        an existing fork and to sync the new one."""
        # Do fork
        try:
            proj = self.project.fork()
        except gitlab.GitlabCreateError as e:
            # If project already exists, ask user if they want to view it rather than create again
            dlg = wx.MessageDialog(self, f"{e.error_message}\n\nOpen forked project?", style=wx.YES_NO)
            if dlg.ShowModal() == wx.ID_YES:
                # If yes, show forked project
                projData = requests.get(
                    f"https://pavlovia.org/api/v2/experiments/{self.project.session.user['username']}/{self.project.info['pathWithNamespace'].split('/')[1]}"
                ).json()
                self.project = PavloviaProject(projData['experiment']['gitlabId'])
                return
            else:
                # If no, return
                return
        # Switch to new project
        self.project = proj
        # Sync
        dlg = wx.MessageDialog(self, "Fork created! Sync it to a local folder?", style=wx.YES_NO)
        if dlg.ShowModal() == wx.ID_YES:
            self.sync()

    def star(self, evt=None):
        """Toggle the star button, then push the change via updateProject."""
        # Toggle button
        self.starBtn.toggle()
        # Star/unstar project
        self.updateProject(evt)
        # todo: Refresh stars count

    def updateProject(self, evt=None):
        """Push the change from the control that fired ``evt`` to the
        project (and to Pavlovia where the attribute requires it).

        Dispatches on ``evt.GetEventObject()``; does nothing when there is
        no project or no event.
        """
        # Skip if no project
        if self.project is None or evt is None:
            return
        # Get object
        obj = evt.GetEventObject()
        # Update project attribute according to supplying object
        if obj == self.title and self.project.editable:
            self.project['name'] = self.title.Value
            self.project.save()
        if obj == self.icon:
            # Create temporary image file
            _, temp = tempfile.mkstemp(suffix=".png")
            self.icon.BitmapFull.SaveFile(temp, wx.BITMAP_TYPE_PNG)
            # Load and upload from temp file
            self.project['avatar'] = open(temp, "rb")
            self.project.save()
            # Delete temp file
            # NOTE(review): removal is disabled, so temp icon files
            # accumulate -- confirm whether this was left off deliberately
            # (e.g. file still open during upload).
            #os.remove(temp)
        if obj == self.starBtn:
            self.project.starred = self.starBtn.value
            self.starLbl.SetLabel(str(self.project.info['nbStars']))
        if obj == self.localRoot:
            if Path(self.localRoot.Value).is_dir():
                self.project.localRoot = self.localRoot.Value
            else:
                dlg = wx.MessageDialog(self,
                                       message=_translate(
                                           "Could not find folder {directory}, please select a different "
                                           "local root.".format(directory=self.localRoot.Value)
                                       ),
                                       caption="Directory not found",
                                       style=wx.ICON_ERROR)
                self.localRoot.SetValue("")
                self.project.localRoot = ""
                dlg.ShowModal()
            # Set project again to trigger a refresh
            self.project = self.project
        if obj == self.description and self.project.editable:
            self.project['description'] = self.description.Value
            self.project.save()
        if obj == self.visibility and self.project.editable:
            self.project['visibility'] = self.visibility.GetStringSelection().lower()
            self.project.save()
        if obj == self.status and self.project.editable:
            # status2 lives in the Pavlovia experiments API, not gitlab
            requests.put(f"https://pavlovia.org/api/v2/experiments/{self.project.id}",
                         data={"status2": self.status.GetStringSelection()},
                         headers={'OauthToken': self.session.getToken()})
        if obj == self.tags and self.project.editable:
            requests.put(f"https://pavlovia.org/api/v2/experiments/{self.project.id}",
                         data={"keywords": self.tags.GetValue()},
                         headers={'OauthToken': self.session.getToken()})
class ProjectFrame(wx.Dialog):
    """Top-level dialog wrapping a :class:`DetailsPanel` for one project.

    Titled after the project's name when a project is supplied, otherwise
    "Project info".
    """

    def __init__(self, app, parent=None, style=None,
                 pos=wx.DefaultPosition, project=None):
        if style is None:
            style = (wx.DEFAULT_DIALOG_STYLE | wx.CENTER |
                     wx.TAB_TRAVERSAL | wx.RESIZE_BORDER)
        if project:
            title = project['name']
        else:
            title = _translate("Project info")
        self.frameType = 'ProjectInfo'
        wx.Dialog.__init__(self, parent, -1, title=title, style=style,
                           size=(700, 500), pos=pos)
        self.app = app
        self.project = project
        self.parent = parent

        self.detailsPanel = DetailsPanel(parent=self, project=self.project)
        self.mainSizer = wx.BoxSizer(wx.VERTICAL)
        self.mainSizer.Add(self.detailsPanel, proportion=1, border=12, flag=wx.EXPAND | wx.ALL)
        self.SetSizerAndFit(self.mainSizer)

        if self.parent:
            self.CenterOnParent()
        self.Layout()
def syncProject(parent, project, file="", closeFrameWhenDone=False):
    """A function to sync the current project (if there is one)

    If no project is given, offers to create one (seeded from `file`'s
    name/folder when available); if the project has no local root yet,
    prompts for a folder.  Finally shows the commit and sync dialogs and
    stores the project on `parent.project`.

    Returns
    -----------
    None on every path.  NOTE(review): earlier docs promised 1 (success) /
    0 (fail) / -1 (cancel) status codes, but no code path returns a value,
    so callers must not rely on the return value.
    """
    # If not in a project, make one
    if project is None:
        msgDlg = wx.MessageDialog(parent,
                                  message=_translate("This file doesn't belong to any existing project."),
                                  style=wx.OK | wx.CANCEL | wx.CENTER)
        msgDlg.SetOKLabel(_translate("Create a project"))
        if msgDlg.ShowModal() == wx.ID_OK:
            # Get start path and name from builder/coder if possible
            if file:
                file = Path(file)
                name = file.stem
                path = file.parent
            else:
                name = path = ""
            # Open dlg to create new project
            createDlg = sync.CreateDlg(parent,
                                       user=pavlovia.getCurrentSession().user,
                                       name=name,
                                       path=path)
            if createDlg.ShowModal() == wx.ID_OK and createDlg.project is not None:
                project = createDlg.project
            else:
                # user cancelled the create dialog
                return
        else:
            # user declined to create a project
            return
    # If no local root, prompt to make one
    if not project.localRoot:
        defaultRoot = Path(file).parent
        # Ask user if they want to
        dlg = wx.MessageDialog(parent, message=_translate("Project root folder is not yet specified, specify one now?"), style=wx.YES_NO)
        # Open folder picker
        if dlg.ShowModal() == wx.ID_YES:
            dlg = wx.DirDialog(parent, message=_translate("Specify folder..."), defaultPath=str(defaultRoot))
            if dlg.ShowModal() == wx.ID_OK:
                localRoot = Path(dlg.GetPath())
                project.localRoot = str(localRoot)
            else:
                # If cancelled, cancel sync
                return
        else:
            # If they don't want to specify, cancel sync
            return
    # Assign project to parent frame
    parent.project = project
    # If there is (now) a project, do sync
    if project is not None:
        dlg = sync.SyncDialog(parent, project)
        functions.showCommitDialog(parent, project, initMsg="", infoStream=dlg.status)
        dlg.sync()
class ForkDlg(wx.Dialog):
    """Simple dialog to help choose the location/name of a forked project"""
    # this dialog is working fine, but the API call to fork to a specific
    # namespace doesn't appear to work

    def __init__(self, project, *args, **kwargs):
        wx.Dialog.__init__(self, *args, **kwargs)

        existingName = project.name
        session = pavlovia.getCurrentSession()
        # Target namespace can be the user themselves or any of their groups
        groups = [session.user['username']]
        groups.extend(session.listUserGroups())
        msg = wx.StaticText(self, label="Where shall we fork to?")
        groupLbl = wx.StaticText(self, label="Group:")
        self.groupField = wx.Choice(self, choices=groups)
        nameLbl = wx.StaticText(self, label="Project name:")
        self.nameField = wx.TextCtrl(self, value=project.name)

        fieldsSizer = wx.FlexGridSizer(cols=2, rows=2, vgap=5, hgap=5)
        fieldsSizer.AddMany([groupLbl, self.groupField,
                             nameLbl, self.nameField])

        buttonSizer = wx.BoxSizer(wx.HORIZONTAL)
        buttonSizer.Add(wx.Button(self, id=wx.ID_OK, label="OK"))
        buttonSizer.Add(wx.Button(self, id=wx.ID_CANCEL, label="Cancel"))

        mainSizer = wx.BoxSizer(wx.VERTICAL)
        mainSizer.Add(msg, 1, wx.ALL, 5)
        mainSizer.Add(fieldsSizer, 1, wx.ALL, 5)
        mainSizer.Add(buttonSizer, 1, wx.ALL | wx.ALIGN_RIGHT, 5)

        self.SetSizerAndFit(mainSizer)
        self.Layout()
class ProjectRecreator(wx.Dialog):
    """Use this Dlg to handle the case of a missing (deleted?) remote project
    """

    def __init__(self, project, parent, *args, **kwargs):
        wx.Dialog.__init__(self, parent, *args, **kwargs)
        self.parent = parent
        self.project = project
        existingName = project.name
        msgText = _translate("points to a remote that doesn't exist (deleted?).")
        msgText += (" "+_translate("What shall we do?"))
        msg = wx.StaticText(self, label="{} {}".format(existingName, msgText))
        choices = [_translate("(Re)create a project"),
                   "{} ({})".format(_translate("Point to an different location"),
                                    _translate("not yet supported")),
                   _translate("Forget the local git repository (deletes history keeps files)")]
        self.radioCtrl = wx.RadioBox(self, label='RadioBox', choices=choices,
                                     majorDimension=1)
        # Options 2 and 3 are not implemented yet, so keep them greyed out
        self.radioCtrl.EnableItem(1, False)
        self.radioCtrl.EnableItem(2, False)

        mainSizer = wx.BoxSizer(wx.VERTICAL)
        buttonSizer = wx.BoxSizer(wx.HORIZONTAL)
        buttonSizer.Add(wx.Button(self, id=wx.ID_OK, label=_translate("OK")),
                        1, wx.ALL, 5)
        buttonSizer.Add(wx.Button(self, id=wx.ID_CANCEL, label=_translate("Cancel")),
                        1, wx.ALL, 5)
        mainSizer.Add(msg, 1, wx.ALL, 5)
        mainSizer.Add(self.radioCtrl, 1, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 5)
        mainSizer.Add(buttonSizer, 1, wx.ALL | wx.ALIGN_RIGHT, 1)

        self.SetSizer(mainSizer)
        self.Layout()

    def ShowModal(self):
        """Show the dialog and act on the chosen option.

        Returns 1 when a project was successfully (re)created, -1 when the
        user cancelled at any point; raises NotImplementedError for the
        (disabled) relocate/forget options.
        """
        if wx.Dialog.ShowModal(self) == wx.ID_OK:
            choice = self.radioCtrl.GetSelection()
            if choice == 0:
                editor = ProjectEditor(parent=self.parent,
                                       localRoot=self.project.localRoot)
                if editor.ShowModal() == wx.ID_OK:
                    self.project = editor.project
                    return 1  # success!
                else:
                    return -1  # user cancelled
            elif choice == 1:
                raise NotImplementedError("We don't yet support redirecting "
                                          "your project to a new location.")
            elif choice == 2:
                raise NotImplementedError("Deleting the local git repo is not "
                                          "yet implemented")
        else:
            return -1
| psychopy/psychopy | psychopy/app/pavlovia_ui/project.py | Python | gpl-3.0 | 36,285 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Calendar-Indicator
#
# Copyright (C) 2011-2019 Lorenzo Carbonell Cerezo
# lorenzo.carbonell.cerezo@gmail.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gi
try:
gi.require_version('Gtk', '3.0')
gi.require_version('Handy', '0.0')
except Exception as e:
print(e)
exit(1)
from gi.repository import Gtk
from gi.repository import Handy
import os
import urllib
import comun
from comun import _
from sidewidget import SideWidget
from settingsrow import SettingRow
from logindialog import LoginDialog
from googlecalendarapi import GoogleCalendar
class LoginOption(Gtk.Overlay):
    """Settings page exposing a single switch that grants or revokes the
    indicator's Google Calendar OAuth authorization (the token is stored
    at ``comun.TOKEN_FILE``)."""

    def __init__(self):
        Gtk.Overlay.__init__(self)
        self.__set_ui()

    def __set_ui(self):
        """Build the UI: a Handy column holding one labelled switch row,
        initialised from whether a token file already exists."""
        handycolumn = Handy.Column()
        handycolumn.set_maximum_width(700)
        handycolumn.set_margin_top(24)
        self.add(handycolumn)
        box = Gtk.Box.new(Gtk.Orientation.VERTICAL, 5)
        handycolumn.add(box)
        label = Gtk.Label(_('Google calendar permissions'))
        label.set_name('special')
        label.set_alignment(0, 0.5)
        box.add(label)
        listbox0 = Gtk.ListBox()
        box.add(listbox0)
        self.switch1 = Gtk.Switch()
        # NOTE(review): 'activate' on Gtk.Switch passes no extra argument,
        # so this handler's (widget, data) signature only matches the
        # 'button-press-event' signal -- confirm the 'activate' hookup.
        self.switch1.connect('button-press-event',self.on_switch1_changed)
        self.switch1.connect('activate',self.on_switch1_changed)
        self.switch1.set_valign(Gtk.Align.CENTER)
        listbox0.add(SettingRow(_('Permissions for Google Calendar'),
                                _('Enable read and write permissions for Google Calendar.'),
                                self.switch1))
        # Switch starts ON when a token is already stored.
        self.switch1.set_active(os.path.exists(comun.TOKEN_FILE))

    def on_switch1_changed(self,widget,data):
        """Toggle Google Calendar authorization.

        NOTE(review): this fires on 'button-press-event', i.e. *before*
        GTK flips the switch, so get_active() still reports the previous
        state here; the active branch therefore revokes (removes the
        token) and the inactive branch runs the OAuth flow -- confirm this
        pairing is intended.
        """
        if self.switch1.get_active():
            if os.path.exists(comun.TOKEN_FILE):
                # Revoke: discard the stored OAuth token.
                os.remove(comun.TOKEN_FILE)
        else:
            # Authorize: run the OAuth flow, retrying via a dialog until
            # the user authorizes or cancels (which exits the app).
            googlecalendar = GoogleCalendar(token_file = comun.TOKEN_FILE)
            if googlecalendar.do_refresh_authorization() is None:
                authorize_url = googlecalendar.get_authorize_url()
                ld = LoginDialog(authorize_url)
                ld.run()
                googlecalendar.get_authorization(ld.code)
                ld.destroy()
                if googlecalendar.do_refresh_authorization() is None:
                    md = Gtk.MessageDialog( parent = self,
                        flags = Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
                        type = Gtk.MessageType.ERROR,
                        buttons = Gtk.ButtonsType.OK_CANCEL,
                        message_format = _('You have to authorize Calendar-Indicator to use it, do you want to authorize?'))
                    if md.run() == Gtk.ResponseType.CANCEL:
                        exit(3)
                else:
                    if googlecalendar.do_refresh_authorization() is None:
                        exit(3)
            self.switch1.set_active(True)
| atareao/calendar-indicator | src/loginoption.py | Python | gpl-3.0 | 3,654 |
#!/usr/bin/env python3
"""
Copyright 2020 Paul Willworth <ioscode@gmail.com>
This file is part of Galaxy Harvester.
Galaxy Harvester is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galaxy Harvester is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import pymysql
import dbInfo
import optparse
import smtplib
from email.message import EmailMessage
from smtplib import SMTPRecipientsRefused
import time
from datetime import timedelta, datetime
import mailInfo
emailIDs = ['spawns', 'activity']
def ghConn():
    """Open a connection to the Galaxy Harvester MySQL database.

    Credentials come from the project's ``dbInfo`` module; the connection
    is switched to autocommit mode before being handed back.
    """
    connection = pymysql.connect(
        host=dbInfo.DB_HOST,
        db=dbInfo.DB_NAME,
        user=dbInfo.DB_USER,
        passwd=dbInfo.DB_PASS,
    )
    connection.autocommit(True)
    return connection
def sendAlertMail(conn, userID, msgText, link, alertID, alertTitle, emailIndex):
	"""Send one alert e-mail to a user and mark the alert row as sent.

	conn       -- open pymysql connection
	userID     -- key into tUsers identifying the recipient
	msgText    -- plain-text body of the alert
	link       -- URL included in the message
	alertID    -- tAlerts primary key; set to status 1 on successful send
	alertTitle -- subject suffix appended after "Galaxy Harvester "
	emailIndex -- index into emailIDs selecting the sender identity

	Returns 1 when the send is skipped because of a recent quota failure;
	otherwise returns None.
	"""
	# Don't try to send mail if we exceeded quota within last hour
	lastFailureTime = datetime(2000, 1, 1, 12)
	currentTime = datetime.fromtimestamp(time.time())
	timeSinceFailure = currentTime - lastFailureTime
	try:
		# tracking file is written by trackEmailFailure() on the last refusal
		f = open("last_notification_failure_" + emailIDs[emailIndex] + ".txt")
		lastFailureTime = datetime.strptime(f.read().strip(), "%Y-%m-%d %H:%M:%S")
		f.close()
		timeSinceFailure = currentTime - lastFailureTime
	except IOError as e:
		sys.stdout.write("No last failure time\n")
	# back off while within 3660 seconds (one hour plus a minute of slack)
	if timeSinceFailure.days < 1 and timeSinceFailure.seconds < 3660:
		sys.stderr.write(str(timeSinceFailure.seconds) + " less than 3660 no mail.\n")
		return 1
	# look up the user email
	# NOTE(review): userID is interpolated directly into the SQL string;
	# safe only if callers guarantee it is trusted -- consider a
	# parameterized query.
	cursor = conn.cursor()
	cursor.execute("SELECT emailAddress FROM tUsers WHERE userID='" + userID + "';")
	row = cursor.fetchone()
	if row == None:
		result = "bad username"
	else:
		email = row[0]
		# minimal sanity check of the address before attempting delivery
		if (email.find("@") > -1 and email.find(".") > -1):
			# send message
			message = EmailMessage()
			message['From'] = "\"Galaxy Harvester Alerts\" <" + emailIDs[emailIndex] + "@galaxyharvester.net>"
			message['To'] = email
			message['Subject'] = "".join(("Galaxy Harvester ", alertTitle))
			message.set_content("".join(("Hello ", userID, ",\n\n", msgText, "\n\n", link, "\n\n You can manage your alerts at http://galaxyharvester.net/myAlerts.py\n")))
			message.add_alternative("".join(("<div><img src='http://galaxyharvester.net/images/ghLogoLarge.png'/></div><p>Hello ", userID, ",</p><br/><p>", msgText.replace("\n", "<br/>"), "</p><p><a style='text-decoration:none;' href='", link, "'><div style='width:170px;font-size:18px;font-weight:600;color:#feffa1;background-color:#003344;padding:8px;margin:4px;border:1px solid black;'>View in Galaxy Harvester</div></a><br/>or copy and paste link: ", link, "</p><br/><p>You can manage your alerts at <a href='http://galaxyharvester.net/myAlerts.py'>http://galaxyharvester.net/myAlerts.py</a></p><p>-Galaxy Harvester Administrator</p>")), subtype='html')
			mailer = smtplib.SMTP(mailInfo.MAIL_HOST)
			mailer.login(emailIDs[emailIndex] + "@galaxyharvester.net", mailInfo.MAIL_PASS)
			try:
				mailer.send_message(message)
				result = 'email sent'
			except SMTPRecipientsRefused as e:
				# record the failure so the next run backs off for an hour
				result = 'email failed'
				sys.stderr.write('Email failed - ' + str(e))
				trackEmailFailure(datetime.fromtimestamp(time.time()).strftime("%Y-%m-%d %H:%M:%S"), emailIndex)
			mailer.quit()
			# update alert status
			if ( result == 'email sent' ):
				cursor.execute('UPDATE tAlerts SET alertStatus=1, statusChanged=NOW() WHERE alertID=' + str(alertID) + ';')
		else:
			result = 'Invalid email.'
	cursor.close()
def main():
	"""Entry point: pick the sending identity and retry queued alert mails."""
	# optional argv[1] selects which entry of emailIDs sends the mail
	if len(sys.argv) > 1:
		emailIndex = int(sys.argv[1])
	else:
		emailIndex = 0
	conn = ghConn()
	# try sending any backed up alert mails
	retryPendingMail(conn, emailIndex)
def trackEmailFailure(failureTime, emailIndex):
	"""Persist the time of the last mail-send failure for one sender identity.

	failureTime -- "%Y-%m-%d %H:%M:%S" timestamp string to record
	emailIndex  -- index into emailIDs selecting the tracking file

	sendAlertMail() reads this file to back off after quota failures.
	"""
	try:
		# context manager guarantees the file is closed even if write fails
		with open("last_notification_failure_" + emailIDs[emailIndex] + ".txt", "w") as f:
			f.write(failureTime)
	except IOError:
		sys.stderr.write("Could not write email failure tracking file")
def retryPendingMail(conn, emailIndex):
	"""Retry sending unsent (status 0) e-mail alerts, stopping early once
	sendAlertMail() reports that the send quota was reached."""
	# open email alerts that have not been sucessfully sent, less than
	# 4 days old (the original comment said 48 hours, but minTime below
	# uses timedelta(days=4))
	minTime = datetime.fromtimestamp(time.time()) - timedelta(days=4)
	cursor = conn.cursor()
	cursor.execute("SELECT userID, alertTime, alertMessage, alertLink, alertID FROM tAlerts WHERE alertType=2 AND alertStatus=0 and alertTime > '" + minTime.strftime("%Y-%m-%d %H:%M:%S") + "' and alertMessage LIKE '% - %';")
	row = cursor.fetchone()
	# try to send as long as not exceeding quota
	while row != None:
		# alertMessage is "<title> - <body>"; split on the first separator
		fullText = row[2]
		splitPos = fullText.find(" - ")
		alertTitle = fullText[:splitPos]
		alertBody = fullText[splitPos+3:]
		result = sendAlertMail(conn, row[0], alertBody, row[3], row[4], alertTitle, emailIndex)
		if result == 1:
			sys.stderr.write("Delayed retrying rest of mail since quota reached.\n")
			break
		row = cursor.fetchone()
	cursor.close()
if __name__ == "__main__":
main()
| pwillworth/galaxyharvester | catchupMail.py | Python | gpl-3.0 | 5,380 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Ui_ExportData.ui'
#
# Created: Sat May 28 00:16:57 2011
# by: PyQt4 UI code generator 4.8.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # PyQt4 API v2 / Python 3: QString does not exist; pass strings through
    _fromUtf8 = lambda s: s
class Ui_ExportData(object):
    """Generated PyQt4 UI class for the "Export Data" dialog.

    Builds a three-step form: file options (name/type/delimiter/direction),
    wave selection (two lists with add/remove buttons), and an export
    button.  Do not edit by hand beyond comments -- this file is generated
    from Ui_ExportData.ui and regeneration will overwrite changes.
    """

    def setupUi(self, ExportData):
        """Create and lay out all widgets on the ExportData dialog."""
        ExportData.setObjectName(_fromUtf8("ExportData"))
        ExportData.resize(354, 527)
        self.verticalLayout_5 = QtGui.QVBoxLayout(ExportData)
        self.verticalLayout_5.setObjectName(_fromUtf8("verticalLayout_5"))
        # --- Step 1: file options group ---
        self.groupBox_2 = QtGui.QGroupBox(ExportData)
        self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
        self.gridLayout = QtGui.QGridLayout(self.groupBox_2)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        self.label = QtGui.QLabel(self.groupBox_2)
        self.label.setObjectName(_fromUtf8("label"))
        self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
        self.fileName = QtGui.QLineEdit(self.groupBox_2)
        self.fileName.setObjectName(_fromUtf8("fileName"))
        self.gridLayout.addWidget(self.fileName, 0, 1, 1, 1)
        self.label_2 = QtGui.QLabel(self.groupBox_2)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.gridLayout.addWidget(self.label_2, 1, 0, 1, 1)
        self.outputType = QtGui.QComboBox(self.groupBox_2)
        self.outputType.setObjectName(_fromUtf8("outputType"))
        self.outputType.addItem(_fromUtf8(""))
        self.gridLayout.addWidget(self.outputType, 1, 1, 1, 2)
        # stacked widget holds per-output-type option pages (only
        # "Delimited" has options; page_2 is an empty placeholder)
        self.stackedWidget = QtGui.QStackedWidget(self.groupBox_2)
        self.stackedWidget.setObjectName(_fromUtf8("stackedWidget"))
        self.delimitedStackedWidget = QtGui.QWidget()
        self.delimitedStackedWidget.setObjectName(_fromUtf8("delimitedStackedWidget"))
        self.gridLayout_2 = QtGui.QGridLayout(self.delimitedStackedWidget)
        self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
        self.label_3 = QtGui.QLabel(self.delimitedStackedWidget)
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.gridLayout_2.addWidget(self.label_3, 0, 0, 1, 1)
        # delimiter choice: comma / tab / other (with a one-char edit box)
        self.delimitedDelimiterGroupBox = QtGui.QGroupBox(self.delimitedStackedWidget)
        self.delimitedDelimiterGroupBox.setTitle(_fromUtf8(""))
        self.delimitedDelimiterGroupBox.setObjectName(_fromUtf8("delimitedDelimiterGroupBox"))
        self.horizontalLayout = QtGui.QHBoxLayout(self.delimitedDelimiterGroupBox)
        self.horizontalLayout.setContentsMargins(2, 0, 0, 0)
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        self.delimitedCommaRadio = QtGui.QRadioButton(self.delimitedDelimiterGroupBox)
        self.delimitedCommaRadio.setChecked(True)
        self.delimitedCommaRadio.setObjectName(_fromUtf8("delimitedCommaRadio"))
        self.delimiterButtonGroup = QtGui.QButtonGroup(ExportData)
        self.delimiterButtonGroup.setObjectName(_fromUtf8("delimiterButtonGroup"))
        self.delimiterButtonGroup.addButton(self.delimitedCommaRadio)
        self.horizontalLayout.addWidget(self.delimitedCommaRadio)
        self.delimitedTabRadio = QtGui.QRadioButton(self.delimitedDelimiterGroupBox)
        self.delimitedTabRadio.setObjectName(_fromUtf8("delimitedTabRadio"))
        self.delimiterButtonGroup.addButton(self.delimitedTabRadio)
        self.horizontalLayout.addWidget(self.delimitedTabRadio)
        self.delimitedOtherRadio = QtGui.QRadioButton(self.delimitedDelimiterGroupBox)
        self.delimitedOtherRadio.setObjectName(_fromUtf8("delimitedOtherRadio"))
        self.delimiterButtonGroup.addButton(self.delimitedOtherRadio)
        self.horizontalLayout.addWidget(self.delimitedOtherRadio)
        self.delimitedOtherDelimiter = QtGui.QLineEdit(self.delimitedDelimiterGroupBox)
        self.delimitedOtherDelimiter.setEnabled(False)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.delimitedOtherDelimiter.sizePolicy().hasHeightForWidth())
        self.delimitedOtherDelimiter.setSizePolicy(sizePolicy)
        self.delimitedOtherDelimiter.setMaximumSize(QtCore.QSize(20, 16777215))
        self.delimitedOtherDelimiter.setBaseSize(QtCore.QSize(0, 0))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.delimitedOtherDelimiter.setFont(font)
        self.delimitedOtherDelimiter.setMaxLength(1)
        self.delimitedOtherDelimiter.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
        self.delimitedOtherDelimiter.setObjectName(_fromUtf8("delimitedOtherDelimiter"))
        self.horizontalLayout.addWidget(self.delimitedOtherDelimiter)
        self.horizontalLayout.setStretch(0, 5)
        self.horizontalLayout.setStretch(1, 5)
        self.gridLayout_2.addWidget(self.delimitedDelimiterGroupBox, 0, 1, 1, 1)
        self.label_4 = QtGui.QLabel(self.delimitedStackedWidget)
        self.label_4.setObjectName(_fromUtf8("label_4"))
        self.gridLayout_2.addWidget(self.label_4, 1, 0, 1, 1)
        # data-direction choice: write waves as columns or as rows
        self.delimitedDataDirectionGroupBox = QtGui.QGroupBox(self.delimitedStackedWidget)
        self.delimitedDataDirectionGroupBox.setTitle(_fromUtf8(""))
        self.delimitedDataDirectionGroupBox.setObjectName(_fromUtf8("delimitedDataDirectionGroupBox"))
        self.horizontalLayout_3 = QtGui.QHBoxLayout(self.delimitedDataDirectionGroupBox)
        self.horizontalLayout_3.setContentsMargins(2, 0, 0, 0)
        self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
        self.dataDirectionColumns = QtGui.QRadioButton(self.delimitedDataDirectionGroupBox)
        self.dataDirectionColumns.setChecked(True)
        self.dataDirectionColumns.setObjectName(_fromUtf8("dataDirectionColumns"))
        self.dataDirectionButtonGroup = QtGui.QButtonGroup(ExportData)
        self.dataDirectionButtonGroup.setObjectName(_fromUtf8("dataDirectionButtonGroup"))
        self.dataDirectionButtonGroup.addButton(self.dataDirectionColumns)
        self.horizontalLayout_3.addWidget(self.dataDirectionColumns)
        self.dataDirectionRows = QtGui.QRadioButton(self.delimitedDataDirectionGroupBox)
        self.dataDirectionRows.setChecked(False)
        self.dataDirectionRows.setObjectName(_fromUtf8("dataDirectionRows"))
        self.dataDirectionButtonGroup.addButton(self.dataDirectionRows)
        self.horizontalLayout_3.addWidget(self.dataDirectionRows)
        self.gridLayout_2.addWidget(self.delimitedDataDirectionGroupBox, 1, 1, 1, 1)
        self.stackedWidget.addWidget(self.delimitedStackedWidget)
        self.page_2 = QtGui.QWidget()
        self.page_2.setObjectName(_fromUtf8("page_2"))
        self.stackedWidget.addWidget(self.page_2)
        self.gridLayout.addWidget(self.stackedWidget, 2, 0, 1, 3)
        self.fileNameButton = QtGui.QPushButton(self.groupBox_2)
        self.fileNameButton.setObjectName(_fromUtf8("fileNameButton"))
        self.gridLayout.addWidget(self.fileNameButton, 0, 2, 1, 1)
        self.verticalLayout_5.addWidget(self.groupBox_2)
        # --- Step 2: wave selection group (all waves / waves to export) ---
        self.groupBox_3 = QtGui.QGroupBox(ExportData)
        self.groupBox_3.setObjectName(_fromUtf8("groupBox_3"))
        self.horizontalLayout_2 = QtGui.QHBoxLayout(self.groupBox_3)
        self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
        self.verticalLayout = QtGui.QVBoxLayout()
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.label_6 = QtGui.QLabel(self.groupBox_3)
        self.label_6.setObjectName(_fromUtf8("label_6"))
        self.verticalLayout.addWidget(self.label_6)
        self.allWavesListView = QtGui.QListView(self.groupBox_3)
        self.allWavesListView.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
        self.allWavesListView.setObjectName(_fromUtf8("allWavesListView"))
        self.verticalLayout.addWidget(self.allWavesListView)
        self.horizontalLayout_2.addLayout(self.verticalLayout)
        self.verticalLayout_2 = QtGui.QVBoxLayout()
        self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
        spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.verticalLayout_2.addItem(spacerItem)
        self.addWaveButton = QtGui.QPushButton(self.groupBox_3)
        self.addWaveButton.setObjectName(_fromUtf8("addWaveButton"))
        self.verticalLayout_2.addWidget(self.addWaveButton)
        self.removeWaveButton = QtGui.QPushButton(self.groupBox_3)
        self.removeWaveButton.setObjectName(_fromUtf8("removeWaveButton"))
        self.verticalLayout_2.addWidget(self.removeWaveButton)
        spacerItem1 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.verticalLayout_2.addItem(spacerItem1)
        self.horizontalLayout_2.addLayout(self.verticalLayout_2)
        self.verticalLayout_3 = QtGui.QVBoxLayout()
        self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
        self.label_5 = QtGui.QLabel(self.groupBox_3)
        self.label_5.setObjectName(_fromUtf8("label_5"))
        self.verticalLayout_3.addWidget(self.label_5)
        # export list supports drag reordering of the selected waves
        self.fileWavesListView = QtGui.QListView(self.groupBox_3)
        self.fileWavesListView.setDragEnabled(True)
        self.fileWavesListView.setDragDropMode(QtGui.QAbstractItemView.InternalMove)
        self.fileWavesListView.setDefaultDropAction(QtCore.Qt.MoveAction)
        self.fileWavesListView.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
        self.fileWavesListView.setObjectName(_fromUtf8("fileWavesListView"))
        self.verticalLayout_3.addWidget(self.fileWavesListView)
        self.horizontalLayout_2.addLayout(self.verticalLayout_3)
        self.verticalLayout_5.addWidget(self.groupBox_3)
        # --- Step 3: export button group ---
        self.groupBox_5 = QtGui.QGroupBox(ExportData)
        self.groupBox_5.setObjectName(_fromUtf8("groupBox_5"))
        self.verticalLayout_4 = QtGui.QVBoxLayout(self.groupBox_5)
        self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
        self.exportDataButton = QtGui.QPushButton(self.groupBox_5)
        self.exportDataButton.setObjectName(_fromUtf8("exportDataButton"))
        self.verticalLayout_4.addWidget(self.exportDataButton)
        self.verticalLayout_5.addWidget(self.groupBox_5)
        self.retranslateUi(ExportData)
        self.stackedWidget.setCurrentIndex(0)
        # "Other" radio enables its delimiter box; combo selects option page
        QtCore.QObject.connect(self.delimitedOtherRadio, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.delimitedOtherDelimiter.setEnabled)
        QtCore.QObject.connect(self.outputType, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.stackedWidget.setCurrentIndex)

    def retranslateUi(self, ExportData):
        """Set all user-visible strings (translatable) on the widgets."""
        ExportData.setWindowTitle(QtGui.QApplication.translate("ExportData", "Export Data", None, QtGui.QApplication.UnicodeUTF8))
        self.groupBox_2.setTitle(QtGui.QApplication.translate("ExportData", "Step 1 - File Options", None, QtGui.QApplication.UnicodeUTF8))
        self.label.setText(QtGui.QApplication.translate("ExportData", "File", None, QtGui.QApplication.UnicodeUTF8))
        self.label_2.setText(QtGui.QApplication.translate("ExportData", "Type", None, QtGui.QApplication.UnicodeUTF8))
        self.outputType.setItemText(0, QtGui.QApplication.translate("ExportData", "Delimited", None, QtGui.QApplication.UnicodeUTF8))
        self.label_3.setText(QtGui.QApplication.translate("ExportData", "Delimiter", None, QtGui.QApplication.UnicodeUTF8))
        self.delimitedCommaRadio.setText(QtGui.QApplication.translate("ExportData", "Comma", None, QtGui.QApplication.UnicodeUTF8))
        self.delimitedTabRadio.setText(QtGui.QApplication.translate("ExportData", "Tab", None, QtGui.QApplication.UnicodeUTF8))
        self.delimitedOtherRadio.setText(QtGui.QApplication.translate("ExportData", "Other", None, QtGui.QApplication.UnicodeUTF8))
        self.delimitedOtherDelimiter.setText(QtGui.QApplication.translate("ExportData", ",", None, QtGui.QApplication.UnicodeUTF8))
        self.label_4.setText(QtGui.QApplication.translate("ExportData", "Data as", None, QtGui.QApplication.UnicodeUTF8))
        self.dataDirectionColumns.setText(QtGui.QApplication.translate("ExportData", "Columns", None, QtGui.QApplication.UnicodeUTF8))
        self.dataDirectionRows.setText(QtGui.QApplication.translate("ExportData", "Rows", None, QtGui.QApplication.UnicodeUTF8))
        self.fileNameButton.setText(QtGui.QApplication.translate("ExportData", "Select...", None, QtGui.QApplication.UnicodeUTF8))
        self.groupBox_3.setTitle(QtGui.QApplication.translate("ExportData", "Step 2 - Select Data", None, QtGui.QApplication.UnicodeUTF8))
        self.label_6.setText(QtGui.QApplication.translate("ExportData", "All Waves", None, QtGui.QApplication.UnicodeUTF8))
        self.addWaveButton.setText(QtGui.QApplication.translate("ExportData", "Add -->", None, QtGui.QApplication.UnicodeUTF8))
        self.removeWaveButton.setText(QtGui.QApplication.translate("ExportData", "<-- Remove", None, QtGui.QApplication.UnicodeUTF8))
        self.label_5.setText(QtGui.QApplication.translate("ExportData", "Waves to Export", None, QtGui.QApplication.UnicodeUTF8))
        self.groupBox_5.setTitle(QtGui.QApplication.translate("ExportData", "Step 3 - Export", None, QtGui.QApplication.UnicodeUTF8))
        self.exportDataButton.setText(QtGui.QApplication.translate("ExportData", "Export Data", None, QtGui.QApplication.UnicodeUTF8))
| bbreslauer/PySciPlot | src/ui/Ui_ExportData.py | Python | gpl-3.0 | 13,612 |
from __future__ import absolute_import
import os
import re
import numpy as np
import tensorflow as tf
# Articles dropped by tokenize(); deliberately tiny for the dialog task.
stop_words=set(["a","an","the"])
def load_candidates(data_dir, task_id):
    """Read the candidate answers file for one task.

    Returns (candidates, candid_dic): candidates is a list of token lists
    (line number stripped), candid_dic maps each raw candidate string to
    its line index.
    """
    assert 0 < task_id < 6
    candidates = []
    candid_dic = {}
    path = os.path.join(data_dir, 'candidates' + str(task_id) + '.txt')
    with open(path) as f:
        for index, line in enumerate(f):
            # key on the raw text after the leading line number
            candid_dic[line.strip().split(' ', 1)[1]] = index
            # store the tokenized form, dropping the line-number token
            candidates.append(tokenize(line.strip())[1:])
    return candidates, candid_dic
def load_test_candidates(data_dir, task_id, test_id):
    """Read the candidates file matching one test split.

    Test sets 1 and 2 share the training candidates file; later test sets
    have their own per-test files.  Returns (candidates, candid_dic) as in
    load_candidates().
    """
    assert 0 < task_id < 6
    if test_id in (1, 2):
        fname = 'candidates' + str(task_id) + '.txt'
    else:
        fname = 'candidates' + str(task_id) + '_tst' + str(test_id) + '.txt'
    candidates = []
    candid_dic = {}
    with open(os.path.join(data_dir, fname)) as f:
        for index, line in enumerate(f):
            candid_dic[line.strip().split(' ', 1)[1]] = index
            candidates.append(tokenize(line.strip())[1:])
    return candidates, candid_dic
def load_dialog_task(data_dir, task_id, candid_dic, isOOV):
    """Load (train, test, val) dialog data for the given task (1-5).

    NOTE(review): both the test and validation splits are read from the
    'dev' file here, and isOOV is unused -- confirm this is intentional.
    """
    assert 0 < task_id < 6
    paths = [os.path.join(data_dir, name) for name in os.listdir(data_dir)]
    tag = '-dialog-task{}'.format(task_id)
    train_file = [p for p in paths if tag in p and 'train' in p][0]
    test_file = [p for p in paths if tag in p and 'dev' in p][0]
    val_file = [p for p in paths if tag in p and 'dev' in p][0]
    train_data = get_dialogs(train_file, candid_dic)
    test_data = get_dialogs(test_file, candid_dic)
    val_data = get_dialogs(val_file, candid_dic)
    return train_data, test_data, val_data
def tokenize(sent):
    """Lower-case and split a sentence into tokens, dropping stop words.

    Punctuation becomes separate tokens, except that a single trailing
    '.', '?' or '!' token is removed.  A sentence that reduces to nothing
    maps to the special token '<silence>'.

    >>> tokenize('Bob dropped the apple. Where is the apple?')
    ['bob', 'dropped', 'apple', '.', 'where', 'is', 'apple']
    """
    # inlined copy of the module-level stop_words constant so the function
    # is self-contained
    stop_words = {"a", "an", "the"}
    sent = sent.lower()
    if sent == '<silence>':
        return [sent]
    # Use r'(\W+)' rather than the original '(\W+)?': an optional group
    # also matches the empty string, and re.split() splits on zero-width
    # matches from Python 3.7 onward, which would shatter every word into
    # single characters.  The raw string also avoids the invalid '\W'
    # escape warning.
    result = [x.strip() for x in re.split(r'(\W+)', sent)
              if x.strip() and x.strip() not in stop_words]
    if not result:
        result = ['<silence>']
    if result[-1] == '.' or result[-1] == '?' or result[-1] == '!':
        result = result[:-1]
    return result
def load_dialog_test_data(data_dir, task_id, test_id):
    """Load the unlabelled dialogs of one test split for the given task."""
    assert 0 < task_id < 6
    paths = [os.path.join(data_dir, name) for name in os.listdir(data_dir)]
    task_tag = '-dialog-task{}'.format(task_id)
    test_tag = 'tst_' + str(test_id)
    test_file = [p for p in paths if task_tag in p and test_tag in p][0]
    return get_test_dialogs(test_file)
def get_test_dialogs(f):
    """Read an unlabelled dialog file and parse it into per-dialog records."""
    # renamed the file handle so it no longer shadows the path parameter f
    with open(f) as handle:
        return parse_test_dialogs(handle.readlines())
def parse_test_dialogs(lines):
    """Parse a babi-dialog-format test file into per-dialog records.

    Returns one tuple per dialog: (context, last_user_utterance, answer,
    dialog_id), where answer is a dummy -1 because test files carry no
    labels.  Each context sentence is a token list tagged with a speaker
    marker ($u / $r) and a turn marker (#<n>); the final exchange is
    excluded from the context and its user side becomes the query.
    """
    data = []
    context = []
    u = None
    r = None
    a = -1
    dialog_id = 0
    for line in lines:
        line = line.strip()
        if line:
            nid, line = line.split(' ', 1)
            nid = int(nid)
            if '\t' in line:
                # user utterance and system response separated by a tab
                u, r = line.split('\t')
                u = tokenize(u)
                r = tokenize(r)
                u.append('$u')
                u.append('#' + str(nid))
                r.append('$r')
                r.append('#' + str(nid))
                context.append(u)
                context.append(r)
            else:
                # line without a tab is a bare system/database line
                r = tokenize(line)
                r.append('$r')
                r.append('#' + str(nid))
                context.append(r)
        else:
            # blank line terminates a dialog: emit the record, then reset
            data.append((context[:-2], u[:], a, dialog_id))
            u = None
            r = None
            # reset to -1 (the original reset to None here, making the
            # dummy answer's type inconsistent with the initial value)
            a = -1
            context = []
            dialog_id = dialog_id + 1
    return data
def parse_dialogs_per_response(lines,candid_dic):
    '''
    Parse dialogs provided in the babi tasks format.

    Emits one training example per system response:
    (context_so_far, user_utterance, answer_index, dialog_id), where
    answer_index comes from candid_dic (raw response text -> candidate id).
    Context sentences carry speaker ($u/$r) and turn (#<n>) markers.
    '''
    data=[]
    context=[]
    u=None
    r=None
    dialog_id=0
    for line in lines:
        line=line.strip()
        if line:
            # each non-blank line starts with a turn number
            nid, line = line.split(' ', 1)
            nid = int(nid)
            if '\t' in line:
                # user utterance and system response separated by a tab;
                # the example is recorded BEFORE markers are appended
                u, r = line.split('\t')
                a = candid_dic[r]
                u = tokenize(u)
                r = tokenize(r)
                # temporal encoding, and utterance/response encoding
                # data.append((context[:],u[:],candid_dic[' '.join(r)]))
                data.append((context[:],u[:],a,dialog_id))
                u.append('$u')
                u.append('#'+str(nid))
                r.append('$r')
                r.append('#'+str(nid))
                context.append(u)
                context.append(r)
            else:
                # line without a tab is a bare system/database line
                r=tokenize(line)
                r.append('$r')
                r.append('#'+str(nid))
                context.append(r)
        else:
            # blank line separates dialogs
            dialog_id=dialog_id+1
            # clear context
            context=[]
    return data
def get_dialogs(f, candid_dic):
    """Read a labelled dialog file and emit one example per response."""
    # renamed the file handle so it no longer shadows the path parameter f
    with open(f) as handle:
        return parse_dialogs_per_response(handle.readlines(), candid_dic)
def vectorize_candidates_sparse(candidates,word_idx):
    """Encode candidates as a sparse bag-of-words tensor.

    Returns a tf.SparseTensor of shape (num_candidates, vocab_size + 1)
    with a 1.0 entry per (candidate, word) occurrence.  Repeated words in
    a candidate append duplicate index entries rather than summing.
    """
    shape=(len(candidates),len(word_idx)+1)
    indices=[]
    values=[]
    for i,candidate in enumerate(candidates):
        for w in candidate:
            # KeyError on out-of-vocabulary words is intentional here
            # (unlike vectorize_candidates below, which maps them to 0)
            indices.append([i,word_idx[w]])
            values.append(1.0)
    return tf.SparseTensor(indices,values,shape)
def vectorize_candidates(candidates,word_idx,sentence_size):
    """Encode candidates as a dense (num_candidates, sentence_size) id matrix.

    Out-of-vocabulary words map to id 0; shorter candidates are 0-padded
    on the right.  Returns a tf constant tensor.
    """
    shape=(len(candidates),sentence_size)
    C=[]
    for i,candidate in enumerate(candidates):
        # number of padding slots (candidates longer than sentence_size
        # would make the row exceed the declared shape)
        lc=max(0,sentence_size-len(candidate))
        C.append([word_idx[w] if w in word_idx else 0 for w in candidate] + [0] * lc)
    return tf.constant(C,shape=shape)
def vectorize_data(data, word_idx, sentence_size, batch_size, candidates_size, max_memory_size, candidates, match_feature_flag):
    """
    Vectorize stories and queries.
    If a sentence length < sentence_size, the sentence will be padded with 0's.
    If a story length < memory_size, the story will be padded with empty memories.
    Empty memories are 1-D arrays of length sentence_size filled with 0's.

    Returns (S, Q, A, C): lists of numpy arrays -- story memories, query
    vectors, answer indices, and per-example candidate id matrices.
    Sorts `data` in place (longest stories first) so each batch shares one
    memory size.
    """
    atmosphere_restriction_set = {'casual', 'romantic', 'business', 'glutenfree', 'vegan', 'vegetarian'}
    S = []
    Q = []
    A = []
    C = []
    data.sort(key=lambda x: len(x[0]), reverse=True)
    for i, (story, query, answer, start) in enumerate(data):
        if i % batch_size == 0:
            # memory size fixed per batch from its first (longest) story
            memory_size = max(1, min(max_memory_size, len(story)))
        ss = []
        story_query_vocab = set()
        # loop variable renamed from i to j: the original shadowed the
        # enclosing batch counter i (harmless but bug-prone)
        for j, sentence in enumerate(story, 1):
            ls = max(0, sentence_size - len(sentence))
            ss.append([word_idx[w] if w in word_idx else 0 for w in sentence] + [0] * ls)
            for w in sentence:
                story_query_vocab.add(w)
        # take only the most recent sentences that fit in memory
        ss = ss[::-1][:memory_size][::-1]
        # pad to memory_size
        lm = max(0, memory_size - len(ss))
        for _ in range(lm):
            ss.append([0] * sentence_size)
        lq = max(0, sentence_size - len(query))
        q = [word_idx[w] if w in word_idx else 0 for w in query] + [0] * lq
        for w in query:
            story_query_vocab.add(w)
        # restriction words mentioned anywhere in the story or query
        story_query_vocab = story_query_vocab.intersection(atmosphere_restriction_set)
        c = []
        for j, candidate in enumerate(candidates):
            candidate_vocab = set()
            for w in candidate:
                candidate_vocab.add(w)
            candidate_vocab = candidate_vocab.intersection(atmosphere_restriction_set)
            extra_feature_len = 0
            match_feature = []
            # append a MATCH token when the candidate's restrictions are a
            # (non-empty) subset of those mentioned in the story/query
            if candidate_vocab <= story_query_vocab and len(candidate_vocab) > 0 and match_feature_flag:
                extra_feature_len = 1
                match_feature.append(word_idx['MATCH_ATMOSPHERE_RESTRICTION'])
            lc = max(0, sentence_size - len(candidate) - extra_feature_len)
            c.append([word_idx[w] if w in word_idx else 0 for w in candidate] + [0] * lc + match_feature)
        S.append(np.array(ss))
        Q.append(np.array(q))
        A.append(np.array(answer))
        C.append(np.array(c))
    return S, Q, A, C
def vectorize_data_with_surface_form(data, word_idx, sentence_size, batch_size, candidates_size, max_memory_size, candidates, match_feature_flag):
    """
    Vectorize stories and queries.
    If a sentence length < sentence_size, the sentence will be padded with 0's.
    If a story length < memory_size, the story will be padded with empty memories.
    Empty memories are 1-D arrays of length sentence_size filled with 0's.

    Like vectorize_data(), but additionally returns human-readable story
    and query strings, the last database result string, and dialog ids for
    error analysis.  Sorts `data` in place (longest stories first).
    """
    atmosphere_restriction_set={'casual','romantic','business','glutenfree','vegan','vegetarian'}
    S = []
    Q = []
    A = []
    C = []
    S_in_readable_form = []
    Q_in_readable_form = []
    dialogIDs = []
    last_db_results = []
    data.sort(key=lambda x:len(x[0]),reverse=True)
    for i, (story, query, answer, dialog_id) in enumerate(data):
        if i%batch_size==0:
            # memory size is fixed per batch from its first (longest) story
            memory_size=max(1,min(max_memory_size,len(story)))
        ss = []
        story_string = []
        story_query_vocab = set()
        dbentries =set([])
        dbEntriesRead=False
        last_db_result=""
        # NOTE(review): this inner loop variable shadows the enclosing
        # batch counter i (harmless here, but easy to trip over)
        for i, sentence in enumerate(story, 1):
            ls = max(0, sentence_size - len(sentence))
            ss.append([word_idx[w] if w in word_idx else 0 for w in sentence] + [0] * ls)
            for w in sentence:
                story_query_vocab.add(w)
            story_element = ' '.join([str(x) for x in sentence[:-2]])
            # if the story element is a database response/result
            if 'r_' in story_element and 'api_call' not in story_element:
                dbEntriesRead = True
                # collect "<restaurant>(<rating>)" entries from rating rows
                if 'r_rating' in story_element:
                    dbentries.add( sentence[0] + '(' + sentence[2] + ')')
            else:
                if dbEntriesRead:
                    # first non-db line after db rows: snapshot the result set
                    #story_string.append('$db : ' + ' '.join([str(x) for x in dbentries]))
                    last_db_result = '$db : ' + ' '.join([str(x) for x in dbentries])
                    dbentries =set([])
                    dbEntriesRead = False
                #story_string.append(' '.join([str(x) for x in sentence[-2:]]) + ' : ' + story_element)
                story_string.append(' '.join([str(x) for x in sentence[-2:]]) + ' : ' + story_element)
        # take only the most recent sentences that fit in memory
        ss = ss[::-1][:memory_size][::-1]
        # pad to memory_size
        lm = max(0, memory_size - len(ss))
        for _ in range(lm):
            ss.append([0] * sentence_size)
        lq = max(0, sentence_size - len(query))
        q = [word_idx[w] if w in word_idx else 0 for w in query] + [0] * lq
        for w in query:
            story_query_vocab.add(w)
        # restriction words mentioned anywhere in the story or query
        story_query_vocab = story_query_vocab.intersection(atmosphere_restriction_set)
        c = []
        for j,candidate in enumerate(candidates):
            candidate_vocab = set()
            for w in candidate:
                candidate_vocab.add(w)
            candidate_vocab = candidate_vocab.intersection(atmosphere_restriction_set)
            extra_feature_len=0
            match_feature=[]
            # NOTE(review): this uses exact set equality (==) where
            # vectorize_data() uses subset (<=) -- confirm the difference
            # is intentional
            if candidate_vocab == story_query_vocab and len(candidate_vocab) > 0 and match_feature_flag:
                extra_feature_len=1
                match_feature.append(word_idx['MATCH_ATMOSPHERE_RESTRICTION'])
            lc=max(0,sentence_size-len(candidate)-extra_feature_len)
            c.append([word_idx[w] if w in word_idx else 0 for w in candidate] + [0] * lc + match_feature)
        S.append(np.array(ss))
        Q.append(np.array(q))
        A.append(np.array(answer))
        C.append(np.array(c))
        S_in_readable_form.append(story_string)
        Q_in_readable_form.append(' '.join([str(x) for x in query]))
        last_db_results.append(last_db_result)
        dialogIDs.append(dialog_id)
    return S, Q, A, C, S_in_readable_form, Q_in_readable_form, last_db_results, dialogIDs
def restaurant_reco_evluation(test_preds, testA, indx2candid):
    """Print accuracy over restaurant-recommendation turns only.

    test_preds  -- predicted candidate indices
    testA       -- ground-truth answers as numpy values (read via .item(0))
    indx2candid -- mapping from candidate index to candidate string
    (Function name keeps the original spelling to preserve the API.)
    """
    total = 0
    match = 0
    for idx, val in enumerate(test_preds):
        answer = indx2candid[testA[idx].item(0)]
        prediction = indx2candid[val]
        # only score turns where the system proposed a restaurant
        if "what do you think of this option:" in prediction:
            total = total+1
            if prediction == answer:
                match=match+1
    if total == 0:
        # avoid ZeroDivisionError when no recommendation turns are present
        print('Restaurant Recommendation Accuracy : n/a (0 recommendation turns)')
    else:
        print('Restaurant Recommendation Accuracy : ' + str(match/float(total)) + " (" + str(match) + "/" + str(total) + ")")
if __name__ == '__main__':
    # quick manual smoke test for tokenize()
    # (repository-metadata text that had been fused onto the print line in
    # the extracted source was removed -- it was a syntax error)
    u = tokenize('The phone number of taj_tandoori is taj_tandoori_phone')
    print(u)
from django.conf.urls import patterns, include, url
from django.conf import settings
from django.views.generic import ListView
from django.views.generic.detail import DetailView
from foo.hotel.models import Hotel
from foo.hotel.views import HotelLimitListView
from foo.hotel.views import HotelLimitNoOrderListView
# URL routes for the hotel app: the root lists hotels via
# HotelLimitListView, /noorder uses the unordered variant.
# NOTE(review): patterns() was deprecated in Django 1.8 and removed in
# 1.10 -- this module targets an older Django; confirm before upgrading.
urlpatterns = patterns('',
                       url(r'^$', HotelLimitListView.as_view(model=Hotel), name='hotel'),
                       url(r'^noorder$', HotelLimitNoOrderListView.as_view(model=Hotel), name='hotelnoorder'))
| rodo/django-perf | foo/hotel/urls.py | Python | gpl-3.0 | 544 |
# Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
import logging
from scap.model.xs.StringType import StringType
logger = logging.getLogger(__name__)
class VariableIdPattern(StringType):
    """String type whose values must be OVAL variable identifiers of the
    form ``oval:<namespace>:var:<positive integer>``, per the OVAL 5
    schema pattern quoted below."""
    # <xsd:pattern value="oval:[A-Za-z0-9_\-\.]+:var:[1-9][0-9]*"/>
    def get_value_pattern(self):
        # regex equivalent of the schema pattern above; presumably consumed
        # by StringType validation (base class not visible here -- confirm)
        return r'oval:[A-Za-z0-9_\-\.]+:var:[1-9][0-9]*'
| cjaymes/pyscap | src/scap/model/oval_5/VariableIdPattern.py | Python | gpl-3.0 | 978 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2021 Timothée Lecomte
# This file is part of Friture.
#
# Friture is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as published by
# the Free Software Foundation.
#
# Friture is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Friture. If not, see <http://www.gnu.org/licenses/>.
from PyQt5 import QtCore
from PyQt5.QtCore import pyqtProperty
from friture.ballistic_peak import BallisticPeak
from friture.level_data import LevelData
class LevelViewModel(QtCore.QObject):
    """Qt view-model exposing level-meter data for one or two channels.

    Wraps fast/slow LevelData and ballistic-peak objects for each channel
    as constant Qt properties, plus a two_channels flag with a change
    notification signal.
    """

    two_channels_changed = QtCore.pyqtSignal(bool)

    def __init__(self, parent=None):
        super().__init__(parent)
        self._two_channels = False
        # fast/slow levels and ballistic peaks, one set per channel
        self._level_data = LevelData(self)
        self._level_data_2 = LevelData(self)
        self._level_data_slow = LevelData(self)
        self._level_data_slow_2 = LevelData(self)
        self._level_data_ballistic = BallisticPeak(self)
        self._level_data_ballistic_2 = BallisticPeak(self)

    @pyqtProperty(bool, notify=two_channels_changed)
    def two_channels(self):
        return self._two_channels

    @two_channels.setter
    def two_channels(self, two_channels):
        # emit only on an actual change to avoid redundant notifications
        if self._two_channels != two_channels:
            self._two_channels = two_channels
            self.two_channels_changed.emit(two_channels)

    @pyqtProperty(LevelData, constant=True)
    def level_data(self):
        return self._level_data

    @pyqtProperty(LevelData, constant=True)
    def level_data_2(self):
        return self._level_data_2

    @pyqtProperty(LevelData, constant=True)
    def level_data_slow(self):
        return self._level_data_slow

    @pyqtProperty(LevelData, constant=True)
    def level_data_slow_2(self):
        return self._level_data_slow_2

    @pyqtProperty(LevelData, constant=True)
    def level_data_ballistic(self):
        return self._level_data_ballistic

    @pyqtProperty(LevelData, constant=True)
    def level_data_ballistic_2(self):
        # repository-metadata text that had been fused onto this return
        # line in the extracted source was removed (it was a syntax error)
        return self._level_data_ballistic_2
# Copyright 2010, 2014 Gerardo Marset <gammer1994@gmail.com>
#
# This file is part of Haxxor Engine.
#
# Haxxor Engine is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Haxxor Engine is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Haxxor Engine. If not, see <http://www.gnu.org/licenses/>.
import os
import time
import json
import tools
from filesystem import File
import cli
import system
import missions
# Savegame filename template; the placeholder is the player's name.
SAVEGAME = "{}.sav"
# In-game (virtual) directory where completed-mission reward files land.
DOWNLOADS_DIR = "C:\\Descargas"
class Game(object):
    """Top-level game object: owns the player profile, the simulated local
    system, the command-line interface and the currently active mission."""
    def __init__(self):
        # The main loop keeps running while this flag stays True.
        self.running = True
        print("Bienvenido a Haxxor Engine.")
        try:
            # Debug builds skip the interactive name prompt.
            self.name = "test" if tools.DEBUG else ask_for_name()
        except EOFError:
            # stdin closed during the prompt: abort startup gracefully.
            self.running = False
            return
        self.clear()
        if os.path.isfile(SAVEGAME.format(self.name)):
            self.load()
            print("Juego cargado.")
        else:
            # Fresh profile: default aliases, first mission, pristine system.
            self.aliases = default_aliases()
            self.mission_id = 0
            self.system = system.default_local_system()
            self.save()
            print("Una nueva partida fue creada para {}.".format(self.name))
            print("Escribí \"help\" para ver la lista de comandos.")
        self.start_mission()
        self.cli = cli.CLI(self.system, self)
    @property
    def valid_hosts(self):
        # Hosts the player may connect to: loopback, own IP, mission target.
        return ["127.0.0.1", "localhost", self.system.ip,
                self.mission.system.ip]
    def start_mission(self, restart=None):
        """Activate the mission selected by mission_id; pass the previous
        mission object as ``restart`` to retry it after a failure."""
        self.mission = (missions.missions[self.mission_id].
                        get_prepared_copy(self, restart))
        print("Tenés un e-mail. Escribí \"mail\" para verlo.")
    def main_loop(self):
        """Prompt loop. While connected to a remote system, feed the
        elapsed wall-clock time to the mission's IP tracker and drop the
        connection when the player's IP gets traced."""
        while self.running:
            ms = time.time()
            self.cli.prompt()
            if not self.cli.system.is_local:
                if self.mission.ip_tracker.update(time.time() - ms,
                                                  self.system.ip):
                    self.telnet_end("Conexión perdida.\nTu IP fue rastreada.",
                                    True)
    def load(self):
        """Restore aliases, mission id and the serialized filesystem from
        the player's JSON savegame."""
        with open(SAVEGAME.format(self.name), "r") as f:
            load_dict = json.loads(f.read())
        self.aliases = load_dict["aliases"]
        self.mission_id = load_dict["mission_id"]
        filesystem = load_dict["filesystem"]
        ip = load_dict["ip"]
        # Walk the nested dict in place, wrapping every leaf (a saved file
        # id) back into a File object. The yielded names are unused; the
        # inner for/pass merely drains the nested generator so recursion
        # actually executes.
        def recursive_loop(directory):
            for name, value in directory.items():
                if isinstance(value, dict):
                    for element in recursive_loop(value):
                        pass
                else:
                    directory[name] = File(value)
                    yield name
        for element in recursive_loop(filesystem):
            pass
        self.system = system.System(filesystem, ip, True)
    def save(self):
        """Serialize the profile to JSON; File objects are reduced to
        their ``id_`` via the ``default`` hook."""
        with open(SAVEGAME.format(self.name), "w") as f:
            f.write(json.dumps({
                "aliases": self.aliases,
                "mission_id": self.mission_id,
                "filesystem": self.system.filesystem,
                "ip": self.system.ip
            }, indent=4, default=lambda o: o.id_))
    def clear(self):
        """Clear the terminal, falling back to printing newlines on
        platforms without a known clear command."""
        if os.name == "posix":
            os.system("clear")
        elif os.name in ("nt", "dos", "ce"):
            os.system("cls")
        else:
            print("\n" * 300)
    def telnet_start(self):
        """Switch the CLI onto the mission's remote system and show its
        banner; end the session immediately if the login fails."""
        self.cli.system = self.mission.system
        self.clear()
        print(self.mission.asciiart)
        if not self.cli.telnet_login():
            self.telnet_end()
    def telnet_end(self, message="Conexión cerrada.", force_fail=False):
        """Return the CLI to the local system and settle the mission: on
        failure restart it; on success deliver the reward files to the
        downloads folder and advance to the next mission."""
        self.cli.system = self.system
        self.clear()
        print(message)
        if force_fail or not self.mission.is_complete():
            print("Misión fallida.")
            # Passing the failed mission makes start_mission restart it.
            self.start_mission(self.mission)
            return
        print("Misión superada.")
        downloads_dir = self.system.retrieve(tools.
                                             dir_to_dirlist(DOWNLOADS_DIR))
        # mission.downloads appears to yield (name, File) pairs — confirm
        # against the missions module.
        for file_name, file_ in self.mission.downloads:
            downloads_dir[file_name] = file_
        self.mission_id += 1
        self.start_mission()
def default_aliases():
    """Return a fresh mapping of built-in command aliases.

    Maps common Unix-style spellings (and the DOS-ism ``cd..``) onto the
    commands the in-game shell actually understands.
    """
    alias_pairs = (
        ("cd..", "cd .."),
        ("ls", "dir"),
        ("rm", "del"),
        ("clear", "cls"),
    )
    return dict(alias_pairs)
def ask_for_name():
    """Prompt repeatedly until the player enters a valid name.

    A valid name is non-empty, pure ASCII and alphanumeric; an invalid
    entry prints the matching Spanish hint and asks again.
    """
    while True:
        name = tools.iinput("¿Cuál es tu nombre? ")
        if name == "":
            print("Escribí tu nombre.")
        elif not all(ord(c) < 128 for c in name):
            print("Solo se permiten caracteres ASCII.")
        elif not name.isalnum():
            print("Solo se permiten caracteres alfanuméricos.")
        else:
            return name
| ideka/haxeng | haxeng/game.py | Python | gpl-3.0 | 5,288 |
# Copyright (c) The AcidSWF Project.
# See LICENSE.txt for details.
"""
Support for creating a service which runs a web server.
@since: 1.0
"""
import logging
from twisted.python import usage
from twisted.application import service
from acidswf.service import createAMFService
# Options understood by the AMF service plugin; each row is
# [long option name, short option, default value, help text].
optParameters = [
    ['log-level', None, logging.INFO, 'Log level.'],
    ['amf-transport', None, 'http', 'Run the AMF server on HTTP or HTTPS transport.'],
    ['amf-host', None, 'localhost', 'The interface for the AMF gateway to listen on.'],
    ['service', None, 'acidswf', 'The remote service name.'],
    ['amf-port', None, 8000, 'The port number for the AMF gateway to listen on.'],
    ['crossdomain', None, 'crossdomain.xml', 'Path to a crossdomain.xml file.'],
]
class Options(usage.Options):
    """
    Define the options accepted by the I{acidswf amf} plugin.

    The accepted parameters are the module-level ``optParameters`` list.
    """
    synopsis = "[amf options]"
    optParameters = optParameters
    longdesc = """\
This starts an AMF server."""
    def postOptions(self):
        """
        Placeholder for post-parse validation.

        Currently a no-op: the HTTPS/SSL availability check below is
        disabled (kept for reference), so no conditional defaults are
        applied and no dependencies are verified.
        """
        pass
        #if self['https']:
        #    try:
        #        from twisted.internet.ssl import DefaultOpenSSLContextFactory
        #    except ImportError:
        #        raise usage.UsageError("SSL support not installed")
def makeService(options):
    """Build the service hierarchy for twistd to run.

    Creates an empty MultiService root and attaches the AMF gateway,
    configured from the parsed command-line ``options``.
    """
    root = service.MultiService()
    createAMFService(root, options)
    return root
| thijstriemstra/acidswf | python/acidswf/application/amf.py | Python | gpl-3.0 | 1,733 |
__author__ = 'Paolo Bellagente'
# Documentation for this module.
#
# More details.
################################## DATABASE ##############################################
from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker
import datetime
## Database name
db_name = "testDatabase"
## Database user
db_uid = "root"
## Database user's password
# NOTE(review): credentials are hard-coded and the password is empty —
# acceptable for a local test database only; move them to configuration
# before any real deployment.
db_passwd = ""
## Database host
db_host = "localhost"
##
# set the database connection engine (MySQL via the pymysql driver)
engine = create_engine('mysql+pymysql://'+db_uid+':'+db_passwd+'@'+db_host+'/'+db_name)
## Base class for declarative table inheritance.
#
# Instantiated once so every model class can reuse the same base.
Base = declarative_base()
class Lesson(Base):
    """ORM model for a recurring weekly lesson (table ``lessons``)."""
    __tablename__ = 'lessons'
    id = Column(INTEGER, primary_key=True)
    # First and last day of the semester the lesson belongs to.
    semesterStartDate = Column(DATE)
    semesterEndDate = Column(DATE)
    # lesson's start hour
    hour = Column(TIME)
    # lesson's day of the week coded form 0 to 6 where 0 is monday and 6 is sunday.
    day = Column(INTEGER)
    subject = Column(VARCHAR(200))
    rooms = Column(VARCHAR(30))
    address = Column(VARCHAR(50))
    teacher = Column(VARCHAR(50))
    # NOTE(review): overriding __init__ without **kwargs disables
    # SQLAlchemy's default keyword constructor (Lesson(subject=...)) —
    # confirm this is intentional.
    def __init__(self):
        self.teacher = ''
    # persist the entity into the database using a short-lived session
    # bound to the module-level engine
    def persist(self):
        Session = sessionmaker(bind=engine)
        session = Session()
        session.add(self)
        session.commit()
        session.close()
# todo: create new entity here
## Create the necessary tables in the database (existing tables are left as-is).
Base.metadata.create_all(engine)
| mpescimoro/stripp3r | lessonEntity.py | Python | gpl-3.0 | 1,603 |
import sys
from mock import (
patch, Mock
)
from pytest import raises
from ..test_helper import argv_kiwi_tests
import kiwi
from kiwi.exceptions import KiwiFileSystemSetupError
from kiwi.builder.filesystem import FileSystemBuilder
class TestFileSystemBuilder:
    """Unit tests for kiwi.builder.filesystem.FileSystemBuilder."""
    @patch('kiwi.builder.filesystem.FileSystemSetup')
    @patch('platform.machine')
    def setup(self, mock_machine, mock_fs_setup):
        # Pretend to run on x86_64 and stub out every collaborator the
        # builder touches: loop device, filesystem object and XML state.
        mock_machine.return_value = 'x86_64'
        self.loop_provider = Mock()
        self.loop_provider.get_device = Mock(
            return_value='/dev/loop1'
        )
        self.loop_provider.create = Mock()
        self.filesystem = Mock()
        self.filesystem.create_on_device = Mock()
        self.filesystem.create_on_file = Mock()
        self.filesystem.sync_data = Mock()
        self.xml_state = Mock()
        self.xml_state.get_build_type_unpartitioned_bytes = Mock(
            return_value=0
        )
        self.xml_state.get_fs_mount_option_list = Mock(
            return_value=['async']
        )
        self.xml_state.get_fs_create_option_list = Mock(
            return_value=['-O', 'option']
        )
        self.xml_state.get_build_type_name = Mock(
            return_value='ext3'
        )
        self.xml_state.get_image_version = Mock(
            return_value='1.2.3'
        )
        self.xml_state.xml_data.get_name = Mock(
            return_value='myimage'
        )
        self.xml_state.build_type.get_target_blocksize = Mock(
            return_value=4096
        )
        self.xml_state.build_type.get_squashfscompression = Mock(
            return_value='gzip'
        )
        self.fs_setup = Mock()
        self.fs_setup.get_size_mbytes = Mock(
            return_value=42
        )
        # NOTE(review): this shadows the setup() method on the instance;
        # harmless because each test runs on a fresh instance, but a
        # different attribute name would be clearer.
        self.setup = Mock()
        # Patch the module attribute so the builder picks up our mock.
        kiwi.builder.filesystem.SystemSetup = Mock(
            return_value=self.setup
        )
    def test_create_unknown_filesystem(self):
        # An unsupported filesystem name must be rejected when creating.
        self.xml_state.get_build_type_name = Mock(
            return_value='super-fs'
        )
        fs = FileSystemBuilder(
            self.xml_state, 'target_dir', 'root_dir'
        )
        with raises(KiwiFileSystemSetupError):
            fs.create()
    def test_no_filesystem_configured(self):
        # A build type without any filesystem must fail at construction.
        self.xml_state.get_build_type_name = Mock(
            return_value='pxe'
        )
        self.xml_state.build_type.get_filesystem = Mock(
            return_value=None
        )
        with raises(KiwiFileSystemSetupError):
            FileSystemBuilder(
                self.xml_state, 'target_dir', 'root_dir'
            )
    @patch('kiwi.builder.filesystem.LoopDevice')
    @patch('kiwi.builder.filesystem.FileSystem')
    @patch('kiwi.builder.filesystem.FileSystemSetup')
    @patch('platform.machine')
    def test_create_on_loop(
        self, mock_machine, mock_fs_setup, mock_fs, mock_loop
    ):
        # Block filesystems (ext3) are created on a loop device sized by
        # FileSystemSetup and synced from the root tree.
        mock_machine.return_value = 'x86_64'
        mock_fs_setup.return_value = self.fs_setup
        mock_fs.return_value = self.filesystem
        mock_loop.return_value = self.loop_provider
        fs = FileSystemBuilder(
            self.xml_state, 'target_dir', 'root_dir'
        )
        fs.create()
        mock_loop.assert_called_once_with(
            'target_dir/myimage.x86_64-1.2.3.ext3', 42, 4096
        )
        self.loop_provider.create.assert_called_once_with()
        mock_fs.assert_called_once_with(
            'ext3', self.loop_provider, 'root_dir/', {
                'mount_options': ['async'],
                'create_options': ['-O', 'option']
            }
        )
        self.filesystem.create_on_device.assert_called_once_with(None)
        self.filesystem.sync_data.assert_called_once_with(
            ['image', '.profile', '.kconfig', '.buildenv', 'var/cache/kiwi']
        )
        self.setup.export_package_verification.assert_called_once_with(
            'target_dir'
        )
        self.setup.export_package_list.assert_called_once_with(
            'target_dir'
        )
    @patch('kiwi.builder.filesystem.FileSystem')
    @patch('kiwi.builder.filesystem.DeviceProvider')
    @patch('platform.machine')
    def test_create_on_file(
        self, mock_machine, mock_provider, mock_fs
    ):
        # File-based filesystems (squashfs) are created directly on a
        # target file, passing through the configured compression.
        mock_machine.return_value = 'x86_64'
        provider = Mock()
        mock_provider.return_value = provider
        mock_fs.return_value = self.filesystem
        self.xml_state.get_build_type_name = Mock(
            return_value='squashfs'
        )
        fs = FileSystemBuilder(
            self.xml_state, 'target_dir', 'root_dir'
        )
        fs.create()
        mock_fs.assert_called_once_with(
            'squashfs', provider, 'root_dir', {
                'mount_options': ['async'],
                'create_options': ['-O', 'option'],
                'compression': 'gzip'
            }
        )
        self.filesystem.create_on_file.assert_called_once_with(
            'target_dir/myimage.x86_64-1.2.3.squashfs', None,
            ['image', '.profile', '.kconfig', '.buildenv', 'var/cache/kiwi']
        )
        self.setup.export_package_verification.assert_called_once_with(
            'target_dir'
        )
        self.setup.export_package_list.assert_called_once_with(
            'target_dir'
        )
    def teardown(self):
        # Restore sys.argv mangled by kiwi's argument handling.
        sys.argv = argv_kiwi_tests
| b1-systems/kiwi | test/unit/builder/filesystem_test.py | Python | gpl-3.0 | 5,298 |
"""
This file is part of contribution to the OAD Data Science Toolkit.
This is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
The software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this software. If not, see <http://www.gnu.org/licenses/>.
File name: EvaluationStats.py
Created: April 11th, 2018
Author: Dr. Rob Lyon
Contact: rob@scienceguyrob.com or robert.lyon@manchester.ac.uk
Web: <http://www.scienceguyrob.com>
Computes and stores the performance statistics of a classifier
given a confusion matrix containing it's true positive, false positive,
true negative and false negative rates.
Designed to run on python 2.4 or later.
"""
from numpy import sqrt
# ******************************
#
# CLASS DEFINITION
#
# ******************************
class ClassifierStats:
"""
Computes the performance statistics of a BINARY classifier only.
"""
# ****************************************************************************************************
#
# Constructor.
#
# ****************************************************************************************************
def __init__(self,confusionMatrix):
"""
Default constructor.
Parameters:
confusionMatrix - a matrix containing the performance of a given BINARY classifier on
a training set. For example given the matrix,
[[15528 731]
[ 249 1390]]
Where,
TrueNegatives = confusionMatrix[0][0] # Negatives correctly receiving negative label.
FalseNegatives = confusionMatrix[0][1] # Positives incorrectly receiving negative label.
FalsePositives = confusionMatrix[1][0] # Negatives incorrectly receiving positive label.
TruePositives = confusionMatrix[1][1] # Positives correctly receiving positive label.
Will not evaluate the performance of multi-class classifiers.
"""
# The accuracy of the classifier.
self.accuracy = 0.0
# The precision of the classifier. Precision is the fraction of retrieved instances that are relevant.
self.precision = 0
# The recall of the classifier. Recall is the fraction of relevant instances that are retrieved.
self.recall = 0.0
# The precision of the map. Specificity relates to the ability of the map to identify negative results.
self.specificity = 0.0
# The negative predictive value (NPV) is a summary statistic
# defined as the proportion of input patterns identified as negative,
# that are correctly identified as such. A high NPV means that when the
# classifier yields a negative result, it is most likely correct in its assessment.
self.negativePredictiveValue = 0.0
# The Matthews correlation coefficient is used in machine learning
# as a measure of the quality of binary (two-class) classifications.
# It takes into account true and false positives and negatives and
# is generally regarded as a balanced measure which can be used even
# if the classes are of very different sizes. The MCC is in essence a
# correlation coefficient between the observed and predicted binary
# classifications; it returns a value between -1 and +1. A coefficient
# of +1 represents a perfect prediction, 0 no better than random prediction
# and -1 indicates total disagreement between prediction and observation.
# The statistic is also known as the phi coefficient.
self.matthewsCorrelation = 0.0
# The F1 score (also F-score or F-measure) is a measure of a classifier's accuracy.
# It considers both the precision p and the recall r of the classifier to compute
# the score: p is the number of correct results divided by the number of all
# returned results and r is the number of correct results divided by the
# number of results that should have been returned.
self.fScore = 0.0
# The g-mean is a measure of useful for data sets with skewed class distributions.
# In other words when most examples belong to one class (say the negative), then this
# metric helps asses performance irrespective of the imbalance. It evaluates the
# inductive bias in terms of the ratio between positive and negative accuracy.
self.gmean = 0.0
# The kappa statistic. Cohen's kappa coefficient is a statistical measure of inter-rater
# agreement or inter-annotator agreement for qualitative (categorical) items. It is
# generally thought to be a more robust measure than simple percent agreement
# calculation since k takes into account the agreement occurring by chance.
self.kappa = 0.0
# The true positives.
self.TP = 0.0
# The true negatives.
self.TN = 0.0
# The false positives.
self.FP = 0.0
# The false negatives.
self.FN = 0.0
# The area under the roc curve.
self.auroc = float('NaN')
# The area under the precision-recall curve.
self.auprc = float('NaN')
self.load(confusionMatrix)
self.calculate()
# ****************************************************************************************************
def load(self,confusionMatrix):
"""
Loads data from the confusion matrix into the correct variables.
Parameters:
confusionMatrix - a matrix containing the performance of a given BINARY classifier on
a training set. For example given the matrix,
[[15528 731]
[ 249 1390]]
Where,
TrueNegatives = confusionMatrix[0][0] # Negatives correctly receiving negative label.
FalseNegatives = confusionMatrix[0][1] # Positives incorrectly receiving negative label.
FalsePositives = confusionMatrix[1][0] # Negatives incorrectly receiving positive label.
TruePositives = confusionMatrix[1][1] # Positives correctly receiving positive label.
Will not evaluate the performance of multi-class classifiers.
Returns: N/A.
"""
self.TN = float(confusionMatrix[0][0]) # Negatives correctly receiving negative label.
self.FN = float(confusionMatrix[0][1]) # Positives incorrectly receiving negative label.
self.FP = float(confusionMatrix[1][0]) # Negatives incorrectly receiving positive label.
self.TP = float(confusionMatrix[1][1]) # Positives correctly receiving positive label.
# ****************************************************************************************************
def calculate(self):
"""
Computes the values of the statistics describing classifier performance.
Parameters: None.
Returns: N/A.
"""
try:
self.accuracy = (self.TP + self.TN) / (self.TP + self.FP + self.FN + self.TN)
except ZeroDivisionError as error:
self.accuracy = float('Nan')
try:
self.precision = (self.TP) / (self.TP + self.FP)
except ZeroDivisionError as error:
self.precision = float('Nan')
try:
self.recall = (self.TP) / (self.TP + self.FN)
except ZeroDivisionError as error:
self.recall = float('Nan')
try:
self.specificity = (self.TN) / (self.FP+self.TN)
except ZeroDivisionError as error:
self.specificity = float('Nan')
try:
self.negativePredictiveValue = (self.TN) / (self.FN + self.TN)
except ZeroDivisionError as error:
self.negativePredictiveValue = float('Nan')
try:
self.matthewsCorrelation = ((self.TP * self.TN) - (self.FP * self.FN)) /\
sqrt((self.TP+self.FP) * (self.TP+self.FN) * (self.TN+self.FP) * (self.TN+self.FN))
except ZeroDivisionError as error:
self.matthewsCorrelation = float('Nan')
try:
self.fScore = 2 * ((self.precision * self.recall) / (self.precision + self.recall))
except ZeroDivisionError as error:
self.fscore = float('Nan')
# Kappa = (totalAccuracy - randomAccuracy) / (1 - randomAccuracy)
#
# where,
#
# totalAccuracy = (TP + TN) / (TP + TN + FP + FN)
#
# and
#
# randomAccuracy = (TN + FP) * (TN + FN) + (FN + TP) * (FP + TP) / (Total*Total).
total = self.TP + self.TN + self.FP + self.FN
totalAcc = (self.TP + self.TN) / (self.TP + self.TN + self.FP + self.FN)
randomAcc = (((self.TN + self.FP) * (self.TN + self.FN)) + ((self.FN + self.TP) * (self.FP + self.TP))) / (total*total)
try:
self.kappa = (totalAcc - randomAcc) / (1 - randomAcc)
except ZeroDivisionError as error:
self.kappa = float('Nan')
try:
self.gmean = sqrt( ( self.TP /( self.TP + self.FN ) ) * ( self.TN / ( self.TN + self.FP ) ) )
except ZeroDivisionError as error:
self.gmean = float('Nan')
# ****************************************************************************************************
    def show(self):
        """
        Prints classifier performance stats to standard output.

        Each row is a left-justified label, a tab, then the value; the
        rate metrics are scaled to percentages while the correlation-style
        scores (MCC, F-Score, Kappa, G-Mean, AUROC, AUPRC) print raw.

        Parameters: None.

        Returns: N/A.
        """
        output ='{:<14}'.format("TP:") +"\t" + str(int(self.TP)) + "\n" +\
                '{:<14}'.format("TN:") +"\t" + str(int(self.TN)) + "\n" +\
                '{:<14}'.format("FP:") +"\t" + str(int(self.FP)) + "\n" +\
                '{:<14}'.format("FN:") +"\t" + str(int(self.FN)) + "\n" +\
                '{:<14}'.format("Accuracy:") +"\t" + str(self.accuracy * 100) + "\n" +\
                '{:<14}'.format("Precision:") +"\t" + str(self.precision * 100) + "\n" +\
                '{:<14}'.format("Recall:") +"\t" + str(self.recall * 100) + "\n" +\
                '{:<14}'.format("Specificity:")+"\t" + str(self.specificity * 100) + "\n" +\
                '{:<14}'.format("NPV:") +"\t" + str(self.negativePredictiveValue * 100) + "\t(Negative Predictive Value)\n" +\
                '{:<14}'.format("MCC:") +"\t" + str(self.matthewsCorrelation) + "\t(Matthews Correlation Coefficient)\n" +\
                '{:<14}'.format("F-Score:") +"\t" + str(self.fScore) +"\n" +\
                '{:<14}'.format("Kappa:") +"\t" + str(self.kappa) +"\n" +\
                '{:<14}'.format("G-Mean:" ) +"\t" + str(self.gmean) +"\n" +\
                '{:<14}'.format("AUROC:" ) +"\t" + str(self.auroc) +"\n" +\
                '{:<14}'.format("AUPRC:" ) +"\t" + str(self.auprc) +"\n"
        print (output)
# ****************************************************************************************************
# ******************************
# Getters
# ******************************
    def getAccuracy(self) -> float:
        """
        Accuracy of the classifier where accuracy = (TP + TN) / (TP + FP + FN + TN).
        Parameters: None.
        Returns: accuracy as a float.
        """
        return float(self.accuracy)
    def getPrecision(self) -> float:
        """
        Precision of the classifier where precision = (TP) / (TP + FP).
        Parameters: None.
        Returns: precision as a float.
        """
        return float(self.precision)
    def getRecall(self) -> float:
        """
        Recall of the classifier where recall = (TP) / (TP + FN).
        Parameters: None.
        Returns: recall as a float.
        """
        return float(self.recall)
    def getSpecificity(self) -> float:
        """
        Specificity of the classifier where specificity = (TN) / (FP+TN).
        Parameters: None.
        Returns: specificity as a float.
        """
        return float(self.specificity)
    def getMatthewsCorrelation(self) -> float:
        """
        Matthew's Correlation Coefficient of the classifier where,
        matthewsCorrelation = ((TP * TN) - (FP * FN)) / sqrt((TP+FP) * (TP+FN) * (TN+FP) * (TN+FN)).
        Parameters: None.
        Returns: mcc as a float.
        """
        return float(self.matthewsCorrelation)
    def getfScore(self) -> float:
        """
        F-Score of the classifier where fScore = 2 * ((precision * recall) / (precision + recall)).
        Parameters: None.
        Returns: F-score as a float.
        """
        return float(self.fScore)
    def getNegativePredictiveValue(self) -> float:
        """
        Negative predictive value of the classifier where negativePredictiveValue = (TN) / (FN + TN).
        Parameters: None.
        Returns: npv as a float.
        """
        return float(self.negativePredictiveValue)
    def getKappa(self) -> float:
        """
        Cohen's Kappa of the classifier where,
        kappa = (totalAccuracy - randomAccuracy) / (1 - randomAccuracy)
        where,
        totalAccuracy = (TP + TN) / (TP + TN + FP + FN)
        and
        randomAccuracy = (TN + FP) * (TN + FN) + (FN + TP) * (FP + TP) / (Total*Total).
        Parameters: None.
        Returns: kappa as a float.
        """
        return float(self.kappa)
    def getGMean(self) -> float:
        """
        G-mean of the classifier where gmean = sqrt( ( TP /( TP + FN ) ) * ( TN / ( TN + FP ) ) ).
        Parameters: None.
        Returns: gmean as a float.
        """
        return float(self.gmean)
    def getAUROC(self) -> float:
        """
        Area under the roc curve of the classifier.
        Parameters: None.
        Returns: auroc as a float.
        """
        return float(self.auroc)
    def setAUROC(self, auroc: float) -> None:
        """
        Sets the Area under the roc curve of the classifier.
        Parameters:
        auroc    -    the area under the roc curve calculated externally.
        Returns: None.
        """
        self.auroc = auroc
    def getAUPRC(self) -> float:
        """
        Area under the precision-recall curve of the classifier.
        Parameters: None.
        Returns: auroc as a float.
        """
        return float(self.auprc)
    def setAUPRC(self, auprc: float) -> None:
        """
        Sets the Area under the precision-recall curve of the classifier.
        Parameters:
        auprc    -    the area under the precision-recall curve calculated externally.
        Returns: None.
        """
        self.auprc = auprc
    def getTP(self) -> int:
        """
        True positives (TP) returned by the classifier.
        Parameters: None.
        Returns: true positives as an integer.
        """
        return int(self.TP)
    def getTN(self) -> int:
        """
        True negatives (TN) returned by the classifier.
        Parameters: None.
        Returns: true negatives as an integer.
        """
        return int(self.TN)
    def getFP(self) -> int:
        """
        False positives (FP) returned by the classifier.
        Parameters: None.
        Returns: false positives as an integer.
        """
        return int(self.FP)
    def getFN(self) -> int:
        """
        False negatives (FN) returned by the classifier.
        Parameters: None.
        Returns: false negatives as an integer.
        """
        return int(self.FN)
# **************************************************************************************************** | astro4dev/OAD-Data-Science-Toolkit | Teaching Materials/Machine Learning/Supervised Learning/Examples/PPC/EvaluationStats.py | Python | gpl-3.0 | 16,967 |
#!/usr/bin/env python
# -*- coding: utf8 -*-
"""setup
(C) Franck Barbenoire <fbarbenoire@yahoo.fr>
License : GPL v3"""
from distutils.core import setup
# setuptools' setup must shadow the distutils one imported above:
# include_package_data and zip_safe are setuptools-only keywords that
# distutils' setup() does not honour.
from setuptools import setup, find_packages

setup(name = "django-openzoom",
      version = "0.1.1",
      description = "Django application for displaying very high resolution images",
      author = "Franck Barbenoire",
      author_email = "fbarbenoire@yahoo.fr",
      url = "https://github.com/franckinux/django-openzoom",

      packages = find_packages(),
      include_package_data = True,
      zip_safe = False,

      classifiers = ['Development Status :: 3 - Alpha',
                     'License :: OSI Approved :: GNU General Public License (GPL)',
                     'Programming Language :: Python :: 2.6',
                     'Programming Language :: Python :: 2.7',
                     'Framework :: Django',
                     'Topic :: Internet :: WWW/HTTP :: Dynamic Content']
     )
| franckinux/django-openzoom | setup.py | Python | gpl-3.0 | 925 |
from tests.base_test import BaseTest
from tests import config
from core import modules
from core.sessions import SessionURL
from testfixtures import log_capture
from core import messages
import logging
import os
import subprocess
class FileBzip(BaseTest):
    """Integration tests for the file_bzip2 module: compress/decompress
    round trips, skip conditions and error reporting."""
    # Create and bzip2 binary files for the test. The strings contain
    # literal backslash escape sequences on purpose: they are echoed
    # verbatim by the shell fixture below.
    binstring = [
        b'\\xe0\\xf5\\xfe\\xe2\\xbd\\x0c\\xbc\\x9b\\xa0\\x8f\\xed?\\xa1\\xe1',
        b'\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x06\\x00\\x00\\x00'
    ]
    uncompressed = [
        os.path.join(config.base_folder, 'test_file_bzip2', 'binfile0'),
        os.path.join(config.base_folder, 'test_file_bzip2', 'binfile1')
    ]
    compressed = [
        os.path.join(config.base_folder, 'test_file_bzip2', 'binfile0.bz2'),
        os.path.join(config.base_folder, 'test_file_bzip2', 'binfile1.bz2')
    ]
    def setUp(self):
        # Fresh agent session per test, then (re)build the fixture files
        # on the target host with a small shell script.
        session = SessionURL(self.url, self.password, volatile = True)
        modules.load_modules(session)
        subprocess.check_output("""
BASE_FOLDER="{config.base_folder}/test_file_bzip2/"
rm -rf "$BASE_FOLDER"
mkdir -p "$BASE_FOLDER/"
echo -n '\\xe0\\xf5\\xfe\\xe2\\xbd\\x0c\\xbc\\x9b\\xa0\\x8f\\xed?\\xa1\\xe1' > "$BASE_FOLDER/binfile0"
echo -n '\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x06\\x00\\x00\\x00' > "$BASE_FOLDER/binfile1"
bzip2 "$BASE_FOLDER/binfile0"
bzip2 "$BASE_FOLDER/binfile1"
chown www-data: -R "$BASE_FOLDER/"
""".format(
        config = config
        ), shell=True)
        self.run_argv = modules.loaded['file_bzip2'].run_argv
    def test_compress_decompress(self):
        # Decompress and check test file
        self.assertTrue(self.run_argv(["--decompress", self.compressed[0]]));
        self.assertEqual(
            subprocess.check_output('cat "%s"' % self.uncompressed[0], shell=True),
            self.binstring[0]
        )
        # Let's re-compress it, and decompress and check again
        self.assertTrue(self.run_argv([self.uncompressed[0]]))
        self.assertTrue(self.run_argv(["--decompress", self.compressed[0]]));
        self.assertEqual(
            subprocess.check_output('cat "%s"' % self.uncompressed[0], shell=True),
            self.binstring[0]
        )
        # Recompress it keeping the original file
        self.assertTrue(self.run_argv([self.uncompressed[0], '--keep']))
        # Check the existance of the original file and remove it
        subprocess.check_call('stat -c %%a "%s"' % self.uncompressed[0], shell=True)
        subprocess.check_call('rm "%s"' % self.uncompressed[0], shell=True)
        #Do the same check
        self.assertTrue(self.run_argv(["--decompress", self.compressed[0]]));
        self.assertEqual(
            subprocess.check_output('cat "%s"' % self.uncompressed[0], shell=True),
            self.binstring[0]
        )
    def test_compress_decompress_multiple(self):
        # Round-trip every fixture file, not just the first one.
        for index in range(0, len(self.compressed)):
            # Decompress and check test file
            self.assertTrue(self.run_argv(["--decompress", self.compressed[index]]));
            self.assertEqual(
                subprocess.check_output('cat "%s"' % self.uncompressed[index], shell=True),
                self.binstring[index]
            )
            # Let's re-compress it, and decompress and check again
            self.assertTrue(self.run_argv([self.uncompressed[index]]))
            self.assertTrue(self.run_argv(["--decompress", self.compressed[index]]));
            self.assertEqual(
                subprocess.check_output('cat "%s"' % self.uncompressed[index], shell=True),
                self.binstring[index]
            )
    @log_capture()
    def test_already_exists(self, log_captured):
        # Decompress keeping it and check test file
        self.assertTrue(self.run_argv(["--decompress", self.compressed[0], '--keep']));
        self.assertEqual(
            subprocess.check_output('cat "%s"' % self.uncompressed[0], shell=True),
            self.binstring[0]
        )
        # Do it again and trigger that the file decompressed already exists
        self.assertIsNone(self.run_argv(["--decompress", self.compressed[0]]));
        self.assertEqual(log_captured.records[-1].msg,
                         "File '%s' already exists, skipping decompressing" % self.uncompressed[0])
        # Compress and trigger that the file compressed already exists
        self.assertIsNone(self.run_argv([self.uncompressed[0]]));
        self.assertEqual(log_captured.records[-1].msg,
                         "File '%s' already exists, skipping compressing" % self.compressed[0])
    @log_capture()
    def test_wrong_ext(self, log_captured):
        # Decompress it and check test file
        self.assertTrue(self.run_argv(["--decompress", self.compressed[0]]));
        self.assertEqual(
            subprocess.check_output('cat "%s"' % self.uncompressed[0], shell=True),
            self.binstring[0]
        )
        # Decompress the decompressed, wrong ext
        self.assertIsNone(self.run_argv(["--decompress", self.uncompressed[0]]));
        self.assertEqual(log_captured.records[-1].msg,
                         "Unknown suffix, skipping decompressing")
    @log_capture()
    def test_unexistant(self, log_captured):
        # Decompress it and check test file
        self.assertIsNone(self.run_argv(["--decompress", 'bogus']));
        self.assertEqual(log_captured.records[-1].msg,
                         "Skipping file '%s', check existance and permission" % 'bogus')
| epinna/weevely3 | tests/test_file_bzip2.py | Python | gpl-3.0 | 5,554 |