| text (string, 6-947k chars) | repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) |
|---|---|---|---|---|---|---|
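A minimal sketch of reading rows with the schema above, assuming the dump is exported as a JSON-lines file with one object per row (the filename and export format here are hypothetical, not given in this dump):

import json

# Hypothetical export of this dump; each line is one row with the columns listed above.
with open("python_code_dump.jsonl", encoding="utf-8") as fh:
    for line in fh:
        row = json.loads(line)
        print(row["repo_name"], row["path"], row["license"], row["size"], row["score"])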
def f(x):
return x + 1
raise ValueError("I do not want to be imported")
| endlessm/chromium-browser | third_party/llvm/lldb/test/API/commands/command/script/import/rdar-12586188/fail212586188.py | Python | bsd-3-clause | 77 | 0.012987 |
from __future__ import print_function, division
from sympy.core import C, Add, Mul, Pow, S
from sympy.core.compatibility import default_sort_key
from sympy.core.mul import _keep_coeff
from sympy.printing.str import StrPrinter
from sympy.printing.precedence import precedence
class AssignmentError(Exception):
"""
Raised if an assignment variable for a loop is missing.
"""
pass
class CodePrinter(StrPrinter):
"""
The base class for code-printing subclasses.
"""
_operators = {
'and': '&&',
'or': '||',
'not': '!',
}
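# A minimal subclass sketch (hypothetical printer): concrete printers are expected
# to supply the hooks this base class calls below, such as _get_statement() and
# _rate_index_position().
#
#     class MyCodePrinter(CodePrinter):
#         _operators = {'and': '&&', 'or': '||', 'not': '!', 'xor': '^'}
#         def _get_statement(self, codestring):
#             return codestring + ";"
#         def _rate_index_position(self, p):
#             return p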
def _doprint_a_piece(self, expr, assign_to=None):
# Here we print an expression that may contain Indexed objects; they
# correspond to arrays in the generated code. The low-level implementation
# involves looping over array elements and possibly storing results in
# temporary variables or accumulating them in the assign_to object.
lhs_printed = self._print(assign_to)
lines = []
# Setup loops over non-dummy indices -- all terms need these
indices = self.get_expression_indices(expr, assign_to)
openloop, closeloop = self._get_loop_opening_ending(indices)
# Setup loops over dummy indices -- each term needs separate treatment
from sympy.tensor import get_contraction_structure
d = get_contraction_structure(expr)
# terms with no summations first
if None in d:
text = CodePrinter.doprint(self, Add(*d[None]))
else:
# If all terms have summations we must initialize array to Zero
text = CodePrinter.doprint(self, 0)
# skip redundant assignments
if text != lhs_printed:
lines.extend(openloop)
if assign_to is not None:
text = self._get_statement("%s = %s" % (lhs_printed, text))
lines.append(text)
lines.extend(closeloop)
for dummies in d:
# then terms with summations
if isinstance(dummies, tuple):
indices = self._sort_optimized(dummies, expr)
openloop_d, closeloop_d = self._get_loop_opening_ending(
indices)
for term in d[dummies]:
if term in d and not ([list(f.keys()) for f in d[term]]
== [[None] for f in d[term]]):
# If one factor in the term has its own internal
# contractions, those must be computed first.
# (temporary variables?)
raise NotImplementedError(
"FIXME: no support for contractions in factor yet")
else:
# We need the lhs expression as an accumulator for
# the loops, i.e.
#
# for (int d=0; d < dim; d++){
# lhs[] = lhs[] + term[][d]
# } ^.................. the accumulator
#
# We check if the expression already contains the
# lhs, and raise an exception if it does, as that
# syntax is currently undefined. FIXME: What would be
# a good interpretation?
if assign_to is None:
raise AssignmentError(
"need assignment variable for loops")
if term.has(assign_to):
raise ValueError("FIXME: lhs present in rhs,\
this is undefined in CCodePrinter")
lines.extend(openloop)
lines.extend(openloop_d)
text = "%s = %s" % (lhs_printed, CodePrinter.doprint(
self, assign_to + term))
lines.append(self._get_statement(text))
lines.extend(closeloop_d)
lines.extend(closeloop)
return lines
def get_expression_indices(self, expr, assign_to):
from sympy.tensor import get_indices, get_contraction_structure
rinds, junk = get_indices(expr)
linds, junk = get_indices(assign_to)
# support broadcast of scalar
if linds and not rinds:
rinds = linds
if rinds != linds:
raise ValueError("lhs indices must match non-dummy"
" rhs indices in %s" % expr)
return self._sort_optimized(rinds, assign_to)
def _sort_optimized(self, indices, expr):
if not indices:
return []
# determine optimized loop order by giving a score to each index
# the index with the highest score is put in the innermost loop.
score_table = {}
for i in indices:
score_table[i] = 0
arrays = expr.atoms(C.Indexed)
for arr in arrays:
for p, ind in enumerate(arr.indices):
try:
score_table[ind] += self._rate_index_position(p)
except KeyError:
pass
return sorted(indices, key=lambda x: score_table[x])
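# Worked example (hypothetical rating): if a subclass's _rate_index_position(p)
# simply returned p, then for a single factor A[i, j] the index i would score 0
# and j would score 1, so the sorted result is [i, j] and j ends up controlling
# the innermost loop, as described above.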
def _print_NumberSymbol(self, expr):
# A Number symbol that is not implemented here or with _printmethod
# is registered and evaluated
self._number_symbols.add((expr,
self._print(expr.evalf(self._settings["precision"]))))
return str(expr)
def _print_Dummy(self, expr):
# dummies must be printed as unique symbols
return "%s_%i" % (expr.name, expr.dummy_index) # Dummy
_print_Catalan = _print_NumberSymbol
_print_EulerGamma = _print_NumberSymbol
_print_GoldenRatio = _print_NumberSymbol
def _print_And(self, expr):
PREC = precedence(expr)
return (" %s " % self._operators['and']).join(self.parenthesize(a, PREC)
for a in sorted(expr.args, key=default_sort_key))
def _print_Or(self, expr):
PREC = precedence(expr)
return (" %s " % self._operators['or']).join(self.parenthesize(a, PREC)
for a in sorted(expr.args, key=default_sort_key))
def _print_Xor(self, expr):
if self._operators.get('xor') is None:
return self._print_not_supported(expr)
PREC = precedence(expr)
return (" %s " % self._operators['xor']).join(self.parenthesize(a, PREC)
for a in expr.args)
def _print_Equivalent(self, expr):
if self._operators.get('equivalent') is None:
return self._print_not_supported(expr)
PREC = precedence(expr)
return (" %s " % self._operators['equivalent']).join(self.parenthesize(a, PREC)
for a in expr.args)
def _print_Not(self, expr):
PREC = precedence(expr)
return self._operators['not'] + self.parenthesize(expr.args[0], PREC)
def _print_Mul(self, expr):
prec = precedence(expr)
c, e = expr.as_coeff_Mul()
if c < 0:
expr = _keep_coeff(-c, e)
sign = "-"
else:
sign = ""
a = [] # items in the numerator
b = [] # items that are in the denominator (if any)
if self.order not in ('old', 'none'):
args = expr.as_ordered_factors()
else:
# use make_args in case expr was something like -x -> x
args = Mul.make_args(expr)
# Gather args for numerator/denominator
for item in args:
if item.is_commutative and item.is_Pow and item.exp.is_Rational and item.exp.is_negative:
if item.exp != -1:
b.append(Pow(item.base, -item.exp, evaluate=False))
else:
b.append(Pow(item.base, -item.exp))
else:
a.append(item)
a = a or [S.One]
a_str = [self.parenthesize(x, prec) for x in a]
b_str = [self.parenthesize(x, prec) for x in b]
if len(b) == 0:
return sign + '*'.join(a_str)
elif len(b) == 1:
if len(a) == 1 and not (a[0].is_Atom or a[0].is_Add):
return sign + "%s/" % a_str[0] + '*'.join(b_str)
else:
return sign + '*'.join(a_str) + "/%s" % b_str[0]
else:
return sign + '*'.join(a_str) + "/(%s)" % '*'.join(b_str)
def _print_not_supported(self, expr):
self._not_supported.add(expr)
return self.emptyPrinter(expr)
# The following can not be simply translated into C or Fortran
_print_Basic = _print_not_supported
_print_ComplexInfinity = _print_not_supported
_print_Derivative = _print_not_supported
_print_dict = _print_not_supported
_print_ExprCondPair = _print_not_supported
_print_GeometryEntity = _print_not_supported
_print_Infinity = _print_not_supported
_print_Integral = _print_not_supported
_print_Interval = _print_not_supported
_print_Limit = _print_not_supported
_print_list = _print_not_supported
_print_Matrix = _print_not_supported
_print_DeferredVector = _print_not_supported
_print_NaN = _print_not_supported
_print_NegativeInfinity = _print_not_supported
_print_Normal = _print_not_supported
_print_Order = _print_not_supported
_print_PDF = _print_not_supported
_print_RootOf = _print_not_supported
_print_RootsOf = _print_not_supported
_print_RootSum = _print_not_supported
_print_Sample = _print_not_supported
_print_SparseMatrix = _print_not_supported
_print_tuple = _print_not_supported
_print_Uniform = _print_not_supported
_print_Unit = _print_not_supported
_print_Wild = _print_not_supported
_print_WildFunction = _print_not_supported
| hrashk/sympy | sympy/printing/codeprinter.py | Python | bsd-3-clause | 9,879 | 0.001417 |
#!/usr/bin/python
# -*- coding: windows-1252 -*-
import wxversion
wxversion.select('2.8')
import wx
import wx.aui
from id import *
from model import *
from graphic import *
from sql import *
from django import *
import sqlite3
from xml.dom import minidom
class MainFrame(wx.aui.AuiMDIParentFrame):
def __init__(self, app, posx, posy, sizex, sizey):
self.data = {}
self.locale = wx.Locale()
self.locale.AddCatalogLookupPathPrefix('./locale')
if app.config.Read("language"):
if app.config.Read("language") != 'English':
idioma = app.config.Read("language")
else:
idioma = ''
else:
idioma = 'es_ES'
app.config.Write("language", idioma)
app.config.Flush()
self.locale.AddCatalog(idioma)
for key, value in language.iteritems():
if value == idioma:
self.data["idioma"] = key
self.translation = wx.GetTranslation
self.app = app
#--Initialize the parent with the Frame's position and title--#
wx.aui.AuiMDIParentFrame.__init__(self, None, -1, self.translation(archivo[TITULO]), pos = (posx, posy), size = (sizex, sizey))
#--Embed the CUC logo in the window's control box--#
ico = wx.Icon('images/mini_logo_cuc_trans.ico', wx.BITMAP_TYPE_ICO)
self.SetIcon(ico)
#--Initialize wxPython's OGL library--#
ogl.OGLInitialize()
#--MENU--#
#File menu
self.menuFile = wx.Menu()
self.menuFile.Append(ID_CREAR_MODELO, self.translation(archivo[ID_CREAR_MODELO]), self.translation(archivoHelp[ID_CREAR_MODELO]))
self.menuFile.Append(ID_ABRIR_MODELO, self.translation(archivo[ID_ABRIR_MODELO]), self.translation(archivoHelp[ID_ABRIR_MODELO]))
self.menuFile.AppendSeparator()
self.menuFile.Append(ID_GUARDAR_MODELO, self.translation(archivo[ID_GUARDAR_MODELO]), self.translation(archivoHelp[ID_GUARDAR_MODELO]))
self.menuFile.Enable(ID_GUARDAR_MODELO, False)
self.menuFile.Append(ID_GUARDAR_COMO_MODELO, self.translation(archivo[ID_GUARDAR_COMO_MODELO]), self.translation(archivoHelp[ID_GUARDAR_COMO_MODELO]))
self.menuFile.Enable(ID_GUARDAR_COMO_MODELO, False)
self.menuFile.Append(ID_EXPORTAR_MODELO, self.translation(archivo[ID_EXPORTAR_MODELO]), self.translation(archivoHelp[ID_EXPORTAR_MODELO]))
self.menuFile.Enable(ID_EXPORTAR_MODELO, False)
self.menuFile.AppendSeparator()
self.menuFile.Append(ID_CERRAR_APLICACION, self.translation(archivo[ID_CERRAR_APLICACION]), self.translation(archivoHelp[ID_CERRAR_APLICACION]))
#View menu
self.menuVer = wx.Menu()
self.refrescar = self.menuVer.Append(ID_MENU_VER_REFRESCAR, self.translation(archivo[ID_MENU_VER_REFRESCAR]), self.translation(archivoHelp[ID_MENU_VER_REFRESCAR]))
wx.EVT_MENU(self, ID_MENU_VER_REFRESCAR, self.Actualizar)
self.menuVer.AppendSeparator()
self.menuVerStandard = self.menuVer.Append(ID_MENU_VER_STANDARD, self.translation(archivo[ID_MENU_VER_STANDARD]), self.translation(archivoHelp[ID_MENU_VER_STANDARD]), kind=wx.ITEM_CHECK)
self.menuVerIdef1x = self.menuVer.Append(ID_MENU_VER_IDF1X, self.translation(archivo[ID_MENU_VER_IDF1X]), self.translation(archivoHelp[ID_MENU_VER_IDF1X]), kind=wx.ITEM_CHECK)
self.menuVer.AppendSeparator()
self.menuVerNav = self.menuVer.Append(ID_MENU_VER_NAV, self.translation(archivo[ID_MENU_VER_NAV]), self.translation(archivoHelp[ID_MENU_VER_NAV]), kind=wx.ITEM_CHECK)
self.menuVerCard = self.menuVer.Append(ID_MENU_VER_CARD, self.translation(archivo[ID_MENU_VER_CARD]), self.translation(archivoHelp[ID_MENU_VER_CARD]), kind=wx.ITEM_CHECK)
self.menuVer.AppendSeparator()
self.barraStatus = self.menuVer.Append(ID_MENU_VER_BARRA_ESTADO, self.translation(archivo[ID_MENU_VER_BARRA_ESTADO]), self.translation(archivoHelp[ID_MENU_VER_BARRA_ESTADO]), kind=wx.ITEM_CHECK)
if app.tool:
idf1x, standard, navegador = eval(app.tool)
else:
idf1x, standard, navegador = (True, True, True)
app.config.Write("tool", str( (True, True, True) ))
app.config.Flush()
self.menuVer.Check(ID_MENU_VER_STANDARD, standard)
self.menuVer.Check(ID_MENU_VER_IDF1X, idf1x)
self.menuVer.Check(ID_MENU_VER_BARRA_ESTADO, True)
self.menuVer.Enable(ID_MENU_VER_REFRESCAR, False)
self.menuVer.Enable(ID_MENU_VER_NAV, False)
self.menuVer.Enable(ID_MENU_VER_CARD, False)
#Tools menu
self.menuTool = wx.Menu()
self.menuTool.Append(ID_CREAR_ENTIDAD, self.translation(archivo[ID_CREAR_ENTIDAD]), self.translation(archivoHelp[ID_CREAR_ENTIDAD]))
self.menuTool.Enable(ID_CREAR_ENTIDAD, False)
self.menuTool.AppendSeparator()
self.menuTool.Append(ID_RELACION_IDENTIF, self.translation(archivo[ID_RELACION_IDENTIF]), self.translation(archivoHelp[ID_RELACION_IDENTIF]))
self.menuTool.Enable(ID_RELACION_IDENTIF, False)
self.menuTool.Append(ID_RELACION_NO_IDENTIF, self.translation(archivo[ID_RELACION_NO_IDENTIF]), self.translation(archivoHelp[ID_RELACION_IDENTIF]))
self.menuTool.Enable(ID_RELACION_NO_IDENTIF, False)
self.menuTool.AppendSeparator()
self.menuTool.Append(ID_GENERAR_SCRIPT, self.translation(archivo[ID_GENERAR_SCRIPT]), self.translation(archivoHelp[ID_GENERAR_SCRIPT]))
self.menuTool.Enable(ID_GENERAR_SCRIPT, False)
self.menuTool.Append(ID_GENERAR_SCRIPT_DJANGO, archivo[ID_GENERAR_SCRIPT_DJANGO], archivoHelp[ID_GENERAR_SCRIPT_DJANGO])
self.menuTool.Enable(ID_GENERAR_SCRIPT_DJANGO, False)
#self.menuTool.Append(ID_GUARDAR_SCRIPT, "Guardar Script SQL", "Guarda el Script SQL del modelo para PostgreSQL")
#Help menu
self.menuHelp = wx.Menu()
#self.menuLanguage = wx.Menu()
#self.menuLanguage.Append(ID_MENU_HELP_us_US, self.translation(archivo[ID_MENU_HELP_us_US]), self.translation(archivoHelp[ID_MENU_HELP_us_US]), kind=wx.ITEM_RADIO)
#self.menuLanguage.Append(ID_MENU_HELP_es_ES, self.translation(archivo[ID_MENU_HELP_es_ES]), self.translation(archivoHelp[ID_MENU_HELP_es_ES]), kind=wx.ITEM_RADIO).Check(True)
#self.menuLanguage.Append(ID_MENU_HELP_fr_FR, self.translation("frances"), kind=wx.ITEM_RADIO)
#self.menuHelp.AppendMenu(ID_MENU_HELP_LANGUAGE, self.translation(archivo[ID_MENU_HELP_LANGUAGE]), self.menuLanguage)
self.menuHelp.Append(ID_MENU_HELP_LANGUAGE, self.translation(archivo[ID_MENU_HELP_LANGUAGE]), self.translation(archivoHelp[ID_MENU_HELP_LANGUAGE]))
self.menuHelp.Append(ID_MENU_HELP_AYUDA, self.translation(archivo[ID_MENU_HELP_AYUDA]), self.translation(archivoHelp[ID_MENU_HELP_AYUDA]))
self.menuHelp.AppendSeparator()
self.menuHelp.Append(ID_MENU_HELP_LOG, self.translation(archivo[ID_MENU_HELP_LOG]), self.translation(archivoHelp[ID_MENU_HELP_LOG]))
self.menuHelp.Enable(ID_MENU_HELP_LOG, False)
self.menuHelp.AppendSeparator()
self.menuHelp.Append(ID_MENU_HELP_ACERCA_DE, self.translation(archivo[ID_MENU_HELP_ACERCA_DE]), self.translation(archivoHelp[ID_MENU_HELP_ACERCA_DE]))
#--Add the menus to the menu bar--#
self.menuBar = wx.MenuBar()
self.menuBar.Append(self.menuFile, self.translation(menuBar[0]))
self.menuBar.Append(self.menuVer, self.translation(menuBar[1]))
self.menuBar.Append(self.menuTool, self.translation(menuBar[2]))
self.menuBar.Append(self.menuHelp, self.translation(menuBar[3]))
#--Add the menu bar to the frame--#
self.SetMenuBar(self.menuBar)
if not posx:
self.Centre()
#--MENU ToolBar--#
self._mgr = wx.aui.AuiManager()
self._mgr.SetManagedWindow(self)
#self.translationperspectives = []
self.n = 0
self.x = 0
self.toolBarIdef1x = wx.ToolBar(self, -1, wx.DefaultPosition, wx.DefaultSize,
wx.TB_FLAT | wx.TB_NODIVIDER)
self.toolBarIdef1x.SetToolBitmapSize((8, 8))
self.toolBarIdef1x.AddLabelTool(ID_PUNTERO_MOUSE, self.translation(archivo[ID_PUNTERO_MOUSE]), wx.Bitmap('images/Puntero.png'))
self.toolBarIdef1x.AddLabelTool(ID_CREAR_ENTIDAD, self.translation(archivo[ID_CREAR_ENTIDAD]), wx.Bitmap('images/Entidad.png'))
self.toolBarIdef1x.EnableTool(ID_CREAR_ENTIDAD, False)
self.toolBarIdef1x.AddLabelTool(ID_RELACION_IDENTIF, self.translation(archivo[ID_RELACION_IDENTIF]), wx.Bitmap('images/R-identificadora.png'))
self.toolBarIdef1x.EnableTool(ID_RELACION_IDENTIF, False)
self.toolBarIdef1x.AddLabelTool(ID_RELACION_NO_IDENTIF, self.translation(archivo[ID_RELACION_NO_IDENTIF]), wx.Bitmap('images/R-No-identificadora.png'))
self.toolBarIdef1x.EnableTool(ID_RELACION_NO_IDENTIF, False)
self.toolBarIdef1x.Realize()
self._mgr.AddPane(self.toolBarIdef1x, wx.aui.AuiPaneInfo().
Name("toolBarIdef1x").Caption("IDEF1X-Kit").
ToolbarPane().Top().Row(1).
LeftDockable(True).RightDockable(True).CloseButton(False))
if not idf1x:
panelIdef1x = self._mgr.GetPane("toolBarIdef1x");
panelIdef1x.Hide()
self.toolBarStandard = wx.ToolBar(self, -1, wx.DefaultPosition, wx.DefaultSize,
wx.TB_FLAT | wx.TB_NODIVIDER)
self.toolBarStandard.SetToolBitmapSize(wx.Size(32, 32))
self.toolBarStandard.AddLabelTool(ID_CREAR_MODELO, self.translation(archivo[ID_CREAR_MODELO]), wx.ArtProvider.GetBitmap(wx.ART_NEW, wx.ART_TOOLBAR))
self.toolBarStandard.AddLabelTool(ID_ABRIR_MODELO, self.translation(archivo[ID_ABRIR_MODELO]), wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN, wx.ART_TOOLBAR))
self.toolBarStandard.AddSeparator()
self.toolBarStandard.AddLabelTool(ID_GUARDAR_MODELO, self.translation(archivo[ID_GUARDAR_MODELO]), wx.ArtProvider.GetBitmap(wx.ART_FLOPPY, wx.ART_TOOLBAR))
self.toolBarStandard.EnableTool(ID_GUARDAR_MODELO, False)
self.toolBarStandard.AddSeparator()
self.toolBarStandard.AddLabelTool(ID_GENERAR_SCRIPT, self.translation(archivo[ID_GENERAR_SCRIPT]), wx.Bitmap('images/2_sqlLogo.png') )
self.toolBarStandard.EnableTool(ID_GENERAR_SCRIPT, False)
self.toolBarStandard.AddLabelTool(ID_GENERAR_SCRIPT_DJANGO, archivo[ID_GENERAR_SCRIPT_DJANGO], wx.Bitmap('images/django.png') )
self.toolBarStandard.EnableTool(ID_GENERAR_SCRIPT_DJANGO, False)
self.toolBarStandard.Realize()
self._mgr.AddPane(self.toolBarStandard, wx.aui.AuiPaneInfo().
Name("toolBarStandard").Caption("Estandar").
ToolbarPane().Top().Row(1).
LeftDockable(True).RightDockable(True).CloseButton(False))
if not standard:
panelStandard = self._mgr.GetPane("toolBarStandard");
panelStandard.Hide()
self._mgr.Update()
#--Status bar--#
self.statusBar = self.CreateStatusBar()
self.SetStatusText("Listo!")
#--Right-click MENU on the Tree--#
self.menu_tree_entidad = wx.Menu()
self.menu_tree_entidad.Append(ID_CREAR_ENTIDAD, self.translation(archivo[ID_CREAR_ENTIDAD]))
self.menu_tree_atributo = wx.Menu()
self.menu_tree_atributo.Append(ID_TREE_MODIFICAR_ATRIBUTO, self.translation(archivo[ID_TREE_MODIFICAR_ATRIBUTO]))
self.menu_tree_atributo.Append(ID_TREE_ELIMINAR_ATRIBUTO, self.translation(archivo[ID_TREE_ELIMINAR_ATRIBUTO]))
self.menu_tree_relacion = wx.Menu()
self.menu_tree_relacion.Append(ID_CREAR_RELACION, self.translation(archivo[ID_CREAR_RELACION]))
#--Right-click MENU on the shapes--#
self.menu_entidad = wx.Menu()
self.menu_entidad.Append(ID_MODIFICAR_ENTIDAD, self.translation(archivo[ID_MODIFICAR_ENTIDAD]))
self.menu_entidad.Append(ID_ELIMINAR_ENTIDAD, self.translation(archivo[ID_ELIMINAR_ENTIDAD]))
self.menu_atributo = wx.Menu()
self.menu_atributo.Append(ID_CREAR_ATRIBUTO, self.translation(archivo[ID_CREAR_ATRIBUTO]))
self.menu_atributo.Append(ID_MODIFICAR_ATRIBUTO, self.translation(archivo[ID_MODIFICAR_ATRIBUTO]))
self.menu_atributo.Append(ID_ELIMINAR_ATRIBUTO, self.translation(archivo[ID_ELIMINAR_ATRIBUTO]))
self.menu_relacion = wx.Menu()
self.menu_relacion.Append(ID_MODIFICAR_RELACION, self.translation(archivo[ID_MODIFICAR_RELACION]))
self.menu_relacion.Append(ID_ELIMINAR_RELACION, self.translation(archivo[ID_ELIMINAR_RELACION]))
self.menu_relacionIdentificadora = wx.Menu()
self.menu_relacionIdentificadora.Append(ID_MODIFICAR_RELACION, self.translation(archivo[ID_MODIFICAR_RELACION]))
self.menu_relacionIdentificadora.Append(ID_ELIMINAR_RELACION, self.translation(archivo[ID_ELIMINAR_RELACION]))
self.menu_relacionNoIdentificadora = wx.Menu()
self.menu_relacionNoIdentificadora.Append(ID_MODIFICAR_RELACION, self.translation(archivo[ID_MODIFICAR_RELACION]))
self.menu_relacionNoIdentificadora.Append(ID_ELIMINAR_RELACION, self.translation(archivo[ID_ELIMINAR_RELACION]))
#--Events for all buttons according to their ID--#
self.Bind(wx.EVT_MENU, self.CrearModelo, id=ID_CREAR_MODELO)
self.Bind(wx.EVT_MENU, self.GuardarModelo, id=ID_GUARDAR_MODELO)
self.Bind(wx.EVT_MENU, self.GuardarModeloComo, id=ID_GUARDAR_COMO_MODELO)
self.Bind(wx.EVT_MENU, self.AbrirModelo, id=ID_ABRIR_MODELO)
self.Bind(wx.EVT_MENU, self.ExportarModelo, id=ID_EXPORTAR_MODELO)
self.Bind(wx.EVT_MENU, self.OnExit, id=ID_CERRAR_APLICACION )
self.Bind(wx.EVT_MENU, self.ToolBarIdef1xVer, id=ID_MENU_VER_IDF1X)
self.Bind(wx.EVT_MENU, self.NavVer, id=ID_MENU_VER_NAV)
self.Bind(wx.EVT_MENU, self.NavCard, id=ID_MENU_VER_CARD)
self.Bind(wx.EVT_MENU, self.ToolBarStandardVer, id=ID_MENU_VER_STANDARD)
self.Bind(wx.EVT_MENU, self.ToggleStatusBar, id=ID_MENU_VER_BARRA_ESTADO)
self.Bind(wx.EVT_MENU, self.Puntero, id = ID_PUNTERO_MOUSE)
self.Bind(wx.EVT_MENU, self.CrearEntidad, id = ID_CREAR_ENTIDAD)
self.Bind(wx.EVT_MENU, self.ModificarEntidad, id= ID_MODIFICAR_ENTIDAD)
self.Bind(wx.EVT_MENU, self.EliminarEntidad, id= ID_ELIMINAR_ENTIDAD)
self.Bind(wx.EVT_MENU, self.CrearAtributo, id = ID_CREAR_ATRIBUTO)
self.Bind(wx.EVT_MENU, self.ModificarAtributo, id = ID_MODIFICAR_ATRIBUTO)
self.Bind(wx.EVT_MENU, self.EliminarAtributo, id = ID_ELIMINAR_ATRIBUTO)
self.Bind(wx.EVT_MENU, self.TreeModificarAtributo, id = ID_TREE_MODIFICAR_ATRIBUTO)
self.Bind(wx.EVT_MENU, self.TreeEliminarAtributo, id = ID_TREE_ELIMINAR_ATRIBUTO)
self.Bind(wx.EVT_MENU, self.CrearRelacion, id = ID_CREAR_RELACION)
self.Bind(wx.EVT_MENU, self.RelacionIdentificadora, id = ID_RELACION_IDENTIF)
self.Bind(wx.EVT_MENU, self.RelacionNoIdentificadora, id = ID_RELACION_NO_IDENTIF)
self.Bind(wx.EVT_MENU, self.ModificarRelacion, id = ID_MODIFICAR_RELACION)
self.Bind(wx.EVT_MENU, self.EliminarRelacion, id = ID_ELIMINAR_RELACION)
self.Bind(wx.EVT_MENU, self.GenerarScriptSql, id = ID_GENERAR_SCRIPT)
self.Bind(wx.EVT_MENU, self.GenerarScriptDjango, id = ID_GENERAR_SCRIPT_DJANGO)
#self.Bind(wx.EVT_MENU, self.GuardarScriptSql, id = ID_GUARDAR_SCRIPT)
#self.Bind(wx.EVT_MENU, self.ActualizarIdioma, id=ID_MENU_HELP_us_US )
#self.Bind(wx.EVT_MENU, self.ActualizarIdioma, id=ID_MENU_HELP_es_ES )
#self.Bind(wx.EVT_MENU, self.ActualizarIdioma, id=ID_MENU_HELP_fr_FR )
self.Bind(wx.EVT_MENU, self.ActualizarIdioma, id=ID_MENU_HELP_LANGUAGE )
self.Bind(wx.EVT_MENU, self.VerLog, id=ID_MENU_HELP_LOG )
self.Bind(wx.EVT_MENU, self.OnAboutBox, id=ID_MENU_HELP_ACERCA_DE )
#--Thread to check and save the Frame position--#
self.time = wx.Timer(self)
self.Bind(wx.EVT_TIMER, app.SaveConfig, self.time)
self.time.Start(5000)
self.GetMenuBar().Remove(self.GetMenuBar().FindMenu('&Window'))
def CrearModelo(self, evt):
ejecute = Modelo(self)
ejecute.CrearModelo(self)
if ejecute.num == 1:
ejecute.Close(True)
self.GetMenuBar().Remove(self.GetMenuBar().FindMenu('&Window'))
def GuardarModelo(self, evt):
self.GetActiveChild().GuardarModelo()
def GuardarModeloComo(self, evt):
self.GetActiveChild().GuardarModelo(1)
def AbrirModelo(self, evt):
file = wx.FileDialog(self, message=self.Idioma(archivo[ID_MODELO_ABRIR_TITULO]), defaultDir=os.path.expanduser("~"), wildcard=self.Idioma(archivo[ID_MODELO_ABRIR_ARCHIVO]), style=0)
if file.ShowModal() == wx.ID_OK:
ejecute = Modelo(self)
ejecute.AbrirModelo(self, file.GetPath(), file.GetFilename())
if ejecute.num == 1:
dial = wx.MessageDialog(self, self.Idioma(archivo[ID_MODELO_ABRIR_ERROR]), self.Idioma(archivo[ID_MODELO_ABRIR_ERROR_TITULO]), wx.OK | wx.ICON_ERROR)
dial.ShowModal()
ejecute.Close(True)
self.GetMenuBar().Remove(self.GetMenuBar().FindMenu('&Window'))
def AbrirModeloDirecto(self, file):
ejecute = Modelo(self)
ejecute.AbrirModelo(self, file.strip(), "")
if ejecute.num == 1:
dial = wx.MessageDialog(self, self.Idioma(archivo[ID_MODELO_ABRIR_ERROR]), self.Idioma(archivo[ID_MODELO_ABRIR_ERROR_TITULO]), wx.OK | wx.ICON_ERROR)
dial.ShowModal()
ejecute.Close(True)
self.GetMenuBar().Remove(self.GetMenuBar().FindMenu('&Window'))
def ExportarModelo(self, evt):
self.GetActiveChild().ExportarModelo()
#--Allows exiting the application--#
def OnExit(self, evt):
self.Close(True)
def Actualizar(self, evt):
dc = wx.ClientDC(self.GetActiveChild().canvas)
self.GetActiveChild().canvas.PrepareDC(dc)
self.GetActiveChild().canvas.Redraw(dc)
self.GetActiveChild().canvas.Refresh()
self.Refresh()
def ToolBarIdef1xVer(self, event):
panelIdef1x = self._mgr.GetPane("toolBarIdef1x");
if self.menuVerIdef1x.IsChecked():
panelIdef1x.Show()
mos = True
else:
panelIdef1x.Hide()
mos = False
self.app.config.Write("tool", str((mos, self.menuVerStandard.IsChecked(), self.menuVerNav.IsChecked())))
self.app.config.Flush()
self._mgr.Update()
def NavVer(self, event):
panelNav = self.GetActiveChild().nav;
if self.menuVerNav.IsChecked() and not panelNav.IsShown():
panelNav.Show()
mos = True
else:
panelNav.Hide()
mos = False
self.menuVer.Check(ID_MENU_VER_NAV, mos)
self.app.config.Write("tool", str((self.menuVerIdef1x.IsChecked(), self.menuVerStandard.IsChecked() , mos)))
self.app.config.Flush()
self.GetActiveChild()._mgr.Update()
def NavCard(self, event):
if self.menuVerCard.IsChecked():
mos = True
else:
mos = False
self.menuVer.Check(ID_MENU_VER_CARD, mos)
for relacion in self.GetActiveChild().relaciones:
relacion.OnCardinalidad()
self.GetActiveChild().canvas.Refresh()
def ToolBarStandardVer(self, event):
panelStandard = self._mgr.GetPane("toolBarStandard");
if self.menuVerStandard.IsChecked():
panelStandard.Show()
mos = True
else:
panelStandard.Hide()
mos = False
self.app.config.Write("tool", str((self.menuVerIdef1x.IsChecked(), mos, self.menuVerNav.IsChecked())))
self.app.config.Flush()
self._mgr.Update()
def ToggleStatusBar(self, event):
if self.barraStatus.IsChecked():
self.statusBar.Show()
else:
self.statusBar.Hide()
def CrearEntidad(self, evt):
ejecute = Entidad()
#validar = ejecute.CrearEntidad(self, self.GetActiveChild().canvas, self.GetActiveChild().contadorEntidad)
dlg = Dialogos(self, self.Idioma(archivo[ENTIDAD_TITULO]))
dlg.Entidad(ejecute.data)
if dlg.ShowModal() == wx.ID_OK:
for elemento in self.GetActiveChild().entidades:
if elemento.nombre == ejecute.data.get("nombre"):
validar = ejecute.ValidarNombreEntidad(self.GetActiveChild().entidades)
if validar == False:
return 0
else:
return 0
ejecute.CrearEntidad(self, self.GetActiveChild().canvas, self.GetActiveChild().contadorEntidad)
self.GetActiveChild().contadorEntidad += 1
self.GetActiveChild().entidades.append(ejecute)
self.GetActiveChild().canvas.Refresh()
def ModificarEntidad(self, evt):
ejecute = Entidad()
for elemento in self.GetActiveChild().entidades:
if elemento.nombreForma.Selected():
ejecute.editar = 1
ejecute.elemento = elemento
if ejecute.editar == 1:
ejecute.ModificarEntidad(self.GetActiveChild().canvas, ejecute.elemento, self.GetActiveChild().entidades)
"""else:
dlg = wx.TextEntryDialog(None, "cual entidad quiere modificar?", 'Modificar Entidad', '')
if dlg.ShowModal() == wx.ID_OK:
response = dlg.GetValue()
for elemento in self.GetActiveChild().entidades:
if elemento.nombre == response:
ejecute.ModificarEntidad(self.GetActiveChild().canvas, elemento, self.GetActiveChild().entidades)"""
self.GetActiveChild().canvas.Refresh()
def EliminarEntidad(self, evt):
ejecute = Entidad()
for elemento in self.GetActiveChild().entidades:
if elemento.nombreForma.Selected():
ejecute.editar = 1
ejecute.elemento = elemento
if ejecute.editar == 1:
respuesta = ejecute.EliminarEntidad(self.GetActiveChild().canvas, ejecute.elemento, self.GetActiveChild().entidades, self.GetActiveChild())
if respuesta == 1:
self.GetActiveChild().entidades.remove(ejecute.elemento)
"""else:
dlg = wx.TextEntryDialog(None, "cual entidad quiere eliminar?", 'Eliminar Entidad', '')
dlg.SetIcon=(icon)
if dlg.ShowModal() == wx.ID_OK:
response = dlg.GetValue()
for elemento in self.GetActiveChild().entidades:
if elemento.nombre == response:
respuesta = ejecute.EliminarEntidad(self.GetActiveChild().canvas, elemento, self.GetActiveChild().entidades, self.GetActiveChild())
if respuesta == 1:
self.GetActiveChild().entidades.remove(elemento)"""
self.GetActiveChild().canvas.Refresh()
def CrearAtributo(self, evt):
ejecute = Atributo()
for elemento in self.GetActiveChild().entidades:
if elemento.atributosForma.Selected():
ejecute.editar = 1
ejecute.elemento = elemento
if ejecute.editar == 1:
dlg = Dialogos(self.GetActiveChild().canvas.frame, self.Idioma(archivo[ATRIBUTO_TITULO]))
dlg.Atributo(ejecute.data)
if dlg.ShowModal() == wx.ID_OK:
for elemento in ejecute.elemento.atributos:
if elemento.nombre == ejecute.data.get("nombreAtributo"):
validar = ejecute.ValidarNombreAtributo(self.GetActiveChild().canvas.frame, ejecute.elemento.atributos)
if validar == False:
return 0
else:
return 0
ejecute.CrearAtributo(self.GetActiveChild().canvas, ejecute.elemento, self.GetActiveChild().contadorAtributo)
self.GetActiveChild().contadorAtributo += 1
for entidadHija in ejecute.elemento.entidadesHijas:
entidadHija.HeredarAtributos(ejecute.elemento, 1)
"""else:
dlg = wx.TextEntryDialog(None, "cual entidad agregar un atributo?", 'Agregar Atributo', '')
if dlg.ShowModal() == wx.ID_OK:
response = dlg.GetValue()
for elemento in self.GetActiveChild().entidades:
if elemento.nombre == response:
ejecute.CrearAtributo(self.GetActiveChild().canvas, elemento, self.GetActiveChild().contadorAtributo)"""
self.GetActiveChild().canvas.Refresh()
def ModificarAtributo(self, evt):
ejecute = Atributo()
for elemento in self.GetActiveChild().entidades:
if elemento.atributosForma.Selected():
ejecute.editar = 1
ejecute.elemento = elemento
if ejecute.editar == 1:
ejecute.DlgModificarAtributo(self.GetActiveChild().canvas, ejecute.elemento)
"""else:
dlg = wx.TextEntryDialog(None, "cuall entidad agregar un atributo?", 'Agregar Atributo', '')
if dlg.ShowModal() == wx.ID_OK:
response = dlg.GetValue()
for elemento in self.GetActiveChild().entidades:
if elemento.nombre == response:
ejecute.ModificarAtributo(self.GetActiveChild().canvas, elemento)"""
dc = wx.ClientDC(self.GetActiveChild().canvas)
for elemento in self.GetActiveChild().entidades:
ejecute.ModificarAtributosForma(dc, elemento)
self.GetActiveChild().canvas.Refresh()
def EliminarAtributo(self, evt):
ejecute = Atributo()
for elemento in self.GetActiveChild().entidades:
if elemento.atributosForma.Selected():
ejecute.editar = 1
ejecute.elemento = elemento
if ejecute.editar == 1:
ejecute.DlgEliminarAtributo(self.GetActiveChild().canvas, ejecute.elemento)
"""else:
dlg = wx.TextEntryDialog(None, "cual entidad remover un atributo?", 'Eliminar Atributo', '')
if dlg.ShowModal() == wx.ID_OK:
response = dlg.GetValue()
for elemento in self.GetActiveChild().entidades:
if elemento.nombre == response:
ejecute.DlgEliminarAtributo(self.GetActiveChild().canvas, elemento)"""
self.GetActiveChild().canvas.Refresh()
def CrearRelacion(self, evt):
ejecute = Relacion()
ejecute.DlgCrearRelacion(self, self.GetActiveChild().canvas, self.GetActiveChild().entidades)
self.GetActiveChild().contadorRelacion += 1
self.GetActiveChild().canvas.Refresh()
def TreeModificarAtributo(self, evt):
ejecute = Atributo()
ejecute.ModificarAtributo(self.GetActiveChild().canvas, self.atributoAcc.entidad, self.atributoAcc)
self.GetActiveChild().canvas.Refresh()
def TreeEliminarAtributo(self, evt):
if self.atributoAcc.claveForanea == True:
dial = wx.MessageDialog(self, self.Idioma(archivo[ATRIBUTO_ELIMINAR_ERROR]) % self.atributoAcc.nombre, 'Error', wx.OK | wx.ICON_ERROR)
dial.ShowModal()
return
dlg = wx.MessageDialog(self.GetActiveChild().canvas, self.Idioma('Want to remove the attribute %s') % self.atributoAcc.nombre, self.Idioma('Delete Attribute %s') % self.atributoAcc.nombre, wx.YES_NO | wx.ICON_QUESTION)
if dlg.ShowModal() == wx.ID_YES:
ejecute = Atributo()
ejecute.EliminarAtributo(self.GetActiveChild().canvas, self.atributoAcc.entidad, self.atributoAcc)
self.GetActiveChild().canvas.Refresh()
def RelacionIdentificadora(self, evt):
self.GetActiveChild().canvas.SetCursor(wx.CROSS_CURSOR)
self.GetActiveChild().relacion = 1
def RelacionNoIdentificadora(self, evt):
self.GetActiveChild().canvas.SetCursor(wx.CROSS_CURSOR)
self.GetActiveChild().relacion = 2
def ModificarRelacion(self, evt):
ejecute = Relacion()
for elemento in self.GetActiveChild().relaciones:
if elemento.Selected():
ejecute.DlgModificarRelacion(elemento, self, self.GetActiveChild().canvas, self.GetActiveChild().entidades)
def EliminarRelacion(self, evt):
ejecute = Relacion()
for elemento in self.GetActiveChild().relaciones:
if elemento.Selected():
ejecute.EliminarRelacion(elemento, self.GetActiveChild().canvas, self.GetActiveChild(), self.GetActiveChild().entidades)
def GenerarScriptSql(self, evt):
script = SQL().ScriptPostgreSQL(self.GetActiveChild())
dlg = Dialogos(self, "Script SQL")
dlg.ScriptSql(script)
dlg.ShowModal()
def GenerarScriptDjango(self, evt):
script = Django().ScriptDjango(self.GetActiveChild())
dlg = Dialogos(self, "Script Django")
dlg.ScriptSql(script)
dlg.ShowModal()
def GuardarScriptSql(self, evt):
script = SQL().ScriptPostgreSQL(self.GetActiveChild())
tempFile = wx.FileDialog(self, message="Guardar SQL", defaultDir=os.path.expanduser("~"), defaultFile="sofiaSQL", wildcard="Archivos SQL (*.sql)|*.sql", style=wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
if tempFile.ShowModal() == wx.ID_OK:
fileSQL = "%s.sql" % tempFile.GetPath()
#nombreArchivoTemporal = tempFile.GetFilename()
file = codecs.open(fileSQL, encoding='UTF-8', mode = 'w+')
file.write(script)
file.close()
def Idioma(self, texto):
if language[self.data["idioma"]] != '':
return self.translation(texto)
else:
return texto
def ActualizarIdioma(self, evt):
dlg = Dialogos(self, self.Idioma("Configuration"))
dlg.Configuracion(self.data)
if dlg.ShowModal() == wx.ID_OK:
countMenuBar = 0
if language[self.data["idioma"]] != '':
self.locale.AddCatalog(language[self.data["idioma"]])
idioma = language[self.data["idioma"]]
for menu in self.menuFile.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(self.translation(archivo[menu.GetId()]))
menu.SetHelp(self.translation(archivoHelp[menu.GetId()]))
for menu in self.menuVer.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(self.translation(archivo[menu.GetId()]))
menu.SetHelp(self.translation(archivoHelp[menu.GetId()]))
for menu in self.menuTool.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(self.translation(archivo[menu.GetId()]))
menu.SetHelp(self.translation(archivoHelp[menu.GetId()]))
for menu in self.menuHelp.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(self.translation(archivo[menu.GetId()]))
menu.SetHelp(self.translation(archivoHelp[menu.GetId()]))
for menu in self.menuBar.GetMenus():
try:
menu[0].SetTitle(self.translation(menuBar[countMenuBar]))
self.menuBar.Replace(countMenuBar, menu[0], self.translation(menuBar[countMenuBar]))
countMenuBar = countMenuBar + 1
except:
countMenuBar = countMenuBar + 1
for menu in self.menu_tree_entidad.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(self.translation(archivo[menu.GetId()]))
menu.SetHelp(self.translation(archivoHelp[menu.GetId()]))
for menu in self.menu_tree_atributo.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(self.translation(archivo[menu.GetId()]))
menu.SetHelp(self.translation(archivoHelp[menu.GetId()]))
for menu in self.menu_tree_relacion.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(self.translation(archivo[menu.GetId()]))
menu.SetHelp(self.translation(archivoHelp[menu.GetId()]))
for menu in self.menu_entidad.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(self.translation(archivo[menu.GetId()]))
menu.SetHelp(self.translation(archivoHelp[menu.GetId()]))
for menu in self.menu_atributo.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(self.translation(archivo[menu.GetId()]))
menu.SetHelp(self.translation(archivoHelp[menu.GetId()]))
for menu in self.menu_relacion.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(self.translation(archivo[menu.GetId()]))
menu.SetHelp(self.translation(archivoHelp[menu.GetId()]))
for menu in self.menu_relacionIdentificadora.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(self.translation(archivo[menu.GetId()]))
menu.SetHelp(self.translation(archivoHelp[menu.GetId()]))
for menu in self.menu_relacionNoIdentificadora.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(self.translation(archivo[menu.GetId()]))
menu.SetHelp(self.translation(archivoHelp[menu.GetId()]))
try:
self.SetTitle(self.translation(archivo[TITULO]))
self.GetActiveChild().lienzo.Caption(self.translation("Canvas"))
self.GetActiveChild().nav.Caption(self.translation("Object Browser"))
except:
pass
else:
idioma = 'English'
for menu in self.menuFile.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(archivo[menu.GetId()])
menu.SetHelp(archivoHelp[menu.GetId()])
for menu in self.menuVer.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(archivo[menu.GetId()])
menu.SetHelp(archivoHelp[menu.GetId()])
for menu in self.menuTool.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(archivo[menu.GetId()])
menu.SetHelp(archivoHelp[menu.GetId()])
for menu in self.menuHelp.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(archivo[menu.GetId()])
menu.SetHelp(archivoHelp[menu.GetId()])
for menu in self.menuBar.GetMenus():
try:
menu[0].SetTitle(menuBar[countMenuBar])
self.menuBar.Replace(countMenuBar, menu[0], menuBar[countMenuBar])
countMenuBar = countMenuBar + 1
except:
countMenuBar = countMenuBar + 1
for menu in self.menu_tree_entidad.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(archivo[menu.GetId()])
menu.SetHelp(archivoHelp[menu.GetId()])
for menu in self.menu_tree_atributo.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(archivo[menu.GetId()])
menu.SetHelp(archivoHelp[menu.GetId()])
for menu in self.menu_tree_relacion.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(archivo[menu.GetId()])
menu.SetHelp(archivoHelp[menu.GetId()])
for menu in self.menu_entidad.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(archivo[menu.GetId()])
menu.SetHelp(archivoHelp[menu.GetId()])
for menu in self.menu_atributo.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(archivo[menu.GetId()])
menu.SetHelp(archivoHelp[menu.GetId()])
for menu in self.menu_relacion.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(archivo[menu.GetId()])
menu.SetHelp(archivoHelp[menu.GetId()])
for menu in self.menu_relacionIdentificadora.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(archivo[menu.GetId()])
menu.SetHelp(archivoHelp[menu.GetId()])
for menu in self.menu_relacionNoIdentificadora.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(archivo[menu.GetId()])
menu.SetHelp(archivoHelp[menu.GetId()])
self.SetTitle(archivo[TITULO])
try:
self.GetActiveChild().lienzo.Caption("Canvas")
self.GetActiveChild().nav.Caption("Object Browser")
except:
pass
self.app.config.Write("language", idioma)
self.app.config.Flush()
self.Refresh()
def VerLog(self, event):
dlg = Dialogos(self, "Eventos")
dlg.VerLog(self.GetActiveChild().log.VerEventos())
dlg.ShowModal()
#--Displays the About box--#
def OnAboutBox(self, event):
description = """Sofia es una herramienta desarrollada con el lenguaje de programación Python para la modelación de datos, genera el Script SQL para PostgreSQL en esta versión. Es un proyecto de Investigación y Desarrollo del Centro de Investigación en Informatica Aplicada (CENIIA) del Colegio Universitario de Caracas. Creado y dirigido por el Prof. Alejandro Amaro con la colaboración de los estudiantes."""
licence = """Aplicacion liberada bajo la licencia GPLv3, para el uso."""
info = wx.AboutDialogInfo()
info.SetIcon(wx.Icon("images/sofia.png", wx.BITMAP_TYPE_PNG))
info.SetName('Sofia')
info.SetVersion('0.072')
info.SetDescription(description)
info.SetCopyright('(C) 2011 Colegio Universitario de Caracas')
info.SetWebSite('http://www.cuc.edu.ve')
info.SetLicence(licence)
info.AddDeveloper('Prof. Alejandro Amaro - Autor - Tutor')
info.AddDeveloper("Estudiantes de Proyecto Socio-Tecnológico:")
info.AddDeveloper(' Junio 2011 Mayo 2012 - Versión 0.0.7')
info.AddDeveloper(' T.S.U. Arturo Delgado ')
info.AddDeveloper(' T.S.U. Maximo Gonzales ')
info.AddDeveloper(' T.S.U. Alexis Canchica ')
info.AddDeveloper(' Mayo 2010 Mayo 2011 - Versión 0.0.4')
info.AddDeveloper(' Br. Arturo Delgado ')
info.AddDeveloper(' Br. Ruben Rosas ')
info.AddDeveloper(' Br. Carolina Machado')
info.AddDeveloper(' Br. Erik Mejias ')
info.AddDeveloper('Estudiantes Tesistas:')
info.AddDeveloper(' Abril 2009 Junio 2009 - Versión 0.0.1')
info.AddDeveloper(' Br. Dorian Machado ')
info.AddDeveloper(' Br. Daglis Campos ')
info.AddDeveloper(' Br. Felix Rodriguez ')
info.AddDocWriter('Estudiantes de Proyecto Socio-Tecnológico:')
info.AddDocWriter(' Junio 2011 Mayo 2012 - Versión 0.0.7')
info.AddDocWriter(' T.S.U. Arturo Delgado ')
info.AddDocWriter(' T.S.U. Maximo Gonzales ')
info.AddDocWriter(' T.S.U. Alexis Canchica ')
info.AddDocWriter(' Mayo 2010 Mayo 2011 - Versión 0.0.4')
info.AddDocWriter(' Br. Arturo Delgado ')
info.AddDocWriter(' Br. Ruben Rosas ')
info.AddDocWriter(' Br. Carolina Machado')
info.AddDocWriter(' Br. Erik Mejias ')
info.AddArtist('Alumnos del Colegio Universitario de Caracas')
info.AddTranslator('Anonimo')
wx.AboutBox(info)
#dlg = Dialogos(self, "Script SQL")
#dlg.AboutBox()
#dlg.ShowModal()
def Puntero(self, evt):
self.GetActiveChild().canvas.SetCursor(wx.STANDARD_CURSOR)
self.GetActiveChild().click = 0
self.GetActiveChild().relacion = 0
| ajdelgados/Sofia | modules/main.py | Python | gpl-3.0 | 37,233 | 0.010102 |
# -*- coding: utf-8 -*-
## src/search_window.py
##
## Copyright (C) 2007 Stephan Erb <steve-e AT h3c.de>
## Copyright (C) 2007-2014 Yann Leboulanger <asterix AT lagaule.org>
##
## This file is part of Gajim.
##
## Gajim is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published
## by the Free Software Foundation; version 3 only.
##
## Gajim is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Gajim. If not, see <http://www.gnu.org/licenses/>.
##
from gi.repository import GLib
from gi.repository import Gtk
from gi.repository import Gdk
from common import gajim
from common import dataforms
from common import ged
import gtkgui_helpers
import dialogs
import vcard
import config
import dataforms_widget
class SearchWindow:
def __init__(self, account, jid):
"""
Create new window
"""
# an account object
self.account = account
self.jid = jid
# retrieving widgets from xml
self.xml = gtkgui_helpers.get_gtk_builder('search_window.ui')
self.window = self.xml.get_object('search_window')
for name in ('label', 'progressbar', 'search_vbox', 'search_button',
'add_contact_button', 'information_button'):
self.__dict__[name] = self.xml.get_object(name)
self.search_button.set_sensitive(False)
# displaying the window
self.xml.connect_signals(self)
self.window.show_all()
self.request_form()
self.pulse_id = GLib.timeout_add(80, self.pulse_callback)
self.is_form = None
# Is there a jid column in the results? -1 means no; otherwise it holds the column number
self.jid_column = -1
gajim.ged.register_event_handler('search-form-received', ged.GUI1,
self._nec_search_form_received)
gajim.ged.register_event_handler('search-result-received', ged.GUI1,
self._nec_search_result_received)
def request_form(self):
gajim.connections[self.account].request_search_fields(self.jid)
def pulse_callback(self):
self.progressbar.pulse()
return True
def on_search_window_key_press_event(self, widget, event):
if event.keyval == Gdk.KEY_Escape:
self.window.destroy()
def on_search_window_destroy(self, widget):
if self.pulse_id:
GLib.source_remove(self.pulse_id)
del gajim.interface.instances[self.account]['search'][self.jid]
gajim.ged.remove_event_handler('search-form-received', ged.GUI1,
self._nec_search_form_received)
gajim.ged.remove_event_handler('search-result-received', ged.GUI1,
self._nec_search_result_received)
def on_close_button_clicked(self, button):
self.window.destroy()
def on_search_button_clicked(self, button):
if self.is_form:
self.data_form_widget.data_form.type_ = 'submit'
gajim.connections[self.account].send_search_form(self.jid,
self.data_form_widget.data_form.get_purged(), True)
else:
infos = self.data_form_widget.get_infos()
if 'instructions' in infos:
del infos['instructions']
gajim.connections[self.account].send_search_form(self.jid, infos,
False)
self.search_vbox.remove(self.data_form_widget)
self.progressbar.show()
self.label.set_text(_('Waiting for results'))
self.label.show()
self.pulse_id = GLib.timeout_add(80, self.pulse_callback)
self.search_button.hide()
def on_add_contact_button_clicked(self, widget):
(model, iter_) = self.result_treeview.get_selection().get_selected()
if not iter_:
return
jid = model[iter_][self.jid_column]
dialogs.AddNewContactWindow(self.account, jid)
def on_information_button_clicked(self, widget):
(model, iter_) = self.result_treeview.get_selection().get_selected()
if not iter_:
return
jid = model[iter_][self.jid_column]
if jid in gajim.interface.instances[self.account]['infos']:
gajim.interface.instances[self.account]['infos'][jid].window.present()
else:
contact = gajim.contacts.create_contact(jid=jid, account=self.account)
gajim.interface.instances[self.account]['infos'][jid] = \
vcard.VcardWindow(contact, self.account)
def _nec_search_form_received(self, obj):
if self.pulse_id:
GLib.source_remove(self.pulse_id)
self.progressbar.hide()
self.label.hide()
if obj.is_dataform:
self.is_form = True
self.data_form_widget = dataforms_widget.DataFormWidget()
self.dataform = dataforms.ExtendForm(node=obj.data)
self.data_form_widget.set_sensitive(True)
try:
self.data_form_widget.data_form = self.dataform
except dataforms.Error:
self.label.set_text(_('Error in received dataform'))
self.label.show()
return
if self.data_form_widget.title:
self.window.set_title('%s - Search - Gajim' % \
self.data_form_widget.title)
else:
self.is_form = False
self.data_form_widget = config.FakeDataForm(obj.data)
self.data_form_widget.show_all()
self.search_vbox.pack_start(self.data_form_widget, True, True, 0)
self.search_button.set_sensitive(True)
def on_result_treeview_cursor_changed(self, treeview):
if self.jid_column == -1:
return
(model, iter_) = treeview.get_selection().get_selected()
if not iter_:
return
if model[iter_][self.jid_column]:
self.add_contact_button.set_sensitive(True)
self.information_button.set_sensitive(True)
else:
self.add_contact_button.set_sensitive(False)
self.information_button.set_sensitive(False)
def _nec_search_result_received(self, obj):
if self.pulse_id:
GLib.source_remove(self.pulse_id)
self.progressbar.hide()
self.label.hide()
if not obj.is_dataform:
if not obj.data:
self.label.set_text(_('No result'))
self.label.show()
return
# We suppose all items have the same fields
sw = Gtk.ScrolledWindow()
sw.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
self.result_treeview = Gtk.TreeView()
self.result_treeview.connect('cursor-changed',
self.on_result_treeview_cursor_changed)
sw.add(self.result_treeview)
# Create model
fieldtypes = [str]*len(obj.data[0])
model = Gtk.ListStore(*fieldtypes)
# Copy data to model
for item in obj.data:
model.append(item.values())
# Create columns
counter = 0
for field in obj.data[0].keys():
self.result_treeview.append_column(Gtk.TreeViewColumn(field,
Gtk.CellRendererText(), text=counter))
if field == 'jid':
self.jid_column = counter
counter += 1
self.result_treeview.set_model(model)
sw.show_all()
self.search_vbox.pack_start(sw, True, True, 0)
if self.jid_column > -1:
self.add_contact_button.show()
self.information_button.show()
return
self.dataform = dataforms.ExtendForm(node=obj.data)
if len(self.dataform.items) == 0:
# No result
self.label.set_text(_('No result'))
self.label.show()
return
self.data_form_widget.set_sensitive(True)
try:
self.data_form_widget.data_form = self.dataform
except dataforms.Error:
self.label.set_text(_('Error in received dataform'))
self.label.show()
return
self.result_treeview = self.data_form_widget.records_treeview
selection = self.result_treeview.get_selection()
selection.set_mode(Gtk.SelectionMode.SINGLE)
self.result_treeview.connect('cursor-changed',
self.on_result_treeview_cursor_changed)
counter = 0
for field in self.dataform.reported.iter_fields():
if field.var == 'jid':
self.jid_column = counter
break
counter += 1
self.search_vbox.pack_start(self.data_form_widget, True, True, 0)
self.data_form_widget.show()
if self.jid_column > -1:
self.add_contact_button.show()
self.information_button.show()
if self.data_form_widget.title:
self.window.set_title('%s - Search - Gajim' % \
self.data_form_widget.title)
| lovetox/gajim | src/search_window.py | Python | gpl-3.0 | 9,270 | 0.003344 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2015 mjirik <mjirik@mjirik-Latitude-E6520>
#
# Distributed under terms of the MIT license.
"""
"""
import numpy as np
from loguru import logger
import logging
import argparse
from scipy import ndimage
from . import qmisc
class ShapeModel():
"""
Cílem je dát dohromady vstupní data s různou velikostí a různou polohou
objektu. Výstup je pak zapotřebí opět přizpůsobit libovolné velikosti a
poloze objektu v obraze.
Model je tvořen polem s velikostí definovanou v konstruktoru (self.shape).
U modelu je potřeba brát v potaz polohu objektu. Ta je udávána pomocí
crinfo. To je skupina polí s minimální a maximální hodnotou pro každou osu.
Trénování je prováděno opakovaným voláním funkce train_one().
:param model_margin: stanovuje velikost okraje v modelu. Objekt bude ve
výchozím nastavení vzdálen 0 px od každého okraje.
"""
def __init__(self, shape=[5, 5, 5]):
"""TODO: to be defined1. """
self.model = np.ones(shape)
self.data_number = 0
self.model_margin = [0, 0, 0]
pass
def get_model(self, crinfo, image_shape):
"""
:param image_shape: Size of output image
:param crinfo: Array with min and max index of object for each axis.
[[minx, maxx], [miny, maxy], [minz, maxz]]
"""
# Averaging
mdl = self.model / self.data_number
print(mdl.shape)
print(crinfo)
# mdl_res = imma.image.resize_to_shape(mdl, crinfo[0][]
uncr = qmisc.uncrop(mdl, crinfo, image_shape, resize=True)
return uncr
def train_one(self, data, voxelSize_mm):
"""
Training of the shape model.
The data are taken and cropped (liver only). Binary opening is applied to
the cropped data - faster than morphsnakes. The result is a smoothed part
that is subtracted from the original image, which leaves the spikes.
The cropped image is then partitioned according to the model size (shape).
If the number of voxels in a given block exceeds a given threshold, the
model is assigned a value. The thresholds are:
0%-50% => 1
50%-75% => 2
75%-100% => 3
"""
crinfo = qmisc.crinfo_from_specific_data(data, margin=self.model_margin)
datacr = qmisc.crop(data, crinfo=crinfo)
dataShape = self.model.shape
datacrres = self.trainThresholdMap(datacr, voxelSize_mm, dataShape)
self.model += datacrres
self.data_number += 1
# Great training code will go here
def train(self, data_arr):
for data in data_arr:
self.train_one(data)
def objectThreshold(self, objekt, thresholds, values):
'''
objekt - 3D True/False array
thresholds = [0, 0.5, 0.75], starts with zero
values = [3, 2, 1]
Returns the value from values that corresponds to the thresholds, based on
the fraction of True voxels contained in the 3D array; here for example
60% => 2, 80% => 1.
'''
bile = np.sum(objekt)
velikost = objekt.shape
velikostCelkem = 1.0
for x in velikost:
velikostCelkem = velikostCelkem*x
podil = bile/velikostCelkem  # fraction of True voxels
#print podil
# choose the threshold
final = 0  # value to return
pomocny = 0  # helper variable
for threshold in thresholds:
if(podil >= threshold ):
final = values[pomocny]
pomocny = pomocny+1
return final
def rozdelData(self, crData, dataShape, nasobitel1=1, nasobitel2=2):
'''
crData - input data
dataShape - size of the returned array
Choose 0 < nasobitel1 < nasobitel2; explanation follows:
splits the crData array into blocks and returns an array of shape dataShape.
The resulting array values are determined by objectThreshold(object,thresholds,values).
The intervals for assigning the values [1-3] are:
[0, mean*nasobitel1], [mean*nasobitel1, mean*nasobitel2], [mean*nasobitel2 and above]
'''
# compute the average fraction of white voxels
bile = np.sum(crData)
velikost = crData.shape
velikostCelkem = 1.0
for x in velikost:
velikostCelkem = velikostCelkem*x
podil = bile/velikostCelkem  # average fraction of True voxels
thresholds = [0,nasobitel1*podil,nasobitel2*podil]
values = [3,2,1]
# select the voxels and build the objects
velikostDat = crData.shape
voxelySmer = [0,0,0]
vysledek = np.zeros(dataShape)
for poradi in range(3):
voxelySmer[poradi] = velikostDat[poradi]/dataShape[poradi]
for x in range(dataShape[0]):
for y in range(dataShape[1]):
for z in range(dataShape[2]):
xStart = x * voxelySmer[0]
xKonec = xStart + voxelySmer[0]
yStart = y * voxelySmer[1]
yKonec = yStart + voxelySmer[1]
zStart = z * voxelySmer[2]
zKonec = zStart + voxelySmer[2]
objekt = crData[
int(xStart):int(xKonec),
int(yStart):int(yKonec),
int(zStart):int(zKonec)
]
vysledek[x,y,z] = self.objectThreshold(objekt,thresholds,values)
return vysledek
def vytvorKouli3D(self, voxelSize_mm, polomer_mm):
'''voxelSize_mm = [x, y, z], polomer_mm = r
Creates a ball in 3D space by successively building circles along the X
(first) axis. The function assumes that the Y and Z axes have the same
resolution; it uses the Pythagorean theorem.'''
print('zahajeno vytvareni 3D objektu')
x = voxelSize_mm[0]
y = voxelSize_mm[1]
z = voxelSize_mm[2]
xVoxely = int(np.ceil(polomer_mm/x))
yVoxely = int(np.ceil(polomer_mm/y))
zVoxely = int( np.ceil(polomer_mm/z))
rozmery = [xVoxely*2+1,yVoxely*2+1,yVoxely*2+1]
xStred = xVoxely
konec = yVoxely*2+1
koule = np.zeros(rozmery)  # array where the result will be stored
for xR in range(xVoxely*2+1):
if(xR == xStred):
print('3D objekt z 50% vytvoren')
c = polomer_mm  # longest side
a = (xStred-xR )*x
vnitrek = (c**2-a**2)
b = 0.0
if(vnitrek > 0):
b = np.sqrt((c**2-a**2))  # Pythagorean theorem; b is in mm
rKruznice = float(b)/float(y)
if np.isnan(rKruznice):
continue
#print rKruznice  # NaN handling
kruznice = self.vytvoritTFKruznici(yVoxely,rKruznice)
koule[xR,0:konec,0:konec] = kruznice[0:konec,0:konec]
print('3D objekt uspesne vytvoren')
return koule
def vytvoritTFKruznici(self, polomerPole, polomerKruznice):
'''Creates a 2D array of size 2*polomerPole+1 with a circle of radius
polomerKruznice in the middle.'''
radius = polomerPole
r2 = np.arange(-radius, radius+1)**2
dist2 = r2[:, None] + r2
vratit = (dist2 <= polomerKruznice**2).astype(int)
return vratit
def trainThresholdMap(self,data3d,voxelSize,dataShape):
structure = self.vytvorKouli3D(voxelSize, 5)
smoothed = ndimage.binary_opening(data3d, structure, 3)
spicky = smoothed != data3d
vysledek = self.rozdelData(spicky,dataShape)
return vysledek
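# A minimal usage sketch (hypothetical variable names; the binary masks and their
# voxel sizes in mm come from the caller):
#
#     sm = ShapeModel(shape=[5, 5, 5])
#     for mask, voxelsize_mm in training_samples:
#         sm.train_one(mask, voxelsize_mm)
#     averaged_map = sm.get_model(crinfo, output_shape)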
def main():
# use the stdlib logging module here; the handler/level calls below require it
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
logger.addHandler(ch)
# create file handler which logs even debug messages
# fh = logging.FileHandler('log.txt')
# fh.setLevel(logging.DEBUG)
# formatter = logging.Formatter(
# '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# fh.setFormatter(formatter)
# logger.addHandler(fh)
# logger.debug('start')
# input parser
parser = argparse.ArgumentParser(
description=__doc__
)
parser.add_argument(
'-i', '--inputfile',
default=None,
required=True,
help='input file'
)
parser.add_argument(
'-d', '--debug', action='store_true',
help='Debug mode')
args = parser.parse_args()
if args.debug:
ch.setLevel(logging.DEBUG)
if __name__ == "__main__":
main()
| mjirik/lisa | lisa/shape_model.py | Python | bsd-3-clause | 8,801 | 0.011083 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-06-18 14:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('testapp', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='cat',
name='sex',
field=models.CharField(choices=[('F', 'Female'), ('M', 'Male')], max_length=1),
),
]
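# For reference, the field produced by this migration would correspond roughly to
# the following declaration on the Cat model (the models.py file itself is not part
# of this dump, so this is an assumed sketch):
#
#     class Cat(models.Model):
#         sex = models.CharField(max_length=1, choices=[('F', 'Female'), ('M', 'Male')])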
| Kemaweyan/django-content-gallery | content_gallery_testapp/testapp/migrations/0002_auto_20170618_1457.py | Python | bsd-3-clause | 475 | 0.002105 |
"""Common operations on DOS pathnames."""
import os
import stat
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","islink","exists","isdir","isfile","ismount",
"walk","expanduser","expandvars","normpath","abspath","realpath"]
def normcase(s):
"""Normalize the case of a pathname.
On MS-DOS it maps the pathname to lowercase, turns slashes into
backslashes.
Other normalizations (such as optimizing '../' away) are not allowed
(this is done by normpath).
Previously, this version mapped invalid consecutive characters to a
single '_', but this has been removed. This functionality should
possibly be added as a new function."""
return s.replace("/", "\\").lower()
def isabs(s):
"""Return whether a path is absolute.
Trivial in Posix, harder on the Mac or MS-DOS.
For DOS it is absolute if it starts with a slash or backslash (current
volume), or if a pathname after the volume letter and colon starts with
a slash or backslash."""
s = splitdrive(s)[1]
return s != '' and s[:1] in '/\\'
def join(a, *p):
"""Join two (or more) paths."""
path = a
for b in p:
if isabs(b):
path = b
elif path == '' or path[-1:] in '/\\:':
path = path + b
else:
path = path + "\\" + b
return path
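# Illustrative behaviour of join:
#   join("a", "b")     -> "a\\b"      a backslash is inserted between components
#   join("a", "/b")    -> "/b"        an absolute component discards what came before
#   join("c:", "foo")  -> "c:foo"     nothing is inserted after a drive letter or slash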
def splitdrive(p):
"""Split a path into a drive specification (a drive letter followed
by a colon) and path specification.
It is always true that drivespec + pathspec == p."""
if p[1:2] == ':':
return p[0:2], p[2:]
return '', p
def split(p):
"""Split a path into head (everything up to the last '/') and tail
(the rest). After the trailing '/' is stripped, the invariant
join(head, tail) == p holds.
The resulting head won't end in '/' unless it is the root."""
d, p = splitdrive(p)
# set i to index beyond p's last slash
i = len(p)
while i and p[i-1] not in '/\\':
i = i - 1
head, tail = p[:i], p[i:] # now tail has no slashes
# remove trailing slashes from head, unless it's all slashes
head2 = head
while head2 and head2[-1] in '/\\':
head2 = head2[:-1]
head = head2 or head
return d + head, tail
def splitext(p):
"""Split a path into root and extension.
The extension is everything starting at the first dot in the last
pathname component; the root is everything before that.
It is always true that root + ext == p."""
root, ext = '', ''
for c in p:
if c in '/\\':
root, ext = root + ext + c, ''
elif c == '.' or ext:
ext = ext + c
else:
root = root + c
return root, ext
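# Illustrative behaviour of splitext: the split happens at the *first* dot of the
# last component, so a multi-part extension stays together:
#   splitext("dir\\file.tar.gz") -> ("dir\\file", ".tar.gz")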
def basename(p):
"""Return the tail (basename) part of a path."""
return split(p)[1]
def dirname(p):
"""Return the head (dirname) part of a path."""
return split(p)[0]
def commonprefix(m):
"""Return the longest prefix of all list elements."""
if not m: return ''
prefix = m[0]
for item in m:
for i in range(len(prefix)):
if prefix[:i+1] != item[:i+1]:
prefix = prefix[:i]
if i == 0: return ''
break
return prefix
# Get size, mtime, atime of files.
def getsize(filename):
"""Return the size of a file, reported by os.stat()."""
st = os.stat(filename)
return st[stat.ST_SIZE]
def getmtime(filename):
"""Return the last modification time of a file, reported by os.stat()."""
st = os.stat(filename)
return st[stat.ST_MTIME]
def getatime(filename):
"""Return the last access time of a file, reported by os.stat()."""
st = os.stat(filename)
return st[stat.ST_ATIME]
def islink(path):
"""Is a path a symbolic link?
This will always return false on systems where posix.lstat doesn't exist."""
return 0
def exists(path):
"""Does a path exist?
This is false for dangling symbolic links."""
try:
st = os.stat(path)
except os.error:
return 0
return 1
def isdir(path):
"""Is a path a dos directory?"""
try:
st = os.stat(path)
except os.error:
return 0
return stat.S_ISDIR(st[stat.ST_MODE])
def isfile(path):
"""Is a path a regular file?"""
try:
st = os.stat(path)
except os.error:
return 0
return stat.S_ISREG(st[stat.ST_MODE])
def ismount(path):
"""Is a path a mount point?"""
# XXX This degenerates in: 'is this the root?' on DOS
return isabs(splitdrive(path)[1])
def walk(top, func, arg):
"""Directory tree walk with callback function.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
dirname is the name of the directory, and fnames a list of the names of
the files and subdirectories in dirname (excluding '.' and '..'). func
may modify the fnames list in-place (e.g. via del or slice assignment),
and walk will only recurse into the subdirectories whose names remain in
fnames; this can be used to implement a filter, or to impose a specific
order of visiting. No semantics are defined for, or required of, arg,
beyond that arg is always passed to func. It can be used, e.g., to pass
a filename pattern, or a mutable object designed to accumulate
statistics. Passing None for arg is common."""
try:
names = os.listdir(top)
except os.error:
return
func(arg, top, names)
exceptions = ('.', '..')
for name in names:
if name not in exceptions:
name = join(top, name)
if isdir(name):
walk(name, func, arg)
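# Minimal usage sketch for walk (the directory name below is hypothetical); the
# visitor is called once per directory with (arg, dirname, names):
#
#   def visit(arg, dirname, names):
#       print dirname, names      # Python 2 print, matching this module's era
#
#   walk("C:\\PROJECT", visit, None)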
def expanduser(path):
"""Expand paths beginning with '~' or '~user'.
'~' means $HOME; '~user' means that user's home directory.
If the path doesn't begin with '~', or if the user or $HOME is unknown,
the path is returned unchanged (leaving error reporting to whatever
function is called with the expanded path as argument).
See also module 'glob' for expansion of *, ? and [...] in pathnames.
(A function should also be defined to do full *sh-style environment
variable expansion.)"""
if path[:1] != '~':
return path
i, n = 1, len(path)
while i < n and path[i] not in '/\\':
i = i+1
if i == 1:
if not os.environ.has_key('HOME'):
return path
userhome = os.environ['HOME']
else:
return path
return userhome + path[i:]
def expandvars(path):
"""Expand paths containing shell variable substitutions.
The following rules apply:
- no expansion within single quotes
- no escape character, except for '$$' which is translated into '$'
- ${varname} is accepted.
- varnames can be made out of letters, digits and the character '_'"""
# XXX With COMMAND.COM you can use any characters in a variable name,
# XXX except '^|<>='.
if '$' not in path:
return path
import string
varchars = string.ascii_letters + string.digits + "_-"
res = ''
index = 0
pathlen = len(path)
while index < pathlen:
c = path[index]
if c == '\'': # no expansion within single quotes
path = path[index + 1:]
pathlen = len(path)
try:
index = path.index('\'')
res = res + '\'' + path[:index + 1]
except ValueError:
res = res + path
index = pathlen -1
elif c == '$': # variable or '$$'
if path[index + 1:index + 2] == '$':
res = res + c
index = index + 1
elif path[index + 1:index + 2] == '{':
path = path[index+2:]
pathlen = len(path)
try:
index = path.index('}')
var = path[:index]
if os.environ.has_key(var):
res = res + os.environ[var]
except ValueError:
res = res + path
index = pathlen - 1
else:
var = ''
index = index + 1
c = path[index:index + 1]
while c != '' and c in varchars:
var = var + c
index = index + 1
c = path[index:index + 1]
if os.environ.has_key(var):
res = res + os.environ[var]
if c != '':
res = res + c
else:
res = res + c
index = index + 1
return res
def normpath(path):
"""Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B.
Also, components of the path are silently truncated to 8+3 notation."""
path = path.replace("/", "\\")
prefix, path = splitdrive(path)
while path[:1] == "\\":
prefix = prefix + "\\"
path = path[1:]
comps = path.split("\\")
i = 0
while i < len(comps):
if comps[i] == '.':
del comps[i]
elif comps[i] == '..' and i > 0 and \
comps[i-1] not in ('', '..'):
del comps[i-1:i+1]
i = i - 1
elif comps[i] == '' and i > 0 and comps[i-1] != '':
del comps[i]
elif '.' in comps[i]:
comp = comps[i].split('.')
comps[i] = comp[0][:8] + '.' + comp[1][:3]
i = i + 1
elif len(comps[i]) > 8:
comps[i] = comps[i][:8]
i = i + 1
else:
i = i + 1
# If the path is now empty, substitute '.'
if not prefix and not comps:
comps.append('.')
return prefix + "\\".join(comps)
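# Illustrative behaviour of normpath:
#   normpath("A//B/./C/../D")          -> "A\\B\\D"       redundant separators collapse
#                                                          and '.'/'..' are resolved
#   normpath("longfilename.extension") -> "longfile.ext"  components are truncated
#                                                          to DOS 8+3 form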
def abspath(path):
"""Return an absolute path."""
if not isabs(path):
path = join(os.getcwd(), path)
return normpath(path)
# realpath is a no-op on systems without islink support
realpath = abspath
|
DarioGT/OMS-PluginXML
|
org.modelsphere.sms/lib/jython-2.2.1/Lib/dospath.py
|
Python
|
gpl-3.0
| 10,452
| 0.003062
|
#!/usr/bin/env python
'''
Command to send dynamic filesystem information to Zagg
'''
# vim: expandtab:tabstop=4:shiftwidth=4
#
# Copyright 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#This is not a module, but pylint thinks it is. This is a command.
#pylint: disable=invalid-name,import-error
import argparse
import re
from openshift_tools.monitoring.metric_sender import MetricSender
from openshift_tools.monitoring import pminfo
def parse_args():
""" parse the args from the cli """
parser = argparse.ArgumentParser(description='Disk metric sender')
parser.add_argument('-v', '--verbose', action='store_true', default=None, help='Verbose?')
parser.add_argument('--debug', action='store_true', default=None, help='Debug?')
parser.add_argument('--filter-pod-pv', action='store_true', default=None,
help="Filter out OpenShift Pod PV mounts")
parser.add_argument('--force-send-zeros', action='store_true', default=None,
help="Send 0% full for mounts, useful for clearing existing bad alerts")
return parser.parse_args()
def filter_out_key_name_chars(metric_dict, filesystem_filter):
""" Simple filter to elimate unnecessary characters in the key name """
filtered_dict = {k.replace(filesystem_filter, ''):v
for (k, v) in metric_dict.iteritems()
}
return filtered_dict
def filter_out_container_root(metric_dict):
""" Simple filter to remove the container root FS info """
container_root_regex = r'^/dev/mapper/docker-\d+:\d+-\d+-[0-9a-f]+$'
filtered_dict = {k: v
for (k, v) in metric_dict.iteritems()
if not re.match(container_root_regex, k)
}
return filtered_dict
def filter_out_customer_pv_filesystems(metric_dict):
""" Remove customer PVs from list """
r = re.compile("^/dev/(?:xvd[a-z]{2}|nvme(?:[2-9].*|\d{2,}.*))$")
# filter out xvda{2} (???) and nvme devices past 2
return {
k:v for (k, v) in metric_dict.iteritems() if not r.match(k)
}
def zero_mount_percentages(metric_dict):
""" Make all mounts report 0% used """
return {
k:0 for (k, v) in metric_dict.iteritems()
}
def main():
""" Main function to run the check """
args = parse_args()
metric_sender = MetricSender(verbose=args.verbose, debug=args.debug)
filesys_full_metric = ['filesys.full']
filesys_inode_derived_metrics = {'filesys.inodes.pused' :
'filesys.usedfiles / (filesys.usedfiles + filesys.freefiles) * 100'
}
discovery_key_fs = 'disc.filesys'
item_prototype_macro_fs = '#OSO_FILESYS'
item_prototype_key_full = 'disc.filesys.full'
item_prototype_key_inode = 'disc.filesys.inodes.pused'
# Get the disk space
filesys_full_metrics = pminfo.get_metrics(filesys_full_metric)
filtered_filesys_metrics = filter_out_key_name_chars(filesys_full_metrics, 'filesys.full.')
filtered_filesys_metrics = filter_out_container_root(filtered_filesys_metrics)
if args.filter_pod_pv:
filtered_filesys_metrics = filter_out_customer_pv_filesystems(filtered_filesys_metrics)
if args.force_send_zeros:
filtered_filesys_metrics = zero_mount_percentages(filtered_filesys_metrics)
metric_sender.add_dynamic_metric(discovery_key_fs, item_prototype_macro_fs, filtered_filesys_metrics.keys())
for filesys_name, filesys_full in filtered_filesys_metrics.iteritems():
metric_sender.add_metric({'%s[%s]' % (item_prototype_key_full, filesys_name): filesys_full})
# Get filesytem inode metrics
filesys_inode_metrics = pminfo.get_metrics(derived_metrics=filesys_inode_derived_metrics)
filtered_filesys_inode_metrics = filter_out_key_name_chars(filesys_inode_metrics, 'filesys.inodes.pused.')
filtered_filesys_inode_metrics = filter_out_container_root(filtered_filesys_inode_metrics)
if args.filter_pod_pv:
filtered_filesys_inode_metrics = filter_out_customer_pv_filesystems(filtered_filesys_inode_metrics)
if args.force_send_zeros:
filtered_filesys_inode_metrics = zero_mount_percentages(filtered_filesys_inode_metrics)
for filesys_name, filesys_inodes in filtered_filesys_inode_metrics.iteritems():
metric_sender.add_metric({'%s[%s]' % (item_prototype_key_inode, filesys_name): filesys_inodes})
metric_sender.send_metrics()
if __name__ == '__main__':
main()
|
blrm/openshift-tools
|
scripts/monitoring/cron-send-filesystem-metrics.py
|
Python
|
apache-2.0
| 5,045
| 0.006938
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Tests for DICOM."""
import os
import numpy as np
import pytest
import tensorflow as tf
import tensorflow_io as tfio
# The DICOM sample files must be downloaded before running the tests
#
# To download the DICOM samples:
# $ bash dicom_samples.sh download
# $ bash dicom_samples.sh extract
#
# To remove the DICOM samples:
# $ bash dicom_samples.sh clean_dcm
#
# To remove all the downloaded files:
# $ bash dicom_samples.sh clean_all
def test_dicom_input():
"""test_dicom_input"""
_ = tfio.image.decode_dicom_data
_ = tfio.image.decode_dicom_image
_ = tfio.image.dicom_tags
@pytest.mark.parametrize(
"fname, exp_shape",
[
("OT-MONO2-8-colon.dcm", (1, 512, 512, 1)),
("CR-MONO1-10-chest.dcm", (1, 440, 440, 1)),
("CT-MONO2-16-ort.dcm", (1, 512, 512, 1)),
("MR-MONO2-16-head.dcm", (1, 256, 256, 1)),
("US-RGB-8-epicard.dcm", (1, 480, 640, 3)),
("CT-MONO2-8-abdo.dcm", (1, 512, 512, 1)),
("MR-MONO2-16-knee.dcm", (1, 256, 256, 1)),
("OT-MONO2-8-hip.dcm", (1, 512, 512, 1)),
("US-RGB-8-esopecho.dcm", (1, 120, 256, 3)),
("CT-MONO2-16-ankle.dcm", (1, 512, 512, 1)),
("MR-MONO2-12-an2.dcm", (1, 256, 256, 1)),
("MR-MONO2-8-16x-heart.dcm", (16, 256, 256, 1)),
("OT-PAL-8-face.dcm", (1, 480, 640, 3)),
("XA-MONO2-8-12x-catheter.dcm", (12, 512, 512, 1)),
("CT-MONO2-16-brain.dcm", (1, 512, 512, 1)),
("NM-MONO2-16-13x-heart.dcm", (13, 64, 64, 1)),
("US-MONO2-8-8x-execho.dcm", (8, 120, 128, 1)),
("CT-MONO2-16-chest.dcm", (1, 400, 512, 1)),
("MR-MONO2-12-shoulder.dcm", (1, 1024, 1024, 1)),
("OT-MONO2-8-a7.dcm", (1, 512, 512, 1)),
("US-PAL-8-10x-echo.dcm", (10, 430, 600, 3)),
("TOSHIBA_J2K_OpenJPEGv2Regression.dcm", (1, 512, 512, 1)),
],
)
def test_decode_dicom_image(fname, exp_shape):
"""test_decode_dicom_image"""
dcm_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "test_dicom", fname
)
file_contents = tf.io.read_file(filename=dcm_path)
dcm_image = tfio.image.decode_dicom_image(
contents=file_contents,
dtype=tf.float32,
on_error="strict",
scale="auto",
color_dim=True,
)
assert dcm_image.numpy().shape == exp_shape
@pytest.mark.parametrize(
"fname, tag, exp_value",
[
(
"OT-MONO2-8-colon.dcm",
tfio.image.dicom_tags.StudyInstanceUID,
b"1.3.46.670589.17.1.7.1.1.16",
),
("OT-MONO2-8-colon.dcm", tfio.image.dicom_tags.Rows, b"512"),
("OT-MONO2-8-colon.dcm", tfio.image.dicom_tags.Columns, b"512"),
("OT-MONO2-8-colon.dcm", tfio.image.dicom_tags.SamplesperPixel, b"1"),
(
"US-PAL-8-10x-echo.dcm",
tfio.image.dicom_tags.StudyInstanceUID,
b"999.999.3859744",
),
(
"US-PAL-8-10x-echo.dcm",
tfio.image.dicom_tags.SeriesInstanceUID,
b"999.999.94827453",
),
("US-PAL-8-10x-echo.dcm", tfio.image.dicom_tags.NumberofFrames, b"10"),
("US-PAL-8-10x-echo.dcm", tfio.image.dicom_tags.Rows, b"430"),
("US-PAL-8-10x-echo.dcm", tfio.image.dicom_tags.Columns, b"600"),
],
)
def test_decode_dicom_data(fname, tag, exp_value):
"""test_decode_dicom_data"""
dcm_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "test_dicom", fname
)
file_contents = tf.io.read_file(filename=dcm_path)
dcm_data = tfio.image.decode_dicom_data(contents=file_contents, tags=tag)
assert dcm_data.numpy() == exp_value
def test_dicom_image_shape():
"""test_decode_dicom_image"""
dcm_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"test_dicom",
"US-PAL-8-10x-echo.dcm",
)
dataset = tf.data.Dataset.from_tensor_slices([dcm_path])
dataset = dataset.map(tf.io.read_file)
dataset = dataset.map(lambda e: tfio.image.decode_dicom_image(e, dtype=tf.uint16))
dataset = dataset.map(lambda e: tf.image.resize(e, (224, 224)))
def test_dicom_image_concurrency():
"""test_decode_dicom_image_currency"""
@tf.function
def preprocess(dcm_content):
tags = tfio.image.decode_dicom_data(
dcm_content, tags=[tfio.image.dicom_tags.PatientsName]
)
tf.print(tags)
image = tfio.image.decode_dicom_image(dcm_content, dtype=tf.float32)
return image
dcm_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"test_dicom",
"TOSHIBA_J2K_OpenJPEGv2Regression.dcm",
)
dataset = (
tf.data.Dataset.from_tensor_slices([dcm_path])
.repeat()
.map(tf.io.read_file)
.map(preprocess, num_parallel_calls=8)
.take(200)
)
for i, item in enumerate(dataset):
print(tf.shape(item), i)
assert np.array_equal(tf.shape(item), [1, 512, 512, 1])
dcm_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"test_dicom",
"US-PAL-8-10x-echo.dcm",
)
dataset = (
tf.data.Dataset.from_tensor_slices([dcm_path])
.repeat()
.map(tf.io.read_file)
.map(preprocess, num_parallel_calls=8)
.take(200)
)
for i, item in enumerate(dataset):
print(tf.shape(item), i)
assert np.array_equal(tf.shape(item), [10, 430, 600, 3])
def test_dicom_sequence():
"""test_decode_dicom_sequence"""
dcm_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"test_dicom",
"2.25.304589190180579357564631626197663875025.dcm",
)
dcm_content = tf.io.read_file(filename=dcm_path)
tags = tfio.image.decode_dicom_data(
dcm_content, tags=["[0x0008,0x1115][0][0x0008,0x1140][0][0x0008,0x1155]"]
)
assert np.array_equal(tags, [b"2.25.211904290918469145111906856660599393535"])
dcm_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"test_dicom",
"US-PAL-8-10x-echo.dcm",
)
dcm_content = tf.io.read_file(filename=dcm_path)
tags = tfio.image.decode_dicom_data(dcm_content, tags=["[0x0020,0x000E]"])
assert np.array_equal(tags, [b"999.999.94827453"])
tags = tfio.image.decode_dicom_data(dcm_content, tags=["0x0020,0x000e"])
assert np.array_equal(tags, [b"999.999.94827453"])
if __name__ == "__main__":
    tf.test.main()
|
tensorflow/io
|
tests/test_dicom.py
|
Python
|
apache-2.0
| 7,159
| 0.000559
|
# test driver to verify that new version of code works
import opiniongame.config as og_cfg
import opiniongame.IO as og_io
import opiniongame.coupling as og_coupling
import opiniongame.state as og_state
import opiniongame.opinions as og_opinions
import opiniongame.adjacency as og_adj
import opiniongame.selection as og_select
import opiniongame.potentials as og_pot
import opiniongame.core as og_core
import opiniongame.stopping as og_stop
import numpy as np
#
# process command line
#
cmdline = og_cfg.CmdLineArguments()
cmdline.printOut()
#
# load configuration
#
# TODO: add option to generate defaults and save to file
# TODO: interpret args to get filename if specified on cmd line
config = og_cfg.staticParameters()
config.readFromFile('staticParameters.cfg')
config.threshold = 0.01
config.printOut()
#
# seed PRNG: must do this before any random numbers are
# ever sampled during default generation
#
print("SEEDING PRNG: "+str(config.startingseed))
np.random.seed(config.startingseed)
state = og_state.WorldState.fromCmdlineArguments(cmdline, config)
#
# run
#
tau_list = np.arange(0.45, 0.9, 0.01)
alpha_list = np.arange(0.05, 0.25, 0.01)
numalphas = len(alpha_list)
numtaus = len(tau_list)
numvars = 3
resultMatrix = np.zeros((numalphas, numtaus, numvars))
for (i, alpha) in enumerate(alpha_list):
config.learning_rate = alpha
print("")
for (j, tau) in enumerate(tau_list):
print((alpha, tau))
#
# functions for use by the simulation engine
#
ufuncs = og_cfg.UserFunctions(og_select.FastPairSelection,
og_stop.totalChangeStop,
og_pot.createTent(tau))
polarized = 0
notPolarized = 0
aveIters = 0
for k in range(100):
state = og_core.run_until_convergence(config, state, ufuncs)
results = og_opinions.isPolarized(state.history[-1], 0.05)
for result in results:
if result:
polarized += 1
else:
notPolarized += 1
aveIters += state.iterCount
state.reset()
state.initialOpinions = og_opinions.initialize_opinions(config.popSize, config.ntopics)
# maybe you want to do Consensus and nonConsensus. Finding consensus is easier!
# assuming pop_size = 20, ten people at 1, nine people at 0 and and one person
# at 0.5 will be polarization, but, still ...
resultMatrix[i][j][0] = polarized
resultMatrix[i][j][1] = notPolarized
resultMatrix[i][j][2] = aveIters/100.0
rdict = {}
rdict['results'] = resultMatrix
og_io.saveMatrix('output.mat', rdict)
|
mjsottile/PyOpinionGame
|
driver_alpha_tau_study.py
|
Python
|
gpl-3.0
| 2,715
| 0.003315
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
from nose.plugins.attrib import attr
from nose.tools import nottest
import numpy as np
from neon.util.testing import assert_tensor_equal
@attr('cuda')
class TestGPUTensor(object):
def setup(self):
from neon.backends.cc2 import GPUTensor
self.gpt = GPUTensor
def test_empty_creation(self):
tns = self.gpt([])
expected_shape = (0, )
while len(expected_shape) < tns._min_dims:
expected_shape += (1, )
assert tns.shape == expected_shape
def test_1d_creation(self):
tns = self.gpt([1, 2, 3, 4])
expected_shape = (4, )
while len(expected_shape) < tns._min_dims:
expected_shape += (1, )
assert tns.shape == expected_shape
def test_2d_creation(self):
tns = self.gpt([[1, 2], [3, 4]])
expected_shape = (2, 2)
while len(expected_shape) < tns._min_dims:
expected_shape += (1, )
assert tns.shape == expected_shape
def test_2d_ndarray_creation(self):
tns = self.gpt(np.array([[1.5, 2.5], [3.3, 9.2],
[0.111111, 5]]))
assert tns.shape == (3, 2)
@nottest # TODO: add >2 dimension support to cudanet
def test_higher_dim_creation(self):
shapes = ((1, 1, 1), (1, 2, 3, 4), (1, 2, 3, 4, 5, 6, 7))
for shape in shapes:
tns = self.gpt(np.empty(shape))
assert tns.shape == shape
def test_str(self):
tns = self.gpt([[1, 2], [3, 4]])
assert str(tns) == "[[ 1. 2.]\n [ 3. 4.]]"
def test_scalar_slicing(self):
tns = self.gpt([[1, 2], [3, 4]])
res = tns[1, 0]
assert res.shape == (1, 1)
assert_tensor_equal(res, self.gpt([[3]]))
def test_range_slicing(self):
tns = self.gpt([[1, 2], [3, 4]])
res = tns[0:2, 0]
assert res.shape == (2, 1)
assert_tensor_equal(res, self.gpt([1, 3]))
@nottest # TODO: add scalar assignment to self.gpt class
def test_scalar_slice_assignment(self):
tns = self.gpt([[1, 2], [3, 4]])
tns[1, 0] = 9
assert_tensor_equal(tns, self.gpt([[1, 2], [9, 4]]))
def test_asnumpyarray(self):
tns = self.gpt([[1, 2], [3, 4]])
res = tns.asnumpyarray()
assert isinstance(res, np.ndarray)
assert_tensor_equal(res, np.array([[1, 2], [3, 4]]))
@nottest # TODO: fix this for self.gpt
def test_transpose(self):
tns = self.gpt([[1, 2], [3, 4]])
res = tns.transpose()
assert_tensor_equal(res, self.gpt([[1, 3], [2, 4]]))
def test_fill(self):
tns = self.gpt([[1, 2], [3, 4]])
tns.fill(-9.5)
assert_tensor_equal(tns, self.gpt([[-9.5, -9.5], [-9.5, -9.5]]))
|
kfoss/neon
|
neon/backends/tests/test_cc2_tensor.py
|
Python
|
apache-2.0
| 3,502
| 0
|
import lxml.html
import requests
def requests_session():
"""
Get a suitable requests session for use in SmartBot.
In particular, this sets the `User-Agent` header to the value of
'SmartBot'.
"""
session = requests.Session()
session.headers.update({"User-Agent": "SmartBot"})
return session
def _check_content_type(response, content_type="text/html"):
return response.headers.get("Content-Type", "").startswith(content_type)
def get_title(url):
"""Get the title of a website."""
try:
page = requests_session().get(url, timeout=5, stream=True)
if page.status_code == 200 and _check_content_type(page):
try:
tree = lxml.html.fromstring(page.text)
except ValueError: # lxml seems to have issues with unicode
tree = lxml.html.fromstring(page.content)
title = tree.cssselect("title")[0].text_content()
return title.strip().replace("\n", "").replace("\r", "")
except requests.exceptions.Timeout:
return "Timeout!"
except IndexError: # no title element
return "No title."
def sprunge(data):
"""Upload the data to `sprunge.us` (a popular plain-text paste bin)."""
payload = {"sprunge": data}
page = requests_session().post("http://sprunge.us", data=payload)
return page.text
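# Minimal usage sketch; both calls hit the network, so the results depend on the
# remote sites being reachable (the expected title held at the time of writing):
#
#   get_title("http://example.com")   # -> "Example Domain"
#   sprunge("hello world")            # -> URL of the created sprunge.us paste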
|
thomasleese/smartbot-old
|
smartbot/utils/web.py
|
Python
|
mit
| 1,353
| 0
|
#-*- coding: utf-8 -*-
#+---------------------------------------------------------------------------+
#| 01001110 01100101 01110100 01111010 01101111 01100010 |
#| |
#| Netzob : Inferring communication protocols |
#+---------------------------------------------------------------------------+
#| Copyright (C) 2011-2017 Georges Bossert and Frédéric Guihéry |
#| This program is free software: you can redistribute it and/or modify |
#| it under the terms of the GNU General Public License as published by |
#| the Free Software Foundation, either version 3 of the License, or |
#| (at your option) any later version. |
#| |
#| This program is distributed in the hope that it will be useful, |
#| but WITHOUT ANY WARRANTY; without even the implied warranty of |
#| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
#| GNU General Public License for more details. |
#| |
#| You should have received a copy of the GNU General Public License |
#| along with this program. If not, see <http://www.gnu.org/licenses/>. |
#+---------------------------------------------------------------------------+
#| @url : http://www.netzob.org |
#| @contact : contact@netzob.org |
#| @sponsors : Amossys, http://www.amossys.fr |
#| Supélec, http://www.rennes.supelec.fr/ren/rd/cidre/ |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| File contributors : |
#| - Georges Bossert <georges.bossert (a) supelec.fr> |
#| - Frédéric Guihéry <frederic.guihery (a) amossys.fr> |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| Standard library imports |
#+---------------------------------------------------------------------------+
import uuid
#+---------------------------------------------------------------------------+
#| Related third party imports |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| Local application imports |
#+---------------------------------------------------------------------------+
from netzob.Common.Utils.Decorators import NetzobLogger
from netzob.Common.Utils.Decorators import typeCheck
from netzob.Model.Vocabulary.Domain.Variables.AbstractVariable import AbstractVariable
from netzob.Model.Vocabulary.Domain.Parser.VariableParserResult import VariableParserResult
@NetzobLogger
class VariableParserPath(object):
"""This class denotes one parsing result of a variable against a specified content
"""
def __init__(self,
variableParser,
consumedData,
remainingData,
originalVariableParserPath=None):
self.name = str(uuid.uuid4())
self.consumedData = consumedData
self.remainingData = remainingData
self.variableParser = variableParser
self.memory = self.variableParser.memory.duplicate()
self.originalVariableParserPath = originalVariableParserPath
self.variableParserResults = []
if originalVariableParserPath is not None:
self.variableParserResults.extend(
originalVariableParserPath.variableParserResults)
def getValueToParse(self, variable):
"""Returns the value that is assigned to the specified variable"""
def createVariableParserResult(self, variable, parserResult, consumedData,
remainedData):
variableParserResult = VariableParserResult(variable, parserResult,
consumedData, remainedData)
if parserResult:
self._logger.debug("New parser result attached to path {0}: {1}".
format(self, variableParserResult))
self.remainingData = variableParserResult.remainedData
if self.consumedData is None:
self._logger.debug("consumed is none...")
self.consumedData = variableParserResult.consumedData
else:
self.consumedData.extend(variableParserResult.consumedData)
else:
self._logger.debug("creation of an invalid parser result.")
self.variableParserResults.append(variableParserResult)
self._logger.debug(
"After registering new VariablePathResult, Path is {0}".format(
self))
def __str__(self):
return "Path {0} (consumedData={1}, remainingData={2}".format(
self.name, self.consumedData, self.remainingData)
@property
def consumedData(self):
return self.__consumedData
@consumedData.setter
def consumedData(self, consumedData):
self.__consumedData = consumedData
@property
def memory(self):
return self.__memory
@memory.setter
def memory(self, memory):
if memory is None:
raise Exception("Memory cannot be None")
self.__memory = memory
|
lootr/netzob
|
netzob/src/netzob/Model/Vocabulary/Domain/Parser/VariableParserPath.py
|
Python
|
gpl-3.0
| 5,916
| 0.007277
|
"""
homeassistant.config
~~~~~~~~~~~~~~~~~~~~
Module to help with parsing and generating configuration files.
"""
import logging
import os
from homeassistant.exceptions import HomeAssistantError
from homeassistant.const import (
CONF_LATITUDE, CONF_LONGITUDE, CONF_TEMPERATURE_UNIT, CONF_NAME,
CONF_TIME_ZONE)
import homeassistant.util.location as loc_util
_LOGGER = logging.getLogger(__name__)
YAML_CONFIG_FILE = 'configuration.yaml'
CONFIG_DIR_NAME = '.homeassistant'
DEFAULT_CONFIG = (
# Tuples (attribute, default, auto detect property, description)
(CONF_NAME, 'Home', None, 'Name of the location where Home Assistant is '
'running'),
(CONF_LATITUDE, None, 'latitude', 'Location required to calculate the time'
' the sun rises and sets'),
(CONF_LONGITUDE, None, 'longitude', None),
    (CONF_TEMPERATURE_UNIT, 'C', None, 'C for Celsius, F for Fahrenheit'),
(CONF_TIME_ZONE, 'UTC', 'time_zone', 'Pick yours from here: http://en.wiki'
'pedia.org/wiki/List_of_tz_database_time_zones'),
)
DEFAULT_COMPONENTS = {
'introduction': 'Show links to resources in log and frontend',
'frontend': 'Enables the frontend',
'discovery': 'Discover some devices automatically',
'conversation': 'Allows you to issue voice commands from the frontend',
'history': 'Enables support for tracking state changes over time.',
'logbook': 'View all events in a logbook',
'sun': 'Track the sun',
}
def get_default_config_dir():
""" Put together the default configuration directory based on OS. """
data_dir = os.getenv('APPDATA') if os.name == "nt" \
else os.path.expanduser('~')
return os.path.join(data_dir, CONFIG_DIR_NAME)
def ensure_config_exists(config_dir, detect_location=True):
""" Ensures a config file exists in given config dir.
Creating a default one if needed.
Returns path to the config file. """
config_path = find_config_file(config_dir)
if config_path is None:
print("Unable to find configuration. Creating default one in",
config_dir)
config_path = create_default_config(config_dir, detect_location)
return config_path
def create_default_config(config_dir, detect_location=True):
""" Creates a default configuration file in given config dir.
Returns path to new config file if success, None if failed. """
config_path = os.path.join(config_dir, YAML_CONFIG_FILE)
info = {attr: default for attr, default, *_ in DEFAULT_CONFIG}
location_info = detect_location and loc_util.detect_location_info()
if location_info:
if location_info.use_fahrenheit:
info[CONF_TEMPERATURE_UNIT] = 'F'
for attr, default, prop, _ in DEFAULT_CONFIG:
if prop is None:
continue
info[attr] = getattr(location_info, prop) or default
# Writing files with YAML does not create the most human readable results
# So we're hard coding a YAML template.
try:
with open(config_path, 'w') as config_file:
config_file.write("homeassistant:\n")
for attr, _, _, description in DEFAULT_CONFIG:
if info[attr] is None:
continue
elif description:
config_file.write(" # {}\n".format(description))
config_file.write(" {}: {}\n".format(attr, info[attr]))
config_file.write("\n")
for component, description in DEFAULT_COMPONENTS.items():
config_file.write("# {}\n".format(description))
config_file.write("{}:\n\n".format(component))
return config_path
except IOError:
print('Unable to create default configuration file', config_path)
return None
def find_config_file(config_dir):
""" Looks in given directory for supported config files. """
config_path = os.path.join(config_dir, YAML_CONFIG_FILE)
return config_path if os.path.isfile(config_path) else None
def load_config_file(config_path):
""" Loads given config file. """
return load_yaml_config_file(config_path)
def load_yaml_config_file(config_path):
""" Parse a YAML configuration file. """
import yaml
def parse(fname):
""" Parse a YAML file. """
try:
with open(fname, encoding='utf-8') as conf_file:
# If configuration file is empty YAML returns None
# We convert that to an empty dict
return yaml.load(conf_file) or {}
except yaml.YAMLError:
error = 'Error reading YAML configuration file {}'.format(fname)
_LOGGER.exception(error)
raise HomeAssistantError(error)
def yaml_include(loader, node):
"""
Loads another YAML file and embeds it using the !include tag.
Example:
device_tracker: !include device_tracker.yaml
"""
fname = os.path.join(os.path.dirname(loader.name), node.value)
return parse(fname)
yaml.add_constructor('!include', yaml_include)
conf_dict = parse(config_path)
if not isinstance(conf_dict, dict):
_LOGGER.error(
'The configuration file %s does not contain a dictionary',
os.path.basename(config_path))
raise HomeAssistantError()
return conf_dict
|
alexkolar/home-assistant
|
homeassistant/config.py
|
Python
|
mit
| 5,349
| 0
|
from namespace_class import *
try:
p = Private1()
error = 1
except:
error = 0
if (error):
raise RuntimeError, "Private1 is private"
try:
p = Private2()
error = 1
except:
error = 0
if (error):
raise RuntimeError, "Private2 is private"
EulerT3D.toFrame(1,1,1)
b = BooT_i()
b = BooT_H()
f = FooT_i()
f.quack(1)
f = FooT_d()
f.moo(1)
f = FooT_H()
f.foo(Hi)
|
jrversteegh/softsailor
|
deps/swig-2.0.4/Examples/test-suite/python/namespace_class_runme.py
|
Python
|
gpl-3.0
| 379
| 0.036939
|
"""
Django settings for BenHoboCo project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
GET_SOLO_TEMPLATE_TAG_NAME = 'get_solo'
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'b&r86v3qyzx=d^8p8k4$c!#imhb+jys*$g@yxz8#vt83@r-va_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
# NOTE: Local server has to be in the first position!
ALLOWED_HOSTS = [
'127.0.0.1:8000',
'cs410.cs.ualberta.ca:41011',
]
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'crispy_forms',
'solo',
'core',
'south',
'images',
'posts',
'authors',
'friends',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'BenHoboCo.urls'
WSGI_APPLICATION = 'BenHoboCo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME':'helix',
'USER':'myuser',
'PASSWORD':'mypass',
'HOST':'leago.btrinh.com',
'PORT':'3306',
}
}
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATIC_PATH = os.path.join( BASE_DIR, "static" )
STATICFILES_DIRS = (
STATIC_PATH,
)
# Templates
TEMPLATE_PATH = os.path.join( BASE_DIR, "templates")
TEMPLATE_DIRS = (
TEMPLATE_PATH,
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.request',
'django.contrib.auth.context_processors.auth',
)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join( BASE_DIR, 'media' )
LOGIN_URL = '/login/'
|
tpolasek/cmput410-project
|
BenHoboCo/BenHoboCo/settings.py
|
Python
|
gpl-2.0
| 2,810
| 0.003559
|
# -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rdrf', '0022_merge'),
]
operations = [
migrations.AddField(
model_name='rdrfcontext',
name='context_form_group',
field=models.ForeignKey(blank=True,
to='rdrf.ContextFormGroup',
null=True,
on_delete=models.SET_NULL),
),
]
|
muccg/rdrf
|
rdrf/rdrf/migrations/0023_rdrfcontext_context_form_group.py
|
Python
|
agpl-3.0
| 534
| 0
|
# -*- coding: utf8 -*-
# SDAPS - Scripts for data acquisition with paper based surveys
# Copyright(C) 2008, Christoph Simon <post@christoph-simon.eu>
# Copyright(C) 2008, Benjamin Berg <benjamin@sipsolutions.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import buddy
class Sheet(buddy.Object):
def __init__(self):
self.survey = None
self.data = dict()
self.images = list()
self.survey_id = None
self.questionnaire_id = None
self.global_id = None
self.valid = 1
self.quality = 1
def add_image(self, image):
self.images.append(image)
image.sheet = self
def get_page_image(self, page):
# Simply return the image for the requested page.
# Note: We return the first one we find; this means in the error case
# that a page exists twice, we return the first one.
for image in self.images:
if image.page_number == page and image.survey_id == self.survey.survey_id:
return image
return None
class Image(buddy.Object):
def __init__(self):
self.sheet = None
self.filename = str()
self.tiff_page = 0
self.rotated = 0
self.raw_matrix = None
self.page_number = None
self.survey_id = None
self.global_id = None
self.questionnaire_id = None
|
rodrigosurita/GDAd
|
sdaps/model/sheet.py
|
Python
|
gpl-3.0
| 1,971
| 0.001522
|
# -*- coding: utf-8 -*-
"""
**********************
Minimum Dominating Set
**********************
A dominating set for a graph G = (V, E) is a subset D of V such that every
vertex not in D is joined to at least one member of D by some edge. The
domination number gamma(G) is the number of vertices in a smallest dominating
set for G. Given a graph G = (V, E) find a minimum weight dominating set V'.
http://en.wikipedia.org/wiki/Dominating_set
This is reducible to the minimum set dom_set problem.
"""
# Copyright (C) 2011-2012 by
# Nicholas Mancuso <nick.mancuso@gmail.com>
# All rights reserved.
# BSD license.
import networkx as nx
__all__ = ["min_weighted_dominating_set",
"min_edge_dominating_set"]
__author__ = """Nicholas Mancuso (nick.mancuso@gmail.com)"""
def min_weighted_dominating_set(graph, weight=None):
"""Return minimum weight dominating set.
Parameters
----------
graph : NetworkX graph
Undirected graph
weight : None or string, optional (default = None)
If None, every edge has weight/distance/weight 1. If a string, use this
edge attribute as the edge weight. Any edge attribute not present
defaults to 1.
Returns
-------
min_weight_dominating_set : set
Returns a set of vertices whose weight sum is no more than 1 + log w(V)
References
----------
.. [1] Vazirani, Vijay Approximation Algorithms (2001)
"""
if not graph:
raise ValueError("Expected non-empty NetworkX graph!")
# min cover = min dominating set
dom_set = set([])
cost_func = dict((node, nd.get(weight, 1)) \
for node, nd in graph.nodes_iter(data=True))
vertices = set(graph)
sets = dict((node, set([node]) | set(graph[node])) for node in graph)
def _cost(subset):
""" Our cost effectiveness function for sets given its weight
"""
cost = sum(cost_func[node] for node in subset)
return cost / float(len(subset - dom_set))
while vertices:
        # find the most cost-effective set, and the vertex for that set
        dom_node, min_set = min(sets.items(),
                                key=lambda x: (_cost(x[1]), x[0]))
alpha = _cost(min_set)
# reduce the cost for the rest
for node in min_set - dom_set:
cost_func[node] = alpha
# add the node to the dominating set and reduce what we must cover
dom_set.add(dom_node)
del sets[dom_node]
vertices = vertices - min_set
return dom_set
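# Illustrative usage sketch: in a star graph the centre is adjacent to every other
# vertex, so the greedy cover picks it alone.
#
#   G = nx.star_graph(3)               # edges 0-1, 0-2, 0-3
#   min_weighted_dominating_set(G)     # -> set([0])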
def min_edge_dominating_set(graph):
"""Return minimum weight dominating edge set.
Parameters
----------
graph : NetworkX graph
Undirected graph
Returns
-------
min_edge_dominating_set : set
Returns a set of dominating edges whose size is no more than 2 * OPT.
"""
if not graph:
raise ValueError("Expected non-empty NetworkX graph!")
return nx.maximal_matching(graph)
|
devs1991/test_edx_docmode
|
venv/lib/python2.7/site-packages/networkx/algorithms/approximation/dominating_set.py
|
Python
|
agpl-3.0
| 2,985
| 0.00067
|
#!/usr/bin/env python3
# https://www.hackerrank.com/challenges/class-2-find-the-torsional-angle
import io
import math
import sys
import unittest
class Vector:
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
def subtract(self, other):
x = self.x - other.x
y = self.y - other.y
z = self.z - other.z
return Vector(x, y, z)
def dot_product(self, other):
return self.x * other.x + self.y * other.y + self.z * other.z
def cross_product(self, other):
zero = Vector(0, 0, 0)
x = self.y * other.z - self.z * other.y
y = self.z * other.x - self.x * other.z
z = self.x * other.y - self.y * other.x
return zero.subtract(Vector(x, y, z))
def value(self):
xx = math.pow(self.x, 2)
yy = math.pow(self.y, 2)
zz = math.pow(self.z, 2)
return math.sqrt(xx + yy + zz)
def torsional_angle(a, b, c, d):
ab = a.subtract(b)
bc = b.subtract(c)
cd = c.subtract(d)
x = ab.cross_product(bc)
y = bc.cross_product(cd)
cosine = x.dot_product(y) / (x.value() * y.value())
return math.degrees(math.acos(cosine))
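# Worked example: for the planar points a=(0,0,0), b=(1,0,0), c=(1,1,0), d=(2,1,0)
# the bonds a-b and c-d point to opposite sides of the b-c axis, so the torsional
# (dihedral) angle is 180 degrees:
#
#   torsional_angle(Vector(0, 0, 0), Vector(1, 0, 0),
#                   Vector(1, 1, 0), Vector(2, 1, 0))   # -> 180.0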
def main():
a = Vector(*tuple(map(float, input().strip().split())))
b = Vector(*tuple(map(float, input().strip().split())))
c = Vector(*tuple(map(float, input().strip().split())))
d = Vector(*tuple(map(float, input().strip().split())))
print('%.2f' % torsional_angle(a, b, c, d))
if __name__ == '__main__': # pragma: no cover
main()
class TestCode(unittest.TestCase):
def generalized_test(self, which):
sys.stdin = open(__file__.replace('.py', f'.{which}.in'), 'r')
sys.stdout = io.StringIO()
expected = open(__file__.replace('.py', f'.{which}.out'), 'r')
main()
self.assertEqual(sys.stdout.getvalue(), expected.read())
for handle in [sys.stdin, sys.stdout, expected]:
handle.close()
def test_0(self):
self.generalized_test('0')
|
altermarkive/Coding-Interviews
|
algorithm-design/hackerrank/class_2_find_the_torsional_angle/test_class_2_find_the_torsional_angle.py
|
Python
|
mit
| 2,023
| 0
|
# Author: Marcin Serwach
# https://github.com/iblis-ms/conan_gbenchmark
from conans import ConanFile, CMake, tools
import os
import sys
import shutil
class GbenchmarkConan(ConanFile):
name = 'GBenchmark'
version = '1.3.0'
license = 'MIT Licence'
url = 'https://github.com/iblis-ms/conan_gbenchmark'
description = 'Conan.io support for Google Benchmark'
settings = ['os', 'compiler', 'build_type', 'arch', 'cppstd']
options = {
'BENCHMARK_ENABLE_TESTING': [True, False],
'BENCHMARK_ENABLE_LTO': [True, False]
}
default_options = ('BENCHMARK_ENABLE_TESTING=False',
'BENCHMARK_ENABLE_LTO=False'
)
generators = 'cmake'
source_root = 'benchmark-%s' % version
exports = 'CMakeLists.txt'
buildFolder = '_build'
def source(self):
zipFileName = "v%s.zip" % self.version
tools.download("https://github.com/google/benchmark/archive/%s" % zipFileName, zipFileName)
tools.unzip(zipFileName)
def build(self):
cmake = CMake(self)
for (opt, val) in self.options.items():
if val is not None:
cmake.definitions[opt] = 'ON' if val == "True" else 'OFF'
if self.settings.compiler == 'clang' and str(self.settings.compiler.libcxx) == 'libc++':
cmake.definitions['BENCHMARK_USE_LIBCXX'] = 'YES'
if str(self.settings.compiler) in ['gcc', 'apple-clang', 'clang', 'sun-cc']:
if str(self.settings.arch) in ['x86_64', 'sparcv9']:
cmake.definitions['BENCHMARK_BUILD_32_BITS'] = 'OFF'
elif str(self.settings.arch) in ['x86', 'sparc']:
cmake.definitions['BENCHMARK_BUILD_32_BITS'] = 'YES'
sys.stdout.write("cmake " + str(cmake.command_line) + "\n")
cmake.configure(source_dir=self.build_folder, build_dir=self.buildFolder)
cmake.build()
def package(self):
self.copy(pattern='*.h', dst='include', src='%s/include' % self.source_root, keep_path=True)
self.copy(pattern='*.lib', dst='lib', src=os.path.join(self.buildFolder,'lib'), keep_path=False)
self.copy(pattern='*.a', dst='lib', src=os.path.join(self.buildFolder,'lib'), keep_path=False)
for docPatter in ['*.md', 'LICENSE', 'AUTHORS', 'CONTRIBUTORS']:
self.copy(pattern=docPatter, dst='doc', src=self.source_root, keep_path=False)
def package_info(self):
self.cpp_info.libs = ['benchmark']
if self.settings.os == 'Windows':
self.cpp_info.libs.extend(['Shlwapi'])
|
iblis-ms/conan_gbenchmark
|
conanfile.py
|
Python
|
mit
| 2,604
| 0.006528
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class HackernewsscraperItem(scrapy.Item):
# define the fields for your item here like:
title = scrapy.Field()
link = scrapy.Field()
|
svram/fun-with-advanced-python
|
hackerNewsScraper/hackerNewsScraper/items.py
|
Python
|
gpl-3.0
| 317
| 0.003155
|
from django import forms
from .models import doctor
class ContactForm(forms.Form):
message = forms.CharField()
class SignUpForm(forms.ModelForm):
class Meta:
model = doctor
fields = ['full_name', 'email']
class areaForm(forms.Form):
messag = forms.CharField(required=False)
|
quit9to5/Next
|
patient_management/doctor/forms.py
|
Python
|
gpl-2.0
| 308
| 0.00974
|
import os
from segments import Segment, theme
from utils import colors, glyphs
class CurrentDir(Segment):
bg = colors.background(theme.CURRENTDIR_BG)
fg = colors.foreground(theme.CURRENTDIR_FG)
def init(self, cwd):
home = os.path.expanduser('~')
self.text = cwd.replace(home, '~')
class ReadOnly(Segment):
bg = colors.background(theme.READONLY_BG)
fg = colors.foreground(theme.READONLY_FG)
def init(self, cwd):
self.text = ' ' + glyphs.WRITE_ONLY + ' '
if os.access(cwd, os.W_OK):
self.active = False
class Venv(Segment):
bg = colors.background(theme.VENV_BG)
fg = colors.foreground(theme.VENV_FG)
def init(self):
env = os.getenv('VIRTUAL_ENV')
if env is None:
self.active = False
return
env_name = os.path.basename(env)
self.text = glyphs.VIRTUAL_ENV + ' ' + env_name
|
nimiq/promptastic
|
segments/filesystem.py
|
Python
|
apache-2.0
| 916
| 0.001092
|
import os
import sys
from .interfaces import Interface
from .search import SearchManager
from .cache import CacheManager
from .select import Select
from .help import Inspector
from .users import Users
from .packages import Packages
|
BrainIntensive/OnlineBrainIntensive
|
resources/HCP/pyxnat/pyxnat/core/__init__.py
|
Python
|
mit
| 233
| 0
|
# Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pure-Python RSA cryptography implementation.
Uses the ``rsa``, ``pyasn1`` and ``pyasn1_modules`` packages
to parse PEM files storing PKCS#1 or PKCS#8 keys as well as
certificates. There is no support for p12 files.
"""
from __future__ import absolute_import
from pyasn1.codec.der import decoder
from pyasn1_modules import pem
from pyasn1_modules.rfc2459 import Certificate
from pyasn1_modules.rfc5208 import PrivateKeyInfo
import rsa
import six
from google.auth import _helpers
from google.auth.crypt import base
_POW2 = (128, 64, 32, 16, 8, 4, 2, 1)
_CERTIFICATE_MARKER = b"-----BEGIN CERTIFICATE-----"
_PKCS1_MARKER = ("-----BEGIN RSA PRIVATE KEY-----", "-----END RSA PRIVATE KEY-----")
_PKCS8_MARKER = ("-----BEGIN PRIVATE KEY-----", "-----END PRIVATE KEY-----")
_PKCS8_SPEC = PrivateKeyInfo()
def _bit_list_to_bytes(bit_list):
"""Converts an iterable of 1s and 0s to bytes.
Combines the list 8 at a time, treating each group of 8 bits
as a single byte.
Args:
bit_list (Sequence): Sequence of 1s and 0s.
Returns:
bytes: The decoded bytes.
"""
num_bits = len(bit_list)
byte_vals = bytearray()
for start in six.moves.xrange(0, num_bits, 8):
curr_bits = bit_list[start : start + 8]
char_val = sum(val * digit for val, digit in six.moves.zip(_POW2, curr_bits))
byte_vals.append(char_val)
return bytes(byte_vals)
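# Illustrative sketch: bits are consumed most-significant first, eight at a time.
#
#   _bit_list_to_bytes([1, 0, 0, 0, 0, 0, 0, 0])    # -> b'\x80'
#   _bit_list_to_bytes([0, 0, 0, 0, 0, 0, 0, 1,
#                       1, 1, 1, 1, 1, 1, 1, 1])    # -> b'\x01\xff'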
class RSAVerifier(base.Verifier):
"""Verifies RSA cryptographic signatures using public keys.
Args:
public_key (rsa.key.PublicKey): The public key used to verify
signatures.
"""
def __init__(self, public_key):
self._pubkey = public_key
@_helpers.copy_docstring(base.Verifier)
def verify(self, message, signature):
message = _helpers.to_bytes(message)
try:
return rsa.pkcs1.verify(message, signature, self._pubkey)
except (ValueError, rsa.pkcs1.VerificationError):
return False
@classmethod
def from_string(cls, public_key):
"""Construct an Verifier instance from a public key or public
certificate string.
Args:
public_key (Union[str, bytes]): The public key in PEM format or the
x509 public key certificate.
Returns:
google.auth.crypt._python_rsa.RSAVerifier: The constructed verifier.
Raises:
ValueError: If the public_key can't be parsed.
"""
public_key = _helpers.to_bytes(public_key)
is_x509_cert = _CERTIFICATE_MARKER in public_key
# If this is a certificate, extract the public key info.
if is_x509_cert:
der = rsa.pem.load_pem(public_key, "CERTIFICATE")
asn1_cert, remaining = decoder.decode(der, asn1Spec=Certificate())
if remaining != b"":
raise ValueError("Unused bytes", remaining)
cert_info = asn1_cert["tbsCertificate"]["subjectPublicKeyInfo"]
key_bytes = _bit_list_to_bytes(cert_info["subjectPublicKey"])
pubkey = rsa.PublicKey.load_pkcs1(key_bytes, "DER")
else:
pubkey = rsa.PublicKey.load_pkcs1(public_key, "PEM")
return cls(pubkey)
class RSASigner(base.Signer, base.FromServiceAccountMixin):
"""Signs messages with an RSA private key.
Args:
private_key (rsa.key.PrivateKey): The private key to sign with.
key_id (str): Optional key ID used to identify this private key. This
can be useful to associate the private key with its associated
public key or certificate.
"""
def __init__(self, private_key, key_id=None):
self._key = private_key
self._key_id = key_id
@property
@_helpers.copy_docstring(base.Signer)
def key_id(self):
return self._key_id
@_helpers.copy_docstring(base.Signer)
def sign(self, message):
message = _helpers.to_bytes(message)
return rsa.pkcs1.sign(message, self._key, "SHA-256")
@classmethod
def from_string(cls, key, key_id=None):
"""Construct an Signer instance from a private key in PEM format.
Args:
key (str): Private key in PEM format.
key_id (str): An optional key id used to identify the private key.
Returns:
google.auth.crypt.Signer: The constructed signer.
Raises:
ValueError: If the key cannot be parsed as PKCS#1 or PKCS#8 in
PEM format.
"""
key = _helpers.from_bytes(key) # PEM expects str in Python 3
marker_id, key_bytes = pem.readPemBlocksFromFile(
six.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER
)
# Key is in pkcs1 format.
if marker_id == 0:
private_key = rsa.key.PrivateKey.load_pkcs1(key_bytes, format="DER")
# Key is in pkcs8.
elif marker_id == 1:
key_info, remaining = decoder.decode(key_bytes, asn1Spec=_PKCS8_SPEC)
if remaining != b"":
raise ValueError("Unused bytes", remaining)
private_key_info = key_info.getComponentByName("privateKey")
private_key = rsa.key.PrivateKey.load_pkcs1(
private_key_info.asOctets(), format="DER"
)
else:
raise ValueError("No key could be detected.")
return cls(private_key, key_id=key_id)
|
martbhell/wasthereannhlgamelastnight
|
src/lib/google/auth/crypt/_python_rsa.py
|
Python
|
mit
| 5,973
| 0.001005
|
# coding: utf-8
import numpy as np
from common.functions import *
from common.util import im2col, col2im
class Relu:
def __init__(self):
self.mask = None
def forward(self, x):
self.mask = (x <= 0)
out = x.copy()
out[self.mask] = 0
return out
def backward(self, dout):
dout[self.mask] = 0
dx = dout
return dx
class Sigmoid:
def __init__(self):
self.out = None
def forward(self, x):
out = sigmoid(x)
self.out = out
return out
def backward(self, dout):
dx = dout * (1.0 - self.out) * self.out
return dx
class Affine:
def __init__(self, W, b):
self.W =W
self.b = b
self.x = None
self.original_x_shape = None
        # gradients of the weight and bias parameters
self.dW = None
self.db = None
def forward(self, x):
        # flatten tensor inputs to 2D
self.original_x_shape = x.shape
x = x.reshape(x.shape[0], -1)
self.x = x
out = np.dot(self.x, self.W) + self.b
return out
def backward(self, dout):
dx = np.dot(dout, self.W.T)
self.dW = np.dot(self.x.T, dout)
self.db = np.sum(dout, axis=0)
        dx = dx.reshape(*self.original_x_shape)  # restore the original input shape (tensor support)
return dx
class SoftmaxWithLoss:
def __init__(self):
self.loss = None
        self.y = None # output of softmax
        self.t = None # teacher labels
def forward(self, x, t):
self.t = t
self.y = softmax(x)
self.loss = cross_entropy_error(self.y, self.t)
return self.loss
def backward(self, dout=1):
batch_size = self.t.shape[0]
        if self.t.size == self.y.size: # when the teacher labels are one-hot vectors
dx = (self.y - self.t) / batch_size
else:
dx = self.y.copy()
dx[np.arange(batch_size), self.t] -= 1
dx = dx / batch_size
return dx
class Dropout:
"""
http://arxiv.org/abs/1207.0580
"""
def __init__(self, dropout_ratio=0.5):
self.dropout_ratio = dropout_ratio
self.mask = None
def forward(self, x, train_flg=True):
if train_flg:
self.mask = np.random.rand(*x.shape) > self.dropout_ratio
return x * self.mask
else:
return x * (1.0 - self.dropout_ratio)
def backward(self, dout):
return dout * self.mask
class BatchNormalization:
"""
http://arxiv.org/abs/1502.03167
"""
def __init__(self, gamma, beta, momentum=0.9, running_mean=None, running_var=None):
self.gamma = gamma
self.beta = beta
self.momentum = momentum
        self.input_shape = None # 4D for convolutional layers, 2D for fully connected layers
        # mean and variance used at test time
        self.running_mean = running_mean
        self.running_var = running_var
        # intermediate data used during backward
self.batch_size = None
self.xc = None
self.std = None
self.dgamma = None
self.dbeta = None
def forward(self, x, train_flg=True):
self.input_shape = x.shape
if x.ndim != 2:
N, C, H, W = x.shape
x = x.transpose(1, 0, 2, 3).reshape(C, -1)
out = self.__forward(x, train_flg)
return out.reshape(*self.input_shape)
def __forward(self, x, train_flg):
if self.running_mean is None:
N, D = x.shape
self.running_mean = np.zeros(D)
self.running_var = np.zeros(D)
if train_flg:
mu = x.mean(axis=0)
xc = x - mu
var = np.mean(xc**2, axis=0)
std = np.sqrt(var + 10e-7)
xn = xc / std
self.batch_size = x.shape[0]
self.xc = xc
self.xn = xn
self.std = std
self.running_mean = self.momentum * self.running_mean + (1-self.momentum) * mu
self.running_var = self.momentum * self.running_var + (1-self.momentum) * var
else:
xc = x - self.running_mean
xn = xc / ((np.sqrt(self.running_var + 10e-7)))
out = self.gamma * xn + self.beta
return out
def backward(self, dout):
        if dout.ndim != 2:
            N, C, H, W = dout.shape
            dout = dout.reshape(N, -1)
dx = self.__backward(dout)
dx = dx.reshape(*self.input_shape)
return dx
def __backward(self, dout):
dbeta = dout.sum(axis=0)
dgamma = np.sum(self.xn * dout, axis=0)
dxn = self.gamma * dout
dxc = dxn / self.std
dstd = -np.sum((dxn * self.xc) / (self.std * self.std), axis=0)
dvar = 0.5 * dstd / self.std
dxc += (2.0 / self.batch_size) * self.xc * dvar
dmu = np.sum(dxc, axis=0)
dx = dxc - dmu / self.batch_size
self.dgamma = dgamma
self.dbeta = dbeta
return dx
class Convolution:
def __init__(self, W, b, stride=1, pad=0):
self.W = W
self.b = b
self.stride = stride
self.pad = pad
        # Intermediate data (used in the backward pass)
self.x = None
self.col = None
self.col_W = None
        # Gradients of the weight and bias parameters
self.dW = None
self.db = None
def forward(self, x):
FN, C, FH, FW = self.W.shape
N, C, H, W = x.shape
out_h = 1 + int((H + 2*self.pad - FH) / self.stride)
out_w = 1 + int((W + 2*self.pad - FW) / self.stride)
col = im2col(x, FH, FW, self.stride, self.pad)
col_W = self.W.reshape(FN, -1).T
out = np.dot(col, col_W) + self.b
out = out.reshape(N, out_h, out_w, -1).transpose(0, 3, 1, 2)
self.x = x
self.col = col
self.col_W = col_W
return out
def backward(self, dout):
FN, C, FH, FW = self.W.shape
dout = dout.transpose(0,2,3,1).reshape(-1, FN)
self.db = np.sum(dout, axis=0)
self.dW = np.dot(self.col.T, dout)
self.dW = self.dW.transpose(1, 0).reshape(FN, C, FH, FW)
dcol = np.dot(dout, self.col_W.T)
dx = col2im(dcol, self.x.shape, FH, FW, self.stride, self.pad)
return dx
class Pooling:
def __init__(self, pool_h, pool_w, stride=1, pad=0):
self.pool_h = pool_h
self.pool_w = pool_w
self.stride = stride
self.pad = pad
self.x = None
self.arg_max = None
def forward(self, x):
N, C, H, W = x.shape
out_h = int(1 + (H - self.pool_h) / self.stride)
out_w = int(1 + (W - self.pool_w) / self.stride)
col = im2col(x, self.pool_h, self.pool_w, self.stride, self.pad)
col = col.reshape(-1, self.pool_h*self.pool_w)
arg_max = np.argmax(col, axis=1)
out = np.max(col, axis=1)
out = out.reshape(N, out_h, out_w, C).transpose(0, 3, 1, 2)
self.x = x
self.arg_max = arg_max
return out
def backward(self, dout):
dout = dout.transpose(0, 2, 3, 1)
pool_size = self.pool_h * self.pool_w
dmax = np.zeros((dout.size, pool_size))
dmax[np.arange(self.arg_max.size), self.arg_max.flatten()] = dout.flatten()
dmax = dmax.reshape(dout.shape + (pool_size,))
dcol = dmax.reshape(dmax.shape[0] * dmax.shape[1] * dmax.shape[2], -1)
dx = col2im(dcol, self.x.shape, self.pool_h, self.pool_w, self.stride, self.pad)
return dx
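# A minimal usage sketch (illustrative only): chain an Affine layer into
# SoftmaxWithLoss and run one forward/backward pass on random data. It relies on
# softmax/cross_entropy_error from common.functions, imported at the top of this file.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    x = rng.randn(4, 6)                       # batch of 4 samples, 6 features
    t = np.eye(3)[rng.randint(0, 3, size=4)]  # one-hot targets for 3 classes
    affine = Affine(0.01 * rng.randn(6, 3), np.zeros(3))
    loss_layer = SoftmaxWithLoss()
    loss = loss_layer.forward(affine.forward(x), t)
    dx = affine.backward(loss_layer.backward())
    print(loss, dx.shape)                     # scalar loss and a gradient of shape (4, 6)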
|
Takasudo/studyPython
|
deep/common/layers.py
|
Python
|
gpl-3.0
| 7,810
| 0.00516
|
from setuptools import setup, find_packages
import os
version = '0.5'
setup(name='uwosh.emergency.master',
version=version,
description="",
long_description=open("README.txt").read() + "\n" +
open(os.path.join("docs", "HISTORY.txt")).read(),
# Get more strings from http://www.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Framework :: Plone",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords='',
author='Nathan Van Gheem',
author_email='vangheem@gmail.com',
url='http://svn.plone.org/svn/plone/plone.example',
license='GPL',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['uwosh', 'uwosh.emergency'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'uwosh.simpleemergency>=1.1',
'rsa'
],
entry_points="""
# -*- Entry points: -*-
[z3c.autoinclude.plugin]
target = plone
""",
)
|
uwosh/uwosh.emergency.master
|
setup.py
|
Python
|
gpl-2.0
| 1,123
| 0.002671
|
__author__ = 'harsha'
class ForceReply(object):
def __init__(self, force_reply, selective):
self.force_reply = force_reply
self.selective = selective
def get_force_reply(self):
return self.force_reply
def get_selective(self):
return self.selective
def __str__(self):
return str(self.__dict__)
|
harsha5500/pytelegrambot
|
telegram/ForceReply.py
|
Python
|
gpl-3.0
| 353
| 0.005666
|
# Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from . import caffe_train
from digits import test_utils
def test_caffe_imports():
test_utils.skipIfNotFramework('caffe')
import numpy
import google.protobuf
|
TimZaman/DIGITS
|
digits/model/tasks/test_caffe_train.py
|
Python
|
bsd-3-clause
| 281
| 0.007117
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# agnez documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this conf.py file.
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import agnez
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Agnez'
copyright = u'2015, Eder Santana'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = agnez.__version__
# The full version, including alpha/beta/rc tags.
release = agnez.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'agnezdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'agnez.tex',
u'Agnez Documentation',
u'Eder Santana', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'agnez',
u'Agnez Documentation',
[u'Eder Santana'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'agnez',
u'Agnez Documentation',
u'Eder Santana',
'agnez',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
EderSantana/agnez
|
docs/conf.py
|
Python
|
bsd-3-clause
| 8,361
| 0.005382
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Script to upload images to wikipedia.
Arguments:
-keep Keep the filename as is
-filename Target filename without the namespace prefix
-noverify Do not ask for verification of the upload description if one
is given
-abortonwarn: Abort upload on the specified warning type. If no warning type
is specified, aborts on any warning.
-ignorewarn: Ignores specified upload warnings. If no warning type is
specified, ignores all warnings. Use with caution
-chunked: Upload the file in chunks (more overhead, but restartable). If
no value is specified the chunk size is 1 MiB. The value must
be a number which can be preceded by a suffix. The units are:
No suffix: Bytes
'k': Kilobytes (1000 B)
'M': Megabytes (1000000 B)
'Ki': Kibibytes (1024 B)
'Mi': Mebibytes (1024x1024 B)
The suffixes are case insensitive.
-always Don't ask the user anything. This will imply -keep and
-noverify and require that either -abortonwarn or -ignorewarn
is defined for all. It will also require a valid file name and
description. It'll only overwrite files if -ignorewarn includes
the 'exists' warning.
-recursive When the filename is a directory it also uploads the files from
the subdirectories.
-summary Pick a custom edit summary for the bot.
It is possible to combine -abortonwarn and -ignorewarn so that if the specific
warning is given it won't apply the general one but more specific one. So if it
should ignore specific warnings and abort on the rest it's possible by defining
no warning for -abortonwarn and the specific warnings for -ignorewarn. The
order does not matter. If both are unspecific or a warning is specified by
both, it'll prefer aborting.
If any other arguments are given, the first is either URL, filename or
directory to upload, and the rest is a proposed description to go with the
upload. If none of these are given, the user is asked for the directory, file
or URL to upload. The bot will then upload the image to the wiki.
The script will ask for the location of an image(s), if not given as a
parameter, and for a description.
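
For example (the file name and description below are only illustrative):

    python upload.py -keep -chunked:1Mi Example.ogg "A pronunciation recording"
    python upload.py -abortonwarn:exists -recursive ./recordings

With -chunked, the optional suffix scales the number: '1Mi' means 1*1024*1024
bytes and '500k' means 500*1000 bytes; no suffix means plain bytes.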
"""
#
# (C) Rob W.W. Hooft, Andre Engels 2003-2004
# (C) Pywikibot team, 2003-2017
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
import math
import os
import re
import pywikibot
from pywikibot.bot import suggest_help
from pywikibot.specialbots import UploadRobot
from datetime import date
from pywikibot import config
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
url = u''
description = []
summary = None
keepFilename = False
always = False
useFilename = None
verifyDescription = True
aborts = set()
ignorewarn = set()
chunk_size = 0
chunk_size_regex = r'^-chunked(?::(\d+(?:\.\d+)?)[ \t]*(k|ki|m|mi)?b?)?$'
chunk_size_regex = re.compile(chunk_size_regex, re.I)
recursive = False
# process all global bot args
# returns a list of non-global args, i.e. args for upload.py
for arg in pywikibot.handle_args(args):
if arg:
if arg == '-always':
keepFilename = True
always = True
verifyDescription = False
elif arg == '-recursive':
recursive = True
elif arg.startswith('-keep'):
keepFilename = True
elif arg.startswith('-filename:'):
useFilename = arg[10:]
elif arg.startswith('-summary'):
summary = arg[9:]
elif arg.startswith('-noverify'):
verifyDescription = False
elif arg.startswith('-abortonwarn'):
if len(arg) > len('-abortonwarn:') and aborts is not True:
aborts.add(arg[len('-abortonwarn:'):])
else:
aborts = True
elif arg.startswith('-ignorewarn'):
if len(arg) > len('-ignorewarn:') and ignorewarn is not True:
ignorewarn.add(arg[len('-ignorewarn:'):])
else:
ignorewarn = True
elif arg.startswith('-chunked'):
match = chunk_size_regex.match(arg)
if match:
if match.group(1): # number was in there
base = float(match.group(1))
if match.group(2): # suffix too
suffix = match.group(2).lower()
if suffix == "k":
suffix = 1000
elif suffix == "m":
suffix = 1000000
elif suffix == "ki":
suffix = 1 << 10
elif suffix == "mi":
suffix = 1 << 20
else:
pass # huh?
else:
suffix = 1
chunk_size = math.trunc(base * suffix)
else:
chunk_size = 1 << 20 # default to 1 MiB
else:
pywikibot.error('Chunk size parameter is not valid.')
elif url == u'':
url = arg
else:
description.append(arg)
description = u' '.join(description)
    # curly brackets need to be doubled in a formatted string
description = """=={{{{int:filedesc}}}}==
{{{{Information
|description={{{{en|1=Native Israeli pronunciation of this Hebrew word}}}}
|date={0}
|source={{{{own}}}}
|author=[[User:{1}|{1}]]
|permission=
|other versions=
}}}}
=={{{{int:license-header}}}}==
{{{{self|cc-zero}}}}
[[Category:Hebrew pronunciation]]""".format(date.today(),config.usernames['commons']['commons'])
while not ("://" in url or os.path.exists(url)):
if not url:
error = 'No input filename given.'
else:
error = 'Invalid input filename given.'
if not always:
error += ' Try again.'
if always:
url = None
break
else:
pywikibot.output(error)
url = pywikibot.input(u'URL, file or directory where files are now:')
if always and ((aborts is not True and ignorewarn is not True) or
not description or url is None):
additional = ''
missing = []
if url is None:
missing += ['filename']
additional = error + ' '
if description is None:
missing += ['description']
if aborts is not True and ignorewarn is not True:
additional += ('Either -ignorewarn or -abortonwarn must be '
'defined for all codes. ')
additional += 'Unable to run in -always mode'
suggest_help(missing_parameters=missing, additional_text=additional)
return False
if os.path.isdir(url):
file_list = []
for directory_info in os.walk(url):
if not recursive:
# Do not visit any subdirectories
directory_info[1][:] = []
for dir_file in directory_info[2]:
file_list.append(os.path.join(directory_info[0], dir_file))
url = file_list
else:
url = [url]
bot = UploadRobot(url, description=description, useFilename=useFilename,
keepFilename=keepFilename,
verifyDescription=verifyDescription,
aborts=aborts, ignoreWarning=ignorewarn,
chunk_size=chunk_size, always=always,
summary="bot upload",
targetSite=pywikibot.Site('commons', 'commons'))
bot.run()
if __name__ == "__main__":
main()
|
eranroz/hewiktionary_checker
|
upload.py
|
Python
|
mit
| 8,269
| 0.000605
|
from time import sleep
from os.path import join
import pytest
from cosmo_tester.framework.examples import get_example_deployment
from cosmo_tester.framework.test_hosts import Hosts, VM
from cosmo_tester.test_suites.snapshots import (
create_snapshot,
download_snapshot,
restore_snapshot,
upload_snapshot,
)
@pytest.fixture(scope='function')
def manager_and_vm(request, ssh_key, module_tmpdir, test_config,
logger):
hosts = Hosts(ssh_key, module_tmpdir, test_config, logger, request, 2)
hosts.instances[0] = VM('master', test_config)
hosts.instances[1] = VM('centos_7', test_config)
manager, vm = hosts.instances
passed = True
try:
hosts.create()
yield hosts.instances
except Exception:
passed = False
raise
finally:
hosts.destroy(passed=passed)
@pytest.fixture(scope='function')
def example(manager_and_vm, ssh_key, tmpdir, logger, test_config):
manager, vm = manager_and_vm
example = get_example_deployment(
manager, ssh_key, logger, 'inplace_restore', test_config, vm)
try:
yield example
finally:
if example.installed:
example.uninstall()
def test_inplace_restore(manager_and_vm,
example,
module_tmpdir,
logger):
manager, vm = manager_and_vm
snapshot_name = 'inplace_restore_snapshot_{0}'.format(manager.image_type)
snapshot_path = join(str(module_tmpdir), snapshot_name) + '.zip'
example.upload_and_verify_install()
create_snapshot(manager, snapshot_name, logger)
download_snapshot(manager, snapshot_path, snapshot_name, logger)
# We need the certs to be the same for the 'new' manager otherwise an
# inplace upgrade can't properly work
manager.run_command('mkdir /tmp/ssl_backup')
manager.run_command('cp /etc/cloudify/ssl/* /tmp/ssl_backup',
use_sudo=True)
manager.teardown()
# The teardown doesn't properly clean up rabbitmq
manager.run_command('pkill -f rabbitmq', use_sudo=True)
manager.run_command('rm -rf /var/lib/rabbitmq', use_sudo=True)
manager.install_config['rabbitmq'] = {
'ca_path': '/tmp/ssl_backup/cloudify_internal_ca_cert.pem',
'cert_path': '/tmp/ssl_backup/rabbitmq-cert.pem',
'key_path': '/tmp/ssl_backup/rabbitmq-key.pem',
}
manager.install_config['prometheus'] = {
'ca_path': '/tmp/ssl_backup/cloudify_internal_ca_cert.pem',
'cert_path': '/tmp/ssl_backup/monitoring_cert.pem',
'key_path': '/tmp/ssl_backup/monitoring_key.pem',
}
manager.install_config['ssl_inputs'] = {
'external_cert_path': '/tmp/ssl_backup/cloudify_external_cert.pem',
'external_key_path': '/tmp/ssl_backup/cloudify_external_key.pem',
'internal_cert_path': '/tmp/ssl_backup/cloudify_internal_cert.pem',
'internal_key_path': '/tmp/ssl_backup/cloudify_internal_key.pem',
'ca_cert_path': '/tmp/ssl_backup/cloudify_internal_ca_cert.pem',
'external_ca_cert_path':
'/tmp/ssl_backup/cloudify_internal_ca_cert.pem',
}
manager.bootstrap()
upload_snapshot(manager, snapshot_path, snapshot_name, logger)
restore_snapshot(manager, snapshot_name, logger,
admin_password=manager.mgr_password)
manager.wait_for_manager()
logger.info('Waiting 35 seconds for agents to reconnect. '
'Agent reconnect retries are up to 30 seconds apart.')
sleep(35)
example.uninstall()
|
cloudify-cosmo/cloudify-system-tests
|
cosmo_tester/test_suites/snapshots/inplace_restore_test.py
|
Python
|
apache-2.0
| 3,573
| 0
|
from pathlib import Path
import re
from setuptools import setup
setup_dir = Path(__file__).resolve().parent
version = re.search(
r'__version__ = "(.*)"',
Path(setup_dir, 'tldr.py').open().read()
)
if version is None:
raise SystemExit("Could not determine version to use")
version = version.group(1)
with open('requirements.txt') as f:
required = f.read().splitlines()
setup(
name='tldr',
author='Felix Yan',
author_email='felixonmars@gmail.com',
url='https://github.com/tldr-pages/tldr-python-client',
description='command line client for tldr',
long_description=Path(setup_dir, 'README.md').open().read(),
long_description_content_type='text/markdown',
license='MIT',
py_modules=['tldr'],
entry_points={
"console_scripts": [
"tldr = tldr:cli"
]
},
data_files=[('share/man/man1', ['docs/man/tldr.1'])],
install_requires=required,
tests_require=[
'pytest',
'pytest-runner',
],
version=version,
python_requires='~=3.6',
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: MIT License",
"Environment :: Console",
"Intended Audience :: End Users/Desktop",
"Natural Language :: English",
"Operating System :: POSIX :: Linux",
"Operating System :: POSIX :: SunOS/Solaris",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Topic :: Utilities",
"Topic :: System"
]
)
|
tldr-pages/tldr-python-client
|
setup.py
|
Python
|
mit
| 1,805
| 0
|
import unittest
import sys
sys.path.insert(0, "..")
from sections.sections import Circle
import test_sections_generic as generic
class TestPhysicalProperties(generic.TestPhysicalProperties, unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.sectclass = Circle
cls.dimensions = dict(r=3.0)
cls.rp = 5.0, 4.0
cls.A = 28.274333882308138
cls._I0 = 63.61725123519331, 63.61725123519331, 0.0
cls._I = 63.61725123519331, 63.61725123519331, 0.0
cls._cog = 0.0, 0.0
def test_check_dimensions(self):
self.assertRaises(ValueError, self.section.set_dimensions, r=-1)
self.assertRaises(ValueError, self.section.set_dimensions, r=0)
if __name__ == "__main__":
unittest.main()
|
iamlikeme/sections
|
tests/test_sections_Circle.py
|
Python
|
mit
| 810
| 0.020988
|
"""API integration tests factories."""
import factory
from django_common.auth_backends import User
from factory.django import DjangoModelFactory
from samaritan.models import Address, ChurchRole, MembershipType, ChurchGroup, Member
class UserFactory(DjangoModelFactory):
"""Factory for users."""
username = factory.Faker('name')
class Meta:
model = User
class AddressFactory(DjangoModelFactory):
"""Factory for address."""
number = factory.Faker('word')
street = factory.Faker('name')
locality = factory.Faker('name')
city = factory.Faker('name')
post_code = factory.Faker('word')
class Meta:
model = Address
class RoleFactory(DjangoModelFactory):
"""Factory for Roles."""
name = factory.Faker('name')
description = factory.Faker('text')
class Meta:
model = ChurchRole
class GroupFactory(DjangoModelFactory):
"""Factory for Groups."""
name = factory.Faker('name')
description = factory.Faker('text')
class Meta:
model = ChurchGroup
@factory.post_generation
def members(self, create, extracted, **kwargs):
if create and extracted:
for member in extracted:
self.members.add(member)
class MembershipTypeFactory(DjangoModelFactory):
"""Membership Type Factory."""
name = factory.Faker('name')
description = factory.Faker('text')
class Meta:
model = MembershipType
class MemberFactory(DjangoModelFactory):
"""Factory for Members."""
first_name = factory.Faker('name')
last_name = factory.Faker('name')
date_of_birth = factory.Faker('date_this_century')
telephone = factory.Faker('random_int', min=0, max=99999999)
address = factory.SubFactory(AddressFactory)
email = factory.Faker('email')
details = factory.Faker('text')
is_baptised = factory.Faker('boolean')
baptismal_date = factory.Faker('date_this_century')
baptismal_place = factory.Faker('name')
is_member = factory.Faker('boolean')
membership_type = factory.SubFactory(MembershipTypeFactory)
membership_date = factory.Faker('date_this_year')
is_active = factory.Faker('boolean')
notes = factory.Faker('text')
church_role = factory.SubFactory(RoleFactory)
gdpr = factory.Faker('boolean')
class Meta:
model = Member
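# A minimal usage sketch (assumes a configured Django test database; the factories
# are the ones defined above):
#     member = MemberFactory()                  # builds and saves a Member with fake data
#     group = GroupFactory(members=[member])    # the post_generation hook adds the member
#     users = UserFactory.create_batch(3)       # three users with fake usernames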
|
Silvian/samaritan
|
api/tests/integration/__init__.py
|
Python
|
gpl-3.0
| 2,353
| 0.00085
|
"""ユーザー設定用モジュール."""
import os
DEBUG = True
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
INSTALLED_APPS = [
'app1',
'app2',
]
ROOT_URLCONF = 'project.urls'
WSGI_APPLICATION = [
# 'wsgiref.validate.validator',
'ngo.wsgi.RedirectApp',
'ngo.wsgi.WSGIHandler',
]
"""
以下のように読み込まれていきます
app = None
app = WSGIHandler(None)
app = RedirectApp(app)
app = validator(app)
"""
# TEMPLATES = ('ngo.backends.Ngo', [])
"""
TEMPLATES = (
'ngo.backends.Ngo',
[os.path.join(BASE_DIR, 'template'), os.path.join(BASE_DIR, 'template2')]
)
"""
TEMPLATES = ('ngo.backends.Jinja2', [])
STATICFILES_DIRS = None
"""
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'static2')
]
"""
STATIC_URL = 'static'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = 'media'
|
naritotakizawa/ngo
|
tests/project2/project/settings.py
|
Python
|
mit
| 941
| 0
|
# jhbuild - a tool to ease building collections of source packages
# Copyright (C) 2011 Colin Walters <walters@verbum.org>
#
# systeminstall.py - Use system-specific means to acquire dependencies
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
import sys
import logging
import shlex
import subprocess
import pipes
from StringIO import StringIO
import cmds
def get_installed_pkgconfigs(config):
"""Returns a dictionary mapping pkg-config names to their current versions on the system."""
pkgversions = {}
try:
proc = subprocess.Popen(['pkg-config', '--list-all'], stdout=subprocess.PIPE, env=config.get_original_environment(), close_fds=True)
stdout = proc.communicate()[0]
proc.wait()
pkgs = []
for line in StringIO(stdout):
pkg, rest = line.split(None, 1)
pkgs.append(pkg)
# We have to rather inefficiently repeatedly fork to work around
# broken pkg-config installations - if any package has a missing
# dependency pkg-config will fail entirely.
for pkg in pkgs:
args = ['pkg-config', '--modversion']
args.append(pkg)
proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
close_fds=True, env=config.get_original_environment())
stdout = proc.communicate()[0]
proc.wait()
pkgversions[pkg] = stdout.strip()
except OSError: # pkg-config not installed
pass
return pkgversions
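# Usage sketch (requires a jhbuild config object exposing get_original_environment()
# and a working pkg-config on PATH; the versions shown are illustrative):
#     versions = get_installed_pkgconfigs(config)
#     # e.g. {'glib-2.0': '2.56.4', 'gtk+-3.0': '3.22.30', ...}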
def systemdependencies_met(module_name, sysdeps, config):
    '''Returns True if the system dependencies are met for module_name'''
def get_c_include_search_paths(config):
'''returns a list of C include paths (-I) from the environment and the
user's config'''
def extract_path_from_cflags(args):
'''extract the C include paths (-I) from a list of arguments (args)
Returns a list of paths'''
itr = iter(args.split())
paths = []
if os.name == 'nt':
# shlex.split doesn't handle sep '\' on Windows
import string
shell_split = string.split
else:
shell_split = shlex.split
try:
while True:
arg = itr.next()
if arg.strip() in ['-I', '-isystem']:
# extract paths handling quotes and multiple paths
paths += shell_split(itr.next())[0].split(os.pathsep)
elif arg.startswith('-I'):
paths += shell_split(arg[2:])[0].split(os.pathsep)
except StopIteration:
pass
return paths
try:
multiarch = subprocess.check_output(['gcc', '-print-multiarch']).strip()
except:
multiarch = None
# search /usr/include and its multiarch subdir (if any) by default
paths = [ os.path.join(os.sep, 'usr', 'include')]
if multiarch:
paths += [ os.path.join(paths[0], multiarch) ]
paths += extract_path_from_cflags(os.environ.get('CPPFLAGS', ''))
# check include paths incorrectly configured in CFLAGS, CXXFLAGS
paths += extract_path_from_cflags(os.environ.get('CFLAGS', ''))
paths += extract_path_from_cflags(os.environ.get('CXXFLAGS', ''))
# check include paths incorrectly configured in makeargs
paths += extract_path_from_cflags(config.makeargs)
paths += extract_path_from_cflags(config.module_autogenargs.get
(module_name, ''))
paths += extract_path_from_cflags(config.module_makeargs.get
(module_name, ''))
paths = list(set(paths)) # remove duplicates
return paths
c_include_search_paths = None
for dep_type, value in sysdeps:
if dep_type.lower() == 'path':
if os.path.split(value)[0]:
if not os.path.isfile(value) and not os.access(value, os.X_OK):
return False
else:
found = False
for path in os.environ.get('PATH', '').split(os.pathsep):
filename = os.path.join(path, value)
if (os.path.isfile(filename) and
os.access(filename, os.X_OK)):
found = True
break
if not found:
return False
elif dep_type.lower() == 'c_include':
if c_include_search_paths is None:
c_include_search_paths = get_c_include_search_paths(config)
found = False
for path in c_include_search_paths:
filename = os.path.join(path, value)
if os.path.isfile(filename):
found = True
break
if not found:
return False
return True
class SystemInstall(object):
def __init__(self):
if cmds.has_command('pkexec'):
self._root_command_prefix_args = ['pkexec']
elif cmds.has_command('sudo'):
self._root_command_prefix_args = ['sudo']
else:
raise SystemExit, _('No suitable root privilege command found; you should install "pkexec"')
def install(self, pkgconfig_ids):
"""Takes a list of pkg-config identifiers and uses a system-specific method to install them."""
raise NotImplementedError()
@classmethod
def find_best(cls):
global _classes
for possible_cls in _classes:
if possible_cls.detect():
return possible_cls()
# PackageKit dbus interface contains bitfield constants which
# aren't introspectable
PK_PROVIDES_ANY = 1
PK_FILTER_ENUM_NOT_INSTALLED = 1 << 3
PK_FILTER_ENUM_NEWEST = 1 << 16
PK_FILTER_ENUM_ARCH = 1 << 18
# NOTE: This class is unfinished
class PKSystemInstall(SystemInstall):
def __init__(self):
SystemInstall.__init__(self)
self._loop = None
# PackageKit 0.8.1 has API breaks in the D-BUS interface, for now
# we try to support both it and older PackageKit
self._using_pk_0_8_1 = None
self._sysbus = None
self._pkdbus = None
def _on_pk_message(self, msgtype, msg):
logging.info(_('PackageKit: %s' % (msg,)))
def _on_pk_error(self, msgtype, msg):
logging.error(_('PackageKit: %s' % (msg,)))
def _get_new_transaction(self):
if self._loop is None:
import glib
self._loop = glib.MainLoop()
if self._sysbus is None:
import dbus.glib
import dbus
self._dbus = dbus
self._sysbus = dbus.SystemBus()
if self._pkdbus is None:
self._pkdbus = dbus.Interface(self._sysbus.get_object('org.freedesktop.PackageKit',
'/org/freedesktop/PackageKit'),
'org.freedesktop.PackageKit')
if self._using_pk_0_8_1 is None:
try:
txn_path = self._pkdbus.CreateTransaction()
txn = self._sysbus.get_object('org.freedesktop.PackageKit', txn_path)
self._using_pk_0_8_1 = True
except dbus.exceptions.DBusException:
tid = self._pkdbus.GetTid()
txn = self._sysbus.get_object('org.freedesktop.PackageKit', tid)
self._using_pk_0_8_1 = False
elif self._using_pk_0_8_1:
txn_path = self._pkdbus.CreateTransaction()
txn = self._sysbus.get_object('org.freedesktop.PackageKit', txn_path)
else:
tid = self._pkdbus.GetTid()
txn = self._sysbus.get_object('org.freedesktop.PackageKit', tid)
txn_tx = self._dbus.Interface(txn, 'org.freedesktop.PackageKit.Transaction')
txn.connect_to_signal('Message', self._on_pk_message)
txn.connect_to_signal('ErrorCode', self._on_pk_error)
txn.connect_to_signal('Destroy', lambda *args: self._loop.quit())
return txn_tx, txn
def install(self, uninstalled_pkgconfigs, uninstalled_filenames):
pk_package_ids = set()
if uninstalled_pkgconfigs:
txn_tx, txn = self._get_new_transaction()
txn.connect_to_signal('Package', lambda info, pkid, summary: pk_package_ids.add(pkid))
if self._using_pk_0_8_1:
txn_tx.WhatProvides(PK_FILTER_ENUM_ARCH | PK_FILTER_ENUM_NEWEST |
PK_FILTER_ENUM_NOT_INSTALLED,
PK_PROVIDES_ANY,
['pkgconfig(%s)' % pkg for modname, pkg in
uninstalled_pkgconfigs])
else:
txn_tx.WhatProvides('arch;newest;~installed', 'any',
['pkgconfig(%s)' % pkg for modname, pkg in
uninstalled_pkgconfigs])
self._loop.run()
del txn, txn_tx
if uninstalled_filenames:
txn_tx, txn = self._get_new_transaction()
txn.connect_to_signal('Package', lambda info, pkid, summary: pk_package_ids.add(pkid))
if self._using_pk_0_8_1:
txn_tx.SearchFiles(PK_FILTER_ENUM_ARCH | PK_FILTER_ENUM_NEWEST |
PK_FILTER_ENUM_NOT_INSTALLED,
[pkg for modname, pkg in
uninstalled_filenames])
else:
txn_tx.SearchFiles('arch;newest;~installed',
[pkg for modname, pkg in
uninstalled_filenames])
self._loop.run()
del txn, txn_tx
# On Fedora 17 a file can be in two packages: the normal package and
# an older compat- package. Don't install compat- packages.
pk_package_ids = [pkg for pkg in pk_package_ids
if not pkg.startswith('compat-')]
if len(pk_package_ids) == 0:
logging.info(_('Nothing available to install'))
return
logging.info(_('Installing:\n %s' % ('\n '.join(pk_package_ids, ))))
txn_tx, txn = self._get_new_transaction()
txn_tx.InstallPackages(True, pk_package_ids)
self._loop.run()
logging.info(_('Complete!'))
@classmethod
def detect(cls):
return cmds.has_command('pkcon')
class YumSystemInstall(SystemInstall):
def __init__(self):
SystemInstall.__init__(self)
def install(self, uninstalled_pkgconfigs, uninstalled_filenames):
logging.info(_('Using yum to install packages. Please wait.'))
if len(uninstalled_pkgconfigs) + len(uninstalled_filenames) > 0:
logging.info(_('Installing:\n %(pkgs)s') %
{'pkgs': '\n '.join([modname for modname, pkg in
uninstalled_pkgconfigs +
uninstalled_filenames])})
args = self._root_command_prefix_args + ['yum', '-y', 'install']
args.extend(['pkgconfig(%s)' % pkg for modname, pkg in
uninstalled_pkgconfigs])
args.extend([pkg for modname, pkg in uninstalled_filenames])
subprocess.check_call(args)
else:
logging.info(_('Nothing to install'))
@classmethod
def detect(cls):
return cmds.has_command('yum')
class AptSystemInstall(SystemInstall):
def __init__(self):
SystemInstall.__init__(self)
def _get_package_for(self, filename):
proc = subprocess.Popen(['apt-file', 'search', filename],
stdout=subprocess.PIPE, close_fds=True)
stdout = proc.communicate()[0]
if proc.returncode != 0:
return None
for line in StringIO(stdout):
parts = line.split(':', 1)
if len(parts) != 2:
continue
name = parts[0]
path = parts[1]
# No idea why the LSB has forks of the pkg-config files
if path.find('/lsb3') != -1:
continue
# otherwise for now, just take the first match
return name
def install(self, uninstalled_pkgconfigs, uninstalled_filenames):
logging.info(_('Using apt-file to search for providers; this may be slow. Please wait.'))
native_packages = []
pkgconfigs = [(modname, '/%s.pc' % pkg) for modname, pkg in
uninstalled_pkgconfigs]
for modname, filename in pkgconfigs + uninstalled_filenames:
native_pkg = self._get_package_for(filename)
if native_pkg:
native_packages.append(native_pkg)
else:
logging.info(_('No native package found for %(id)s '
'(%(filename)s)') % {'id' : modname,
'filename' : filename})
if native_packages:
logging.info(_('Installing: %(pkgs)s') % {'pkgs': ' '.join(native_packages)})
args = self._root_command_prefix_args + ['apt-get', 'install']
args.extend(native_packages)
subprocess.check_call(args)
else:
logging.info(_('Nothing to install'))
@classmethod
def detect(cls):
return cmds.has_command('apt-file')
# Ordered from best to worst
_classes = [AptSystemInstall, PKSystemInstall, YumSystemInstall]
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
installer = SystemInstall.find_best()
print "Using %r" % (installer, )
installer.install(sys.argv[1:])
|
ahmeier/jhbuild
|
jhbuild/utils/systeminstall.py
|
Python
|
gpl-2.0
| 14,543
| 0.002888
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid.op import Operator
class TestSamplingIdOp(OpTest):
def setUp(self):
self.op_type = "sampling_id"
self.use_mkldnn = False
self.init_kernel_type()
self.X = np.random.random((100, 10)).astype('float32')
self.inputs = {"X": self.X}
self.Y = np.random.random(100).astype('int64')
self.outputs = {'Out': self.Y}
self.attrs = {'max': 1.0, 'min': 0.0, 'seed': 1}
def test_check_output(self):
self.check_output_customized(self.verify_output)
y1 = self.out
self.check_output_customized(self.verify_output)
y2 = self.out
# check dtype
assert y1.dtype == np.int64
assert y2.dtype == np.int64
# check output is index ids of inputs
inputs_ids = np.arange(self.X.shape[1])
assert np.isin(y1, inputs_ids).all()
assert np.isin(y2, inputs_ids).all()
self.assertTrue(np.array_equal(y1, y2))
self.assertEqual(len(y1), len(self.Y))
def verify_output(self, outs):
out = np.array(outs[0])
self.out = out
def init_kernel_type(self):
pass
class TestSamplingIdShape(unittest.TestCase):
def test_shape(self):
x = fluid.layers.data(name='x', shape=[3], dtype='float32')
output = fluid.layers.sampling_id(x)
place = fluid.CPUPlace()
exe = fluid.Executor(place=place)
exe.run(fluid.default_startup_program())
feed = {
'x': np.array(
[[0.2, 0.3, 0.5], [0.2, 0.3, 0.4]], dtype='float32')
}
output_np = exe.run(feed=feed, fetch_list=[output])[0]
self.assertEqual(output.shape[0], -1)
self.assertEqual(len(output.shape), 1)
self.assertEqual(output_np.shape[0], 2)
self.assertEqual(len(output_np.shape), 1)
if __name__ == "__main__":
unittest.main()
|
chengduoZH/Paddle
|
python/paddle/fluid/tests/unittests/test_sampling_id_op.py
|
Python
|
apache-2.0
| 2,626
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
UNSCOPED_TOKEN_HEADER = 'UNSCOPED_TOKEN'
UNSCOPED_TOKEN = {
"token": {
"issued_at": "2014-06-09T09:48:59.643406Z",
"extras": {},
"methods": ["token"],
"expires_at": "2014-06-09T10:48:59.643375Z",
"user": {
"OS-FEDERATION": {
"identity_provider": {
"id": "testshib"
},
"protocol": {
"id": "saml2"
},
"groups": [
{"id": "1764fa5cf69a49a4918131de5ce4af9a"}
]
},
"id": "testhib%20user",
"name": "testhib user"
}
}
}
SAML_ENCODING = "<?xml version='1.0' encoding='UTF-8'?>"
TOKEN_SAML_RESPONSE = """
<ns2:Response Destination="http://beta.example.com/Shibboleth.sso/POST/ECP"
ID="8c21de08d2f2435c9acf13e72c982846"
IssueInstant="2015-03-25T14:43:21Z"
Version="2.0">
<saml:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">
http://keystone.idp/v3/OS-FEDERATION/saml2/idp
</saml:Issuer>
<ns2:Status>
<ns2:StatusCode Value="urn:oasis:names:tc:SAML:2.0:status:Success"/>
</ns2:Status>
<saml:Assertion ID="a5f02efb0bff4044b294b4583c7dfc5d"
IssueInstant="2015-03-25T14:43:21Z" Version="2.0">
<saml:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">
http://keystone.idp/v3/OS-FEDERATION/saml2/idp</saml:Issuer>
<xmldsig:Signature>
<xmldsig:SignedInfo>
<xmldsig:CanonicalizationMethod
Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"/>
<xmldsig:SignatureMethod
Algorithm="http://www.w3.org/2000/09/xmldsig#rsa-sha1"/>
<xmldsig:Reference URI="#a5f02efb0bff4044b294b4583c7dfc5d">
<xmldsig:Transforms>
<xmldsig:Transform
Algorithm="http://www.w3.org/2000/09/xmldsig#
enveloped-signature"/>
<xmldsig:Transform
Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"/>
</xmldsig:Transforms>
<xmldsig:DigestMethod
Algorithm="http://www.w3.org/2000/09/xmldsig#sha1"/>
<xmldsig:DigestValue>
0KH2CxdkfzU+6eiRhTC+mbObUKI=
</xmldsig:DigestValue>
</xmldsig:Reference>
</xmldsig:SignedInfo>
<xmldsig:SignatureValue>
m2jh5gDvX/1k+4uKtbb08CHp2b9UWsLw
</xmldsig:SignatureValue>
<xmldsig:KeyInfo>
<xmldsig:X509Data>
<xmldsig:X509Certificate>...</xmldsig:X509Certificate>
</xmldsig:X509Data>
</xmldsig:KeyInfo>
</xmldsig:Signature>
<saml:Subject>
<saml:NameID>admin</saml:NameID>
<saml:SubjectConfirmation Method="urn:oasis:names:tc:SAML:2.0:cm:bearer">
<saml:SubjectConfirmationData
NotOnOrAfter="2015-03-25T15:43:21.172385Z"
Recipient="http://beta.example.com/Shibboleth.sso/POST/ECP"/>
</saml:SubjectConfirmation>
</saml:Subject>
<saml:AuthnStatement AuthnInstant="2015-03-25T14:43:21Z"
SessionIndex="9790eb729858456f8a33b7a11f0a637e"
SessionNotOnOrAfter="2015-03-25T15:43:21.172385Z">
<saml:AuthnContext>
<saml:AuthnContextClassRef>
urn:oasis:names:tc:SAML:2.0:ac:classes:Password
</saml:AuthnContextClassRef>
<saml:AuthenticatingAuthority>
http://keystone.idp/v3/OS-FEDERATION/saml2/idp
</saml:AuthenticatingAuthority>
</saml:AuthnContext>
</saml:AuthnStatement>
<saml:AttributeStatement>
<saml:Attribute Name="openstack_user"
NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:uri">
<saml:AttributeValue xsi:type="xs:string">admin</saml:AttributeValue>
</saml:Attribute>
<saml:Attribute Name="openstack_roles"
NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:uri">
<saml:AttributeValue xsi:type="xs:string">admin</saml:AttributeValue>
</saml:Attribute>
<saml:Attribute Name="openstack_project"
NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:uri">
<saml:AttributeValue xsi:type="xs:string">admin</saml:AttributeValue>
</saml:Attribute>
</saml:AttributeStatement>
</saml:Assertion>
</ns2:Response>
"""
TOKEN_BASED_SAML = ''.join([SAML_ENCODING, TOKEN_SAML_RESPONSE])
ECP_ENVELOPE = """
<ns0:Envelope
xmlns:ns0="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:ns1="urn:oasis:names:tc:SAML:2.0:profiles:SSO:ecp"
xmlns:ns2="urn:oasis:names:tc:SAML:2.0:protocol"
xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion"
xmlns:xmldsig="http://www.w3.org/2000/09/xmldsig#"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<ns0:Header>
<ns1:RelayState
ns0:actor="http://schemas.xmlsoap.org/soap/actor/next"
ns0:mustUnderstand="1">
ss:mem:1ddfe8b0f58341a5a840d2e8717b0737
</ns1:RelayState>
</ns0:Header>
<ns0:Body>
{0}
</ns0:Body>
</ns0:Envelope>
""".format(TOKEN_SAML_RESPONSE)
TOKEN_BASED_ECP = ''.join([SAML_ENCODING, ECP_ENVELOPE])
|
ctrlaltdel/neutrinator
|
vendor/keystoneauth1/tests/unit/k2k_fixtures.py
|
Python
|
gpl-3.0
| 5,454
| 0
|
# https://codecombat.com/play/level/if-stravaganza?
#
# Debes Comprar & Equipar:
# 1. Reloj de Pulsera Simple
# 2. Programática II
#
# ¡Derrota a los ogros desde dentro de su propio campamento!
while True:
enemy = hero.findNearestEnemy()
    # Use an if statement to check whether an enemy exists
    # Attack the enemy if it exists:
    if enemy:
        # Call attack twice to attack twice; multiplying the call's result by 2 has no effect.
        hero.attack(enemy)
        hero.attack(enemy)
|
inukaze/maestro
|
Solución/CodeCombat/Python/2.Bosque_Aislado/005 - If-stravaganza.py
|
Python
|
gpl-2.0
| 386
| 0.002604
|
# Rekall Memory Forensics
# Copyright 2016 Google Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""This module implements plugins related to forensic artifacts.
https://github.com/ForensicArtifacts
"""
from future import standard_library
standard_library.install_aliases()
from builtins import str
from past.builtins import basestring
from builtins import object
from future.utils import with_metaclass
__author__ = "Michael Cohen <scudette@google.com>"
import csv
import datetime
import json
import platform
import os
import io
import sys
import zipfile
import yaml
from artifacts import definitions
from artifacts import errors
from rekall import plugin
from rekall import obj
from rekall_lib import yaml_utils
from rekall.ui import text
from rekall.ui import json_renderer
from rekall.plugins.response import common
from rekall_lib import registry
class ArtifactResult(object):
"""Bundle all the results from an artifact."""
def __init__(self, artifact_name=None, result_type=None, fields=None):
self.artifact_name = artifact_name
self.result_type = result_type
self.results = []
self.fields = fields or []
def __iter__(self):
return iter(self.results)
def add_result(self, **data):
if data:
self.results.append(data)
def merge(self, other):
self.results.extend(other)
def as_dict(self):
return dict(fields=self.fields,
results=self.results,
artifact_name=self.artifact_name,
result_type=self.result_type)
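# Usage sketch for ArtifactResult (all values below are illustrative):
#     r = ArtifactResult(artifact_name="DemoArtifact",
#                        result_type="file_information",
#                        fields=[dict(name="filename", type="unicode")])
#     r.add_result(filename="/etc/hosts")
#     r.as_dict()  # -> {'fields': [...], 'results': [{'filename': '/etc/hosts'}],
#                  #     'artifact_name': 'DemoArtifact', 'result_type': 'file_information'}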
class BaseArtifactResultWriter(with_metaclass(registry.MetaclassRegistry, object)):
"""Writes the results of artifacts."""
__abstract = True
def __init__(self, session=None, copy_files=False,
create_timeline=False):
self.session = session
self.copy_files = copy_files
self.create_timeline = create_timeline
def write_result(self, result):
"""Writes the artifact result."""
def _create_timeline(self, artifact_result):
"""Create a new timeline result from the given result.
We use the output format suitable for the timesketch tool:
https://github.com/google/timesketch/wiki/UserGuideTimelineFromFile
"""
artifact_fields = artifact_result.fields
fields = [
dict(name="message", type="unicode"),
dict(name="timestamp", type="int"),
dict(name="datetime", type="unicode"),
dict(name="timestamp_desc", type="unicode"),
] + artifact_fields
new_result = ArtifactResult(
artifact_name=artifact_result.artifact_name,
result_type="timeline",
fields=fields)
for field in artifact_fields:
# This field is a timestamp - copy the entire row into the timeline.
if field["type"] == "epoch":
for row in artifact_result.results:
new_row = row.copy()
timestamp = row.get(field["name"])
if timestamp is None:
continue
new_row["timestamp"] = int(timestamp)
new_row["datetime"] = datetime.datetime.utcfromtimestamp(
timestamp).strftime("%Y-%m-%dT%H:%M:%S+00:00")
new_row["timestamp_desc"] = artifact_result.artifact_name
new_row["message"] = " ".join(
str(row[field["name"]]) for field in artifact_fields
if field["name"] in row)
new_result.add_result(**new_row)
return new_result
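    # Sketch of what _create_timeline produces for one row whose "st_mtime" field
    # is declared with type "epoch" (values here are purely illustrative):
    #     {"filename": "/etc/hosts", "st_mtime": 1.0}
    # becomes
    #     {"filename": "/etc/hosts", "st_mtime": 1.0, "timestamp": 1,
    #      "datetime": "1970-01-01T00:00:01+00:00",
    #      "timestamp_desc": "<artifact name>", "message": "/etc/hosts 1.0"}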
def __enter__(self):
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
return
class DirectoryBasedWriter(BaseArtifactResultWriter):
name = "Directory"
def __init__(self, output=None, **kwargs):
super(DirectoryBasedWriter, self).__init__(**kwargs)
self.dump_dir = output
# Check if the directory already exists.
if not os.path.isdir(self.dump_dir):
raise plugin.PluginError("%s is not a directory" % self.dump_dir)
def write_file(self, result):
"""Writes a FileInformation object."""
for row in result.results:
filename = row["filename"]
with open(filename, "rb") as in_fd:
with self.session.GetRenderer().open(
directory=self.dump_dir,
filename=filename, mode="wb") as out_fd:
while 1:
data = in_fd.read(1024*1024)
if not data:
break
out_fd.write(data)
def _write_csv_file(self, out_fd, result):
fieldnames = [x["name"] for x in result.fields]
writer = csv.DictWriter(
out_fd, dialect="excel",
fieldnames=fieldnames)
writer.writeheader()
for row in result.results:
writer.writerow(row)
def write_result(self, result):
"""Writes the artifact result."""
if self.copy_files and result.result_type == "file_information":
try:
self.write_file(result)
except (IOError, OSError) as e:
self.session.logging.warn("Unable to copy file: %s", e)
with self.session.GetRenderer().open(
directory=self.dump_dir,
filename="artifacts/%s.json" % result.artifact_name,
mode="wb") as out_fd:
out_fd.write(json.dumps(result.as_dict(), sort_keys=True))
with self.session.GetRenderer().open(
directory=self.dump_dir,
filename="artifacts/%s.csv" % result.artifact_name,
mode="wb") as out_fd:
self._write_csv_file(out_fd, result)
if self.create_timeline:
with self.session.GetRenderer().open(
directory=self.dump_dir,
filename="artifacts/%s.timeline.csv" %
result.artifact_name,
mode="wb") as out_fd:
self._write_csv_file(out_fd, self._create_timeline(result))
class ZipBasedWriter(BaseArtifactResultWriter):
name = "Zip"
def __init__(self, output=None, **kwargs):
super(ZipBasedWriter, self).__init__(**kwargs)
self.output = output
def __enter__(self):
self.out_fd = self.session.GetRenderer().open(
filename=self.output, mode="wb").__enter__()
self.outzip = zipfile.ZipFile(self.out_fd, mode="w",
compression=zipfile.ZIP_DEFLATED)
return self
def __exit__(self, *args):
self.outzip.close()
self.out_fd.__exit__(*args)
def _write_csv_file(self, out_fd, result):
fieldnames = [x["name"] for x in result.fields]
writer = csv.DictWriter(
out_fd, dialect="excel",
fieldnames=fieldnames)
writer.writeheader()
for row in result.results:
writer.writerow(row)
def write_file(self, result):
for row in result.results:
filename = row["filename"]
self.outzip.write(filename)
def write_result(self, result):
"""Writes the artifact result."""
if self.copy_files and result.result_type == "file_information":
try:
self.write_file(result)
except (IOError, OSError) as e:
self.session.logging.warn(
"Unable to copy file %s into output: %s",
result["filename"], e)
self.outzip.writestr("artifacts/%s.json" % result.artifact_name,
json.dumps(result.as_dict(), sort_keys=True),
zipfile.ZIP_DEFLATED)
tmp_fd = io.StringIO()
self._write_csv_file(tmp_fd, result)
self.outzip.writestr("artifacts/%s.csv" % result.artifact_name,
tmp_fd.getvalue(),
zipfile.ZIP_DEFLATED)
if self.create_timeline:
tmp_fd = io.StringIO()
self._write_csv_file(tmp_fd, self._create_timeline(result))
self.outzip.writestr("artifacts/%s.timeline.csv" %
result.artifact_name,
tmp_fd.getvalue(),
zipfile.ZIP_DEFLATED)
# Rekall defines a new artifact type.
TYPE_INDICATOR_REKALL = "REKALL_EFILTER"
class _FieldDefinitionValidator(object):
"""Loads and validates fields in a dict.
We check their name, types and if they are optional according to a template
in _field_definitions.
"""
_field_definitions = []
def _LoadFieldDefinitions(self, data, field_definitions):
for field in field_definitions:
name = field["name"]
default = field.get("default")
required_type = field.get("type")
            if required_type is str:
                required_type = basestring
if default is None and required_type is not None:
                # basestring can't be instantiated.
if required_type is basestring:
default = ""
else:
default = required_type()
if required_type is None and default is not None:
required_type = type(default)
if not field.get("optional"):
if name not in data:
raise errors.FormatError(
u'Missing fields {}.'.format(name))
value = data.get(name, default)
if default is not None and not isinstance(value, required_type):
raise errors.FormatError(
u'field {} has type {} should be {}.'.format(
name, type(data[name]), required_type))
if field.get("checker"):
value = field["checker"](self, data)
setattr(self, name, value)
class SourceType(_FieldDefinitionValidator):
"""All sources inherit from this."""
# Common fields for all sources.
_common_fields = [
dict(name="type", optional=False),
dict(name="supported_os", optional=True, type=list,
default=list(definitions.SUPPORTED_OS)),
]
def __init__(self, source_definition, artifact=None):
attributes = source_definition["attributes"]
# The artifact that owns us.
self.artifact = artifact
self.source_definition = source_definition
self.type_indicator = source_definition["type"]
self._LoadFieldDefinitions(attributes, self._field_definitions)
self._LoadFieldDefinitions(source_definition, self._common_fields)
def is_active(self, **_):
"""Indicates if the source is applicable to the environment."""
return True
def apply(self, artifact_name=None, fields=None, result_type=None, **_):
"""Generate ArtifactResult instances."""
return ArtifactResult(artifact_name=artifact_name,
result_type=result_type,
fields=fields)
# These are the valid types of Rekall images. They can be used to restrict
# REKALL_EFILTER artifacts to specific types of images. The types which end in
# API refer to the API only version of the similar plugins.
REKALL_IMAGE_TYPES = [
"Windows", "WindowsAPI",
"Linux", "LinuxAPI",
"Darwin", "DarwinAPI"
]
class RekallEFilterArtifacts(SourceType):
"""Class to support Rekall Efilter artifact types."""
allowed_types = {
"int": int,
"unicode": str, # Unicode data.
"str": str, # Used for binary data.
"float": float,
"epoch": float, # Dates as epoch timestamps.
"any": str # Used for opaque types that can not be further processed.
}
_field_definitions = [
dict(name="query", type=basestring),
dict(name="query_parameters", default=[], optional=True),
dict(name="fields", type=list),
dict(name="type_name", type=basestring),
dict(name="image_type", type=list, optional=True,
default=REKALL_IMAGE_TYPES),
]
def __init__(self, source_definition, **kw):
super(RekallEFilterArtifacts, self).__init__(source_definition, **kw)
for column in self.fields:
if "name" not in column or "type" not in column:
raise errors.FormatError(
u"Field definition should have both name and type.")
mapped_type = column["type"]
if mapped_type not in self.allowed_types:
raise errors.FormatError(
u"Unsupported type %s." % mapped_type)
def GetImageType(self, session):
"""Returns one of the standard image types based on the session."""
result = session.profile.metadata("os").capitalize()
if session.GetParameter("live_mode") == "API":
result += "API"
return result
def is_active(self, session=None):
"""Determine if this source is active."""
return (self.image_type and
self.GetImageType(session) in self.image_type)
def apply(self, session=None, **kwargs):
result = super(RekallEFilterArtifacts, self).apply(
fields=self.fields, result_type=self.type_name, **kwargs)
if not self.is_active(session):
return
search = session.plugins.search(
query=self.query,
query_parameters=self.query_parameters)
for match in search.solve():
row = {}
for column in self.fields:
name = column["name"]
type = column["type"]
value = match.get(name)
if value is None:
continue
row[name] = RekallEFilterArtifacts.allowed_types[
type](value)
result.add_result(**row)
yield result
class LiveModeSourceMixin(object):
def is_active(self, session=None):
"""Determine if this source is active."""
# We are only active in Live mode (API or Memory).
return (session.GetParameter("live_mode") != None and
session.profile.metadata("os").capitalize() in
self.supported_os)
class FileSourceType(LiveModeSourceMixin, SourceType):
_field_definitions = [
dict(name="paths", default=[]),
dict(name="separator", default="/", type=basestring,
optional=True),
]
# These fields will be present in the ArtifactResult object we return.
_FIELDS = [
dict(name="st_mode", type="unicode"),
dict(name="st_nlink", type="int"),
dict(name="st_uid", type="unicode"),
dict(name="st_gid", type="unicode"),
dict(name="st_size", type="int"),
dict(name="st_mtime", type="epoch"),
dict(name="filename", type="unicode"),
]
def apply(self, session=None, **kwargs):
result = super(FileSourceType, self).apply(
fields=self._FIELDS, result_type="file_information", **kwargs)
for hits in session.plugins.glob(
self.paths, path_sep=self.separator,
root=self.separator).collect():
# Hits are FileInformation objects, and we just pick some of the
# important fields to report.
info = hits["path"]
row = {}
for field in self._FIELDS:
name = field["name"]
type = RekallEFilterArtifacts.allowed_types[field["type"]]
row[name] = type(getattr(info, name))
result.add_result(**row)
yield result
class ArtifactGroupSourceType(SourceType):
_field_definitions = [
dict(name="names", type=list),
dict(name="supported_os", optional=True,
default=definitions.SUPPORTED_OS),
]
def apply(self, collector=None, **_):
for name in self.names:
for result in collector.collect_artifact(name):
yield result
class WMISourceType(LiveModeSourceMixin, SourceType):
_field_definitions = [
dict(name="query", type=basestring),
dict(name="fields", type=list, optional=True, default=[]),
dict(name="type_name", type=basestring, optional=True),
dict(name="supported_os", optional=True,
default=definitions.SUPPORTED_OS),
]
fields = None
def _guess_returned_fields(self, sample):
result = []
for key, value in sample.items():
field_type = type(value)
if field_type is int:
field_type = "int"
            else:
                # Anything that is not an int is reported as unicode text.
                field_type = "unicode"
result.append(dict(name=key, type=field_type))
return result
def apply(self, session=None, **kwargs):
result = super(WMISourceType, self).apply(
result_type=self.type_name, **kwargs)
wmi = session.plugins.wmi(query=self.query)
# The wmi plugin may not exist on non-windows systems.
if wmi == None:
return
for collected in wmi.collect():
match = collected["Result"]
row = {}
# If the user did not specify the fields, we must
# deduce them from the first returned row.
if not self.fields:
self.fields = self._guess_returned_fields(match)
result.fields = self.fields
for column in self.fields:
name = column["name"]
type = column["type"]
value = match.get(name)
if value is None:
continue
row[name] = RekallEFilterArtifacts.allowed_types[
type](value)
result.add_result(**row)
yield result
class RegistryKeySourceType(LiveModeSourceMixin, SourceType):
_field_definitions = [
dict(name="keys", default=[]),
dict(name="supported_os", optional=True,
default=["Windows"]),
]
_FIELDS = [
dict(name="st_mtime", type="epoch"),
dict(name="hive", type="unicode"),
dict(name="key_name", type="unicode"),
dict(name="value", type="str"),
dict(name="value_type", type="str"),
]
def apply(self, session=None, **kwargs):
result = super(RegistryKeySourceType, self).apply(
fields=self._FIELDS, result_type="registry_key", **kwargs)
for hits in session.plugins.glob(
self.keys, path_sep="\\", filesystem="Reg",
root="\\").collect():
# Hits are FileInformation objects, and we just pick some of the
# important fields to report.
info = hits["path"]
row = {}
for field in self._FIELDS:
name = field["name"]
field_type = RekallEFilterArtifacts.allowed_types[field["type"]]
data = info.get(name)
if data is not None:
row[name] = field_type(data)
result.add_result(**row)
yield result
class RegistryValueSourceType(LiveModeSourceMixin, SourceType):
def CheckKeyValuePairs(self, source):
key_value_pairs = source["key_value_pairs"]
for pair in key_value_pairs:
if (not isinstance(pair, dict) or "key" not in pair or
"value" not in pair):
raise errors.FormatError(
u"key_value_pairs should consist of dicts with key and "
"value items.")
return key_value_pairs
_field_definitions = [
dict(name="key_value_pairs", default=[],
checker=CheckKeyValuePairs),
dict(name="supported_os", optional=True,
default=["Windows"]),
]
_FIELDS = [
dict(name="st_mtime", type="epoch"),
dict(name="hive", type="unicode"),
dict(name="key_name", type="unicode"),
dict(name="value_name", type="unicode"),
dict(name="value_type", type="str"),
dict(name="value", type="str"),
]
def apply(self, session=None, **kwargs):
result = super(RegistryValueSourceType, self).apply(
fields=self._FIELDS, result_type="registry_value", **kwargs)
globs = [u"%s\\%s" % (x["key"], x["value"])
for x in self.key_value_pairs]
for hits in session.plugins.glob(
globs, path_sep="\\", filesystem="Reg",
root="\\").collect():
info = hits["path"]
row = {}
for field in self._FIELDS:
name = field["name"]
field_type = RekallEFilterArtifacts.allowed_types[field["type"]]
data = info.get(name)
if data is not None:
row[name] = field_type(data)
result.add_result(**row)
yield result
# This lookup table maps between source type name and concrete implementations
# that we support. Artifacts containing only unimplemented source types
# will be ignored.
SOURCE_TYPES = {
TYPE_INDICATOR_REKALL: RekallEFilterArtifacts,
definitions.TYPE_INDICATOR_FILE: FileSourceType,
definitions.TYPE_INDICATOR_ARTIFACT_GROUP: ArtifactGroupSourceType,
definitions.TYPE_INDICATOR_WMI_QUERY: WMISourceType,
definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY: RegistryKeySourceType,
definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE: RegistryValueSourceType,
}
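# Minimal lookup sketch (illustrative only; the definition values below are
# made up and these constants are not used elsewhere in the plugin): given a
# source definition dict, the table above selects the implementing class,
# which ArtifactDefinition.BuildSources() then instantiates.
_EXAMPLE_SOURCE_DEFINITION = {
    "type": definitions.TYPE_INDICATOR_FILE,
    "attributes": {"paths": ["/tmp/*.log"], "separator": "/"},
}
_EXAMPLE_SOURCE_CLS = SOURCE_TYPES.get(_EXAMPLE_SOURCE_DEFINITION["type"])
# _EXAMPLE_SOURCE_CLS is FileSourceType here; an unknown "type" would yield
# None and the artifact source would be recorded as unsupported.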
class ArtifactDefinition(_FieldDefinitionValidator):
"""The main artifact class."""
def CheckLabels(self, art_definition):
"""Ensure labels are defined."""
labels = art_definition.get("labels", [])
# Keep unknown labels around in case callers want to check for complete
# label coverage. In most cases it is desirable to allow users to extend
# labels but when super strict validation is required we want to make
        # sure that users don't mistype a label.
self.undefined_labels = set(labels).difference(definitions.LABELS)
return labels
def BuildSources(self, art_definition):
sources = art_definition["sources"]
result = []
self.unsupported_source_types = []
for source in sources:
if not isinstance(source, dict):
raise errors.FormatError("Source is not a dict.")
source_type_name = source.get("type")
if source_type_name is None:
raise errors.FormatError("Source has no type.")
source_cls = self.source_types.get(source_type_name)
if source_cls:
result.append(source_cls(source, artifact=self))
else:
self.unsupported_source_types.append(source_type_name)
if not result:
if self.unsupported_source_types:
raise errors.FormatError(
"No supported sources: %s" % (
self.unsupported_source_types,))
raise errors.FormatError("No available sources.")
return result
def SupportedOS(self, art_definition):
supported_os = art_definition.get(
"supported_os", definitions.SUPPORTED_OS)
undefined_supported_os = set(supported_os).difference(
definitions.SUPPORTED_OS)
if undefined_supported_os:
raise errors.FormatError(
u'supported operating system: {} '
u'not defined.'.format(
u', '.join(undefined_supported_os)))
return supported_os
_field_definitions = [
dict(name="name", type=basestring),
dict(name="doc", type=basestring),
dict(name="labels", default=[],
checker=CheckLabels, optional=True),
dict(name="sources", default=[],
checker=BuildSources),
dict(name="supported_os",
checker=SupportedOS, optional=True),
dict(name="conditions", default=[], optional=True),
dict(name="returned_types", default=[], optional=True),
dict(name="provides", type=list, optional=True),
dict(name="urls", type=list, optional=True)
]
name = "unknown"
source_types = SOURCE_TYPES
def __init__(self, data, source_types=None):
self.source_types = source_types or SOURCE_TYPES
self.data = data
try:
self._LoadDefinition(data)
except Exception as e:
exc_info = sys.exc_info()
raise errors.FormatError(
"Definition %s: %s" % (self.name, e))
def set_implementations(self, source_types):
return self.__class__(self.data, source_types)
def _LoadDefinition(self, data):
if not isinstance(data, dict):
raise errors.FormatError(
"Artifact definition must be a dict.")
different_keys = set(data) - definitions.TOP_LEVEL_KEYS
if different_keys:
raise errors.FormatError(u'Undefined keys: {}'.format(
different_keys))
self._LoadFieldDefinitions(data, self._field_definitions)
class ArtifactDefinitionProfileSectionLoader(obj.ProfileSectionLoader):
"""Loads artifacts from the artifact profiles."""
name = "$ARTIFACTS"
def LoadIntoProfile(self, session, profile, art_definitions):
for definition in art_definitions:
try:
profile.AddDefinition(definition)
except errors.FormatError as e:
session.logging.debug(
"Skipping Artifact %s: %s", definition.get("name"), e)
return profile
class ArtifactProfile(obj.Profile):
"""A profile containing artifact definitions."""
# This will contain the definitions.
def __init__(self, *args, **kwargs):
super(ArtifactProfile, self).__init__(*args, **kwargs)
self.definitions = []
self.definitions_by_name = {}
def AddDefinition(self, definition):
"""Add a new definition from a dict."""
self.definitions.append(definition)
self.definitions_by_name[definition["name"]] = definition
def GetDefinitionByName(self, name, source_types=None):
if source_types is None:
source_types = SOURCE_TYPES
definition = self.definitions_by_name[name]
return ArtifactDefinition(definition, source_types)
def GetDefinitions(self, source_types=None):
if source_types is None:
source_types = SOURCE_TYPES
for definition in self.definitions:
try:
yield ArtifactDefinition(definition, source_types)
except errors.FormatError:
pass
class ArtifactsCollector(plugin.TypedProfileCommand,
plugin.Command):
"""Collects artifacts."""
name = "artifact_collector"
__args = [
dict(name="artifacts", positional=True, required=True,
type="ArrayStringParser",
help="A list of artifact names to collect."),
dict(name="artifact_files", type="ArrayStringParser",
help="A list of additional yaml files to load which contain "
"artifact definitions."),
dict(name="definitions", type="ArrayStringParser",
help="An inline artifact definition in yaml format."),
dict(name="create_timeline", type="Bool", default=False,
help="Also generate a timeline file."),
dict(name="copy_files", type="Bool", default=False,
help="Copy files into the output."),
dict(name="writer", type="Choices",
choices=lambda: (
x.name for x in list(BaseArtifactResultWriter.classes.values())),
help="Writer for artifact results."),
dict(name="output_path",
help="Path suitable for dumping files."),
]
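    # Invocation sketch (the artifact name and output path are placeholders,
    # not taken from this module): from an interactive Rekall session one
    # might run something like
    #
    #   session.plugins.artifact_collector(
    #       "WindowsPrefetchFiles", output_path="/tmp/collected.zip",
    #       copy_files=True)
    #
    # Because the output path is not a directory, collect() below falls back
    # to the Zip writer when no explicit writer is given.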
table_header = [
dict(name="divider", type="Divider"),
dict(name="result"),
]
table_options = dict(
suppress_headers=True
)
def column_types(self):
return dict(path=common.FileInformation(filename="/etc"))
def __init__(self, *args, **kwargs):
super(ArtifactsCollector, self).__init__(*args, **kwargs)
self.artifact_profile = self.session.LoadProfile("artifacts")
extra_definitions = [
open(x).read() for x in self.plugin_args.artifact_files]
extra_definitions.extend(self.plugin_args.definitions or [])
# Make a copy of the artifact registry.
if extra_definitions:
self.artifact_profile = self.artifact_profile.copy()
for definition in extra_definitions:
for definition_data in yaml.safe_load_all(definition):
self.artifact_profile.AddDefinition(definition_data)
self.seen = set()
self.supported_os = self.get_supported_os(self.session)
if self.supported_os is None:
raise plugin.PluginError(
"Unable to determine running environment.")
# Make sure the args make sense.
if self.plugin_args.output_path is None:
if self.plugin_args.copy_files:
raise plugin.PluginError(
"Can only copy files when an output file is specified.")
if self.plugin_args.create_timeline:
raise plugin.PluginError(
"Can only create timelines when an output file "
"is specified.")
@classmethod
def get_supported_os(cls, session):
# Determine which context we are running in. If we are running in live
# mode, we use the platform to determine the supported OS, otherwise we
# determine it from the profile.
if session.GetParameter("live"):
return platform.system()
elif session.profile.metadata("os") == "linux":
return "Linux"
elif session.profile.metadata("os") == "windows":
return "Windows"
elif session.profile.metadata("os") == "darwin":
return "Darwin"
def _evaluate_conditions(self, conditions):
# TODO: Implement an expression parser for these. For now we just return
# True always.
return True
def collect_artifact(self, artifact_name):
if artifact_name in self.seen:
return
self.seen.add(artifact_name)
try:
definition = self.artifact_profile.GetDefinitionByName(
artifact_name)
except KeyError:
self.session.logging.error("Unknown artifact %s" % artifact_name)
return
# This artifact is not for us.
if self.supported_os not in definition.supported_os:
self.session.logging.debug(
"Skipping artifact %s: Supported OS: %s, but we are %s",
definition.name, definition.supported_os,
self.supported_os)
return
if not self._evaluate_conditions(definition.conditions):
return
yield dict(divider="Artifact: %s" % definition.name)
for source in definition.sources:
# This source is not for us.
if not source.is_active(session=self.session):
continue
for result in source.apply(
artifact_name=definition.name,
session=self.session,
collector=self):
if isinstance(result, dict):
yield result
else:
yield dict(result=result)
def collect(self):
# Figure out a sensible default for the output writer.
if (self.plugin_args.output_path is not None and
self.plugin_args.writer is None):
if os.path.isdir(self.plugin_args.output_path):
self.plugin_args.writer = "Directory"
else:
self.plugin_args.writer = "Zip"
if self.plugin_args.writer:
impl = BaseArtifactResultWriter.ImplementationByName(
self.plugin_args.writer)
with impl(session=self.session,
copy_files=self.plugin_args.copy_files,
create_timeline=self.plugin_args.create_timeline,
output=self.plugin_args.output_path) as writer:
for x in self._collect(writer=writer):
yield x
else:
for x in self._collect():
yield x
def _collect(self, writer=None):
for artifact_name in self.plugin_args.artifacts:
for hit in self.collect_artifact(artifact_name):
if "result" in hit and writer:
writer.write_result(hit["result"])
yield hit
class ArtifactsView(plugin.TypedProfileCommand,
plugin.Command):
name = "artifact_view"
__args = [
dict(name="artifacts", type="ArrayStringParser", positional=True,
help="A list of artifacts to display")
]
table_header = [
dict(name="divider", type="Divider"),
dict(name="Message")
]
def collect(self):
artifact_profile = self.session.LoadProfile("artifacts")
for artifact in self.plugin_args.artifacts:
definition = artifact_profile.definitions_by_name.get(artifact)
if definition:
yield dict(divider=artifact)
yield dict(Message=yaml_utils.safe_dump(definition))
class ArtifactsList(plugin.TypedProfileCommand,
plugin.Command):
"""List details about all known artifacts."""
name = "artifact_list"
__args = [
dict(name="regex", type="RegEx",
default=".",
help="Filter the artifact name."),
dict(name="supported_os", type="ArrayStringParser", required=False,
help="If specified show for these OSs, otherwise autodetect "
"based on the current image."),
dict(name="labels", type="ArrayStringParser",
help="Filter by these labels."),
dict(name="all", type="Bool",
help="Show all artifacts."),
]
table_header = [
dict(name="Name", width=30),
dict(name="OS", width=8),
dict(name="Labels", width=20),
dict(name="Types", width=20),
dict(name="Description", width=50),
]
def collect(self):
# Empty means autodetect based on the image.
if not self.plugin_args.supported_os:
supported_os = set([
ArtifactsCollector.get_supported_os(self.session)])
else:
supported_os = set(self.plugin_args.supported_os)
for definition in self.session.LoadProfile(
"artifacts").GetDefinitions():
if (not self.plugin_args.all and
not supported_os.intersection(definition.supported_os)):
continue
# Determine the type:
types = set()
for source in definition.sources:
if self.plugin_args.all or source.is_active(
session=self.session):
types.add(source.type_indicator)
if self.plugin_args.regex.match(definition.name):
yield (definition.name, definition.supported_os,
definition.labels, sorted(types), definition.doc)
class ArtifactResult_TextObjectRenderer(text.TextObjectRenderer):
renders_type = "ArtifactResult"
def render_row(self, target, **_):
column_names = [x["name"] for x in target.fields]
table = text.TextTable(
columns=target.fields,
renderer=self.renderer,
session=self.session)
if not target.results:
return text.Cell("")
result = [
text.JoinedCell(*[text.Cell(x) for x in column_names]),
text.JoinedCell(*[text.Cell("-" * len(x)) for x in column_names])]
for row in target.results:
ordered_row = []
for column in column_names:
ordered_row.append(row.get(column))
result.append(table.get_row(*ordered_row))
result = text.StackedCell(*result)
return result
class ArtifactResult_DataExportObjectRenderer(
json_renderer.StateBasedObjectRenderer):
renders_type = "ArtifactResult"
renderers = ["DataExportRenderer"]
def GetState(self, item, **_):
return dict(artifact_name=item.artifact_name,
result_type=item.result_type,
fields=item.fields,
results=item.results)
|
google/rekall
|
rekall-core/rekall/plugins/response/forensic_artifacts.py
|
Python
|
gpl-2.0
| 37,683
| 0.001115
|
import sys
try:
from gensim.models.word2vec_inner import FAST_VERSION
print('FAST_VERSION ok ! Retrieved with value ', FAST_VERSION)
sys.exit()
except ImportError:
print('Failed... fall back to plain numpy (20-80x slower training than the above)')
sys.exit(-1)
|
summanlp/gensim
|
docker/check_fast_version.py
|
Python
|
lgpl-2.1
| 283
| 0.003534
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 20 13:15:05 2017
@author: Vijayasai S
"""
# Use python3
from haversine import distance
from datetime import datetime
from dateutil import tz
import my_dbscan as mydb
import alert_update as au
from pymongo import MongoClient
import pandas as pd
import time
from_zone = tz.tzutc()
to_zone = tz.tzlocal()
def _connect_mongo(host, port, username, password, db):
""" A util for making a connection to mongo """
if username and password:
mongo_uri = 'mongodb://%s:%s@%s:%s/%s' % (username, password, host, port, db)
conn = MongoClient(mongo_uri)
else:
conn = MongoClient(host, port)
return conn[db]
def read_mongo(db, collection, query={}, host='localhost', port=27017, username=None, password=None, no_id=True):
""" Read from Mongo and Store into DataFrame """
# Connect to MongoDB
db = _connect_mongo(host=host, port=port, username=username, password=password, db=db)
# Make a query to the specific DB and Collection
cursor = db[collection].find(query)
# Expand the cursor and construct the DataFrame
df = pd.DataFrame(list(cursor))
# Delete the _id
if no_id:
del df['_id']
return df
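# Usage sketch (the query below is made up for illustration; a running local
# MongoDB instance is assumed):
#
#   df = read_mongo("maximus_db", "tapola_rank_15_total",
#                   query={"unit_id": 42}, host="localhost", port=27017)
#   print(df.head())
#
# read_mongo returns a pandas DataFrame built from the matching documents,
# with the MongoDB "_id" column dropped unless no_id=False.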
def Generate_data(get_col, set_col1, set_col2, time_delay, year, month, startday, endday, starthr, endhr, startmin, endmin):
id_dist = [] ; item_id_dist = []
main_curr_rank = {} ; tot_rank_curr = {}
count = 0
client = MongoClient('localhost', 27017)
db = client.maximus_db
for day in range(startday,endday+1):
for hr in range(starthr,endhr+1):
for mins in range(startmin,endmin+1,time_delay):
try:
#set_col1.drop()
#set_col2.drop()
mins_next = mins + time_delay
hr_next = hr
if time_delay + mins > 59:
mins_next = (time_delay + mins) - 60
hr_next += 1
if hr_next > 23:
hr_next = 0
day += 1
#print (hr,mins)
items = get_col.find({"$and" :[{"packettimestamp":{"$gte":datetime(year,month,day,hr,mins,0)}},{"packettimestamp":{"$lte":datetime(year,month,day,hr_next,mins_next,0)}}]},{"unit_id":1,"latitude":1,"longitude":1,"_id":0}).sort([("packettimestamp", -1)])
utc = datetime(year,month,day,hr,mins)
utc = utc.replace(tzinfo=from_zone)
# Convert time zone
ist = utc.astimezone(to_zone)
data = [] ; item_id = []
for item in items:
if item["unit_id"] not in item_id:
item_id.append(item["unit_id"])
data.append(item)
if item["unit_id"] not in item_id_dist:
item_id_dist.append(item["unit_id"])
id_dist.append(item)
u_id = [ids["unit_id"] for ids in id_dist]
if count > 0:
rank_curr = {} ; lat_curr = {} ; long_curr = {}
for item in item_id:
if item in u_id:
for i in range(len(id_dist)):
if item == id_dist[i]["unit_id"]:
for j in range(len(data)):
if item == data[j]["unit_id"]:
dist = distance(id_dist[i]["latitude"],data[j]["latitude"],id_dist[i]["longitude"],data[j]["longitude"])
id_dist[i]["latitude"] = data[j]["latitude"]
id_dist[i]["longitude"] = data[j]["longitude"]
rank_curr[item] = dist
lat_curr[item] = id_dist[i]["latitude"]
long_curr[item] = id_dist[i]["longitude"]
try:
tot_rank_curr[item] = dist + main_curr_rank[item]
main_curr_rank[item] = dist + main_curr_rank[item]
except Exception:
tot_rank_curr[item] = dist
main_curr_rank[item] = dist
#print (item, dist)
rank_current_sorted = sorted(rank_curr.values(), reverse=True)
tot_rank_current_sorted = sorted(tot_rank_curr.values(), reverse=True)
#rank,r_id,dist_rank = [],[],[]
for item in item_id:
if rank_curr[item] in rank_current_sorted:
set_col1.insert([{"latitude":lat_curr[item], "longitude":long_curr[item], "distance_by_interval":rank_curr[item], "unit_id":item, "rank":rank_current_sorted.index(rank_curr[item])+1,"timestamp":ist}])
set_col2.insert([{"latitude":lat_curr[item], "longitude":long_curr[item], "distance_by_interval":tot_rank_curr[item], "unit_id":item, "rank":tot_rank_current_sorted.index(tot_rank_curr[item])+1,"timestamp":ist}])
##########################################################################
# CREATING CLUSTERS AND SAVING IT IN DATABASE #
##########################################################################
table_to_read_1 = "tapola_rank_15_total"
eps = 5.0 # in KM
ride_id = None
coll_1 = db.tapola_rank_15_manual_clustering
df_1 = read_mongo("maximus_db", table_to_read_1, {"$and" :[{"timestamp":{"$gte":datetime(year,month,day,hr,mins,0)}},{"timestamp":{"$lte":datetime(year,month,day,hr_next,mins_next,0)}}], "ride_id":ride_id})
mydb.manual_DBSCAN(df_1, coll_1, eps)
print (ist)
print ("Creating cluster using manual dbscan algorithm")
##########################################################################
# CREATING ALERTS AND SAVING IT IN DATABASE #
##########################################################################
table_to_read_2 = "tapola_rank_15_manual_clustering"
df_2 = read_mongo("maximus_db", table_to_read_2, {"$and" :[{"timestamp":{"$gte":datetime(year,month,day,hr,mins,0)}},{"timestamp":{"$lte":datetime(year,month,day,hr_next,mins_next,0)}}], "ride_id":ride_id})
coll_2 = db.tapola_rank_15_manual_clus_alert
au.Generate_alert(df_2, coll_2)
print ("Generating alert and saving in the database\n")
time.sleep(1)
count += 1
except KeyError:
pass
return
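# Invocation sketch (the source/target collection names and the time window
# are placeholders; only "maximus_db" and the 15-minute interval mirror the
# hard-coded values used above):
#
#   client = MongoClient('localhost', 27017)
#   db = client.maximus_db
#   Generate_data(db.vehicle_packets, db.tapola_rank_15,
#                 db.tapola_rank_15_total, time_delay=15,
#                 year=2017, month=7, startday=20, endday=20,
#                 starthr=0, endhr=23, startmin=0, endmin=59)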
|
Vijaysai005/KProject
|
vijay/DBSCAN/clustering/db/generate_data.py
|
Python
|
gpl-3.0
| 5,801
| 0.038786
|
# © 2019 ForgeFlow S.L.
# © 2019 Serpent Consulting Services Pvt. Ltd.
# License LGPL-3.0 or later (https://www.gnu.org/licenses/lgpl.html).
from odoo import _, api, fields, models
from odoo.exceptions import UserError
class StockWarehouse(models.Model):
_inherit = "stock.warehouse"
def _default_operating_unit(self):
if self.company_id:
company = self.company_id
else:
company = self.env.company
for ou in self.env.user.operating_unit_ids:
if company == self.company_id:
self.operating_unit_id = ou
operating_unit_id = fields.Many2one(
comodel_name="operating.unit",
string="Operating Unit",
default=_default_operating_unit,
)
@api.constrains("operating_unit_id", "company_id")
def _check_company_operating_unit(self):
for rec in self:
            if (
                rec.operating_unit_id
                and rec.company_id
                and rec.company_id != rec.operating_unit_id.company_id
            ):
raise UserError(
_(
"Configuration error. The Company in the Stock Warehouse"
" and in the Operating Unit must be the same."
)
)
class StockWarehouseOrderPoint(models.Model):
_inherit = "stock.warehouse.orderpoint"
@api.constrains(
"warehouse_id",
"location_id",
"location_id.operating_unit_id",
"warehouse_id.operating_unit_id",
)
def _check_location(self):
for rec in self:
if (
rec.warehouse_id.operating_unit_id
and rec.warehouse_id
and rec.location_id
and rec.warehouse_id.operating_unit_id
!= rec.location_id.operating_unit_id
):
raise UserError(
_(
"Configuration Error. The Operating Unit of the "
"Warehouse and the Location must be the same. "
)
)
|
OCA/operating-unit
|
stock_operating_unit/model/stock_warehouse.py
|
Python
|
agpl-3.0
| 2,156
| 0.000464
|
"""Support for RainMachine devices."""
import asyncio
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
ATTR_ATTRIBUTION, CONF_BINARY_SENSORS, CONF_IP_ADDRESS, CONF_PASSWORD,
CONF_PORT, CONF_SCAN_INTERVAL, CONF_SENSORS, CONF_SSL,
CONF_MONITORED_CONDITIONS, CONF_SWITCHES)
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client, config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.service import verify_domain_control
from .config_flow import configured_instances
from .const import (
DATA_CLIENT, DEFAULT_PORT, DEFAULT_SCAN_INTERVAL, DEFAULT_SSL, DOMAIN,
PROVISION_SETTINGS, RESTRICTIONS_CURRENT, RESTRICTIONS_UNIVERSAL)
_LOGGER = logging.getLogger(__name__)
DATA_LISTENER = 'listener'
PROGRAM_UPDATE_TOPIC = '{0}_program_update'.format(DOMAIN)
SENSOR_UPDATE_TOPIC = '{0}_data_update'.format(DOMAIN)
ZONE_UPDATE_TOPIC = '{0}_zone_update'.format(DOMAIN)
CONF_CONTROLLERS = 'controllers'
CONF_PROGRAM_ID = 'program_id'
CONF_SECONDS = 'seconds'
CONF_ZONE_ID = 'zone_id'
CONF_ZONE_RUN_TIME = 'zone_run_time'
DEFAULT_ATTRIBUTION = 'Data provided by Green Electronics LLC'
DEFAULT_ICON = 'mdi:water'
DEFAULT_ZONE_RUN = 60 * 10
TYPE_FLOW_SENSOR = 'flow_sensor'
TYPE_FLOW_SENSOR_CLICK_M3 = 'flow_sensor_clicks_cubic_meter'
TYPE_FLOW_SENSOR_CONSUMED_LITERS = 'flow_sensor_consumed_liters'
TYPE_FLOW_SENSOR_START_INDEX = 'flow_sensor_start_index'
TYPE_FLOW_SENSOR_WATERING_CLICKS = 'flow_sensor_watering_clicks'
TYPE_FREEZE = 'freeze'
TYPE_FREEZE_PROTECTION = 'freeze_protection'
TYPE_FREEZE_TEMP = 'freeze_protect_temp'
TYPE_HOT_DAYS = 'extra_water_on_hot_days'
TYPE_HOURLY = 'hourly'
TYPE_MONTH = 'month'
TYPE_RAINDELAY = 'raindelay'
TYPE_RAINSENSOR = 'rainsensor'
TYPE_WEEKDAY = 'weekday'
BINARY_SENSORS = {
TYPE_FLOW_SENSOR: ('Flow Sensor', 'mdi:water-pump'),
TYPE_FREEZE: ('Freeze Restrictions', 'mdi:cancel'),
TYPE_FREEZE_PROTECTION: ('Freeze Protection', 'mdi:weather-snowy'),
TYPE_HOT_DAYS: ('Extra Water on Hot Days', 'mdi:thermometer-lines'),
TYPE_HOURLY: ('Hourly Restrictions', 'mdi:cancel'),
TYPE_MONTH: ('Month Restrictions', 'mdi:cancel'),
TYPE_RAINDELAY: ('Rain Delay Restrictions', 'mdi:cancel'),
TYPE_RAINSENSOR: ('Rain Sensor Restrictions', 'mdi:cancel'),
TYPE_WEEKDAY: ('Weekday Restrictions', 'mdi:cancel'),
}
SENSORS = {
TYPE_FLOW_SENSOR_CLICK_M3: (
'Flow Sensor Clicks', 'mdi:water-pump', 'clicks/m^3'),
TYPE_FLOW_SENSOR_CONSUMED_LITERS: (
'Flow Sensor Consumed Liters', 'mdi:water-pump', 'liter'),
TYPE_FLOW_SENSOR_START_INDEX: (
'Flow Sensor Start Index', 'mdi:water-pump', None),
TYPE_FLOW_SENSOR_WATERING_CLICKS: (
'Flow Sensor Clicks', 'mdi:water-pump', 'clicks'),
TYPE_FREEZE_TEMP: ('Freeze Protect Temperature', 'mdi:thermometer', '°C'),
}
BINARY_SENSOR_SCHEMA = vol.Schema({
vol.Optional(CONF_MONITORED_CONDITIONS, default=list(BINARY_SENSORS)):
vol.All(cv.ensure_list, [vol.In(BINARY_SENSORS)])
})
SENSOR_SCHEMA = vol.Schema({
vol.Optional(CONF_MONITORED_CONDITIONS, default=list(SENSORS)):
vol.All(cv.ensure_list, [vol.In(SENSORS)])
})
SERVICE_ALTER_PROGRAM = vol.Schema({
vol.Required(CONF_PROGRAM_ID): cv.positive_int,
})
SERVICE_ALTER_ZONE = vol.Schema({
vol.Required(CONF_ZONE_ID): cv.positive_int,
})
SERVICE_PAUSE_WATERING = vol.Schema({
vol.Required(CONF_SECONDS): cv.positive_int,
})
SERVICE_START_PROGRAM_SCHEMA = vol.Schema({
vol.Required(CONF_PROGRAM_ID): cv.positive_int,
})
SERVICE_START_ZONE_SCHEMA = vol.Schema({
vol.Required(CONF_ZONE_ID): cv.positive_int,
vol.Optional(CONF_ZONE_RUN_TIME, default=DEFAULT_ZONE_RUN):
cv.positive_int,
})
SERVICE_STOP_PROGRAM_SCHEMA = vol.Schema({
vol.Required(CONF_PROGRAM_ID): cv.positive_int,
})
SERVICE_STOP_ZONE_SCHEMA = vol.Schema({
vol.Required(CONF_ZONE_ID): cv.positive_int,
})
SWITCH_SCHEMA = vol.Schema({vol.Optional(CONF_ZONE_RUN_TIME): cv.positive_int})
CONTROLLER_SCHEMA = vol.Schema({
vol.Required(CONF_IP_ADDRESS): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL):
cv.time_period,
vol.Optional(CONF_BINARY_SENSORS, default={}): BINARY_SENSOR_SCHEMA,
vol.Optional(CONF_SENSORS, default={}): SENSOR_SCHEMA,
vol.Optional(CONF_SWITCHES, default={}): SWITCH_SCHEMA,
})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_CONTROLLERS):
vol.All(cv.ensure_list, [CONTROLLER_SCHEMA]),
}),
}, extra=vol.ALLOW_EXTRA)
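# Illustrative configuration.yaml entry accepted by CONFIG_SCHEMA above (the
# address, password and run time are placeholders):
#
#   rainmachine:
#     controllers:
#       - ip_address: 192.168.1.100
#         password: YOUR_PASSWORD
#         ssl: true
#         switches:
#           zone_run_time: 600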
async def async_setup(hass, config):
"""Set up the RainMachine component."""
hass.data[DOMAIN] = {}
hass.data[DOMAIN][DATA_CLIENT] = {}
hass.data[DOMAIN][DATA_LISTENER] = {}
if DOMAIN not in config:
return True
conf = config[DOMAIN]
for controller in conf[CONF_CONTROLLERS]:
if controller[CONF_IP_ADDRESS] in configured_instances(hass):
continue
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={'source': SOURCE_IMPORT},
data=controller))
return True
async def async_setup_entry(hass, config_entry):
"""Set up RainMachine as config entry."""
from regenmaschine import login
from regenmaschine.errors import RainMachineError
_verify_domain_control = verify_domain_control(hass, DOMAIN)
websession = aiohttp_client.async_get_clientsession(hass)
try:
client = await login(
config_entry.data[CONF_IP_ADDRESS],
config_entry.data[CONF_PASSWORD],
websession,
port=config_entry.data[CONF_PORT],
ssl=config_entry.data[CONF_SSL])
rainmachine = RainMachine(
client,
config_entry.data.get(CONF_BINARY_SENSORS, {}).get(
CONF_MONITORED_CONDITIONS, list(BINARY_SENSORS)),
config_entry.data.get(CONF_SENSORS, {}).get(
CONF_MONITORED_CONDITIONS, list(SENSORS)),
config_entry.data.get(CONF_ZONE_RUN_TIME, DEFAULT_ZONE_RUN))
await rainmachine.async_update()
except RainMachineError as err:
_LOGGER.error('An error occurred: %s', err)
raise ConfigEntryNotReady
hass.data[DOMAIN][DATA_CLIENT][config_entry.entry_id] = rainmachine
for component in ('binary_sensor', 'sensor', 'switch'):
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(
config_entry, component))
async def refresh(event_time):
"""Refresh RainMachine sensor data."""
_LOGGER.debug('Updating RainMachine sensor data')
await rainmachine.async_update()
async_dispatcher_send(hass, SENSOR_UPDATE_TOPIC)
hass.data[DOMAIN][DATA_LISTENER][
config_entry.entry_id] = async_track_time_interval(
hass,
refresh,
timedelta(seconds=config_entry.data[CONF_SCAN_INTERVAL]))
@_verify_domain_control
async def disable_program(call):
"""Disable a program."""
await rainmachine.client.programs.disable(
call.data[CONF_PROGRAM_ID])
async_dispatcher_send(hass, PROGRAM_UPDATE_TOPIC)
@_verify_domain_control
async def disable_zone(call):
"""Disable a zone."""
await rainmachine.client.zones.disable(call.data[CONF_ZONE_ID])
async_dispatcher_send(hass, ZONE_UPDATE_TOPIC)
@_verify_domain_control
async def enable_program(call):
"""Enable a program."""
await rainmachine.client.programs.enable(call.data[CONF_PROGRAM_ID])
async_dispatcher_send(hass, PROGRAM_UPDATE_TOPIC)
@_verify_domain_control
async def enable_zone(call):
"""Enable a zone."""
await rainmachine.client.zones.enable(call.data[CONF_ZONE_ID])
async_dispatcher_send(hass, ZONE_UPDATE_TOPIC)
@_verify_domain_control
async def pause_watering(call):
"""Pause watering for a set number of seconds."""
await rainmachine.client.watering.pause_all(call.data[CONF_SECONDS])
async_dispatcher_send(hass, PROGRAM_UPDATE_TOPIC)
@_verify_domain_control
async def start_program(call):
"""Start a particular program."""
await rainmachine.client.programs.start(call.data[CONF_PROGRAM_ID])
async_dispatcher_send(hass, PROGRAM_UPDATE_TOPIC)
@_verify_domain_control
async def start_zone(call):
"""Start a particular zone for a certain amount of time."""
await rainmachine.client.zones.start(
call.data[CONF_ZONE_ID], call.data[CONF_ZONE_RUN_TIME])
async_dispatcher_send(hass, ZONE_UPDATE_TOPIC)
@_verify_domain_control
async def stop_all(call):
"""Stop all watering."""
await rainmachine.client.watering.stop_all()
async_dispatcher_send(hass, PROGRAM_UPDATE_TOPIC)
@_verify_domain_control
async def stop_program(call):
"""Stop a program."""
await rainmachine.client.programs.stop(call.data[CONF_PROGRAM_ID])
async_dispatcher_send(hass, PROGRAM_UPDATE_TOPIC)
@_verify_domain_control
async def stop_zone(call):
"""Stop a zone."""
await rainmachine.client.zones.stop(call.data[CONF_ZONE_ID])
async_dispatcher_send(hass, ZONE_UPDATE_TOPIC)
@_verify_domain_control
async def unpause_watering(call):
"""Unpause watering."""
await rainmachine.client.watering.unpause_all()
async_dispatcher_send(hass, PROGRAM_UPDATE_TOPIC)
for service, method, schema in [
('disable_program', disable_program, SERVICE_ALTER_PROGRAM),
('disable_zone', disable_zone, SERVICE_ALTER_ZONE),
('enable_program', enable_program, SERVICE_ALTER_PROGRAM),
('enable_zone', enable_zone, SERVICE_ALTER_ZONE),
('pause_watering', pause_watering, SERVICE_PAUSE_WATERING),
('start_program', start_program, SERVICE_START_PROGRAM_SCHEMA),
('start_zone', start_zone, SERVICE_START_ZONE_SCHEMA),
('stop_all', stop_all, {}),
('stop_program', stop_program, SERVICE_STOP_PROGRAM_SCHEMA),
('stop_zone', stop_zone, SERVICE_STOP_ZONE_SCHEMA),
('unpause_watering', unpause_watering, {}),
]:
hass.services.async_register(DOMAIN, service, method, schema=schema)
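    # Illustrative service call (the IDs and run time are placeholders): once
    # registered, a call such as
    #   rainmachine.start_zone  with data {"zone_id": 3, "zone_run_time": 300}
    # is validated against SERVICE_START_ZONE_SCHEMA before start_zone() asks
    # the RainMachine client to run that zone.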
return True
async def async_unload_entry(hass, config_entry):
"""Unload an OpenUV config entry."""
hass.data[DOMAIN][DATA_CLIENT].pop(config_entry.entry_id)
remove_listener = hass.data[DOMAIN][DATA_LISTENER].pop(
config_entry.entry_id)
remove_listener()
for component in ('binary_sensor', 'sensor', 'switch'):
await hass.config_entries.async_forward_entry_unload(
config_entry, component)
return True
class RainMachine:
"""Define a generic RainMachine object."""
def __init__(
self, client, binary_sensor_conditions, sensor_conditions,
default_zone_runtime):
"""Initialize."""
self.binary_sensor_conditions = binary_sensor_conditions
self.client = client
self.data = {}
self.default_zone_runtime = default_zone_runtime
self.device_mac = self.client.mac
self.sensor_conditions = sensor_conditions
async def async_update(self):
"""Update sensor/binary sensor data."""
from regenmaschine.errors import RainMachineError
tasks = {}
if (TYPE_FLOW_SENSOR in self.binary_sensor_conditions
or any(c in self.sensor_conditions
for c in (TYPE_FLOW_SENSOR_CLICK_M3,
TYPE_FLOW_SENSOR_CONSUMED_LITERS,
TYPE_FLOW_SENSOR_START_INDEX,
TYPE_FLOW_SENSOR_WATERING_CLICKS))):
tasks[PROVISION_SETTINGS] = self.client.provisioning.settings()
if any(c in self.binary_sensor_conditions
for c in (TYPE_FREEZE, TYPE_HOURLY, TYPE_MONTH, TYPE_RAINDELAY,
TYPE_RAINSENSOR, TYPE_WEEKDAY)):
tasks[RESTRICTIONS_CURRENT] = self.client.restrictions.current()
if (any(c in self.binary_sensor_conditions
for c in (TYPE_FREEZE_PROTECTION, TYPE_HOT_DAYS))
or TYPE_FREEZE_TEMP in self.sensor_conditions):
tasks[RESTRICTIONS_UNIVERSAL] = (
self.client.restrictions.universal())
results = await asyncio.gather(*tasks.values(), return_exceptions=True)
for operation, result in zip(tasks, results):
if isinstance(result, RainMachineError):
_LOGGER.error(
'There was an error while updating %s: %s', operation,
result)
continue
self.data[operation] = result
class RainMachineEntity(Entity):
"""Define a generic RainMachine entity."""
def __init__(self, rainmachine):
"""Initialize."""
self._attrs = {ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION}
self._dispatcher_handlers = []
self._name = None
self.rainmachine = rainmachine
@property
def device_info(self):
"""Return device registry information for this entity."""
return {
'identifiers': {
(DOMAIN, self.rainmachine.client.mac)
},
'name': self.rainmachine.client.name,
'manufacturer': 'RainMachine',
'model': 'Version {0} (API: {1})'.format(
self.rainmachine.client.hardware_version,
self.rainmachine.client.api_version),
'sw_version': self.rainmachine.client.software_version,
}
@property
def device_state_attributes(self) -> dict:
"""Return the state attributes."""
return self._attrs
@property
def name(self) -> str:
"""Return the name of the entity."""
return self._name
async def async_will_remove_from_hass(self):
"""Disconnect dispatcher listener when removed."""
for handler in self._dispatcher_handlers:
handler()
|
DavidLP/home-assistant
|
homeassistant/components/rainmachine/__init__.py
|
Python
|
apache-2.0
| 14,628
| 0
|
import unittest
import os
import os.path
import json
# The folder holding the test data
data_path = os.path.dirname(__file__)
# Set the temporal config for testing
os.environ['TIMEVIS_CONFIG'] = os.path.join(data_path, 'config.py')
import timevis
class TestExperiment(unittest.TestCase):
def setUp(self):
self.app = timevis.app.test_client()
self.url = '/api/v2/experiment'
def test_post(self):
name = os.path.join(data_path, 'post_exp.json')
with open(name) as file:
obj = json.load(file)
resp = self.app.post(self.url, data=json.dumps(obj),
content_type='application/json')
self.assertIsNotNone(resp.data)
def test_get(self):
resp = self.app.get(self.url)
self.assertIsNotNone(resp.data)
def test_put(self):
name = os.path.join(data_path, 'put_exp.json')
with open(name) as file:
obj = json.load(file)
resp = self.app.put(self.url, data=json.dumps(obj),
content_type='application/json')
self.assertIsNotNone(resp.data)
if __name__ == '__main__':
unittest.main()
|
gaoce/TimeVis
|
tests/test_api.py
|
Python
|
mit
| 1,185
| 0.001688
|
# -*- coding: utf-8 -*-
# Resource object code
#
# Created: Sun 21. Feb 22:22:07 2016
# by: The Resource Compiler for PyQt (Qt v4.8.5)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = "\
\x00\x00\x02\xf9\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x24\x00\x00\x00\x24\x08\x06\x00\x00\x00\xe1\x00\x98\x98\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xe0\x02\x15\x16\x0a\x03\xbc\xda\x23\x1d\x00\x00\x02\x86\x49\x44\
\x41\x54\x58\xc3\xed\x96\x4d\x48\x54\x51\x18\x86\x9f\x7b\x38\x77\
\xfe\xcc\x66\xb4\xd4\x85\x84\xa4\x44\x85\x8b\x1a\xc8\xc8\x08\x04\
\xa3\xa0\xa0\x36\x31\x42\x8b\xa0\x76\x45\x8b\xa4\x55\xfb\x68\xe3\
\x6e\x5a\xb5\x09\x72\x35\x38\xe0\x46\x83\xa0\x8d\x18\xd1\xb4\x30\
\x66\xa9\x26\x39\x3a\x15\x24\x42\x51\xa6\xe1\xfd\x99\xd3\xc2\x9a\
\x4a\x67\xbc\xf7\x72\xef\x8c\x2e\x7a\x97\x87\x7b\xbe\xfb\x9e\xf7\
\x7c\xdf\xc3\xd1\x72\xf9\x19\xf5\x70\x78\x0c\x21\x43\x68\xf6\x0f\
\x34\x55\xc2\x8b\x14\x02\x25\xa3\x94\x2c\x83\x3b\xd7\x2f\x73\xea\
\xf8\x11\x0d\x1f\x92\xe9\xe1\x31\x9a\x3a\x4f\x20\xcc\x15\xfa\x3b\
\xe1\x50\xd7\x41\x4f\x05\xe6\xe6\x17\x98\x78\x07\xb6\xbe\x87\xf4\
\xf0\x38\x7e\x25\x4b\x80\xd4\x43\xa8\x75\x8b\x8e\x03\x1d\xb4\xb7\
\xb7\x7b\x2a\x60\x18\x06\xcc\x2d\x22\xf5\x10\xb6\x52\xfe\x0d\x6d\
\x5e\xc8\xe7\xf3\x64\xb3\x59\xc7\x8d\x42\x08\x52\xa9\x14\xf1\x78\
\x9c\x20\xb5\xc5\x50\x32\x99\x24\x99\x4c\xba\x2e\x50\x28\x14\x6a\
\x6b\xe8\x7f\x42\x5e\x13\xba\x71\xeb\x2e\xee\xb0\x30\x43\x18\xb8\
\x36\xf8\x40\xf9\xc1\x82\x63\x42\xb7\xef\x3f\xae\x2b\x16\xca\x86\
\x94\x90\xcc\x2f\x14\xb1\x6d\xfb\x9f\x0f\xea\x8d\x85\xb2\x21\x11\
\x6d\xe6\xc5\xfb\xaf\xa8\xc5\x4f\xdb\x6e\xa8\x75\xd3\xff\xb9\x32\
\x4d\x43\x8b\x24\x70\xe2\x7e\xad\x9b\x5e\x7a\x9d\x82\xfa\x25\x04\
\xa8\xd5\x65\x9a\x8d\x02\x4d\x4d\x89\xf2\xda\xd2\x4e\x26\xa4\x6c\
\x83\xd4\xa5\x73\x34\xee\x8d\xb3\x6e\x98\x00\xe4\x66\x47\x77\x2e\
\xa1\x8d\x56\xd2\x78\x3a\x31\xc5\xe8\xf3\x1c\x00\x2d\xad\x2d\xdb\
\x26\xf4\xb6\xb8\x5c\x95\x53\x4f\xc6\x5f\x7b\xe6\x94\xeb\x1e\xaa\
\x86\x85\x74\x66\x32\x50\x4e\xb9\x36\x54\x0d\x0b\x41\x73\xaa\xa2\
\xa1\x86\x58\x84\xd6\x7d\xf1\x5f\x91\x7a\xc3\x82\xdf\x1e\x93\xaa\
\x54\x02\xa5\x40\xdb\xf8\x95\x69\x5a\xf4\xf5\x74\xd3\xd7\xd3\x0d\
\xc0\xbd\xf4\x88\xa7\x13\xfb\x9d\x42\x79\xb6\xf7\x18\x93\x53\x6f\
\x08\xc5\x1a\x11\xe6\x2a\x23\xa3\x33\x48\x5d\xaf\xd8\xf7\x6e\xb0\
\xe0\x3b\xa1\x9b\x57\x2f\x6c\x7b\x0b\x03\x83\x43\xca\x0b\x16\x7c\
\x27\xe4\x95\xd4\x4e\x58\xf0\x9d\x10\x01\xab\xee\x09\x79\xe5\x94\
\x93\x16\x8b\x1f\x41\xe8\xfe\x0c\x55\xc2\x82\xdb\xe7\xcb\x96\x16\
\x10\x21\xb4\x58\xc2\xbd\x21\xd7\x58\x70\xc9\x29\xdf\xcf\x0f\x2f\
\x58\x08\x42\x7e\x0f\xc4\xc0\xe0\x90\x6a\x3b\x7c\xba\x2a\xa7\x00\
\x56\xbe\xaf\xa1\x2a\x3c\x5f\x2d\xd3\xe0\x73\xa4\x0b\x11\xdb\xbf\
\xc1\xb4\xd9\x57\xc1\x1e\xaf\x12\xa7\xc2\x21\x9d\x68\x24\x8c\x94\
\x5b\x7f\x35\x3d\x3d\x4d\xe6\xe5\x87\xda\x8e\xfd\x66\x4e\x5d\x39\
\xdf\xcb\xc0\xc5\x33\xae\xf7\x0b\x76\x99\x76\x9d\x21\x59\x8b\xa2\
\x7f\x73\x2a\x16\x0d\xd7\xd7\x90\x13\xa7\x7e\xf7\x95\x73\x21\x85\
\xa6\x02\x18\xfb\x47\x99\x67\x6a\x72\x6a\xb6\xcc\xa9\x36\xf9\x65\
\x13\xa7\xaa\xcb\xb2\x2c\x96\xcc\x04\x25\xbd\x01\x63\xed\x1b\xfd\
\x27\x8f\xf2\x13\x0c\xc0\x8b\x69\x94\xd1\x9d\xcc\x00\x00\x00\x00\
\x49\x45\x4e\x44\xae\x42\x60\x82\
"
qt_resource_name = "\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x0f\
\x0f\x12\xef\x33\
\x00\x42\
\x00\x61\x00\x74\x00\x63\x00\x68\x00\x53\x00\x61\x00\x76\x00\x65\x00\x4c\x00\x61\x00\x79\x00\x65\x00\x72\x00\x73\
\x00\x08\
\x0a\x61\x5a\xa7\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct = "\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x38\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
|
rjspiers/qgis-batch-save-layers
|
resources.py
|
Python
|
gpl-2.0
| 4,288
| 0.001166
|
# ----------------------------------------------------------------------------------
# Electrum plugin for the Digital Bitbox hardware wallet by Shift Devices AG
# digitalbitbox.com
#
try:
import electrum_arg as electrum
from electrum_arg.bitcoin import TYPE_ADDRESS, var_int, msg_magic, Hash, verify_message, public_key_to_p2pkh, EncodeAES, DecodeAES
from electrum_arg.i18n import _
from electrum_arg.keystore import Hardware_KeyStore
from ..hw_wallet import HW_PluginBase
from electrum_arg.util import print_error
import time
import hid
import json
import math
import hashlib
from ecdsa.ecdsa import generator_secp256k1
from ecdsa.util import sigencode_der
DIGIBOX = True
except ImportError as e:
DIGIBOX = False
# ----------------------------------------------------------------------------------
# USB HID interface
#
class DigitalBitbox_Client():
def __init__(self, hidDevice):
self.dbb_hid = hidDevice
self.opened = True
self.password = None
self.isInitialized = False
self.setupRunning = False
self.hidBufSize = 4096
def close(self):
if self.opened:
try:
self.dbb_hid.close()
except:
pass
self.opened = False
def timeout(self, cutoff):
pass
def label(self):
return " "
def is_pairable(self):
return True
def is_initialized(self):
return self.dbb_has_password()
def is_paired(self):
return self.password is not None
def get_xpub(self, bip32_path):
if self.check_device_dialog():
msg = '{"xpub":"' + bip32_path + '"}'
reply = self.hid_send_encrypt(msg)
return reply['xpub']
return None
def dbb_has_password(self):
reply = self.hid_send_plain('{"ping":""}')
if 'ping' not in reply:
raise Exception('Device communication error. Please unplug and replug your Digital Bitbox.')
if reply['ping'] == 'password':
return True
return False
def stretch_key(self, key):
import pbkdf2, hmac
return pbkdf2.PBKDF2(key, 'Digital Bitbox', iterations = 20480, macmodule = hmac, digestmodule = hashlib.sha512).read(64).encode('hex')
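    # Note (illustrative only, not used by the plugin): on Python 2.7.8+ the
    # same 64-byte stretch can be written with the standard library as
    #   hashlib.pbkdf2_hmac('sha512', key, 'Digital Bitbox', 20480,
    #                       dklen=64).encode('hex')
    # i.e. PBKDF2-HMAC-SHA512 with a fixed salt and 20480 iterations.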
def backup_password_dialog(self):
msg = _("Enter the password used when the backup was created:")
while True:
password = self.handler.get_passphrase(msg, False)
if password is None:
return None
if len(password) < 4:
msg = _("Password must have at least 4 characters.\r\n\r\nEnter password:")
elif len(password) > 64:
msg = _("Password must have less than 64 characters.\r\n\r\nEnter password:")
else:
return str(password)
def password_dialog(self, msg):
while True:
password = self.handler.get_passphrase(msg, False)
if password is None:
return False
if len(password) < 4:
msg = _("Password must have at least 4 characters.\r\n\r\nEnter password:")
elif len(password) > 64:
msg = _("Password must have less than 64 characters.\r\n\r\nEnter password:")
else:
self.password = str(password)
return True
def check_device_dialog(self):
# Set password if fresh device
if self.password is None and not self.dbb_has_password():
if not self.setupRunning:
return False # A fresh device cannot connect to an existing wallet
msg = _("An uninitialized Digital Bitbox is detected. " \
"Enter a new password below.\r\n\r\n REMEMBER THE PASSWORD!\r\n\r\n" \
"You cannot access your coins or a backup without the password.\r\n" \
"A backup is saved automatically when generating a new wallet.")
if self.password_dialog(msg):
reply = self.hid_send_plain('{"password":"' + self.password + '"}')
else:
return False
# Get password from user if not yet set
msg = _("Enter your Digital Bitbox password:")
while self.password is None:
if not self.password_dialog(msg):
return False
reply = self.hid_send_encrypt('{"led":"blink"}')
if 'error' in reply:
self.password = None
if reply['error']['code'] == 109:
msg = _("Incorrect password entered.\r\n\r\n" \
+ reply['error']['message'] + "\r\n\r\n" \
"Enter your Digital Bitbox password:")
else:
# Should never occur
msg = _("Unexpected error occurred.\r\n\r\n" \
+ reply['error']['message'] + "\r\n\r\n" \
"Enter your Digital Bitbox password:")
# Initialize device if not yet initialized
if not self.setupRunning:
self.isInitialized = True # Wallet exists. Electrum code later checks if the device matches the wallet
elif not self.isInitialized:
reply = self.hid_send_encrypt('{"device":"info"}')
            if reply['device']['id'] != "":
self.recover_or_erase_dialog() # Already seeded
else:
self.seed_device_dialog() # Seed if not initialized
return self.isInitialized
def recover_or_erase_dialog(self):
msg = _("The Digital Bitbox is already seeded. Choose an option:\n")
choices = [
(_("Create a wallet using the current seed")),
(_("Load a wallet from the micro SD card (the current seed is overwritten)")),
(_("Erase the Digital Bitbox"))
]
try:
reply = self.handler.win.query_choice(msg, choices)
except Exception:
return # Back button pushed
if reply == 2:
self.dbb_erase()
elif reply == 1:
if not self.dbb_load_backup():
return
else:
pass # Use existing seed
self.isInitialized = True
def seed_device_dialog(self):
msg = _("Choose how to initialize your Digital Bitbox:\n")
choices = [
(_("Generate a new random wallet")),
(_("Load a wallet from the micro SD card"))
]
try:
reply = self.handler.win.query_choice(msg, choices)
except Exception:
return # Back button pushed
if reply == 0:
self.dbb_generate_wallet()
else:
if not self.dbb_load_backup(show_msg=False):
return
self.isInitialized = True
def dbb_generate_wallet(self):
key = self.stretch_key(self.password)
filename = "Electrum-" + time.strftime("%Y-%m-%d-%H-%M-%S") + ".pdf"
msg = '{"seed":{"source": "create", "key": "%s", "filename": "%s", "entropy": "%s"}}' % (key, filename, 'Digital Bitbox Electrum Plugin')
reply = self.hid_send_encrypt(msg)
if 'error' in reply:
raise Exception(reply['error']['message'])
def dbb_erase(self):
self.handler.show_message(_("Are you sure you want to erase the Digital Bitbox?\r\n\r\n" \
"To continue, touch the Digital Bitbox's light for 3 seconds.\r\n\r\n" \
"To cancel, briefly touch the light or wait for the timeout."))
hid_reply = self.hid_send_encrypt('{"reset":"__ERASE__"}')
self.handler.clear_dialog()
if 'error' in hid_reply:
raise Exception(hid_reply['error']['message'])
else:
self.password = None
raise Exception('Device erased')
def dbb_load_backup(self, show_msg=True):
backups = self.hid_send_encrypt('{"backup":"list"}')
if 'error' in backups:
raise Exception(backups['error']['message'])
try:
f = self.handler.win.query_choice(_("Choose a backup file:"), backups['backup'])
except Exception:
return False # Back button pushed
key = self.backup_password_dialog()
if key is None:
raise Exception('Canceled by user')
key = self.stretch_key(key)
if show_msg:
self.handler.show_message(_("Loading backup...\r\n\r\n" \
"To continue, touch the Digital Bitbox's light for 3 seconds.\r\n\r\n" \
"To cancel, briefly touch the light or wait for the timeout."))
msg = '{"seed":{"source": "backup", "key": "%s", "filename": "%s"}}' % (key, backups['backup'][f])
hid_reply = self.hid_send_encrypt(msg)
self.handler.clear_dialog()
if 'error' in hid_reply:
raise Exception(hid_reply['error']['message'])
return True
def hid_send_plain(self, msg):
reply = ""
try:
self.dbb_hid.write('\0' + bytearray(msg) + '\0' * (self.hidBufSize - len(msg)))
r = []
while len(r) < self.hidBufSize:
r = r + self.dbb_hid.read(self.hidBufSize)
r = str(bytearray(r)).rstrip(' \t\r\n\0')
r = r.replace("\0", '')
reply = json.loads(r)
except Exception as e:
print_error('Exception caught ' + str(e))
return reply
def hid_send_encrypt(self, msg):
reply = ""
try:
secret = Hash(self.password)
msg = EncodeAES(secret, msg)
reply = self.hid_send_plain(msg)
if 'ciphertext' in reply:
reply = DecodeAES(secret, ''.join(reply["ciphertext"]))
reply = json.loads(reply)
if 'error' in reply:
self.password = None
except Exception as e:
print_error('Exception caught ' + str(e))
return reply
# ----------------------------------------------------------------------------------
#
#
class DigitalBitbox_KeyStore(Hardware_KeyStore):
hw_type = 'digitalbitbox'
device = 'DigitalBitbox'
def __init__(self, d):
Hardware_KeyStore.__init__(self, d)
self.force_watching_only = False
self.maxInputs = 14 # maximum inputs per single sign command
def get_derivation(self):
return str(self.derivation)
def give_error(self, message, clear_client = False):
if clear_client:
self.client = None
raise Exception(message)
def decrypt_message(self, pubkey, message, password):
raise RuntimeError(_('Encryption and decryption are currently not supported for %s') % self.device)
def sign_message(self, sequence, message, password):
sig = None
try:
inputPath = self.get_derivation() + "/%d/%d" % sequence
inputHash = Hash(msg_magic(message)).encode('hex')
hasharray = []
hasharray.append({'hash': inputHash, 'keypath': inputPath})
hasharray = json.dumps(hasharray)
msg = '{"sign":{"meta":"sign message", "data":%s}}' % (hasharray)
dbb_client = self.plugin.get_client(self)
if not dbb_client.is_paired():
raise Exception("Could not sign message.")
reply = dbb_client.hid_send_encrypt(msg)
self.handler.show_message(_("Signing message ...\r\n\r\n" \
"To continue, touch the Digital Bitbox's blinking light for 3 seconds.\r\n\r\n" \
"To cancel, briefly touch the blinking light or wait for the timeout."))
reply = dbb_client.hid_send_encrypt(msg) # Send twice, first returns an echo for smart verification (not implemented)
self.handler.clear_dialog()
if 'error' in reply:
raise Exception(reply['error']['message'])
if 'sign' not in reply:
raise Exception("Could not sign message.")
for i in range(4):
sig = chr(27 + i + 4) + reply['sign'][0]['sig'].decode('hex')
try:
addr = public_key_to_p2pkh(reply['sign'][0]['pubkey'].decode('hex'))
if verify_message(addr, sig, message):
break
except Exception:
continue
else:
raise Exception("Could not sign message")
except BaseException as e:
self.give_error(e)
return sig
def sign_transaction(self, tx, password):
if tx.is_complete():
return
try:
p2shTransaction = False
derivations = self.get_tx_derivations(tx)
hasharray = []
pubkeyarray = []
# Build hasharray from inputs
for i, txin in enumerate(tx.inputs()):
if txin['type'] == 'coinbase':
self.give_error("Coinbase not supported") # should never happen
if txin['type'] in ['p2sh']:
p2shTransaction = True
for x_pubkey in txin['x_pubkeys']:
if x_pubkey in derivations:
index = derivations.get(x_pubkey)
inputPath = "%s/%d/%d" % (self.get_derivation(), index[0], index[1])
inputHash = Hash(tx.serialize_preimage(i).decode('hex')).encode('hex')
hasharray_i = {'hash': inputHash, 'keypath': inputPath}
hasharray.append(hasharray_i)
break
else:
self.give_error("No matching x_key for sign_transaction") # should never happen
# Sanity check
if p2shTransaction:
for txinput in tx.inputs():
if txinput['type'] != 'p2sh':
self.give_error("P2SH / regular input mixed in same transaction not supported") # should never happen
# Build pubkeyarray from outputs (unused because echo for smart verification not implemented)
if not p2shTransaction:
for _type, address, amount in tx.outputs():
assert _type == TYPE_ADDRESS
info = tx.output_info.get(address)
if info is not None:
index, xpubs, m = info
changePath = self.get_derivation() + "/%d/%d" % index
changePubkey = self.derive_pubkey(index[0], index[1])
pubkeyarray_i = {'pubkey': changePubkey, 'keypath': changePath}
pubkeyarray.append(pubkeyarray_i)
# Build sign command
dbb_signatures = []
steps = math.ceil(1.0 * len(hasharray) / self.maxInputs)
for step in range(int(steps)):
hashes = hasharray[step * self.maxInputs : (step + 1) * self.maxInputs]
msg = '{"sign": {"meta":"%s", "data":%s, "checkpub":%s} }' % \
(Hash(tx.serialize()).encode('hex'), json.dumps(hashes), json.dumps(pubkeyarray))
dbb_client = self.plugin.get_client(self)
if not dbb_client.is_paired():
raise Exception("Could not sign transaction.")
reply = dbb_client.hid_send_encrypt(msg)
if 'error' in reply:
raise Exception(reply['error']['message'])
if 'echo' not in reply:
raise Exception("Could not sign transaction.")
if steps > 1:
self.handler.show_message(_("Signing large transaction. Please be patient ...\r\n\r\n" \
"To continue, touch the Digital Bitbox's blinking light for 3 seconds. " \
"(Touch " + str(step + 1) + " of " + str(int(steps)) + ")\r\n\r\n" \
"To cancel, briefly touch the blinking light or wait for the timeout.\r\n\r\n"))
else:
self.handler.show_message(_("Signing transaction ...\r\n\r\n" \
"To continue, touch the Digital Bitbox's blinking light for 3 seconds.\r\n\r\n" \
"To cancel, briefly touch the blinking light or wait for the timeout."))
reply = dbb_client.hid_send_encrypt(msg) # Send twice, first returns an echo for smart verification (not implemented)
self.handler.clear_dialog()
if 'error' in reply:
raise Exception(reply['error']['message'])
if 'sign' not in reply:
raise Exception("Could not sign transaction.")
dbb_signatures.extend(reply['sign'])
# Fill signatures
            if len(dbb_signatures) != len(tx.inputs()):
raise Exception("Incorrect number of transactions signed.") # Should never occur
for i, txin in enumerate(tx.inputs()):
num = txin['num_sig']
for pubkey in txin['pubkeys']:
signatures = filter(None, txin['signatures'])
if len(signatures) == num:
break # txin is complete
ii = txin['pubkeys'].index(pubkey)
signed = dbb_signatures[i]
if signed['pubkey'] != pubkey:
continue
sig_r = int(signed['sig'][:64], 16)
sig_s = int(signed['sig'][64:], 16)
sig = sigencode_der(sig_r, sig_s, generator_secp256k1.order())
txin['signatures'][ii] = sig.encode('hex')
tx._inputs[i] = txin
except BaseException as e:
self.give_error(e, True)
else:
print_error("Transaction is_complete", tx.is_complete())
tx.raw = tx.serialize()
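# Illustrative note (not part of the original plugin): the device accepts at most
# self.maxInputs input hashes per "sign" request, so sign_transaction above splits
# hasharray into ceil(len(hasharray) / maxInputs) batches. For each batch the same
# encrypted JSON command is sent twice: the first reply only carries an 'echo'
# (reserved for smart verification), the second carries the 'sign' list. Assuming
# maxInputs were 5, a 12-input transaction would produce three commands shaped
# roughly like:
#   {"sign": {"meta": "<hash of the serialized tx>",
#             "data": [{"hash": "<input preimage hash>", "keypath": "<derivation>/0/3"}, ...],
#             "checkpub": [...]}}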
class DigitalBitboxPlugin(HW_PluginBase):
libraries_available = DIGIBOX
keystore_class = DigitalBitbox_KeyStore
client = None
DEVICE_IDS = [
(0x03eb, 0x2402) # Digital Bitbox
]
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
if self.libraries_available:
self.device_manager().register_devices(self.DEVICE_IDS)
def get_dbb_device(self, device):
dev = hid.device()
dev.open_path(device.path)
return dev
def create_client(self, device, handler):
self.handler = handler
client = self.get_dbb_device(device)
        if client is not None:
client = DigitalBitbox_Client(client)
return client
def setup_device(self, device_info, wizard):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
client.handler = self.create_handler(wizard)
client.setupRunning = True
client.get_xpub("m/44'/0'")
def get_xpub(self, device_id, derivation, wizard):
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = self.create_handler(wizard)
client.check_device_dialog()
xpub = client.get_xpub(derivation)
return xpub
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
        if client is not None:
client.check_device_dialog()
return client
|
argentumproject/electrum-arg
|
plugins/digitalbitbox/digitalbitbox.py
|
Python
|
mit
| 20,399
| 0.008285
|
__author__ = 'dimitris'
import os
# Flask Configuration
basedir = os.path.abspath(os.path.dirname(__file__))
SECRET_KEY = 'knaskndfknasdfiaosifoaignaosdnfoasodfnaodgnas'
PREFERRED_URL_SCHEME = 'https'
#SqlAlchemy Configuration
DB_NAME = 'puzzles.db'
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, DB_NAME)
#Cache Configuration
CACHE_TYPE = 'simple'
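# Usage sketch (assumption, not part of this configuration module): a Flask app
# would typically pick these settings up with Flask's standard config loaders,
# for example:
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.config.from_object('Application.app_configuration')
#   # or, given a path to this file:
#   # app.config.from_pyfile('app_configuration.py')
#
# The dotted module path above is an assumption based on this file's location.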
|
gdimitris/ChessPuzzlerBackend
|
Application/app_configuration.py
|
Python
|
mit
| 368
| 0.008152
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from setuptools import setup
from glob import glob
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Topic :: Scientific/Engineering :: Bio-Informatics',
]
exec(open("goatools/version.py").read())
setup(
name="goatools",
version=__version__,
author='Haibao Tang',
author_email='tanghaibao@gmail.com',
packages=['goatools'],
scripts=glob('scripts/*.py'),
license='BSD',
classifiers=classifiers,
url='http://github.com/tanghaibao/goatools',
description="Python scripts to find enrichment of GO terms",
long_description=open("README.rst").read(),
install_requires=['fisher', 'xlsxwriter', 'statsmodels']
)
|
fidelram/goatools
|
setup.py
|
Python
|
bsd-2-clause
| 889
| 0
|
from StringIO import StringIO
import textwrap
import importer
def test_import_csv():
current = StringIO(textwrap.dedent('''\
status,qty,type,transaction_date,posting_date,description,amount
A,,,2016/11/02,,This is a test,$4.53
'''))
new = StringIO(textwrap.dedent('''\
"Trans Date", "Summary", "Amount"
5/2/2007, Regal Theaters, $15.99
11/2/2016, This is a test , $4.53
5/2/2007, Regal Theaters, $15.99
'''))
mapping = {
'Trans Date': 'transaction_date',
'Summary': 'description',
'Amount': 'amount'
}
importer.save_csv(current, new, mapping, '%m/%d/%Y')
lines = current.getvalue().splitlines()
assert lines[0].rstrip() == 'status,qty,type,transaction_date,posting_date,description,amount'
assert lines[1].rstrip() == 'N,2,,2007/05/02,,Regal Theaters,$15.99'
assert lines[2].rstrip() == 'A,,,2016/11/02,,This is a test,$4.53'
assert len(lines) == 3
|
kalafut/go-ledger
|
importer_test.py
|
Python
|
mit
| 1,021
| 0.001959
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import math
import re
import time
from _common import ceiling
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.osv import osv, fields, expression
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
import psycopg2
import openerp.addons.decimal_precision as dp
from openerp.tools.float_utils import float_round, float_compare
def ean_checksum(eancode):
"""returns the checksum of an ean string of length 13, returns -1 if the string has the wrong length"""
if len(eancode) != 13:
return -1
oddsum=0
evensum=0
total=0
eanvalue=eancode
reversevalue = eanvalue[::-1]
finalean=reversevalue[1:]
for i in range(len(finalean)):
if i % 2 == 0:
oddsum += int(finalean[i])
else:
evensum += int(finalean[i])
total=(oddsum * 3) + evensum
check = int(10 - math.ceil(total % 10.0)) %10
return check
def check_ean(eancode):
"""returns True if eancode is a valid ean13 string, or null"""
if not eancode:
return True
if len(eancode) != 13:
return False
try:
int(eancode)
except:
return False
return ean_checksum(eancode) == int(eancode[-1])
def sanitize_ean13(ean13):
"""Creates and returns a valid ean13 from an invalid one"""
if not ean13:
return "0000000000000"
ean13 = re.sub("[A-Za-z]","0",ean13);
ean13 = re.sub("[^0-9]","",ean13);
ean13 = ean13[:13]
if len(ean13) < 13:
ean13 = ean13 + '0' * (13-len(ean13))
return ean13[:-1] + str(ean_checksum(ean13))
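# Worked example (illustrative, not part of the original module): for the valid
# EAN-13 "4006381333931" the weighted sum over the first twelve digits is 89, so
#   ean_checksum("4006381333931") == (10 - 89 % 10) % 10 == 1
# which matches the final digit, hence check_ean("4006381333931") returns True.
# sanitize_ean13() on a short or letter-containing code first maps letters to "0",
# strips the remaining non-digits, zero-pads to 13 characters and then overwrites
# the last digit with a freshly computed check digit.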
#----------------------------------------------------------
# UOM
#----------------------------------------------------------
class product_uom_categ(osv.osv):
_name = 'product.uom.categ'
_description = 'Product uom categ'
_columns = {
'name': fields.char('Name', required=True, translate=True),
}
class product_uom(osv.osv):
_name = 'product.uom'
_description = 'Product Unit of Measure'
def _compute_factor_inv(self, factor):
return factor and (1.0 / factor) or 0.0
def _factor_inv(self, cursor, user, ids, name, arg, context=None):
res = {}
for uom in self.browse(cursor, user, ids, context=context):
res[uom.id] = self._compute_factor_inv(uom.factor)
return res
def _factor_inv_write(self, cursor, user, id, name, value, arg, context=None):
return self.write(cursor, user, id, {'factor': self._compute_factor_inv(value)}, context=context)
def name_create(self, cr, uid, name, context=None):
""" The UoM category and factor are required, so we'll have to add temporary values
for imported UoMs """
uom_categ = self.pool.get('product.uom.categ')
# look for the category based on the english name, i.e. no context on purpose!
# TODO: should find a way to have it translated but not created until actually used
categ_misc = 'Unsorted/Imported Units'
categ_id = uom_categ.search(cr, uid, [('name', '=', categ_misc)])
if categ_id:
categ_id = categ_id[0]
else:
categ_id, _ = uom_categ.name_create(cr, uid, categ_misc)
uom_id = self.create(cr, uid, {self._rec_name: name,
'category_id': categ_id,
'factor': 1})
return self.name_get(cr, uid, [uom_id], context=context)[0]
def create(self, cr, uid, data, context=None):
if 'factor_inv' in data:
if data['factor_inv'] != 1:
data['factor'] = self._compute_factor_inv(data['factor_inv'])
del(data['factor_inv'])
return super(product_uom, self).create(cr, uid, data, context)
_order = "name"
_columns = {
'name': fields.char('Unit of Measure', required=True, translate=True),
'category_id': fields.many2one('product.uom.categ', 'Unit of Measure Category', required=True, ondelete='cascade',
help="Conversion between Units of Measure can only occur if they belong to the same category. The conversion will be made based on the ratios."),
'factor': fields.float('Ratio', required=True, digits=0, # force NUMERIC with unlimited precision
help='How much bigger or smaller this unit is compared to the reference Unit of Measure for this category:\n'\
'1 * (reference unit) = ratio * (this unit)'),
'factor_inv': fields.function(_factor_inv, digits=0, # force NUMERIC with unlimited precision
fnct_inv=_factor_inv_write,
string='Bigger Ratio',
help='How many times this Unit of Measure is bigger than the reference Unit of Measure in this category:\n'\
'1 * (this unit) = ratio * (reference unit)', required=True),
'rounding': fields.float('Rounding Precision', digits=0, required=True,
help="The computed quantity will be a multiple of this value. "\
"Use 1.0 for a Unit of Measure that cannot be further split, such as a piece."),
'active': fields.boolean('Active', help="By unchecking the active field you can disable a unit of measure without deleting it."),
'uom_type': fields.selection([('bigger','Bigger than the reference Unit of Measure'),
('reference','Reference Unit of Measure for this category'),
('smaller','Smaller than the reference Unit of Measure')],'Type', required=1),
}
    _defaults = {
        'active': 1,
        'rounding': 0.01,
        'factor': 1.0,
        'uom_type': 'reference',
    }
_sql_constraints = [
('factor_gt_zero', 'CHECK (factor!=0)', 'The conversion ratio for a unit of measure cannot be 0!')
]
def _compute_qty(self, cr, uid, from_uom_id, qty, to_uom_id=False, round=True, rounding_method='UP'):
if not from_uom_id or not qty or not to_uom_id:
return qty
uoms = self.browse(cr, uid, [from_uom_id, to_uom_id])
if uoms[0].id == from_uom_id:
from_unit, to_unit = uoms[0], uoms[-1]
else:
from_unit, to_unit = uoms[-1], uoms[0]
return self._compute_qty_obj(cr, uid, from_unit, qty, to_unit, round=round, rounding_method=rounding_method)
def _compute_qty_obj(self, cr, uid, from_unit, qty, to_unit, round=True, rounding_method='UP', context=None):
if context is None:
context = {}
if from_unit.category_id.id != to_unit.category_id.id:
if context.get('raise-exception', True):
                raise osv.except_osv(_('Error!'), _('Conversion from Product UoM %s to Default UoM %s is not possible as they belong to different categories.') % (from_unit.name, to_unit.name,))
else:
return qty
amount = qty/from_unit.factor
if to_unit:
amount = amount * to_unit.factor
if round:
amount = float_round(amount, precision_rounding=to_unit.rounding, rounding_method=rounding_method)
return amount
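    # Worked example (illustrative factors, not shipped data): with "Unit(s)" as the
    # category reference (factor 1.0) and "Dozen(s)" with factor 1.0 / 12
    # (i.e. factor_inv == 12), converting qty = 2 from Dozen(s) to Unit(s) gives
    #   amount = 2 / (1.0 / 12) * 1.0 == 24.0
    # before the optional float_round() against the target UoM's rounding precision.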
def _compute_price(self, cr, uid, from_uom_id, price, to_uom_id=False):
if (not from_uom_id or not price or not to_uom_id
or (to_uom_id == from_uom_id)):
return price
from_unit, to_unit = self.browse(cr, uid, [from_uom_id, to_uom_id])
if from_unit.category_id.id != to_unit.category_id.id:
return price
amount = price * from_unit.factor
if to_uom_id:
amount = amount / to_unit.factor
return amount
def onchange_type(self, cursor, user, ids, value):
if value == 'reference':
return {'value': {'factor': 1, 'factor_inv': 1}}
return {}
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
if 'category_id' in vals:
for uom in self.browse(cr, uid, ids, context=context):
if uom.category_id.id != vals['category_id']:
raise osv.except_osv(_('Warning!'),_("Cannot change the category of existing Unit of Measure '%s'.") % (uom.name,))
return super(product_uom, self).write(cr, uid, ids, vals, context=context)
class product_ul(osv.osv):
_name = "product.ul"
_description = "Logistic Unit"
_columns = {
'name' : fields.char('Name', select=True, required=True, translate=True),
'type' : fields.selection([('unit','Unit'),('pack','Pack'),('box', 'Box'), ('pallet', 'Pallet')], 'Type', required=True),
'height': fields.float('Height', help='The height of the package'),
'width': fields.float('Width', help='The width of the package'),
'length': fields.float('Length', help='The length of the package'),
'weight': fields.float('Empty Package Weight'),
}
#----------------------------------------------------------
# Categories
#----------------------------------------------------------
class product_category(osv.osv):
def name_get(self, cr, uid, ids, context=None):
if isinstance(ids, (list, tuple)) and not len(ids):
return []
if isinstance(ids, (long, int)):
ids = [ids]
reads = self.read(cr, uid, ids, ['name','parent_id'], context=context)
res = []
for record in reads:
name = record['name']
if record['parent_id']:
name = record['parent_id'][1]+' / '+name
res.append((record['id'], name))
return res
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
if not context:
context = {}
if name:
            # Be sure name_search is symmetric to name_get
name = name.split(' / ')[-1]
ids = self.search(cr, uid, [('name', operator, name)] + args, limit=limit, context=context)
else:
ids = self.search(cr, uid, args, limit=limit, context=context)
return self.name_get(cr, uid, ids, context)
def _name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
res = self.name_get(cr, uid, ids, context=context)
return dict(res)
_name = "product.category"
_description = "Product Category"
_columns = {
'name': fields.char('Name', required=True, translate=True, select=True),
'complete_name': fields.function(_name_get_fnc, type="char", string='Name'),
'parent_id': fields.many2one('product.category','Parent Category', select=True, ondelete='cascade'),
'child_id': fields.one2many('product.category', 'parent_id', string='Child Categories'),
'sequence': fields.integer('Sequence', select=True, help="Gives the sequence order when displaying a list of product categories."),
'type': fields.selection([('view','View'), ('normal','Normal')], 'Category Type', help="A category of the view type is a virtual category that can be used as the parent of another category to create a hierarchical structure."),
'parent_left': fields.integer('Left Parent', select=1),
'parent_right': fields.integer('Right Parent', select=1),
}
_defaults = {
'type' : 'normal',
}
_parent_name = "parent_id"
_parent_store = True
_parent_order = 'sequence, name'
_order = 'parent_left'
_constraints = [
(osv.osv._check_recursion, 'Error ! You cannot create recursive categories.', ['parent_id'])
]
class produce_price_history(osv.osv):
"""
Keep track of the ``product.template`` standard prices as they are changed.
"""
_name = 'product.price.history'
_rec_name = 'datetime'
_order = 'datetime desc'
_columns = {
'company_id': fields.many2one('res.company', required=True),
'product_template_id': fields.many2one('product.template', 'Product Template', required=True, ondelete='cascade'),
'datetime': fields.datetime('Historization Time'),
'cost': fields.float('Historized Cost'),
}
def _get_default_company(self, cr, uid, context=None):
if 'force_company' in context:
return context['force_company']
else:
company = self.pool['res.users'].browse(cr, uid, uid,
context=context).company_id
return company.id if company else False
_defaults = {
'datetime': fields.datetime.now,
'company_id': _get_default_company,
}
#----------------------------------------------------------
# Product Attributes
#----------------------------------------------------------
class product_attribute(osv.osv):
_name = "product.attribute"
_description = "Product Attribute"
_columns = {
'name': fields.char('Name', translate=True, required=True),
'value_ids': fields.one2many('product.attribute.value', 'attribute_id', 'Values', copy=True),
}
class product_attribute_value(osv.osv):
_name = "product.attribute.value"
_order = 'sequence'
def _get_price_extra(self, cr, uid, ids, name, args, context=None):
result = dict.fromkeys(ids, 0)
if not context.get('active_id'):
return result
for obj in self.browse(cr, uid, ids, context=context):
for price_id in obj.price_ids:
if price_id.product_tmpl_id.id == context.get('active_id'):
result[obj.id] = price_id.price_extra
break
return result
def _set_price_extra(self, cr, uid, id, name, value, args, context=None):
if context is None:
context = {}
if 'active_id' not in context:
return None
p_obj = self.pool['product.attribute.price']
p_ids = p_obj.search(cr, uid, [('value_id', '=', id), ('product_tmpl_id', '=', context['active_id'])], context=context)
if p_ids:
p_obj.write(cr, uid, p_ids, {'price_extra': value}, context=context)
else:
p_obj.create(cr, uid, {
'product_tmpl_id': context['active_id'],
'value_id': id,
'price_extra': value,
}, context=context)
def name_get(self, cr, uid, ids, context=None):
if context and not context.get('show_attribute', True):
return super(product_attribute_value, self).name_get(cr, uid, ids, context=context)
res = []
for value in self.browse(cr, uid, ids, context=context):
res.append([value.id, "%s: %s" % (value.attribute_id.name, value.name)])
return res
_columns = {
'sequence': fields.integer('Sequence', help="Determine the display order"),
'name': fields.char('Value', translate=True, required=True),
'attribute_id': fields.many2one('product.attribute', 'Attribute', required=True, ondelete='cascade'),
'product_ids': fields.many2many('product.product', id1='att_id', id2='prod_id', string='Variants', readonly=True),
'price_extra': fields.function(_get_price_extra, type='float', string='Attribute Price Extra',
fnct_inv=_set_price_extra,
digits_compute=dp.get_precision('Product Price'),
help="Price Extra: Extra price for the variant with this attribute value on sale price. eg. 200 price extra, 1000 + 200 = 1200."),
'price_ids': fields.one2many('product.attribute.price', 'value_id', string='Attribute Prices', readonly=True),
}
_sql_constraints = [
('value_company_uniq', 'unique (name,attribute_id)', 'This attribute value already exists !')
]
_defaults = {
'price_extra': 0.0,
}
def unlink(self, cr, uid, ids, context=None):
ctx = dict(context or {}, active_test=False)
product_ids = self.pool['product.product'].search(cr, uid, [('attribute_value_ids', 'in', ids)], context=ctx)
if product_ids:
            raise osv.except_osv(_('Integrity Error!'), _('The operation cannot be completed:\nYou are trying to delete an attribute value that is still referenced by a product variant.'))
return super(product_attribute_value, self).unlink(cr, uid, ids, context=context)
class product_attribute_price(osv.osv):
_name = "product.attribute.price"
_columns = {
'product_tmpl_id': fields.many2one('product.template', 'Product Template', required=True, ondelete='cascade'),
'value_id': fields.many2one('product.attribute.value', 'Product Attribute Value', required=True, ondelete='cascade'),
'price_extra': fields.float('Price Extra', digits_compute=dp.get_precision('Product Price')),
}
class product_attribute_line(osv.osv):
_name = "product.attribute.line"
_rec_name = 'attribute_id'
_columns = {
'product_tmpl_id': fields.many2one('product.template', 'Product Template', required=True, ondelete='cascade'),
'attribute_id': fields.many2one('product.attribute', 'Attribute', required=True, ondelete='restrict'),
'value_ids': fields.many2many('product.attribute.value', id1='line_id', id2='val_id', string='Product Attribute Value'),
}
#----------------------------------------------------------
# Products
#----------------------------------------------------------
class product_template(osv.osv):
_name = "product.template"
_inherit = ['mail.thread']
_description = "Product Template"
_order = "name"
def _get_image(self, cr, uid, ids, name, args, context=None):
result = dict.fromkeys(ids, False)
for obj in self.browse(cr, uid, ids, context=context):
result[obj.id] = tools.image_get_resized_images(obj.image, avoid_resize_medium=True)
return result
def _set_image(self, cr, uid, id, name, value, args, context=None):
return self.write(cr, uid, [id], {'image': tools.image_resize_image_big(value)}, context=context)
def _is_product_variant(self, cr, uid, ids, name, arg, context=None):
return self._is_product_variant_impl(cr, uid, ids, name, arg, context=context)
def _is_product_variant_impl(self, cr, uid, ids, name, arg, context=None):
return dict.fromkeys(ids, False)
def _product_template_price(self, cr, uid, ids, name, arg, context=None):
plobj = self.pool.get('product.pricelist')
res = {}
quantity = context.get('quantity') or 1.0
pricelist = context.get('pricelist', False)
partner = context.get('partner', False)
if pricelist:
# Support context pricelists specified as display_name or ID for compatibility
if isinstance(pricelist, basestring):
pricelist_ids = plobj.name_search(
cr, uid, pricelist, operator='=', context=context, limit=1)
pricelist = pricelist_ids[0][0] if pricelist_ids else pricelist
if isinstance(pricelist, (int, long)):
products = self.browse(cr, uid, ids, context=context)
qtys = map(lambda x: (x, quantity, partner), products)
pl = plobj.browse(cr, uid, pricelist, context=context)
price = plobj._price_get_multi(cr,uid, pl, qtys, context=context)
for id in ids:
res[id] = price.get(id, 0.0)
for id in ids:
res.setdefault(id, 0.0)
return res
def get_history_price(self, cr, uid, product_tmpl, company_id, date=None, context=None):
if context is None:
context = {}
if date is None:
date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
price_history_obj = self.pool.get('product.price.history')
history_ids = price_history_obj.search(cr, uid, [('company_id', '=', company_id), ('product_template_id', '=', product_tmpl), ('datetime', '<=', date)], limit=1)
if history_ids:
return price_history_obj.read(cr, uid, history_ids[0], ['cost'], context=context)['cost']
return 0.0
def _set_standard_price(self, cr, uid, product_tmpl_id, value, context=None):
''' Store the standard price change in order to be able to retrieve the cost of a product template for a given date'''
if context is None:
context = {}
price_history_obj = self.pool['product.price.history']
user_company = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
company_id = context.get('force_company', user_company)
price_history_obj.create(cr, uid, {
'product_template_id': product_tmpl_id,
'cost': value,
'company_id': company_id,
}, context=context)
def _get_product_variant_count(self, cr, uid, ids, name, arg, context=None):
res = {}
for product in self.browse(cr, uid, ids, context=context):
res[product.id] = len(product.product_variant_ids)
return res
_columns = {
'name': fields.char('Name', required=True, translate=True, select=True),
'product_manager': fields.many2one('res.users','Product Manager'),
'description': fields.text('Description',translate=True,
help="A precise description of the Product, used only for internal information purposes."),
'description_purchase': fields.text('Purchase Description',translate=True,
help="A description of the Product that you want to communicate to your suppliers. "
"This description will be copied to every Purchase Order, Receipt and Supplier Invoice/Refund."),
'description_sale': fields.text('Sale Description',translate=True,
help="A description of the Product that you want to communicate to your customers. "
"This description will be copied to every Sale Order, Delivery Order and Customer Invoice/Refund"),
        'type': fields.selection([('consu', 'Consumable'),('service','Service')], 'Product Type', required=True, help="A consumable is a product for which you don't manage stock; a service is a non-material product provided by a company or an individual."),
'rental': fields.boolean('Can be Rent'),
'categ_id': fields.many2one('product.category','Internal Category', required=True, change_default=True, domain="[('type','=','normal')]" ,help="Select category for the current product"),
'price': fields.function(_product_template_price, type='float', string='Price', digits_compute=dp.get_precision('Product Price')),
'list_price': fields.float('Sale Price', digits_compute=dp.get_precision('Product Price'), help="Base price to compute the customer price. Sometimes called the catalog price."),
'lst_price' : fields.related('list_price', type="float", string='Public Price', digits_compute=dp.get_precision('Product Price')),
'standard_price': fields.property(type = 'float', digits_compute=dp.get_precision('Product Price'),
help="Cost price of the product template used for standard stock valuation in accounting and used as a base price on purchase orders. "
"Expressed in the default unit of measure of the product.",
groups="base.group_user", string="Cost Price"),
'volume': fields.float('Volume', help="The volume in m3."),
'weight': fields.float('Gross Weight', digits_compute=dp.get_precision('Stock Weight'), help="The gross weight in Kg."),
'weight_net': fields.float('Net Weight', digits_compute=dp.get_precision('Stock Weight'), help="The net weight in Kg."),
'warranty': fields.float('Warranty'),
'sale_ok': fields.boolean('Can be Sold', help="Specify if the product can be selected in a sales order line."),
'pricelist_id': fields.dummy(string='Pricelist', relation='product.pricelist', type='many2one'),
'state': fields.selection([('',''),
('draft', 'In Development'),
('sellable','Normal'),
('end','End of Lifecycle'),
('obsolete','Obsolete')], 'Status'),
'uom_id': fields.many2one('product.uom', 'Unit of Measure', required=True, help="Default Unit of Measure used for all stock operation."),
        'uom_po_id': fields.many2one('product.uom', 'Purchase Unit of Measure', required=True, help="Default Unit of Measure used for purchase orders. It must be in the same category as the default unit of measure."),
'uos_id' : fields.many2one('product.uom', 'Unit of Sale',
help='Specify a unit of measure here if invoicing is made in another unit of measure than inventory. Keep empty to use the default unit of measure.'),
'uos_coeff': fields.float('Unit of Measure -> UOS Coeff', digits_compute= dp.get_precision('Product UoS'),
help='Coefficient to convert default Unit of Measure to Unit of Sale\n'
' uos = uom * coeff'),
'mes_type': fields.selection((('fixed', 'Fixed'), ('variable', 'Variable')), 'Measure Type'),
'company_id': fields.many2one('res.company', 'Company', select=1),
# image: all image fields are base64 encoded and PIL-supported
'image': fields.binary("Image",
help="This field holds the image used as image for the product, limited to 1024x1024px."),
'image_medium': fields.function(_get_image, fnct_inv=_set_image,
string="Medium-sized image", type="binary", multi="_get_image",
store={
'product.template': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Medium-sized image of the product. It is automatically "\
"resized as a 128x128px image, with aspect ratio preserved, "\
"only when the image exceeds one of those sizes. Use this field in form views or some kanban views."),
'image_small': fields.function(_get_image, fnct_inv=_set_image,
string="Small-sized image", type="binary", multi="_get_image",
store={
'product.template': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Small-sized image of the product. It is automatically "\
"resized as a 64x64px image, with aspect ratio preserved. "\
"Use this field anywhere a small image is required."),
'packaging_ids': fields.one2many(
'product.packaging', 'product_tmpl_id', 'Logistical Units',
help="Gives the different ways to package the same product. This has no impact on "
"the picking order and is mainly used if you use the EDI module."),
'seller_ids': fields.one2many('product.supplierinfo', 'product_tmpl_id', 'Supplier'),
'seller_delay': fields.related('seller_ids','delay', type='integer', string='Supplier Lead Time',
help="This is the average delay in days between the purchase order confirmation and the receipts for this product and for the default supplier. It is used by the scheduler to order requests based on reordering delays."),
'seller_qty': fields.related('seller_ids','qty', type='float', string='Supplier Quantity',
help="This is minimum quantity to purchase from Main Supplier."),
'seller_id': fields.related('seller_ids','name', type='many2one', relation='res.partner', string='Main Supplier',
help="Main Supplier who has highest priority in Supplier List."),
'active': fields.boolean('Active', help="If unchecked, it will allow you to hide the product without removing it."),
'color': fields.integer('Color Index'),
'is_product_variant': fields.function( _is_product_variant, type='boolean', string='Is product variant'),
'attribute_line_ids': fields.one2many('product.attribute.line', 'product_tmpl_id', 'Product Attributes'),
'product_variant_ids': fields.one2many('product.product', 'product_tmpl_id', 'Products', required=True),
'product_variant_count': fields.function( _get_product_variant_count, type='integer', string='# of Product Variants'),
# related to display product product information if is_product_variant
'ean13': fields.related('product_variant_ids', 'ean13', type='char', string='EAN13 Barcode'),
'default_code': fields.related('product_variant_ids', 'default_code', type='char', string='Internal Reference'),
}
def _price_get_list_price(self, product):
return 0.0
def _price_get(self, cr, uid, products, ptype='lst_price', context=None):
if context is None:
context = {}
if 'currency_id' in context:
pricetype_obj = self.pool.get('product.price.type')
price_type_id = pricetype_obj.search(cr, uid, [('field','=',ptype)])[0]
price_type_currency_id = pricetype_obj.browse(cr,uid,price_type_id).currency_id.id
res = {}
product_uom_obj = self.pool.get('product.uom')
for product in products:
# standard_price field can only be seen by users in base.group_user
# Thus, in order to compute the sale price from the cost price for users not in this group
# We fetch the standard price as the superuser
if ptype != 'standard_price':
res[product.id] = product[ptype] or 0.0
else:
company_id = product.env.user.company_id.id
product = product.with_context(force_company=company_id)
                res[product.id] = product.sudo()[ptype]
if ptype == 'lst_price':
res[product.id] += product._name == "product.product" and product.price_extra or 0.0
if 'uom' in context:
uom = product.uom_id or product.uos_id
res[product.id] = product_uom_obj._compute_price(cr, uid,
uom.id, res[product.id], context['uom'])
# Convert from price_type currency to asked one
if 'currency_id' in context:
# Take the price_type currency from the product field
# This is right cause a field cannot be in more than one currency
res[product.id] = self.pool.get('res.currency').compute(cr, uid, price_type_currency_id,
context['currency_id'], res[product.id],context=context)
return res
def _get_uom_id(self, cr, uid, *args):
return self.pool["product.uom"].search(cr, uid, [], limit=1, order='id')[0]
def _default_category(self, cr, uid, context=None):
if context is None:
context = {}
if 'categ_id' in context and context['categ_id']:
return context['categ_id']
md = self.pool.get('ir.model.data')
res = False
try:
res = md.get_object_reference(cr, uid, 'product', 'product_category_all')[1]
except ValueError:
res = False
return res
def onchange_uom(self, cursor, user, ids, uom_id, uom_po_id):
if uom_id:
return {'value': {'uom_po_id': uom_id}}
return {}
def create_variant_ids(self, cr, uid, ids, context=None):
product_obj = self.pool.get("product.product")
ctx = context and context.copy() or {}
if ctx.get("create_product_variant"):
return None
ctx.update(active_test=False, create_product_variant=True)
tmpl_ids = self.browse(cr, uid, ids, context=ctx)
for tmpl_id in tmpl_ids:
# list of values combination
variant_alone = []
all_variants = [[]]
for variant_id in tmpl_id.attribute_line_ids:
if len(variant_id.value_ids) == 1:
variant_alone.append(variant_id.value_ids[0])
temp_variants = []
for variant in all_variants:
for value_id in variant_id.value_ids:
temp_variants.append(sorted(variant + [int(value_id)]))
if temp_variants:
all_variants = temp_variants
            # adding an attribute with only one value should not recreate the products;
            # write this attribute value on every existing variant so we don't lose it
for variant_id in variant_alone:
product_ids = []
for product_id in tmpl_id.product_variant_ids:
if variant_id.id not in map(int, product_id.attribute_value_ids):
product_ids.append(product_id.id)
product_obj.write(cr, uid, product_ids, {'attribute_value_ids': [(4, variant_id.id)]}, context=ctx)
# check product
variant_ids_to_active = []
variants_active_ids = []
variants_inactive = []
for product_id in tmpl_id.product_variant_ids:
variants = sorted(map(int,product_id.attribute_value_ids))
if variants in all_variants:
variants_active_ids.append(product_id.id)
all_variants.pop(all_variants.index(variants))
if not product_id.active:
variant_ids_to_active.append(product_id.id)
else:
variants_inactive.append(product_id)
if variant_ids_to_active:
product_obj.write(cr, uid, variant_ids_to_active, {'active': True}, context=ctx)
# create new product
for variant_ids in all_variants:
values = {
'product_tmpl_id': tmpl_id.id,
'attribute_value_ids': [(6, 0, variant_ids)]
}
id = product_obj.create(cr, uid, values, context=ctx)
variants_active_ids.append(id)
# unlink or inactive product
for variant_id in map(int,variants_inactive):
try:
with cr.savepoint():
product_obj.unlink(cr, uid, [variant_id], context=ctx)
except (psycopg2.Error, osv.except_osv):
product_obj.write(cr, uid, [variant_id], {'active': False}, context=ctx)
pass
return True
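    # Illustrative walk-through of create_variant_ids (assumed attribute data): for a
    # template whose attribute lines are Color = {Red, Blue} and Size = {S, M},
    # all_variants ends up holding the four sorted value-id combinations
    # {Red,S}, {Red,M}, {Blue,S} and {Blue,M}. Existing variants matching a combination
    # are kept (and re-activated if needed), missing combinations are created, and
    # leftover variants are unlinked or, failing that, deactivated. An attribute line
    # with a single value (e.g. Brand = {Acme}) is also written onto every existing
    # variant first, so existing products keep matching and are not recreated.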
def create(self, cr, uid, vals, context=None):
''' Store the initial standard price in order to be able to retrieve the cost of a product template for a given date'''
product_template_id = super(product_template, self).create(cr, uid, vals, context=context)
if not context or "create_product_product" not in context:
self.create_variant_ids(cr, uid, [product_template_id], context=context)
self._set_standard_price(cr, uid, product_template_id, vals.get('standard_price', 0.0), context=context)
        # TODO: this is needed to set the given values on the first variant after creation;
        # these fields should be moved to the product variant model, as keeping them here leads to confusion
related_vals = {}
if vals.get('ean13'):
related_vals['ean13'] = vals['ean13']
if vals.get('default_code'):
related_vals['default_code'] = vals['default_code']
if related_vals:
self.write(cr, uid, product_template_id, related_vals, context=context)
return product_template_id
def write(self, cr, uid, ids, vals, context=None):
''' Store the standard price change in order to be able to retrieve the cost of a product template for a given date'''
if isinstance(ids, (int, long)):
ids = [ids]
if 'uom_po_id' in vals:
new_uom = self.pool.get('product.uom').browse(cr, uid, vals['uom_po_id'], context=context)
for product in self.browse(cr, uid, ids, context=context):
old_uom = product.uom_po_id
if old_uom.category_id.id != new_uom.category_id.id:
raise osv.except_osv(_('Unit of Measure categories Mismatch!'), _("New Unit of Measure '%s' must belong to same Unit of Measure category '%s' as of old Unit of Measure '%s'. If you need to change the unit of measure, you may deactivate this product from the 'Procurements' tab and create a new one.") % (new_uom.name, old_uom.category_id.name, old_uom.name,))
if 'standard_price' in vals:
for prod_template_id in ids:
self._set_standard_price(cr, uid, prod_template_id, vals['standard_price'], context=context)
res = super(product_template, self).write(cr, uid, ids, vals, context=context)
if 'attribute_line_ids' in vals or vals.get('active'):
self.create_variant_ids(cr, uid, ids, context=context)
if 'active' in vals and not vals.get('active'):
ctx = context and context.copy() or {}
ctx.update(active_test=False)
product_ids = []
for product in self.browse(cr, uid, ids, context=ctx):
product_ids = map(int,product.product_variant_ids)
self.pool.get("product.product").write(cr, uid, product_ids, {'active': vals.get('active')}, context=ctx)
return res
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
template = self.browse(cr, uid, id, context=context)
default['name'] = _("%s (copy)") % (template['name'])
return super(product_template, self).copy(cr, uid, id, default=default, context=context)
_defaults = {
'company_id': lambda s,cr,uid,c: s.pool.get('res.company')._company_default_get(cr, uid, 'product.template', context=c),
'list_price': 1,
'standard_price': 0.0,
'sale_ok': 1,
'uom_id': _get_uom_id,
'uom_po_id': _get_uom_id,
'uos_coeff': 1.0,
'mes_type': 'fixed',
'categ_id' : _default_category,
'type' : 'consu',
'active': True,
}
def _check_uom(self, cursor, user, ids, context=None):
for product in self.browse(cursor, user, ids, context=context):
if product.uom_id.category_id.id != product.uom_po_id.category_id.id:
return False
return True
def _check_uos(self, cursor, user, ids, context=None):
for product in self.browse(cursor, user, ids, context=context):
if product.uos_id \
and product.uos_id.category_id.id \
== product.uom_id.category_id.id:
return False
return True
_constraints = [
(_check_uom, 'Error: The default Unit of Measure and the purchase Unit of Measure must be in the same category.', ['uom_id']),
]
def name_get(self, cr, user, ids, context=None):
if context is None:
context = {}
if 'partner_id' in context:
pass
return super(product_template, self).name_get(cr, user, ids, context)
def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
# Only use the product.product heuristics if there is a search term and the domain
# does not specify a match on `product.template` IDs.
if not name or any(term[0] == 'id' for term in (args or [])):
return super(product_template, self).name_search(
cr, user, name=name, args=args, operator=operator, context=context, limit=limit)
product_product = self.pool['product.product']
results = product_product.name_search(
cr, user, name, args, operator=operator, context=context, limit=limit)
product_ids = [p[0] for p in results]
template_ids = [p.product_tmpl_id.id
for p in product_product.browse(
cr, user, product_ids, context=context)]
# re-apply product.template order + name_get
return super(product_template, self).name_search(
cr, user, '', args=[('id', 'in', template_ids)],
operator='ilike', context=context, limit=limit)
class product_product(osv.osv):
_name = "product.product"
_description = "Product"
_inherits = {'product.template': 'product_tmpl_id'}
_inherit = ['mail.thread']
_order = 'default_code,name_template'
def _product_price(self, cr, uid, ids, name, arg, context=None):
plobj = self.pool.get('product.pricelist')
res = {}
if context is None:
context = {}
quantity = context.get('quantity') or 1.0
pricelist = context.get('pricelist', False)
partner = context.get('partner', False)
if pricelist:
# Support context pricelists specified as display_name or ID for compatibility
if isinstance(pricelist, basestring):
pricelist_ids = plobj.name_search(
cr, uid, pricelist, operator='=', context=context, limit=1)
pricelist = pricelist_ids[0][0] if pricelist_ids else pricelist
if isinstance(pricelist, (int, long)):
products = self.browse(cr, uid, ids, context=context)
qtys = map(lambda x: (x, quantity, partner), products)
pl = plobj.browse(cr, uid, pricelist, context=context)
price = plobj._price_get_multi(cr,uid, pl, qtys, context=context)
for id in ids:
res[id] = price.get(id, 0.0)
for id in ids:
res.setdefault(id, 0.0)
return res
def view_header_get(self, cr, uid, view_id, view_type, context=None):
if context is None:
context = {}
res = super(product_product, self).view_header_get(cr, uid, view_id, view_type, context)
if (context.get('categ_id', False)):
return _('Products: ') + self.pool.get('product.category').browse(cr, uid, context['categ_id'], context=context).name
return res
def _product_lst_price(self, cr, uid, ids, name, arg, context=None):
product_uom_obj = self.pool.get('product.uom')
res = dict.fromkeys(ids, 0.0)
for product in self.browse(cr, uid, ids, context=context):
if 'uom' in context:
uom = product.uos_id or product.uom_id
res[product.id] = product_uom_obj._compute_price(cr, uid,
uom.id, product.list_price or product.product_tmpl_id.list_price, context['uom'])
else:
res[product.id] = product.list_price or product.product_tmpl_id.list_price
res[product.id] = res[product.id] + (0.0 if product.list_price else product.price_extra)
return res
def _set_product_lst_price(self, cr, uid, id, name, value, args, context=None):
product_uom_obj = self.pool.get('product.uom')
product = self.browse(cr, uid, id, context=context)
if 'uom' in context:
uom = product.uos_id or product.uom_id
value = product_uom_obj._compute_price(cr, uid,
context['uom'], value, uom.id)
#value = value - product.price_extra
return product.write({'list_price': value})
def _get_partner_code_name(self, cr, uid, ids, product, partner_id, context=None):
for supinfo in product.seller_ids:
if supinfo.name.id == partner_id:
return {'code': supinfo.product_code or product.default_code, 'name': supinfo.product_name or product.name}
res = {'code': product.default_code, 'name': product.name}
return res
def _product_code(self, cr, uid, ids, name, arg, context=None):
res = {}
if context is None:
context = {}
for p in self.browse(cr, uid, ids, context=context):
res[p.id] = self._get_partner_code_name(cr, uid, [], p, context.get('partner_id', None), context=context)['code']
return res
def _product_partner_ref(self, cr, uid, ids, name, arg, context=None):
res = {}
if context is None:
context = {}
for p in self.browse(cr, uid, ids, context=context):
data = self._get_partner_code_name(cr, uid, [], p, context.get('partner_id', None), context=context)
if not data['code']:
data['code'] = p.code
if not data['name']:
data['name'] = p.name
res[p.id] = (data['code'] and ('['+data['code']+'] ') or '') + (data['name'] or '')
return res
def _is_product_variant_impl(self, cr, uid, ids, name, arg, context=None):
return dict.fromkeys(ids, True)
def _get_name_template_ids(self, cr, uid, ids, context=None):
template_ids = self.pool.get('product.product').search(cr, uid, [('product_tmpl_id', 'in', ids)])
return list(set(template_ids))
def _get_image_variant(self, cr, uid, ids, name, args, context=None):
result = dict.fromkeys(ids, False)
for obj in self.browse(cr, uid, ids, context=context):
result[obj.id] = obj.image_variant or getattr(obj.product_tmpl_id, name)
return result
def _set_image_variant(self, cr, uid, id, name, value, args, context=None):
image = tools.image_resize_image_big(value)
res = self.write(cr, uid, [id], {'image_variant': image}, context=context)
product = self.browse(cr, uid, id, context=context)
if not product.product_tmpl_id.image:
product.write({'image_variant': None})
product.product_tmpl_id.write({'image': image})
return res
def _get_price_extra(self, cr, uid, ids, name, args, context=None):
result = dict.fromkeys(ids, False)
for product in self.browse(cr, uid, ids, context=context):
price_extra = 0.0
for variant_id in product.attribute_value_ids:
for price_id in variant_id.price_ids:
if price_id.product_tmpl_id.id == product.product_tmpl_id.id:
price_extra += price_id.price_extra
result[product.id] = price_extra
return result
_columns = {
'price': fields.function(_product_price, type='float', string='Price', digits_compute=dp.get_precision('Product Price')),
'price_extra': fields.function(_get_price_extra, type='float', string='Variant Extra Price', help="This is the sum of the extra price of all attributes", digits_compute=dp.get_precision('Product Price')),
'lst_price': fields.function(_product_lst_price, fnct_inv=_set_product_lst_price, type='float', string='Public Price', digits_compute=dp.get_precision('Product Price')),
'list_price': fields.float('Sale Price', digits_compute=dp.get_precision('Product Price'), help="Base price to compute the customer price. Sometimes called the catalog price. Set to zero to disable static price list and activate the price based on attributes prices"),
'code': fields.function(_product_code, type='char', string='Internal Reference'),
'partner_ref' : fields.function(_product_partner_ref, type='char', string='Customer ref'),
'default_code' : fields.char('Internal Reference', select=True),
'active': fields.boolean('Active', help="If unchecked, it will allow you to hide the product without removing it."),
'product_tmpl_id': fields.many2one('product.template', 'Product Template', required=True, ondelete="cascade", select=True, auto_join=True),
'ean13': fields.char('EAN13 Barcode', size=13, help="International Article Number used for product identification."),
'name_template': fields.related('product_tmpl_id', 'name', string="Template Name", type='char', store={
'product.template': (_get_name_template_ids, ['name'], 10),
'product.product': (lambda self, cr, uid, ids, c=None: ids, [], 10),
}, select=True),
'attribute_value_ids': fields.many2many('product.attribute.value', id1='prod_id', id2='att_id', string='Attributes', readonly=True, ondelete='restrict'),
'is_product_variant': fields.function( _is_product_variant_impl, type='boolean', string='Is product variant'),
# image: all image fields are base64 encoded and PIL-supported
'image_variant': fields.binary("Variant Image",
help="This field holds the image used as image for the product variant, limited to 1024x1024px."),
'image': fields.function(_get_image_variant, fnct_inv=_set_image_variant,
string="Big-sized image", type="binary",
help="Image of the product variant (Big-sized image of product template if false). It is automatically "\
"resized as a 1024x1024px image, with aspect ratio preserved."),
'image_small': fields.function(_get_image_variant, fnct_inv=_set_image_variant,
string="Small-sized image", type="binary",
help="Image of the product variant (Small-sized image of product template if false)."),
'image_medium': fields.function(_get_image_variant, fnct_inv=_set_image_variant,
string="Medium-sized image", type="binary",
help="Image of the product variant (Medium-sized image of product template if false)."),
}
_defaults = {
'active': 1,
'color': 0,
}
def unlink(self, cr, uid, ids, context=None):
unlink_ids = []
unlink_product_tmpl_ids = []
for product in self.browse(cr, uid, ids, context=context):
# Check if product still exists, in case it has been unlinked by unlinking its template
if not product.exists():
continue
tmpl_id = product.product_tmpl_id.id
# Check if the product is last product of this template
other_product_ids = self.search(cr, uid, [('product_tmpl_id', '=', tmpl_id), ('id', '!=', product.id)], context=context)
if not other_product_ids:
unlink_product_tmpl_ids.append(tmpl_id)
unlink_ids.append(product.id)
res = super(product_product, self).unlink(cr, uid, unlink_ids, context=context)
# delete templates after calling super, as deleting template could lead to deleting
# products due to ondelete='cascade'
self.pool.get('product.template').unlink(cr, uid, unlink_product_tmpl_ids, context=context)
return res
def onchange_uom(self, cursor, user, ids, uom_id, uom_po_id):
if uom_id and uom_po_id:
uom_obj=self.pool.get('product.uom')
uom=uom_obj.browse(cursor,user,[uom_id])[0]
uom_po=uom_obj.browse(cursor,user,[uom_po_id])[0]
if uom.category_id.id != uom_po.category_id.id:
return {'value': {'uom_po_id': uom_id}}
return False
def _check_ean_key(self, cr, uid, ids, context=None):
for product in self.read(cr, uid, ids, ['ean13'], context=context):
if not check_ean(product['ean13']):
return False
return True
_constraints = [(_check_ean_key, 'You provided an invalid "EAN13 Barcode" reference. You may use the "Internal Reference" field instead.', ['ean13'])]
def on_order(self, cr, uid, ids, orderline, quantity):
pass
def name_get(self, cr, user, ids, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
if not len(ids):
return []
def _name_get(d):
name = d.get('name','')
code = context.get('display_default_code', True) and d.get('default_code',False) or False
if code:
name = '[%s] %s' % (code,name)
return (d['id'], name)
partner_id = context.get('partner_id', False)
if partner_id:
partner_ids = [partner_id, self.pool['res.partner'].browse(cr, user, partner_id, context=context).commercial_partner_id.id]
else:
partner_ids = []
        # not all users have access to the seller and partner records,
        # so check access rights here and then read the products as superuser
self.check_access_rights(cr, user, "read")
self.check_access_rule(cr, user, ids, "read", context=context)
result = []
for product in self.browse(cr, SUPERUSER_ID, ids, context=context):
variant = ", ".join([v.name for v in product.attribute_value_ids])
name = variant and "%s (%s)" % (product.name, variant) or product.name
sellers = []
if partner_ids:
sellers = filter(lambda x: x.name.id in partner_ids, product.seller_ids)
if sellers:
for s in sellers:
seller_variant = s.product_name and (
variant and "%s (%s)" % (s.product_name, variant) or s.product_name
) or False
mydict = {
'id': product.id,
'name': seller_variant or name,
'default_code': s.product_code or product.default_code,
}
result.append(_name_get(mydict))
else:
mydict = {
'id': product.id,
'name': name,
'default_code': product.default_code,
}
result.append(_name_get(mydict))
return result
def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
if name:
positive_operators = ['=', 'ilike', '=ilike', 'like', '=like']
ids = []
if operator in positive_operators:
ids = self.search(cr, user, [('default_code','=',name)]+ args, limit=limit, context=context)
if not ids:
ids = self.search(cr, user, [('ean13','=',name)]+ args, limit=limit, context=context)
if not ids and operator not in expression.NEGATIVE_TERM_OPERATORS:
                    # Do not merge the next two searches into a single one; SQL search performance would be abysmal
                    # on a database with thousands of matching products, due to the huge merge+unique needed for the
                    # OR operator (and given the fact that the 'name' lookup results come from the ir.translation table).
                    # Performing a quick in-memory merge of ids in Python gives much better performance.
ids = self.search(cr, user, args + [('default_code', operator, name)], limit=limit, context=context)
if not limit or len(ids) < limit:
# we may underrun the limit because of dupes in the results, that's fine
limit2 = (limit - len(ids)) if limit else False
ids += self.search(cr, user, args + [('name', operator, name), ('id', 'not in', ids)], limit=limit2, context=context)
elif not ids and operator in expression.NEGATIVE_TERM_OPERATORS:
ids = self.search(cr, user, args + ['&', ('default_code', operator, name), ('name', operator, name)], limit=limit, context=context)
if not ids and operator in positive_operators:
ptrn = re.compile('(\[(.*?)\])')
res = ptrn.search(name)
if res:
ids = self.search(cr, user, [('default_code','=', res.group(2))] + args, limit=limit, context=context)
else:
ids = self.search(cr, user, args, limit=limit, context=context)
result = self.name_get(cr, user, ids, context=context)
return result
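    # Lookup-order sketch for name_search (hypothetical reference "FURN_7777"): a
    # positive-operator search for "[FURN_7777] Office Chair" first tries an exact
    # default_code and then an exact ean13 match on the full string, then falls back
    # to default_code / name matches with the given operator; only if all of those
    # return nothing does the regex above extract "FURN_7777" from the brackets and
    # retry an exact default_code search with it.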
#
    # Could be overridden for variant price matrices
#
def price_get(self, cr, uid, ids, ptype='lst_price', context=None):
products = self.browse(cr, uid, ids, context=context)
return self.pool.get("product.template")._price_get(cr, uid, products, ptype=ptype, context=context)
def copy(self, cr, uid, id, default=None, context=None):
if context is None:
context={}
if default is None:
default = {}
product = self.browse(cr, uid, id, context)
if context.get('variant'):
# if we copy a variant or create one, we keep the same template
default['product_tmpl_id'] = product.product_tmpl_id.id
elif 'name' not in default:
default['name'] = _("%s (copy)") % (product.name,)
return super(product_product, self).copy(cr, uid, id, default=default, context=context)
def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
if context is None:
context = {}
if context.get('search_default_categ_id'):
args.append((('categ_id', 'child_of', context['search_default_categ_id'])))
return super(product_product, self).search(cr, uid, args, offset=offset, limit=limit, order=order, context=context, count=count)
def open_product_template(self, cr, uid, ids, context=None):
""" Utility method used to add an "Open Template" button in product views """
product = self.browse(cr, uid, ids[0], context=context)
return {'type': 'ir.actions.act_window',
'res_model': 'product.template',
'view_mode': 'form',
'res_id': product.product_tmpl_id.id,
'target': 'new'}
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
ctx = dict(context or {}, create_product_product=True)
return super(product_product, self).create(cr, uid, vals, context=ctx)
def need_procurement(self, cr, uid, ids, context=None):
return False
def _compute_uos_qty(self, cr, uid, ids, uom, qty, uos, context=None):
'''
Computes product's invoicing quantity in UoS from quantity in UoM.
        Takes into account the product's UoM/UoS conversion coefficient (uos_coeff).
:param uom: Source unit
:param qty: Source quantity
:param uos: Target UoS unit.
'''
if not uom or not qty or not uos:
return qty
uom_obj = self.pool['product.uom']
product_id = ids[0] if isinstance(ids, (list, tuple)) else ids
product = self.browse(cr, uid, product_id, context=context)
if isinstance(uos, (int, long)):
uos = uom_obj.browse(cr, uid, uos, context=context)
if isinstance(uom, (int, long)):
uom = uom_obj.browse(cr, uid, uom, context=context)
if product.uos_id: # Product has UoS defined
# We cannot convert directly between units even if the units are of the same category
# as we need to apply the conversion coefficient which is valid only between quantities
# in product's default UoM/UoS
qty_default_uom = uom_obj._compute_qty_obj(cr, uid, uom, qty, product.uom_id) # qty in product's default UoM
qty_default_uos = qty_default_uom * product.uos_coeff
return uom_obj._compute_qty_obj(cr, uid, product.uos_id, qty_default_uos, uos)
else:
return uom_obj._compute_qty_obj(cr, uid, uom, qty, uos)
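    # Worked example for _compute_uos_qty (illustrative coefficient): for a product
    # stocked in "Unit(s)" with uos_id = "kg" and uos_coeff = 0.75 (1 unit invoices
    # as 0.75 kg), converting qty = 4 expressed in Unit(s) goes 4 -> 4 in the default
    # UoM -> 4 * 0.75 == 3.0 in the default UoS -> 3.0 in the requested target UoS.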
class product_packaging(osv.osv):
_name = "product.packaging"
_description = "Packaging"
_rec_name = 'ean'
_order = 'sequence'
_columns = {
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of packaging."),
'name' : fields.text('Description'),
'qty' : fields.float('Quantity by Package',
help="The total number of products you can put by pallet or box."),
'ul' : fields.many2one('product.ul', 'Package Logistic Unit', required=True),
'ul_qty' : fields.integer('Package by layer', help='The number of packages by layer'),
'ul_container': fields.many2one('product.ul', 'Pallet Logistic Unit'),
'rows' : fields.integer('Number of Layers', required=True,
help='The number of layers on a pallet or box'),
'product_tmpl_id' : fields.many2one('product.template', 'Product', select=1, ondelete='cascade', required=True),
'ean' : fields.char('EAN', size=14, help="The EAN code of the package unit."),
'code' : fields.char('Code', help="The code of the transport unit."),
'weight': fields.float('Total Package Weight',
help='The weight of a full package, pallet or box.'),
}
def _check_ean_key(self, cr, uid, ids, context=None):
for pack in self.browse(cr, uid, ids, context=context):
if not check_ean(pack.ean):
return False
return True
_constraints = [(_check_ean_key, 'Error: Invalid ean code', ['ean'])]
def name_get(self, cr, uid, ids, context=None):
if not len(ids):
return []
res = []
for pckg in self.browse(cr, uid, ids, context=context):
p_name = pckg.ean and '[' + pckg.ean + '] ' or ''
p_name += pckg.ul.name
res.append((pckg.id,p_name))
return res
def _get_1st_ul(self, cr, uid, context=None):
cr.execute('select id from product_ul order by id asc limit 1')
res = cr.fetchone()
return (res and res[0]) or False
_defaults = {
'rows' : 3,
'sequence' : 1,
'ul' : _get_1st_ul,
}
def checksum(ean):
salt = '31' * 6 + '3'
sum = 0
for ean_part, salt_part in zip(ean, salt):
sum += int(ean_part) * int(salt_part)
return (10 - (sum % 10)) % 10
checksum = staticmethod(checksum)
class product_supplierinfo(osv.osv):
_name = "product.supplierinfo"
_description = "Information about a product supplier"
def _calc_qty(self, cr, uid, ids, fields, arg, context=None):
result = {}
for supplier_info in self.browse(cr, uid, ids, context=context):
for field in fields:
result[supplier_info.id] = {field:False}
qty = supplier_info.min_qty
result[supplier_info.id]['qty'] = qty
return result
_columns = {
'name' : fields.many2one('res.partner', 'Supplier', required=True,domain = [('supplier','=',True)], ondelete='cascade', help="Supplier of this product"),
'product_name': fields.char('Supplier Product Name', help="This supplier's product name will be used when printing a request for quotation. Keep empty to use the internal one."),
'product_code': fields.char('Supplier Product Code', help="This supplier's product code will be used when printing a request for quotation. Keep empty to use the internal one."),
'sequence' : fields.integer('Sequence', help="Assigns the priority to the list of product supplier."),
'product_uom': fields.related('product_tmpl_id', 'uom_po_id', type='many2one', relation='product.uom', string="Supplier Unit of Measure", readonly="1", help="This comes from the product form."),
'min_qty': fields.float('Minimal Quantity', required=True, help="The minimal quantity to purchase to this supplier, expressed in the supplier Product Unit of Measure if not empty, in the default unit of measure of the product otherwise."),
'qty': fields.function(_calc_qty, store=True, type='float', string='Quantity', multi="qty", help="This is a quantity which is converted into Default Unit of Measure."),
'product_tmpl_id' : fields.many2one('product.template', 'Product Template', required=True, ondelete='cascade', select=True, oldname='product_id'),
'delay' : fields.integer('Delivery Lead Time', required=True, help="Lead time in days between the confirmation of the purchase order and the receipt of the products in your warehouse. Used by the scheduler for automatic computation of the purchase order planning."),
'pricelist_ids': fields.one2many('pricelist.partnerinfo', 'suppinfo_id', 'Supplier Pricelist', copy=True),
'company_id':fields.many2one('res.company','Company',select=1),
}
_defaults = {
'min_qty': 0.0,
'sequence': 1,
'delay': 1,
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'product.supplierinfo', context=c),
}
_order = 'sequence'
class pricelist_partnerinfo(osv.osv):
_name = 'pricelist.partnerinfo'
_columns = {
'name': fields.char('Description'),
'suppinfo_id': fields.many2one('product.supplierinfo', 'Partner Information', required=True, ondelete='cascade'),
        'min_quantity': fields.float('Quantity', required=True, help="The minimal quantity to trigger this rule, expressed in the supplier Unit of Measure if any or in the default Unit of Measure of the product otherwise."),
'price': fields.float('Unit Price', required=True, digits_compute=dp.get_precision('Product Price'), help="This price will be considered as a price for the supplier Unit of Measure if any or the default Unit of Measure of the product otherwise"),
}
_order = 'min_quantity asc'
class res_currency(osv.osv):
_inherit = 'res.currency'
def _check_main_currency_rounding(self, cr, uid, ids, context=None):
cr.execute('SELECT digits FROM decimal_precision WHERE name like %s',('Account',))
digits = cr.fetchone()
if digits and len(digits):
digits = digits[0]
main_currency = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id
for currency_id in ids:
if currency_id == main_currency.id:
if float_compare(main_currency.rounding, 10 ** -digits, precision_digits=6) == -1:
return False
return True
_constraints = [
(_check_main_currency_rounding, 'Error! You cannot define a rounding factor for the company\'s main currency that is smaller than the decimal precision of \'Account\'.', ['rounding']),
]
class decimal_precision(osv.osv):
_inherit = 'decimal.precision'
def _check_main_currency_rounding(self, cr, uid, ids, context=None):
cr.execute('SELECT id, digits FROM decimal_precision WHERE name like %s',('Account',))
res = cr.fetchone()
if res and len(res):
account_precision_id, digits = res
main_currency = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id
for decimal_precision in ids:
if decimal_precision == account_precision_id:
if float_compare(main_currency.rounding, 10 ** -digits, precision_digits=6) == -1:
return False
return True
_constraints = [
(_check_main_currency_rounding, 'Error! You cannot define the decimal precision of \'Account\' as greater than the rounding factor of the company\'s main currency', ['digits']),
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
CubicERP/odoo
|
addons/product/product.py
|
Python
|
agpl-3.0
| 67,910
| 0.006685
|
#!/usr/bin/env python
import sys
from weather import Weather
def main(args):
weather = Weather()
location = weather.lookup_by_location(args[1])
condition = location.forecast()[0]
if condition:
return condition['text'] + ' with high of ' + condition['high'] + ' and low of ' + condition['low']
else:
return "City not found. It's probably raining meatballs. Please try again."
if __name__ == '__main__':
    print(main(sys.argv))
|
Daniel-Hoerauf/group-assistant
|
assistant/weatherTest.py
|
Python
|
gpl-3.0
| 433
| 0.027714
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from django.http import HttpResponseRedirect
from shuup import configuration
def toggle_all_seeing(request):
return_url = request.META["HTTP_REFERER"]
if not request.user.is_superuser:
return HttpResponseRedirect(return_url)
all_seeing_key = "is_all_seeing:%d" % request.user.pk
is_all_seeing = not configuration.get(None, all_seeing_key, False)
configuration.set(None, all_seeing_key, is_all_seeing)
return HttpResponseRedirect(return_url)
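# A hedged wiring sketch (an assumption, not Shuup's actual URL configuration):
# the view above could be routed from a Django URLconf roughly like
#   url(r'^toggle-all-seeing/$', toggle_all_seeing, name='toggle-all-seeing')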
|
suutari-ai/shoop
|
shuup/front/views/misc.py
|
Python
|
agpl-3.0
| 730
| 0
|
import sys
from collections import deque
from Catalog.Schema import DBSchema
from Query.Operators.TableScan import TableScan
from Query.Operators.Select import Select
from Query.Operators.Project import Project
from Query.Operators.Union import Union
from Query.Operators.Join import Join
from Query.Operators.GroupBy import GroupBy
from Query.Operators.Sort import Sort
class Plan:
"""
A data structure implementing query plans.
Query plans are tree data structures whose nodes are objects
inheriting from the Query.Operator class.
Our Query.Plan class tracks the root of the plan tree,
and provides basic accessors such as the ability to
retrieve the relations accessed by the query, the query's
output schema, and plan pretty printing facilities.
Plan instances delegate their iterator to the root operator,
enabling direct iteration over query results.
Plan instances should use the 'prepare' method prior to
iteration (as done with Database.processQuery), to initialize
all operators contained in the plan.
"""
def __init__(self, **kwargs):
other = kwargs.get("other", None)
if other:
self.fromOther(other)
elif "root" in kwargs:
self.root = kwargs["root"]
else:
raise ValueError("No root operator specified for query plan")
  def fromOther(self, other):
self.root = other.root
# Returns the root operator in the query plan
def root(self):
return self.root
# Returns the query result schema.
def schema(self):
return self.root.schema()
# Returns the relations used by the query.
def relations(self):
return [op.relationId() for (_,op) in self.flatten() if isinstance(op, TableScan)]
# Pre-order depth-first flattening of the query tree.
def flatten(self):
if self.root:
result = []
queue = deque([(0, self.root)])
while queue:
(depth, operator) = queue.popleft()
children = operator.inputs()
result.append((depth, operator))
if children:
queue.extendleft([(depth+1, c) for c in children])
return result
# Plan preparation and execution
# Returns a prepared plan, where every operator has filled in
# internal parameters necessary for processing data.
def prepare(self, database):
if self.root:
for (_, operator) in self.flatten():
operator.prepare(database)
return self
else:
raise ValueError("Invalid query plan")
# Iterator abstraction for query processing.
# Thus, we can use: "for page in plan: ..."
def __iter__(self):
return iter(self.root)
# Plan and statistics information.
# Returns a description for the entire query plan, based on the
# description of each individual operator.
def explain(self):
if self.root:
planDesc = []
indent = ' ' * 2
for (depth, operator) in self.flatten():
planDesc.append(indent * depth + operator.explain())
return '\n'.join(planDesc)
# Returns the cost of this query plan. Each operator should determine
# its own local cost added to the cost of its children.
def cost(self):
return self.root.cost()
# Plan I/O, e.g., for query shipping.
def pack(self):
raise NotImplementedError
def unpack(self):
raise NotImplementedError
class PlanBuilder:
"""
A query plan builder class that can be used for LINQ-like construction of queries.
A plan builder consists of an operator field, as the running root of the query tree.
  Each method returns a plan builder instance that can be used to compose
  further operators with additional builder methods.
A plan builder yields a Query.Plan instance through its finalize() method.
>>> import Database
>>> db = Database.Database()
>>> db.createRelation('employee', [('id', 'int'), ('age', 'int')])
>>> schema = db.relationSchema('employee')
# Populate relation
>>> for tup in [schema.pack(schema.instantiate(i, 2*i+20)) for i in range(20)]:
... _ = db.insertTuple(schema.name, tup)
...
### SELECT * FROM Employee WHERE age < 30
>>> query1 = db.query().fromTable('employee').where("age < 30").finalize()
>>> query1.relations()
['employee']
>>> print(query1.explain()) # doctest: +ELLIPSIS
Select[...,cost=...](predicate='age < 30')
TableScan[...,cost=...](employee)
>>> [schema.unpack(tup).age for page in db.processQuery(query1) for tup in page[1]]
[20, 22, 24, 26, 28]
### SELECT eid FROM Employee WHERE age < 30
>>> query2 = db.query().fromTable('employee').where("age < 30").select({'id': ('id', 'int')}).finalize()
>>> print(query2.explain()) # doctest: +ELLIPSIS
Project[...,cost=...](projections={'id': ('id', 'int')})
Select[...,cost=...](predicate='age < 30')
TableScan[...,cost=...](employee)
>>> [query2.schema().unpack(tup).id for page in db.processQuery(query2) for tup in page[1]]
[0, 1, 2, 3, 4]
### SELECT * FROM Employee UNION ALL Employee
>>> query3 = db.query().fromTable('employee').union(db.query().fromTable('employee')).finalize()
>>> print(query3.explain()) # doctest: +ELLIPSIS
UnionAll[...,cost=...]
TableScan[...,cost=...](employee)
TableScan[...,cost=...](employee)
>>> [query3.schema().unpack(tup).id for page in db.processQuery(query3) for tup in page[1]] # doctest:+ELLIPSIS
[0, 1, 2, ..., 19, 0, 1, 2, ..., 19]
### SELECT * FROM Employee E1 JOIN Employee E2 ON E1.id = E2.id
>>> e2schema = schema.rename('employee2', {'id':'id2', 'age':'age2'})
>>> query4 = db.query().fromTable('employee').join( \
db.query().fromTable('employee'), \
rhsSchema=e2schema, \
method='block-nested-loops', expr='id == id2').finalize()
>>> print(query4.explain()) # doctest: +ELLIPSIS
BNLJoin[...,cost=...](expr='id == id2')
TableScan[...,cost=...](employee)
TableScan[...,cost=...](employee)
>>> q4results = [query4.schema().unpack(tup) for page in db.processQuery(query4) for tup in page[1]]
>>> [(tup.id, tup.id2) for tup in q4results] # doctest:+ELLIPSIS
[(0, 0), (1, 1), (2, 2), ..., (18, 18), (19, 19)]
### Hash join test with the same query.
### SELECT * FROM Employee E1 JOIN Employee E2 ON E1.id = E2.id
>>> e2schema = schema.rename('employee2', {'id':'id2', 'age':'age2'})
>>> keySchema = DBSchema('employeeKey', [('id', 'int')])
>>> keySchema2 = DBSchema('employeeKey2', [('id2', 'int')])
>>> query5 = db.query().fromTable('employee').join( \
db.query().fromTable('employee'), \
rhsSchema=e2schema, \
method='hash', \
lhsHashFn='hash(id) % 4', lhsKeySchema=keySchema, \
rhsHashFn='hash(id2) % 4', rhsKeySchema=keySchema2, \
).finalize()
>>> print(query5.explain()) # doctest: +ELLIPSIS
HashJoin[...,cost=...](lhsKeySchema=employeeKey[(id,int)],rhsKeySchema=employeeKey2[(id2,int)],lhsHashFn='hash(id) % 4',rhsHashFn='hash(id2) % 4')
TableScan[...,cost=...](employee)
TableScan[...,cost=...](employee)
>>> q5results = [query5.schema().unpack(tup) for page in db.processQuery(query5) for tup in page[1]]
>>> [(tup.id, tup.id2) for tup in q5results] # doctest:+ELLIPSIS
[(0, 0), (1, 1), (2, 2), ..., (18, 18), (19, 19)]
>>> sorted([(tup.id, tup.id2) for tup in q5results]) # doctest:+ELLIPSIS
[(0, 0), (1, 1), (2, 2), ..., (18, 18), (19, 19)]
### Group by aggregate query
### SELECT id, max(age) FROM Employee GROUP BY id
>>> aggMinMaxSchema = DBSchema('minmax', [('minAge', 'int'), ('maxAge','int')])
>>> query6 = db.query().fromTable('employee').groupBy( \
groupSchema=keySchema, \
aggSchema=aggMinMaxSchema, \
groupExpr=(lambda e: e.id), \
aggExprs=[(sys.maxsize, lambda acc, e: min(acc, e.age), lambda x: x), \
(0, lambda acc, e: max(acc, e.age), lambda x: x)], \
groupHashFn=(lambda gbVal: hash(gbVal[0]) % 2) \
).finalize()
>>> print(query6.explain()) # doctest: +ELLIPSIS
GroupBy[...,cost=...](groupSchema=employeeKey[(id,int)], aggSchema=minmax[(minAge,int),(maxAge,int)])
TableScan[...,cost=...](employee)
>>> q6results = [query6.schema().unpack(tup) for page in db.processQuery(query6) for tup in page[1]]
>>> sorted([(tup.id, tup.minAge, tup.maxAge) for tup in q6results]) # doctest:+ELLIPSIS
[(0, 20, 20), (1, 22, 22), ..., (18, 56, 56), (19, 58, 58)]
### Order by query
### SELECT id FROM Employee ORDER by age
>>> query7 = db.query().fromTable('employee') \
.order(sortKeyFn=lambda x: x.age, sortKeyDesc='age') \
.select({'id': ('id', 'int')}).finalize()
>>> print(query7.explain()) # doctest: +ELLIPSIS
Project[...,cost=...](projections={'id': ('id', 'int')})
Sort[...,cost=...](sortKeyDesc='age')
TableScan[...,cost=...](employee)
"""
def __init__(self, **kwargs):
other = kwargs.get("other", None)
if other:
self.fromOther(other)
elif "operator" in kwargs:
self.operator = kwargs["operator"]
elif "db" in kwargs:
self.database = kwargs["db"]
else:
raise ValueError("No initial operator or database given for a plan builder")
def fromOther(self, other):
self.database = other.database
self.operator = other.operator
def fromTable(self, relId):
if self.database:
schema = self.database.relationSchema(relId)
return PlanBuilder(operator=TableScan(relId, schema))
def where(self, conditionExpr):
if self.operator:
return PlanBuilder(operator=Select(self.operator, conditionExpr))
else:
raise ValueError("Invalid where clause")
def select(self, projectExprs):
if self.operator:
return PlanBuilder(operator=Project(self.operator, projectExprs))
else:
raise ValueError("Invalid select list")
def join(self, rhsQuery, **kwargs):
if rhsQuery:
rhsPlan = rhsQuery.operator
else:
raise ValueError("Invalid Join RHS query")
lhsPlan = self.operator
return PlanBuilder(operator=Join(lhsPlan, rhsPlan, **kwargs))
def union(self, subQuery):
if self.operator:
return PlanBuilder(operator=Union(self.operator, subQuery.operator))
else:
raise ValueError("Invalid union clause")
def groupBy(self, **kwargs):
if self.operator:
return PlanBuilder(operator=GroupBy(self.operator, **kwargs))
else:
raise ValueError("Invalid group by operator")
def order(self, **kwargs):
if self.operator:
return PlanBuilder(operator=Sort(self.operator, **kwargs))
else:
raise ValueError("Invalid order by operator")
# Constructs a plan instance from the running plan tree.
def finalize(self):
if self.operator:
return Plan(root=self.operator)
else:
raise ValueError("Invalid query plan")
if __name__ == "__main__":
import doctest
doctest.testmod()
|
yliu120/dbsystem
|
HW2/dbsys-hw2/Query/Plan.py
|
Python
|
apache-2.0
| 10,860
| 0.01105
|
import streamcorpus as sc
import cuttsum.events
import cuttsum.corpora
from cuttsum.trecdata import SCChunkResource
from cuttsum.pipeline import ArticlesResource, DedupedArticlesResource
import os
import pandas as pd
from datetime import datetime
from collections import defaultdict
import matplotlib.pylab as plt
plt.style.use('ggplot')
pd.set_option('display.max_rows', 500)
pd.set_option('display.width', 200)
import locale
locale.setlocale(locale.LC_ALL, 'en_US.UTF8')
def format_int(x):
return locale.format("%d", x, grouping=True)
def epoch(dt):
return int((dt - datetime(1970, 1, 1)).total_seconds())
chunk_res = SCChunkResource()
articles_res = ArticlesResource()
ded_articles_res = DedupedArticlesResource()
data = []
event2ids = defaultdict(set)
fltr_event2ids = defaultdict(set)
for event in cuttsum.events.get_events():
corpus = cuttsum.corpora.get_raw_corpus(event)
hours = event.list_event_hours()
hour2ded = defaultdict(int)
hour2ded_fltr = defaultdict(int)
ded_df = ded_articles_res.get_stats_df(event, corpus, "goose", .8)
if ded_df is not None:
if event.query_num > 25:
for ids in ded_df["stream ids"].apply(eval).tolist():
for id1 in ids:
event2ids[event.fs_name()].add(id1)
for _, row in ded_df.iterrows():
dt = datetime.utcfromtimestamp(row["earliest"])
hour = datetime(dt.year, dt.month, dt.day, dt.hour)
hour2ded[hour] += 1
if row["match"] == True:
hour2ded_fltr[hour] += 1
hour2goose = defaultdict(int)
for hour in hours:
path = articles_res.get_chunk_path(event, "goose", hour, corpus)
if path is None:
continue
#print path
fname = os.path.split(path)[1]
num_goose = int(fname.split("-")[0])
hour2goose[hour] = num_goose
# goose_df = articles_res.get_stats_df(event, "goose")
# if goose_df is not None:
# for _, row in goose_df.iterrows():
# dt = datetime.utcfromtimestamp(row["hour"])
# hour = datetime(dt.year, dt.month, dt.day, dt.hour)
# hour2goose[hour] = row["goose articles"]
for hour in hours:
raw_chunks = chunk_res.get_chunks_for_hour(hour, corpus, event)
num_raw_si = 0
for chunk in raw_chunks:
fname = os.path.split(chunk)[1]
num_raw_si += int(fname.split("-")[1])
#num_fltr_si = len(articles_res.get_si(event, corpus, "goose", hour))
data.append({
"event": event.query_id,
"title": event.title,
"hour": hour,
"raw articles": num_raw_si,
"goose articles": hour2goose[hour],
"deduped articles": hour2ded[hour],
"deduped match articles": hour2ded_fltr[hour],
})
for event in cuttsum.events.get_events():
if event.query_num < 26: continue
corpus = cuttsum.corpora.FilteredTS2015()
hours = event.list_event_hours()
hour2ded = defaultdict(int)
hour2ded_fltr = defaultdict(int)
ded_df = ded_articles_res.get_stats_df(event, corpus, "goose", .8)
if ded_df is not None:
for ids in ded_df["stream ids"].apply(eval).tolist():
for id1 in ids:
fltr_event2ids[event.fs_name()].add(id1)
for _, row in ded_df.iterrows():
dt = datetime.utcfromtimestamp(row["earliest"])
hour = datetime(dt.year, dt.month, dt.day, dt.hour)
hour2ded[hour] += 1
if row["match"] == True:
hour2ded_fltr[hour] += 1
hour2goose = defaultdict(int)
for hour in hours:
path = articles_res.get_chunk_path(event, "goose", hour, corpus)
if path is None:
continue
print path
fname = os.path.split(path)[1]
num_goose = int(fname.split("-")[0])
hour2goose[hour] = num_goose
# goose_df = articles_res.get_stats_df(event, "goose")
# if goose_df is not None:
# for _, row in goose_df.iterrows():
# dt = datetime.utcfromtimestamp(row["hour"])
# hour = datetime(dt.year, dt.month, dt.day, dt.hour)
# hour2goose[hour] = row["goose articles"]
for hour in hours:
print hour
raw_chunks = chunk_res.get_chunks_for_hour(hour, corpus, event)
num_raw_si = 0
for chunk in raw_chunks:
fname = os.path.split(chunk)[1]
#num_raw_si += int(fname.split("-")[1])
with sc.Chunk(path=chunk, mode="rb", message=corpus.sc_msg()) as c:
for si in c:
num_raw_si += 1
#num_fltr_si = len(articles_res.get_si(event, corpus, "goose", hour))
data.append({
"event": event.query_id + " (filtered)",
"title": event.title,
"hour": hour,
"raw articles": num_raw_si,
"goose articles": hour2goose[hour],
"deduped articles": hour2ded[hour],
"deduped match articles": hour2ded_fltr[hour],
})
df = pd.DataFrame(data)
cols = ["raw articles", "goose articles", "deduped articles",
"deduped match articles"]
df_sum = df.groupby("event")[cols].sum()
df_sum["raw articles"] = df_sum["raw articles"].apply(format_int)
df_sum["goose articles"] = df_sum["goose articles"].apply(format_int)
df_sum["deduped articles"] = df_sum["deduped articles"].apply(format_int)
df_sum["deduped match articles"] = df_sum["deduped match articles"].apply(format_int)
print df_sum
print
coverage = []
for event in cuttsum.events.get_events():
if event.query_num < 26: continue
isect = event2ids[event.fs_name()].intersection(fltr_event2ids[event.fs_name()])
n_isect = len(isect)
n_unfltr = max(len(event2ids[event.fs_name()]), 1)
n_fltr = max(len(fltr_event2ids[event.fs_name()]), 1)
print event.fs_name()
print n_isect, float(n_isect) / n_fltr, float(n_isect) / n_unfltr
coverage.append({
"event": event.query_id,
"intersection": n_isect,
"isect/n_2015F": float(n_isect) / n_fltr,
"isect/n_2014": float(n_isect) / n_unfltr,
})
df = pd.DataFrame(coverage)
df_u = df.mean()
df_u["event"] = "mean"
print pd.concat([df, df_u.to_frame().T]).set_index("event")
exit()
with open("article_count.tex", "w") as f:
f.write(df_sum.to_latex())
import os
if not os.path.exists("plots"):
os.makedirs("plots")
import cuttsum.judgements
ndf = cuttsum.judgements.get_merged_dataframe()
for (event, title), group in df.groupby(["event", "title"]):
matches = ndf[ndf["query id"] == event]
#fig = plt.figure()
group = group.set_index(["hour"])
#ax = group[["goose articles", "deduped articles", "deduped match articles"]].plot()
linex = epoch(group.index[10])
ax = plt.plot(group.index, group["goose articles"], label="goose")
ax = plt.plot(group.index, group["deduped articles"], label="dedupe")
ax = plt.plot(group.index, group["deduped match articles"], label="dedupe qmatch")
for nugget, ngroup in matches.groupby("nugget id"):
times = ngroup["update id"].apply(lambda x: datetime.utcfromtimestamp(int(x.split("-")[0])))
#ngroup = ngroup.sort("timestamp")
times.sort()
times = times.reset_index(drop=True)
if len(times) == 0: continue
plt.plot_date(
(times[0], times[0]),
(0, plt.ylim()[1]),
'--', color="black", linewidth=.5, alpha=.5)
plt.gcf().autofmt_xdate()
plt.gcf().suptitle(title)
plt.gcf().savefig(os.path.join("plots", "{}-stream.png".format(event)))
plt.close("all")
|
kedz/cuttsum
|
trec2015/sbin/reports/raw-stream-count.py
|
Python
|
apache-2.0
| 7,738
| 0.005557
|
from flask import session
from appconfig import *
class UserModel:
def __init__(self):
from models import Tag
from models import Post
from models import User
self.Tag = Tag.Tag
self.Post = Post.Post
self.User = User.User
def login(self, email, password):
user = self.User.query.filter_by(Email = email).first()
if user and user.check_password(password):
session['email'] = user.Email
session['nick'] = user.Nick
session['Id'] = user.Id
return True
return False
def register(self, email, password, nick, role, id = None):
from models import db
if id:
u = self.User.query.filter_by(Id=id).first()
u.Email = email
u.Role = role
u.set_password(password)
u.Nick = nick
subject = "You account is updated"
else:
u = self.User(nick, email, role, password)
db.session.add(u)
subject = "Account is created"
db.session.commit()
body = "<p>Hello "+nick+", </p> <p>Your login details for "+URL+" :</p> <p>Username: "+email+" <br />Password: "+password+"</p>"
self.send_email(subject, email, body, nick)
return u.Id
def list(self):
users = self.User.query.all()
if users:
return users
return False
def getUser(self, id):
user = self.User.query.filter_by(Id=id).first()
if user:
return user
return False
def send_email(self, subject, recipients, html_body, nick):
import mandrill
try:
mandrill_client = mandrill.Mandrill('ajQ8I81AVELYSYn--6xbmw')
message = {
'from_email': ADMINS[0],
'from_name': 'Blog admin',
'headers': {'Reply-To': ADMINS[0]},
'html': html_body,
'important': True,
'subject': subject,
'to': [{'email': recipients,
'name': nick,
'type': 'to'}],
}
result = mandrill_client.messages.send(message=message, async=False)
'''
[{'_id': 'abc123abc123abc123abc123abc123',
'email': 'recipient.email@example.com',
'reject_reason': 'hard-bounce',
'status': 'sent'}]
'''
except mandrill.Error, e:
# Mandrill errors are thrown as exceptions
print 'A mandrill error occurred: %s - %s' % (e.__class__, e)
# A mandrill error occurred: <class 'mandrill.UnknownSubaccountError'> - No subaccount exists with the id 'customer-123'
raise
|
goors/flask-microblog
|
UserModel.py
|
Python
|
apache-2.0
| 2,758
| 0.005801
|
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2017 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""Simple mathematical captcha."""
from __future__ import unicode_literals
import ast
from base64 import b64encode, b64decode
import hashlib
import operator
from random import SystemRandom
import time
from django.conf import settings
TIMEDELTA = 600
# Supported operators
OPERATORS = {
ast.Add: operator.add,
ast.Sub: operator.sub,
ast.Mult: operator.mul,
}
class MathCaptcha(object):
"""Simple match captcha object."""
operators = ('+', '-', '*')
operators_display = {
'+': '<i class="fa fa-plus"></i>',
'-': '<i class="fa fa-minus"></i>',
'*': '<i class="fa fa-times"></i>',
}
interval = (1, 10)
def __init__(self, question=None, timestamp=None):
if question is None:
self.question = self.generate_question()
else:
self.question = question
if timestamp is None:
self.timestamp = time.time()
else:
self.timestamp = timestamp
def generate_question(self):
"""Generate random question."""
generator = SystemRandom()
operation = generator.choice(self.operators)
first = generator.randint(self.interval[0], self.interval[1])
second = generator.randint(self.interval[0], self.interval[1])
# We don't want negative answers
if operation == '-':
first += self.interval[1]
return ' '.join((
str(first),
operation,
str(second)
))
@staticmethod
def from_hash(hashed):
"""Create object from hash."""
question, timestamp = unhash_question(hashed)
return MathCaptcha(question, timestamp)
@property
def hashed(self):
"""Return hashed question."""
return hash_question(self.question, self.timestamp)
def validate(self, answer):
"""Validate answer."""
return (
self.result == answer and
self.timestamp + TIMEDELTA > time.time()
)
@property
def result(self):
"""Return result."""
return eval_expr(self.question)
@property
def display(self):
"""Get unicode for display."""
parts = self.question.split()
return ' '.join((
parts[0],
self.operators_display[parts[1]],
parts[2],
))
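# A hedged usage sketch for MathCaptcha above (the question and answer values are
# illustrative only):
#   captcha = MathCaptcha()                       # e.g. question '4 + 7'
#   token = captcha.hashed                        # store alongside the form
#   MathCaptcha.from_hash(token).validate(11)     # True within TIMEDELTA seconds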
def format_timestamp(timestamp):
"""Format timestamp in a form usable in captcha."""
return '{0:>010x}'.format(int(timestamp))
def checksum_question(question, timestamp):
"""Return checksum for a question."""
challenge = ''.join((settings.SECRET_KEY, question, timestamp))
sha = hashlib.sha1(challenge.encode('utf-8'))
return sha.hexdigest()
def hash_question(question, timestamp):
"""Hashe question so that it can be later verified."""
timestamp = format_timestamp(timestamp)
hexsha = checksum_question(question, timestamp)
return ''.join((
hexsha,
timestamp,
b64encode(question.encode('utf-8')).decode('ascii')
))
def unhash_question(question):
"""Unhashe question, verifying its content."""
if len(question) < 40:
raise ValueError('Invalid data')
hexsha = question[:40]
timestamp = question[40:50]
try:
question = b64decode(question[50:]).decode('utf-8')
except (TypeError, UnicodeError):
raise ValueError('Invalid encoding')
if hexsha != checksum_question(question, timestamp):
raise ValueError('Tampered question!')
return question, int(timestamp, 16)
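# A hedged illustration of the token layout produced by hash_question(): the first
# 40 characters are the SHA-1 hex checksum, the next 10 are the hex-encoded
# timestamp, and the remainder is the base64-encoded question, e.g.
#   token = hash_question('3 + 4', time.time())
#   unhash_question(token)   # -> ('3 + 4', <timestamp as int>)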
def eval_expr(expr):
"""Evaluate arithmetic expression used in Captcha.
>>> eval_expr('2+6')
8
>>> eval_expr('2*6')
12
"""
return eval_node(ast.parse(expr).body[0].value)
def eval_node(node):
"""Evaluate single AST node."""
if isinstance(node, ast.Num):
# number
return node.n
elif isinstance(node, ast.operator):
# operator
return OPERATORS[type(node)]
elif isinstance(node, ast.BinOp):
# binary operation
return eval_node(node.op)(
eval_node(node.left),
eval_node(node.right)
)
else:
raise ValueError(node)
|
lem9/weblate
|
weblate/accounts/captcha.py
|
Python
|
gpl-3.0
| 4,975
| 0
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Tests for Help
from nose.tools import assert_true, assert_equal
from desktop.lib.django_test_util import make_logged_in_client
def test_about():
c = make_logged_in_client(username="test", is_superuser=True)
# Test default output
response = c.get('/help/')
assert_true("Welcome to Hue!" in response.content)
# Test default to index.md
response = c.get("/help/about/")
response2 = c.get("/help/about/index.html")
assert_equal(response.content, response2.content)
# Test index at the bottom
assert_true('href="/help/desktop' in response.content)
|
hortonworks/hortonworks-sandbox
|
apps/help/src/help/tests.py
|
Python
|
apache-2.0
| 1,364
| 0.008065
|
import os
from flask import Flask, request, g
from flask_sqlalchemy import SQLAlchemy
from .decorators import json
db = SQLAlchemy()
def create_app(config_name):
""" Create the usual Flask application instance."""
app = Flask(__name__)
# Apply configuration
cfg = os.path.join(os.getcwd(), 'config', config_name + '.py')
app.config.from_pyfile(cfg)
# initialize extensions
db.init_app(app)
# register blueprints
from .api_v1 import api as api_blueprint
app.register_blueprint(api_blueprint, url_prefix='/api/v1')
# authentication token route
from .auth import auth
from .models import User
@app.route('/api/v1', methods=['GET'])
@json
def api_index():
return {
"message": "Welcome to Maisha Goals. Register a new "
" user or login to get started"}
@app.route('/auth/register', methods=['POST'])
@json
def register_user():
u = User()
u.import_data(request.json)
db.session.add(u)
db.session.commit()
return {
            'message': 'Your account has been successfully created'
}, 201, {'Location': u.get_url()}
@app.route('/auth/login')
@auth.login_required
@json
def login_user():
return {'token': g.user.generate_auth_token()}
return app
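# A hedged usage sketch (the 'development' config name is an assumption; the
# factory only requires that config/<name>.py exists next to the project root):
#   app = create_app('development')
#   app.run()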
|
andela-akhenda/maisha-goals
|
app/__init__.py
|
Python
|
mit
| 1,333
| 0
|
from com.googlecode.fascinator.api.indexer import SearchRequest
from com.googlecode.fascinator.common.solr import SolrResult
from com.googlecode.fascinator.spring import ApplicationContextProvider
from java.io import ByteArrayInputStream, ByteArrayOutputStream
class MaintenanceData:
def __init__(self):
pass
def __activate__(self, context):
self.velocityContext = context
self.response = self.velocityContext["response"]
self.maintenanceModeService = ApplicationContextProvider.getApplicationContext().getBean("maintenanceModeService")
if self.maintenanceModeService.isMaintanceMode() == False:
self.response.sendRedirect(self.velocityContext["portalPath"]+"/home")
|
the-fascinator/fascinator-portal
|
src/main/config/portal/default/default/scripts/maintenance.py
|
Python
|
gpl-2.0
| 729
| 0.005487
|
#!/usr/bin/python
# Copyright (c) 2009, Purdue University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# Neither the name of the Purdue University nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test for Credential cache library."""
__copyright__ = 'Copyright (C) 2009, Purdue University'
__license__ = 'BSD'
__version__ = '#TRUNK#'
import unittest
import os
import roster_core
from roster_server import credentials
CONFIG_FILE = 'test_data/roster.conf' # Example in test_data
SCHEMA_FILE = '../roster-core/data/database_schema.sql'
DATA_FILE = 'test_data/test_data.sql'
class TestCredentialsLibrary(unittest.TestCase):
def setUp(self):
self.config_instance = roster_core.Config(file_name=CONFIG_FILE)
self.cred_instance = credentials.CredCache(self.config_instance,
u'sharrell')
db_instance = self.config_instance.GetDb()
db_instance.CreateRosterDatabase()
data = open(DATA_FILE, 'r').read()
db_instance.StartTransaction()
db_instance.cursor.execute(data)
db_instance.EndTransaction()
db_instance.close()
self.core_instance = roster_core.Core(u'sharrell', self.config_instance)
def is_valid_uuid (self, uuid):
"""
TAKEN FROM THE BLUEZ MODULE
is_valid_uuid (uuid) -> bool
returns True if uuid is a valid 128-bit UUID.
valid UUIDs are always strings taking one of the following forms:
XXXX
XXXXXXXX
XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
where each X is a hexadecimal digit (case insensitive)
"""
try:
if len (uuid) == 4:
if int (uuid, 16) < 0: return False
elif len (uuid) == 8:
if int (uuid, 16) < 0: return False
elif len (uuid) == 36:
pieces = uuid.split ("-")
if len (pieces) != 5 or \
len (pieces[0]) != 8 or \
len (pieces[1]) != 4 or \
len (pieces[2]) != 4 or \
len (pieces[3]) != 4 or \
len (pieces[4]) != 12:
return False
[ int (p, 16) for p in pieces ]
else:
return False
except ValueError:
return False
except TypeError:
return False
return True
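  # Hedged examples for the helper above: '1234' and
  # '01234567-89ab-cdef-0123-456789abcdef' are accepted, while 'zzzz' or a
  # string of the wrong length is rejected.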
def testCredentials(self):
self.assertTrue(self.cred_instance.Authenticate(u'sharrell', 'test'))
cred_string = self.cred_instance.GetCredentials(u'sharrell', 'test',
self.core_instance)
self.assertEqual(self.cred_instance.CheckCredential(cred_string,
u'sharrell',
self.core_instance),
u'')
self.assertEqual(self.cred_instance.CheckCredential(u'test', u'sharrell',
self.core_instance),
None)
if( __name__ == '__main__' ):
unittest.main()
|
stephenlienharrell/roster-dns-management
|
test/credentials_test.py
|
Python
|
bsd-3-clause
| 4,275
| 0.010526
|
# Generated by Django 2.0.1 on 2018-01-21 14:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('test_app', '0016_add_filepath'),
]
operations = [
migrations.AddField(
model_name='secondobject',
name='floating',
field=models.FloatField(default=0.0),
),
]
|
David-Wobrock/django-fake-database-backends
|
tests/test_project/test_app/migrations/0017_add_float.py
|
Python
|
mit
| 386
| 0
|
# -*- coding: utf-8 -*-
#
# CoderDojo Twin Cities Python for Minecraft documentation build configuration file, created by
# sphinx-quickstart on Fri Oct 24 00:52:04 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.todo']
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'contents'
# General information about the project.
project = u'CoderDojo Twin Cities Python for Minecraft'
copyright = u'by multiple <a href="https://github.com/CoderDojoTC/python-minecraft/graphs/contributors">contributors</a>'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'CoderDojoTwinCitiesPythonforMinecraftdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'CoderDojoTwinCitiesPythonforMinecraft.tex', u'CoderDojo Twin Cities Python for Minecraft Documentation',
u'Mike McCallister', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'coderdojotwincitiespythonforminecraft', u'CoderDojo Twin Cities Python for Minecraft Documentation',
[u'Mike McCallister'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'CoderDojoTwinCitiesPythonforMinecraft', u'CoderDojo Twin Cities Python for Minecraft Documentation',
u'Mike McCallister', 'CoderDojoTwinCitiesPythonforMinecraft', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
CoderDojoTC/python-minecraft
|
docs/conf.py
|
Python
|
mit
| 8,611
| 0.006503
|
from django.conf.urls import patterns, url
from .views import PhotoListView
urlpatterns = patterns('',
url(r'^(?P<slug>[\w-]+)/$', PhotoListView.as_view(), name='image'),
)
|
mailfish/helena
|
gallery/urls.py
|
Python
|
apache-2.0
| 197
| 0.015228
|
# -*- coding: utf-8 -*-
"""
Default Controllers
@author: Fran Boon
"""
module = "default"
# Options Menu (available in all Functions)
#response.menu_options = [
#[T("About Sahana"), False, URL(r=request, f="about")],
#]
def call():
"Call an XMLRPC, JSONRPC or RSS service"
# If webservices don't use sessions, avoid cluttering up the storage
#session.forget()
return service()
def download():
"Download a file"
return response.download(request, db)
# Add newly-registered users to Person Registry & 'Authenticated' role
auth.settings.register_onaccept = lambda form: auth.s3_register(form)
_table_user = auth.settings.table_user
_table_user.first_name.label = T("First Name")
_table_user.first_name.comment = SPAN("*", _class="req")
_table_user.last_name.label = T("Last Name")
#_table_user.last_name.comment = SPAN("*", _class="req")
_table_user.email.label = T("E-mail")
_table_user.email.comment = SPAN("*", _class="req")
_table_user.password.comment = SPAN("*", _class="req")
#_table_user.password.label = T("Password")
#_table_user.language.label = T("Language")
_table_user.language.comment = DIV(_class="tooltip", _title="%s|%s" % (T("Language"),
T("The language you wish the site to be displayed in.")))
_table_user.language.represent = lambda opt: s3_languages.get(opt, UNKNOWN_OPT)
# -----------------------------------------------------------------------------
def index():
""" Main Home Page """
title = T("Sahana Eden Disaster Management Platform")
response.title = title
# Menu Boxes
#modules = deployment_settings.modules
def menu_box( title, ci, fi ):
""" Returns a menu_box linking to URL(ci, fi) """
return A( DIV(title, _class = "menu-box-r"), _class = "menu-box-l",
_href = URL( r=request, c=ci, f=fi) )
div_arrow_1 = DIV(IMG(_src = "/%s/static/img/arrow_blue_right.png" % \
request.application),
_class = "div_arrow")
div_arrow_2 = DIV(IMG(_src = "/%s/static/img/arrow_blue_right.png" % \
request.application),
_class = "div_arrow")
div_sit = DIV( H3(T("SITUATION")),
_class = "menu_div")
if deployment_settings.has_module("irs"):
div_sit.append(menu_box(T("Incidents"), "irs", "ireport"))
if deployment_settings.has_module("assess"):
div_sit.append(menu_box(T("Assessments"), "assess", "assess"))
div_sit.append(menu_box(T("Organizations"), "org", "organisation"))
div_dec = DIV( H3(T("DECISION")),
_class = "menu_div")
div_dec.append(menu_box(T("Map"), "gis", "index"))
if deployment_settings.has_module("assess"):
div_dec.append(menu_box(T("Gap Report"), "project", "gap_report"))
div_dec.append(menu_box(T("Gap Map"), "project", "gap_map"))
div_res = DIV(H3(T("RESPONSE")),
_class = "menu_div",
_id = "menu_div_response")
if deployment_settings.has_module("req"):
div_res.append(menu_box(T("Requests"), "req", "req"))
if deployment_settings.has_module("project"):
div_res.append(menu_box(T("Activities"), "project", "activity"))
#div_additional = DIV(A(DIV(T("Mobile Assess."),
# _class = "menu_box"
# ),
# _href = URL( r=request, c="assess", f= "mobile_basic_assess")
# ))
menu_boxes = DIV(div_sit,
div_arrow_1,
div_dec,
div_arrow_2,
div_res,
#div_additional,
)
# @ToDo: Replace this with an easily-customisable section on the homepage
#settings = db(db.s3_setting.id == 1).select(limitby=(0, 1)).first()
#if settings:
# admin_name = settings.admin_name
# admin_email = settings.admin_email
# admin_tel = settings.admin_tel
#else:
# # db empty and prepopulate is false
# admin_name = T("Sahana Administrator").xml(),
# admin_email = "support@Not Set",
# admin_tel = T("Not Set").xml(),
# Login/Registration forms
self_registration = deployment_settings.get_security_self_registration()
registered = False
login_form = None
login_div = None
register_form = None
register_div = None
if 2 not in session.s3.roles:
# This user isn't yet logged-in
if request.cookies.has_key("registered"):
# This browser has logged-in before
registered = True
# Provide a login box on front page
request.args = ["login"]
auth.messages.submit_button = T("Login")
login_form = auth()
login_div = DIV(H3(T("Login")),
P(XML("%s <b>%s</b> %s" % (T("Registered users can"),
T("login"),
T("to access the system")))))
if self_registration:
# Provide a Registration box on front page
request.args = ["register"]
auth.messages.submit_button = T("Register")
register_form = auth()
register_div = DIV(H3(T("Register")),
P(XML("%s <b>%s</b>" % (T("If you would like to help, then please"),
T("sign-up now")))))
if session.s3.debug:
validate_script = SCRIPT(_type="text/javascript",
_src=URL(r=request, c="static", f="scripts/S3/jquery.validate.js"))
else:
validate_script = SCRIPT(_type="text/javascript",
_src=URL(r=request, c="static", f="scripts/S3/jquery.validate.pack.js"))
register_div.append(validate_script)
if request.env.request_method == "POST":
post_script = """
// Unhide register form
$('#register_form').removeClass('hide');
// Hide login form
$('#login_form').addClass('hide');
"""
else:
post_script = ""
register_script = SCRIPT("""
$(document).ready(function() {
// Change register/login links to avoid page reload, make back button work.
$('#register-btn').attr('href', '#register');
$('#login-btn').attr('href', '#login');
%s
// Redirect Register Button to unhide
$('#register-btn').click(function() {
// Unhide register form
$('#register_form').removeClass('hide');
// Hide login form
$('#login_form').addClass('hide');
});
// Redirect Login Button to unhide
$('#login-btn').click(function() {
// Hide register form
$('#register_form').addClass('hide');
// Unhide login form
$('#login_form').removeClass('hide');
});
});
""" % post_script)
register_div.append(register_script)
return dict(title = title,
#modules=modules,
menu_boxes=menu_boxes,
#admin_name=admin_name,
#admin_email=admin_email,
#admin_tel=admin_tel,
self_registration=self_registration,
registered=registered,
login_form=login_form,
login_div=login_div,
register_form=register_form,
register_div=register_div
)
# -----------------------------------------------------------------------------
def rapid():
""" Set/remove rapid data entry flag """
val = request.vars.get("val", True)
if val == "0":
val = False
else:
val = True
session.s3.rapid_data_entry = val
response.view = "xml.html"
return dict(item=str(session.s3.rapid_data_entry))
# -----------------------------------------------------------------------------
def user():
"Auth functions based on arg. See gluon/tools.py"
auth.settings.on_failed_authorization = URL(r=request, f="error")
if request.args and request.args(0) == "login_next":
# Can redirect the user to another page on first login for workflow (set in 00_settings.py)
# Note the timestamp of last login through the browser
if auth.is_logged_in():
db(db.auth_user.id == auth.user.id).update(timestmp = request.utcnow)
_table_user = auth.settings.table_user
if request.args and request.args(0) == "profile":
#_table_user.organisation.writable = False
_table_user.utc_offset.readable = True
_table_user.utc_offset.writable = True
login_form = register_form = None
if request.args and request.args(0) == "login":
auth.messages.submit_button = T("Login")
form = auth()
login_form = form
elif request.args and request.args(0) == "register":
auth.messages.submit_button = T("Register")
form = auth()
register_form = form
else:
form = auth()
if request.args and request.args(0) == "profile" and deployment_settings.get_auth_openid():
form = DIV(form, openid_login_form.list_user_openids())
self_registration = deployment_settings.get_security_self_registration()
# Use Custom Ext views
# Best to not use an Ext form for login: can't save username/password in browser & can't hit 'Enter' to submit!
#if request.args(0) == "login":
# response.title = T("Login")
# response.view = "auth/login.html"
return dict(form=form, login_form=login_form, register_form=register_form, self_registration=self_registration)
# -------------------------------------------------------------------------
def source():
""" RESTful CRUD controller """
return s3_rest_controller("s3", "source")
# -------------------------------------------------------------------------
# About Sahana
def apath(path=""):
"Application path"
import os
from gluon.fileutils import up
opath = up(request.folder)
#TODO: This path manipulation is very OS specific.
while path[:3] == "../": opath, path=up(opath), path[3:]
return os.path.join(opath,path).replace("\\", "/")
def about():
"""
The About page provides details on the software
        dependencies and versions available to this instance
of Sahana Eden.
"""
import sys
import subprocess
import string
python_version = sys.version
web2py_version = open(apath("../VERSION"), "r").read()[8:]
sahana_version = open(os.path.join(request.folder, "VERSION"), "r").read()
try:
sqlite_version = (subprocess.Popen(["sqlite3", "-version"], stdout=subprocess.PIPE).communicate()[0]).rstrip()
except:
sqlite_version = T("Not installed or incorrectly configured.")
try:
mysql_version = (subprocess.Popen(["mysql", "--version"], stdout=subprocess.PIPE).communicate()[0]).rstrip()[10:]
except:
mysql_version = T("Not installed or incorrectly configured.")
try:
pgsql_reply = (subprocess.Popen(["psql", "--version"], stdout=subprocess.PIPE).communicate()[0])
pgsql_version = string.split(pgsql_reply)[2]
except:
pgsql_version = T("Not installed or incorrectly configured.")
try:
import MySQLdb
pymysql_version = MySQLdb.__revision__
except:
pymysql_version = T("Not installed or incorrectly configured.")
try:
import reportlab
reportlab_version = reportlab.Version
except:
reportlab_version = T("Not installed or incorrectly configured.")
try:
import xlwt
xlwt_version = xlwt.__VERSION__
except:
xlwt_version = T("Not installed or incorrectly configured.")
return dict(
python_version=python_version,
sahana_version=sahana_version,
web2py_version=web2py_version,
sqlite_version=sqlite_version,
mysql_version=mysql_version,
pgsql_version=pgsql_version,
pymysql_version=pymysql_version,
reportlab_version=reportlab_version,
xlwt_version=xlwt_version
)
# -----------------------------------------------------------------------------
def help():
"Custom View"
response.title = T("Help")
return dict()
# -----------------------------------------------------------------------------
def contact():
"""
Give the user options to contact the site admins.
Either:
An internal Support Requests database
or:
Custom View
"""
if auth.is_logged_in() and deployment_settings.get_options_support_requests():
# Provide an internal Support Requests ticketing system.
prefix = "support"
resourcename = "req"
tablename = "%s_%s" % (prefix, resourcename)
table = db[tablename]
# Pre-processor
def prep(r):
# Only Admins should be able to update ticket status
if not auth.s3_has_role(1):
table.status.writable = False
table.actions.writable = False
if r.interactive and r.method == "create":
table.status.readable = False
table.actions.readable = False
return True
response.s3.prep = prep
output = s3_rest_controller(prefix, resourcename)
return output
else:
# Default: Simple Custom View
response.title = T("Contact us")
return dict()
# END -------------------------------------------------------------------------
|
sinsai/Sahana_eden
|
controllers/default.py
|
Python
|
mit
| 13,802
| 0.007245
|
from odoo import models, fields, api, _
from odoo.exceptions import ValidationError
from datetime import datetime
class StockScheduleWizard(models.TransientModel):
_name = "stock.schedule.wizard"
scheduled_date = fields.Datetime('Scheduled shipping date')
@api.multi
def action_button_schedule(self):
if self.scheduled_date:
date_now = str(datetime.now())
difference = datetime.strptime(date_now, '%Y-%m-%d %H:%M:%S.%f') - \
datetime.strptime(self.scheduled_date, '%Y-%m-%d %H:%M:%S')
difference = difference.total_seconds() / float(60)
if difference > 0:
                raise ValidationError(_("Scheduled date must be later than the current date"))
picking = self.env['stock.picking'].browse(self.env.context['parent_obj'])
cron_id = self.env['queue.job'].search([('model_name','=','stock.picking'),('state','=','pending'),('record_ids','like',picking.id), ('method_name','=','make_picking_sync')])
if cron_id:
if len(cron_id) > 1:
cron_id = cron_id[0]
if self.scheduled_date > cron_id.eta:
cron_id.unlink()
picking.sale_id.scheduled_date = self.scheduled_date
picking.not_sync = True
picking._process_picking_scheduled_time()
|
Comunitea/CMNT_004_15
|
project-addons/scheduled_shipment/wizard/schedule_wizard.py
|
Python
|
agpl-3.0
| 1,376
| 0.011628
|
#Copyright 2013 Paul Barton
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
import Quartz
from AppKit import NSEvent, NSScreen
from .base import PyMouseMeta, PyMouseEventMeta
pressID = [None, Quartz.kCGEventLeftMouseDown,
Quartz.kCGEventRightMouseDown, Quartz.kCGEventOtherMouseDown]
releaseID = [None, Quartz.kCGEventLeftMouseUp,
Quartz.kCGEventRightMouseUp, Quartz.kCGEventOtherMouseUp]
class PyMouse(PyMouseMeta):
def press(self, x, y, button=1):
event = Quartz.CGEventCreateMouseEvent(None,
pressID[button],
(x, y),
button - 1)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, event)
def release(self, x, y, button=1):
event = Quartz.CGEventCreateMouseEvent(None,
releaseID[button],
(x, y),
button - 1)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, event)
def move(self, x, y):
move = Quartz.CGEventCreateMouseEvent(None, Quartz.kCGEventMouseMoved, (x, y), 0)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, move)
def drag(self, x, y):
drag = Quartz.CGEventCreateMouseEvent(None, Quartz.kCGEventLeftMouseDragged, (x, y), 0)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, drag)
def position(self):
loc = NSEvent.mouseLocation()
return loc.x, Quartz.CGDisplayPixelsHigh(0) - loc.y
def screen_size(self):
return NSScreen.mainScreen().frame().size.width, NSScreen.mainScreen().frame().size.height
def scroll(self, vertical=None, horizontal=None, depth=None):
#Local submethod for generating Mac scroll events in one axis at a time
def scroll_event(y_move=0, x_move=0, z_move=0, n=1):
for _ in range(abs(n)):
scrollWheelEvent = Quartz.CGEventCreateScrollWheelEvent(
None, # No source
Quartz.kCGScrollEventUnitLine, # Unit of measurement is lines
3, # Number of wheels(dimensions)
y_move,
x_move,
z_move)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, scrollWheelEvent)
#Execute vertical then horizontal then depth scrolling events
if vertical is not None:
vertical = int(vertical)
if vertical == 0: # Do nothing with 0 distance
pass
elif vertical > 0: # Scroll up if positive
scroll_event(y_move=1, n=vertical)
else: # Scroll down if negative
scroll_event(y_move=-1, n=abs(vertical))
if horizontal is not None:
horizontal = int(horizontal)
if horizontal == 0: # Do nothing with 0 distance
pass
elif horizontal > 0: # Scroll right if positive
scroll_event(x_move=1, n=horizontal)
else: # Scroll left if negative
scroll_event(x_move=-1, n=abs(horizontal))
if depth is not None:
depth = int(depth)
if depth == 0: # Do nothing with 0 distance
pass
elif vertical > 0: # Scroll "out" if positive
scroll_event(z_move=1, n=depth)
else: # Scroll "in" if negative
scroll_event(z_move=-1, n=abs(depth))
class PyMouseEvent(PyMouseEventMeta):
def run(self):
tap = Quartz.CGEventTapCreate(
Quartz.kCGSessionEventTap,
Quartz.kCGHeadInsertEventTap,
Quartz.kCGEventTapOptionDefault,
Quartz.CGEventMaskBit(Quartz.kCGEventMouseMoved) |
Quartz.CGEventMaskBit(Quartz.kCGEventLeftMouseDown) |
Quartz.CGEventMaskBit(Quartz.kCGEventLeftMouseUp) |
Quartz.CGEventMaskBit(Quartz.kCGEventRightMouseDown) |
Quartz.CGEventMaskBit(Quartz.kCGEventRightMouseUp) |
Quartz.CGEventMaskBit(Quartz.kCGEventOtherMouseDown) |
Quartz.CGEventMaskBit(Quartz.kCGEventOtherMouseUp),
self.handler,
None)
loopsource = Quartz.CFMachPortCreateRunLoopSource(None, tap, 0)
loop = Quartz.CFRunLoopGetCurrent()
Quartz.CFRunLoopAddSource(loop, loopsource, Quartz.kCFRunLoopDefaultMode)
Quartz.CGEventTapEnable(tap, True)
while self.state:
Quartz.CFRunLoopRunInMode(Quartz.kCFRunLoopDefaultMode, 5, False)
def handler(self, proxy, type, event, refcon):
(x, y) = Quartz.CGEventGetLocation(event)
if type in pressID:
self.click(x, y, pressID.index(type), True)
elif type in releaseID:
self.click(x, y, releaseID.index(type), False)
else:
self.move(x, y)
if self.capture:
Quartz.CGEventSetType(event, Quartz.kCGEventNull)
return event
|
Sebelino/PyUserInput
|
pymouse/mac.py
|
Python
|
lgpl-3.0
| 5,547
| 0.003966
|
# Copyright (C)
#
# Author :
from GIC.Channels.GenericChannel import *
class ChannelTest (GenericChannel):
# mandatory fields to work on LibreGeoSocial search engine
MANDATORY_FIELDS = ["latitude", "longitude", "radius", "category"]
CATEGORIES = [{"id" : "0", "name" : "all", "desc" : "All supported categories "},
{"id" : "1", "name" : "category1", "desc" : "Category for..."},
]
def __init__ (self):
self.options = {}
def get_categories(self):
return self.CATEGORIES
def get_info(self):
return "Channel description"
def set_options(self, options):
"""
Fill self.options with the received dictionary
regarding mandatory and optional fields of your channel
"""
return True, ""
def process (self):
"""
Make the search and return the nodes
"""
|
kgblll/libresoft-gymkhana
|
libs/ChannelTemplate.py
|
Python
|
gpl-2.0
| 822
| 0.06691
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('dataset', '0009_remove_selection_model'),
]
operations = [
migrations.AlterField(
model_name='dataset',
name='name',
field=models.CharField(default=b'', max_length=150),
),
]
|
michaelbrooks/uw-message-coding
|
message_coding/apps/dataset/migrations/0010_auto_20150619_2106.py
|
Python
|
mit
| 419
| 0
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
VERSION = "1.0.0b3"
|
Azure/azure-sdk-for-python
|
sdk/videoanalyzer/azure-mgmt-videoanalyzer/azure/mgmt/videoanalyzer/_version.py
|
Python
|
mit
| 488
| 0.004098
|
import copy
import glob
import fnmatch
import os
import logging
import yaml
log = logging.getLogger("subiquitycore.netplan")
def _sanitize_inteface_config(iface_config):
for ap, ap_config in iface_config.get('access-points', {}).items():
if 'password' in ap_config:
ap_config['password'] = '<REDACTED>'
def sanitize_interface_config(iface_config):
iface_config = copy.deepcopy(iface_config)
_sanitize_inteface_config(iface_config)
return iface_config
def sanitize_config(config):
"""Return a copy of config with passwords redacted."""
config = copy.deepcopy(config)
interfaces = config.get('network', {}).get('wifis', {}).items()
for iface, iface_config in interfaces:
_sanitize_inteface_config(iface_config)
return config
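# A quick sketch of the intended use; the yaml-ish dict below is a minimal,
# hypothetical netplan snippet, not taken from a real system:
#
#     cfg = {'network': {'wifis': {'wlan0': {
#         'access-points': {'home': {'password': 'hunter2'}}}}}}
#     sanitize_config(cfg)  # returns a copy with the password replaced by
#                           # '<REDACTED>'; the original dict is left untouched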
class Config:
"""A NetplanConfig represents the network config for a system.
Call parse_netplan_config() with each piece of yaml config, and then
call config_for_device to get the config that matches a particular
network device, if any.
"""
def __init__(self):
self.physical_devices = []
self.virtual_devices = []
self.config = {}
def parse_netplan_config(self, config):
try:
self.config = config = yaml.safe_load(config)
except yaml.ReaderError as e:
log.info("could not parse config: %s", e)
return
network = config.get('network')
if network is None:
log.info("no 'network' key in config")
return
version = network.get("version")
if version != 2:
log.info("network has no/unexpected version %s", version)
return
for phys_key in 'ethernets', 'wifis':
for dev, dev_config in network.get(phys_key, {}).items():
self.physical_devices.append(_PhysicalDevice(dev, dev_config))
for virt_key in 'bonds', 'vlans':
for dev, dev_config in network.get(virt_key, {}).items():
self.virtual_devices.append(_VirtualDevice(dev, dev_config))
def config_for_device(self, link):
if link.is_virtual:
for dev in self.virtual_devices:
if dev.name == link.name:
return copy.deepcopy(dev.config)
else:
allowed_matches = ('macaddress',)
match_key = 'match'
for dev in self.physical_devices:
if dev.matches_link(link):
config = copy.deepcopy(dev.config)
if match_key in config:
match = {k: v for k, v in config[match_key].items()
if k in allowed_matches}
if match:
config[match_key] = match
else:
del config[match_key]
return config
return {}
def load_from_root(self, root):
for path in configs_in_root(root):
            try:
                fp = open(path)
            except OSError:
                log.exception("opening %s failed", path)
                continue
            with fp:
                self.parse_netplan_config(fp.read())
class _PhysicalDevice:
def __init__(self, name, config):
match = config.get('match')
if match is None:
self.match_name = name
self.match_mac = None
self.match_driver = None
else:
self.match_name = match.get('name')
self.match_mac = match.get('macaddress')
self.match_driver = match.get('driver')
self.config = config
log.debug(
"config for %s = %s" % (
name, sanitize_interface_config(self.config)))
def matches_link(self, link):
if self.match_name is not None:
matches_name = fnmatch.fnmatch(link.name, self.match_name)
else:
matches_name = True
if self.match_mac is not None:
matches_mac = self.match_mac == link.hwaddr
else:
matches_mac = True
if self.match_driver is not None:
matches_driver = self.match_driver == link.driver
else:
matches_driver = True
return matches_name and matches_mac and matches_driver
class _VirtualDevice:
def __init__(self, name, config):
self.name = name
self.config = config
log.debug(
"config for %s = %s" % (
name, sanitize_interface_config(self.config)))
def configs_in_root(root, masked=False):
"""Return a list of all netplan configs under root.
The list is ordered in increasing precedence.
@param masked: if True, include config paths that are masked
by the same basename in a different directory."""
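    # For example (hypothetical paths): given <root>/lib/netplan/00-a.yaml,
    # <root>/etc/netplan/00-a.yaml and <root>/run/netplan/10-b.yaml, the default
    # masked=False call collapses the duplicate basename (the etc copy shadows
    # the lib copy) and returns the survivors sorted in increasing precedence.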
if not os.path.isabs(root):
root = os.path.abspath(root)
wildcard = "*.yaml"
dirs = {"lib": "0", "etc": "1", "run": "2"}
rootlen = len(root)
paths = []
for d in dirs:
paths.extend(glob.glob(os.path.join(root, d, "netplan", wildcard)))
    def mykey(path):
        """returned key is basename + string-precedence based on dir."""
        bname = os.path.basename(path)
        bdir = path[rootlen + 1:]
        bdir = bdir[:bdir.find(os.path.sep)]
        return "%s/%s" % (bname, dirs[bdir])
if not masked:
paths = {os.path.basename(p): p for p in paths}.values()
return sorted(paths, key=mykey)
|
CanonicalLtd/subiquity
|
subiquitycore/netplan.py
|
Python
|
agpl-3.0
| 5,471
| 0
|
from __future__ import print_function
#from collections import defaultdict
#from functools import reduce
from six import iteritems, string_types, PY2
#from six.moves import zip, range
import numpy as np
from numpy import (array, unique, arange, searchsorted,
setdiff1d, intersect1d, asarray)
from numpy.linalg import norm
import scipy
from pyNastran.utils import integer_types
from pyNastran.bdf.bdf import BDF
def bdf_equivalence_nodes(bdf_filename, bdf_filename_out, tol,
renumber_nodes=False, neq_max=4, xref=True,
node_set=None,
size=8, is_double=False,
remove_collapsed_elements=False,
avoid_collapsed_elements=False,
crash_on_collapse=False, log=None, debug=True):
"""
    Equivalences nodes; keeps the lower node id; collapses nodes that fall within ``tol`` of each other
Parameters
----------
bdf_filename : str / BDF
str : bdf file path
BDF : a BDF model that is fully valid (see xref)
bdf_filename_out : str
a bdf_filename to write
tol : float
the spherical tolerance
renumber_nodes : bool
should the nodes be renumbered (default=False)
neq_max : int
the number of "close" points (default=4)
    xref : bool
does the model need to be cross_referenced
(default=True; only applies to model option)
node_set : List[int] / (n, ) ndarray
the list/array of nodes to consider (not supported with renumber_nodes=True)
size : int; {8, 16}; default=8
the bdf write precision
is_double : bool; default=False
the field precision to write
crash_on_collapse : bool; default=False
stop if nodes have been collapsed
False: blindly move on
True: rereads the BDF which catches doubled nodes (temporary);
in the future collapse=True won't need to double read;
an alternative is to do Patran's method of avoiding collapse)
remove_collapsed_elements : bool; default=False (unsupported)
True : 1D/2D/3D elements will not be collapsed;
CELASx/CDAMP/MPC/etc. are not considered
False : no elements will be removed
avoid_collapsed_elements : bool; default=False (unsupported)
True : only collapses that don't break 1D/2D/3D elements will be considered;
CELASx/CDAMP/MPC/etc. are considered
False : element can be collapsed
debug : bool
bdf debugging
log : logger(); default=None
bdf logging
Returns
-------
model : BDF()
The BDF model corresponding to bdf_filename_out
.. warning:: I doubt SPOINTs/EPOINTs work correctly
.. warning:: xref not fully implemented (assumes cid=0)
    .. todo:: node_set still works on all the nodes in the big
              kdtree loop, which is very inefficient
.. todo:: remove_collapsed_elements is not supported
.. todo:: avoid_collapsed_elements is not supported
"""
if not isinstance(tol, float):
tol = float(tol)
nodes_xyz, model, nids, inew = _eq_nodes_setup(
bdf_filename, tol, renumber_nodes=renumber_nodes,
xref=xref, node_set=node_set, debug=debug)
ieq, slots = _eq_nodes_build_tree(nodes_xyz, nids, tol,
inew=inew, node_set=node_set,
neq_max=neq_max)[1:]
nid_pairs = _eq_nodes_find_pairs(nids, slots, ieq, node_set=node_set)
_eq_nodes_final(nid_pairs, model, tol, node_set=node_set)
if bdf_filename_out is not None:
model.write_bdf(bdf_filename_out, size=size, is_double=is_double)
if crash_on_collapse:
# lazy way to make sure there aren't any collapsed nodes
model2 = BDF(log=log, debug=debug)
model2.read_bdf(bdf_filename_out)
return model
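# A minimal usage sketch (file names are hypothetical):
#
#     from pyNastran.bdf.mesh_utils.bdf_equivalence import bdf_equivalence_nodes
#     model = bdf_equivalence_nodes('model_in.bdf', 'model_out.bdf', tol=0.001,
#                                   neq_max=4, node_set=None)
#
# Nodes closer than `tol` are collapsed onto the lower node id and the
# equivalenced deck is written to 'model_out.bdf'.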
def _eq_nodes_setup(bdf_filename, tol,
renumber_nodes=False, xref=True,
node_set=None, debug=True):
"""helper function for `bdf_equivalence_nodes`"""
if node_set is not None:
if renumber_nodes:
raise NotImplementedError('node_set is not None & renumber_nodes=True')
#print(type(node_set))
#print('*node_set', node_set)
assert len(node_set) > 0, node_set
if isinstance(node_set, set):
node_set = asarray(list(node_set), dtype='int32')
else:
node_set = asarray(node_set, dtype='int32')
if isinstance(bdf_filename, string_types):
xref = True
model = BDF(debug=debug)
model.read_bdf(bdf_filename, xref=True)
else:
model = bdf_filename
model.cross_reference(xref=xref)
coord_ids = model.coord_ids
needs_get_position = True if coord_ids == [0] else False
# quads / tris
#nids_quads = []
#eids_quads = []
#nids_tris = []
#eids_tris = []
# map the node ids to the slot in the nids array
renumber_nodes = False
inode = 0
nid_map = {}
if node_set is not None:
if PY2:
all_nids = array(model.nodes.keys(), dtype='int32')
else:
all_nids = array(list(model.nodes.keys()), dtype='int32')
# B - A
# these are all the nodes that are requested from node_set that are missing
# thus len(diff_nodes) == 0
diff_nodes = setdiff1d(node_set, all_nids)
if len(diff_nodes) != 0:
msg = ('The following nodes cannot be found, but are included'
' in the reduced set; nids=%s' % diff_nodes)
raise RuntimeError(msg)
# A & B
# the nodes to analyze are the union of all the nodes and the desired set
# which is basically the same as:
# nids = unique(node_set)
nids = intersect1d(all_nids, node_set, assume_unique=True) # the new values
if renumber_nodes:
raise NotImplementedError('node_set is not None & renumber_nodes=True')
else:
for nid in all_nids:
nid_map[inode] = nid
inode += 1
#nids = array([node.nid for nid, node in sorted(iteritems(model.nodes))
#if nid in node_set], dtype='int32')
else:
if renumber_nodes:
for nid, node in sorted(iteritems(model.nodes)):
node.nid = inode + 1
nid_map[inode] = nid
inode += 1
nnodes = len(model.nodes)
nids = arange(1, inode + 1, dtype='int32')
assert nids[-1] == nnodes
else:
for nid, node in sorted(iteritems(model.nodes)):
nid_map[inode] = nid
inode += 1
nids = array([node.nid for nid, node in sorted(iteritems(model.nodes))], dtype='int32')
all_nids = nids
if needs_get_position:
nodes_xyz = array([model.nodes[nid].get_position()
for nid in nids], dtype='float32')
else:
nodes_xyz = array([model.nodes[nid].xyz
for nid in nids], dtype='float32')
if node_set is not None:
assert nodes_xyz.shape[0] == len(nids)
if 0:
# I forget entirely what this block of code is for, but my general
# recollection was that it checked that all the nodes that were
# referenced were included in the nids list. I'd rather break that
# check in order to support nodes_set.
#
# It's also possible that it's here, so you only consider nodes that
# are associated...
# there is some set of points that are used on the elements that
# will be considered.
#
# Presumably this is enough to capture all the node ids and NOT
# spoints, but I doubt it...
spoint_epoint_nid_set = set([])
for eid, element in sorted(iteritems(model.elements)):
spoint_epoint_nid_set.update(element.node_ids)
for eid, element in sorted(iteritems(model.masses)):
spoint_epoint_nid_set.update(element.node_ids)
if model.spoints and model.epoints:
nids_new = spoint_epoint_nid_set - model.spoints.points - model.epoints.points
elif model.spoints:
nids_new = spoint_epoint_nid_set - model.spoints.points
elif model.epoints:
nids_new = spoint_epoint_nid_set - model.epoints.points
else:
nids_new = spoint_epoint_nid_set
if None in nids_new:
nids_new.remove(None)
# autosorts the data
nids_new = unique(list(nids_new))
assert isinstance(nids_new[0], integer_types), type(nids_new[0])
missing_nids = list(set(nids_new) - set(all_nids))
if missing_nids:
missing_nids.sort()
msg = 'There are missing nodes...\n' # TODO: in what???
msg = 'missing nids=%s' % str(missing_nids)
raise RuntimeError(msg)
# get the node_id mapping for the kdtree
inew = searchsorted(nids, nids_new, side='left')
# print('nids_new =', nids_new)
else:
inew = slice(None)
#assert np.array_equal(nids[inew], nids_new), 'some nodes are not defined'
return nodes_xyz, model, nids, inew
def _eq_nodes_find_pairs(nids, slots, ieq, node_set=None):
"""helper function for `bdf_equivalence_nodes`"""
irows, icols = slots
#replacer = unique(ieq[slots]) ## TODO: turn this back on?
#skip_nodes = []
nid_pairs = []
for (irow, icol) in zip(irows, icols):
inid2 = ieq[irow, icol]
nid1 = nids[irow]
nid2 = nids[inid2]
if nid1 == nid2:
continue
if node_set is not None:
if nid1 not in node_set and nid2 not in node_set:
continue
nid_pairs.append((nid1, nid2))
return nid_pairs
def _eq_nodes_final(nid_pairs, model, tol, node_set=None):
"""apply nodal equivalencing to model"""
for (nid1, nid2) in nid_pairs:
node1 = model.nodes[nid1]
node2 = model.nodes[nid2]
# TODO: doesn't use get position...
distance = norm(node1.xyz - node2.xyz)
#print(' irow=%s->n1=%s icol=%s->n2=%s' % (irow, nid1, icol, nid2))
if distance > tol:
#print(' *n1=%-4s xyz=%s\n *n2=%-4s xyz=%s\n *distance=%s\n' % (
# nid1, list_print(node1.xyz),
# nid2, list_print(node2.xyz),
# distance))
continue
if node_set is not None:
assert nid1 in node_set, 'nid1=%s node_set=%s' % (nid1, node_set)
assert nid2 in node_set, 'nid2=%s node_set=%s' % (nid2, node_set)
#print(' n1=%-4s xyz=%s\n n2=%-4s xyz=%s\n distance=%s\n' % (
#nid1, str(node1.xyz),
#nid2, str(node2.xyz),
#distance))
# if hasattr(node2, 'new_node_id'):
# else:
node2.nid = node1.nid
node2.xyz = node1.xyz
node2.cp = node1.cp
assert node2.cd == node1.cd
assert node2.ps == node1.ps
assert node2.seid == node1.seid
# node2.new_nid = node1.nid
#skip_nodes.append(nid2)
return
def _eq_nodes_build_tree(nodes_xyz, nids, tol, inew=None, node_set=None, neq_max=4):
"""helper function for `bdf_equivalence_nodes`"""
assert isinstance(tol, float), 'tol=%r' % tol
kdt = _get_tree(nodes_xyz)
    # check the closest neq_max nodes for equality
deq, ieq = kdt.query(nodes_xyz[inew, :], k=neq_max, distance_upper_bound=tol)
if node_set is not None:
assert len(deq) == len(nids)
nnodes = len(nids)
# get the ids of the duplicate nodes
slots = np.where(ieq[:, :] < nnodes)
return kdt, ieq, slots
def _get_tree(nodes_xyz):
"""gets the kdtree"""
assert isinstance(nodes_xyz, np.ndarray), type(nodes_xyz)
# build the kdtree
if scipy.__version__ < '0.18.1':
try:
kdt = scipy.spatial.KDTree(nodes_xyz)
except RuntimeError:
print(nodes_xyz)
raise RuntimeError(nodes_xyz)
else:
try:
kdt = scipy.spatial.cKDTree(nodes_xyz)
except RuntimeError:
print(nodes_xyz)
raise RuntimeError(nodes_xyz)
return kdt
|
saullocastro/pyNastran
|
pyNastran/bdf/mesh_utils/bdf_equivalence.py
|
Python
|
lgpl-3.0
| 12,362
| 0.003074
|
#
# THIS FILE WAS AUTOGENERATED BY makeSip6.py
# Do not edit this file manually. All changes will be lost.
#
"""
# TOP2049 Open Source programming suite
#
# Microchip PIC24f08kl201 SIP6
#
# Copyright (c) 2014 Pavel Stemberk <stemberk@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
from .microchip16_common import *
from .configWords import klx0x_fuseDesc
class Chip_Pic24f08kl201sip6(Chip_Microchip16_common):
voltageVDD = 3.3
voltageVPP = 8
logicalFlashProgramMemorySize = 0x800000
logicalFlashConfigurationMemorySize = 0x800000
hasEEPROM = False
def __init__(self):
Chip_Microchip16_common.__init__(self,
chipPackage = "DIP10",
chipPinVCC = 9,
chipPinsVPP = 10,
chipPinGND = 8,
signature=b"\x06\x4b",
# flashPageSize (in number of 24bit words)
flashPageSize=0x15FE // 2 + 2,
# flashPageSize=0x40,
flashPages=1,
# eepromPageSize (in 16bit words)
eepromPageSize=0,
eepromPages=0,
# all 7 words uses lowest byte only
fuseBytes=2 * 9
)
self.configWordAddr = 0xF80000
# self.osccalBackupAddr = self.userIDLocationAddr + self.userIDLocationSize
fuseDesc = klx0x_fuseDesc
ChipDescription(
Chip_Pic24f08kl201sip6,
bitfile = "microchip16sip6",
chipID="pic24f08kl201sip6",
runtimeID = (0xDF05, 0x01),
chipVendors="Microchip",
description = "PIC24F08KL201 - ICD",
packages = (("DIP10", ""), ),
fuseDesc=fuseDesc,
maintainer="Pavel Stemberk <stemberk@gmail.com>",
)
|
mbuesch/toprammer
|
libtoprammer/chips/microchip16/pic24f08kl201sip6.py
|
Python
|
gpl-2.0
| 2,151
| 0.034868
|
#!/usr/bin/env python
'''
Created on 18 gru 2014
@author: ghalajko
'''
from lvstop.screen import Screen
from lvstop import loop
if __name__ == '__main__':
with Screen() as scr:
try:
scr.main_loop(loop)
except KeyboardInterrupt:
pass
except:
raise
|
cybermaniax/lvstop
|
src/main/python/ipvstop.py
|
Python
|
apache-2.0
| 332
| 0.009036
|
#!/usr/bin/env python3
# This file displays the Redis "lists" key
#
# First check that the key is present in the form
# Needed for the HTML handling part in Python
import cgi
import cgitb
# Enable error reporting to the web/http server
cgitb.enable()
# My libraries: mjl (Json, Files), mhl (Html), flt (T w/ Redis)
import mjl, mhl, flt
import redis, subprocess
# General parameters
TestoPagina="Genera file \".csv\" dei valori di chiave \"lists\" Redis"
DirBase="/var/www"
ConfigFile=DirBase+"/conf/config.json"
#ExecFile="/cgi-bin/<exefile>"
# Redis "key"
RedisKey = "*" # All keys
# Form name/s
FormName = "rkey"
# Open the Redis database using my library call
MyDB = flt.OpenDBFile(ConfigFile)
# Start web page - these are blocks of HTML provided by the library
print (mhl.MyHtml())
print (mhl.MyHtmlHead())
# Write the page title/text
print ("<h1>","<center>",TestoPagina,"</center>","</h1>")
#print ("<hr/>","<br/>")
# Optional help/annotation
#print ("Non ho rinominato i campi e non sono stato a riordinare le voci.<br/>")
form=cgi.FieldStorage()
if FormName not in form:
print ("<h2>ERRORE: Non e` stata passata la chiave Redis</h2>")
else:
RedisKey = cgi.escape(form[FormName].value)
RedisKeyStart = cgi.escape(form["VStart"].value)
RedisKeyStop = cgi.escape(form["VStop"].value)
print ("La chiave viene passata come argomento ad un'altro programma, quindi l'unico feedback possibile e` 0 se e` andato a buon fine, o 1 se c'e` stato un'errore.</br></br>")
print ("Comando eseguito:</br>/var/www/cgi-bin/setsVals2csv.py {0:s} {1:s} {2:s}</br></br>".format(RedisKey, RedisKeyStart, RedisKeyStop))
print (subprocess.call(['/var/www/cgi-bin/setsVals2csv.py', RedisKey, RedisKeyStart, RedisKeyStop]))
# End web page
print (mhl.MyHtmlBottom())
|
raspibo/Livello1
|
var/www/cgi-bin/writecsvlistsetsredis.py
|
Python
|
mit
| 1,795
| 0.018942
|
#!/usr/bin/env python
description = ">> make final magnitude"
usage = "%prog image [options] "
import os
import string
import re
import sys
from optparse import OptionParser
import time
import math
import agnkey
import numpy as np
if __name__ == "__main__":
start_time = time.time()
parser = OptionParser(usage=usage, description=description)
parser.add_option("-i", "--interactive", action="store_true", dest='interactive', default=False,
help='Interactive \t\t\t [%default]')
parser.add_option("-e", "--exzp", dest="exzp", default='',
type='str', help='external zero point from different field \t\t %default')
parser.add_option("-t", "--typemag", dest="typemag", default='fit',
type='str', help='type of magnitude fit,ph \t\t %default')
parser.add_option("--datatable", dest="datatable", default='dataredulco',
type='str', help='mysql table where stroe reduction info \t\t %default')
parser.add_option("--calib", dest="calibration", default='sloan',
type='str', help='calibration to (sloan,sloanprime,natural,apass) \t\t %default')
parser.add_option("-s", "--system", dest="field", default='',
type='str', help='photometric system [sloan, landolt] \t\t %default')
option, args = parser.parse_args()
if len(args) < 1:
sys.argv.append('--help')
_typemag = option.typemag
if _typemag not in ['fit', 'ph']:
sys.argv.append('--help')
option, args = parser.parse_args()
imglist = args[0]
lista = agnkey.util.readlist(imglist)
hdr = agnkey.util.readhdr(lista[0])
tel = agnkey.util.readkey3(hdr, 'telescop')
filters = agnkey.sites.filterst(tel)
filters1 = agnkey.sites.filterst1(tel)
_datatable = option.datatable
_exzp = option.exzp
_calib = option.calibration
_field = option.field
_interactive = option.interactive
typemag = 'PSFMAG1'
typemagerr = 'PSFDMAG1'
namemag = {'fit': ['PSFMAG1', 'PSFDMAG1'], 'ph': ['APMAG1', 'PSFDMAG1']}
dicti0 = agnkey.util.makecatalogue(lista)
dicti = {}
for _filter in dicti0:
for img in dicti0[_filter]:
if dicti0[_filter][img][namemag[_typemag][0]] != 9999:
if _filter not in dicti:
dicti[_filter] = {}
if img not in dicti[_filter]:
dicti[_filter][img] = {}
for key in dicti0[_filter][img].keys():
dicti[_filter][img][key] = dicti0[_filter][img][key]
if len(dicti) > 0:
allfilters = ''
for fil in dicti:
allfilters = allfilters + filters1[fil]
if _interactive:
print allfilters
if _field == 'apass' or _calib == 'apass':
queste0 = agnkey.agnloopdef.chosecolor(allfilters, False, 'apass')
queste1 = agnkey.agnloopdef.chosecolor(allfilters, True, 'apass')
else:
queste0 = agnkey.agnloopdef.chosecolor(allfilters, False)
queste1 = agnkey.agnloopdef.chosecolor(allfilters, True)
if _exzp:
lista2 = agnkey.util.readlist(_exzp)
dicti2 = agnkey.util.makecatalogue(lista2)
for _filter2 in dicti2:
img2 = dicti2[_filter2].keys()[0]
for jj in dicti2[_filter2][img2].keys():
if 'ZP' in jj:
if _filter2 in dicti:
for img in dicti[_filter2].keys():
dicti[_filter2][img][jj] = dicti2[_filter2][img2][jj]
agnkey.util.updateheader(img, 0,
{jj: [dicti2[_filter2][img2][jj], 'a b sa sb in y=a+bx']})
agnkey.util.updateheader(img, 0, {'CATALOG': [str(img2), 'catalogue source']})
print jj, dicti2[_filter2][img2][jj]
for _filter in dicti:
for img in dicti[_filter]:
if _interactive: print '\#### ', img
# if dicti[_filter][img][namemag[_typemag][0]]!=9999:
# start calibrating image 1
secondimage = []
jdvec = []
filtvec = []
colore = []
for ii in dicti[_filter][img].keys():
if 'ZP' in ii: # for each zero point available
cc = ii[-2:] # color used
for filt2 in dicti.keys():
if filt2 != _filter:
for jj in dicti[filt2].keys():
for ll in dicti[filt2][jj].keys():
if 'ZP' in ll and ll[-2:] == cc:
secondimage.append(jj)
jdvec.append(dicti[filt2][jj]['MJD'] - dicti[_filter][img]['MJD'])
filtvec.append(filt2)
colore.append(cc)
if len(secondimage) > 0:
colorescelto = ''
vv = queste1[agnkey.sites.filterst1(tel)[_filter]]
if len(vv) > 0:
if vv[0].upper() in colore:
colorescelto = vv[0].upper()
else:
vv = queste0[agnkey.sites.filterst1(tel)[_filter]]
if len(vv) > 0:
if vv[0].upper() in colore:
colorescelto = vv[0].upper()
if colorescelto:
print 'use ' + _filter + ' with color ' + colorescelto
filtvec = np.compress(np.array(colore) == colorescelto, filtvec)
jdvec = np.compress(np.array(colore) == colorescelto, jdvec)
secondimage = np.compress(np.array(colore) == colorescelto, secondimage)
colore = np.compress(np.array(colore) == colorescelto, colore)
dicti[_filter][img]['secondimg'] = secondimage[np.argmin(jdvec)]
dicti[_filter][img]['secondfilt'] = filtvec[np.argmin(jdvec)]
_filter2 = dicti[_filter][img]['secondfilt']
img2 = dicti[_filter][img]['secondimg']
col = colore[np.argmin(jdvec)]
if dicti[_filter][img]['telescope'] in ['lsc', '1m0-04', '1m0-05', '1m0-06', '1m0-09']:
kk = agnkey.sites.extintion('ctio')
elif dicti[_filter][img]['telescope'] in ['elp', '1m0-08']:
kk = agnkey.sites.extintion('mcdonald')
elif dicti[_filter][img]['telescope'] in ['cpt', '1m0-12', '1m0-10', '1m0-13']:
kk = agnkey.sites.extintion('southafrica')
elif dicti[_filter][img]['telescope'] in ['ftn']:
kk = agnkey.sites.extintion('mauna')
elif dicti[_filter][img]['telescope'] in ['1m0-03', '1m0-11', 'fts', 'coj']:
kk = agnkey.sites.extintion('siding')
else:
print _filter, img, dicti[_filter][img]
sys.exit('problem with dicti')
if _interactive:
print dicti[_filter][img]['airmass']
print kk[filters1[_filter]]
print 2.5 * math.log10(dicti[_filter][img]['exptime'])
print dicti[_filter][img][namemag[_typemag][0]]
# instrumental mag corrected for exp time and airmass
# mag0=dicti[_filter][img][namemag[_typemag][0]]+2.5*math.log10(dicti[_filter][img]['exptime'])-kk[filters1[_filter]]*dicti[_filter][img]['airmass']
mag0 = dicti[_filter][img][namemag[_typemag][0]] - kk[filters1[_filter]] * dicti[_filter][img][
'airmass']
dmag0 = dicti[_filter][img][namemag[_typemag][1]]
if dicti[_filter2][img2]['telescope'] in ['1m0-04', '1m0-05', '1m0-06', '1m0-09']:
kk = agnkey.sites.extintion('ctio')
elif dicti[_filter2][img2]['telescope'] in ['elp', '1m0-08']:
kk = agnkey.sites.extintion('mcdonald')
elif dicti[_filter2][img2]['telescope'] in ['cpt', '1m0-12', '1m0-10', '1m0-13']:
kk = agnkey.sites.extintion('southafrica')
elif dicti[_filter][img]['telescope'] in ['ftn']:
kk = agnkey.sites.extintion('mauna')
elif dicti[_filter][img]['telescope'] in ['1m0-03', '1m0-11', 'coj', 'fts']:
kk = agnkey.sites.extintion('siding')
else:
print dicti[_filter2][img2]
sys.exit('problem with dicti') # instrumental mag corrected for exp time and airmass
# mag1=dicti[_filter2][img2][namemag[_typemag][0]]+2.5*math.log10(dicti[_filter2][img2]['exptime'])-kk[filters1[_filter2]]*dicti[_filter2][img2]['airmass']
mag1 = dicti[_filter2][img2][namemag[_typemag][0]] - kk[filters1[_filter2]] * dicti[_filter2][img2][
'airmass']
dmag1 = dicti[_filter2][img2][namemag[_typemag][1]]
if filters1[_filter].upper() == col[0]:
Z1 = float(string.split(dicti[_filter][img]['ZP' + filters1[_filter].upper() + col.upper()])[1])
C1 = float(string.split(dicti[_filter][img]['ZP' + filters1[_filter].upper() + col.upper()])[2])
Z2 = float(
string.split(dicti[_filter2][img2]['ZP' + filters1[_filter2].upper() + col.upper()])[1])
C2 = float(
string.split(dicti[_filter2][img2]['ZP' + filters1[_filter2].upper() + col.upper()])[2])
M1, M2 = agnkey.agnabsphotdef.finalmag(Z1, Z2, C1, C2, mag0, mag1)
DZ1 = 0.0
DZ2 = 0.0
dc1, dc2, dz1, dz2, dm1, dm2 = agnkey.agnabsphotdef.erroremag(Z1, Z2, mag0, mag1, C1, C2, 0)
DM11 = np.sqrt((dm1 * dmag0) ** 2 + (dz1 * DZ1) ** 2 + (dm2 * dmag1) ** 2 + (dz2 * DZ2) ** 2)
if _interactive:
print '\n#### example computation '
print 'Z1 Z1 C1 C2 mag1 mag 2'
print 'M1 M2 '
print img, img2
print _filter, _filter2
print Z1, Z2, C1, C2, mag0, mag1
print M1, M2
try:
if np.isfinite(M1) and M1 < 999:
agnkey.agnsqldef.updatevalue(_datatable, 'mag', M1,
re.sub('sn2.fits', 'fits', string.split(img, '/')[-1]))
if _typemag == 'fit':
agnkey.agnsqldef.updatevalue(_datatable, 'magtype', 2,
re.sub('sn2.fits', 'fits', string.split(img, '/')[-1]))
elif _typemag == 'ph':
agnkey.agnsqldef.updatevalue(_datatable, 'magtype', 3,
re.sub('sn2.fits', 'fits', string.split(img, '/')[-1]))
agnkey.util.updateheader(img, 0, {'mag': [M1, 'calibrated mag']})
else:
agnkey.agnsqldef.updatevalue(_datatable, 'mag', 9999,
re.sub('sn2.fits', 'fits', string.split(img, '/')[-1]))
agnkey.util.updateheader(img, 0, {'mag': [9999, 'calibrated mag']})
if np.isfinite(DM11):
agnkey.agnsqldef.updatevalue(_datatable, 'dmag', DM11,
re.sub('sn2.fits', 'fits', string.split(img, '/')[-1]))
agnkey.util.updateheader(img, 0, {'dmag': [DM11, 'calibrated mag error']})
else:
agnkey.agnsqldef.updatevalue(_datatable, 'dmag', 9999,
re.sub('sn2.fits', 'fits', string.split(img, '/')[-1]))
agnkey.util.updateheader(img, 0, {'dmag': [9999, 'calibrated mag error']})
except:
print 'module mysqldef not found'
else:
Z2 = float(string.split(dicti[_filter][img]['ZP' + filters1[_filter].upper() + col.upper()])[1])
C2 = float(string.split(dicti[_filter][img]['ZP' + filters1[_filter].upper() + col.upper()])[2])
Z1 = float(
string.split(dicti[_filter2][img2]['ZP' + filters1[_filter2].upper() + col.upper()])[1])
C1 = float(
string.split(dicti[_filter2][img2]['ZP' + filters1[_filter2].upper() + col.upper()])[2])
M1, M2 = agnkey.agnabsphotdef.finalmag(Z1, Z2, C1, C2, mag1, mag0)
DZ1 = 0.0
DZ2 = 0.0
dc1, dc2, dz1, dz2, dm1, dm2 = agnkey.agnabsphotdef.erroremag(Z1, Z2, mag0, mag1, C1, C2, 1)
DM22 = np.sqrt((dm1 * dmag0) ** 2 + (dz1 * DZ1) ** 2 + (dm2 * dmag1) ** 2 + (dz2 * DZ2) ** 2)
if _interactive:
print '\n#### example computation '
print 'Z1 Z1 C1 C2 mag1 mag 2'
print 'M1 M2 '
print Z1, Z2, C1, C2, mag1, mag0
print M1, M2
try:
if np.isfinite(M2) and M2 < 999:
agnkey.agnsqldef.updatevalue(_datatable, 'mag', M2,
re.sub('sn2.fits', 'fits', string.split(img, '/')[-1]))
if _typemag == 'fit':
agnkey.agnsqldef.updatevalue(_datatable, 'magtype', 2,
re.sub('sn2.fits', 'fits', string.split(img, '/')[-1]))
elif _typemag == 'ph':
agnkey.agnsqldef.updatevalue(_datatable, 'magtype', 3,
re.sub('sn2.fits', 'fits', string.split(img, '/')[-1]))
agnkey.util.updateheader(img, 0, {'mag': [M2, 'calibrated mag']})
else:
agnkey.agnsqldef.updatevalue(_datatable, 'mag', 9999,
re.sub('sn2.fits', 'fits', string.split(img, '/')[-1]))
agnkey.util.updateheader(img, 0, {'mag': [9999, 'calibrated mag']})
if np.isfinite(DM22):
agnkey.agnsqldef.updatevalue(_datatable, 'dmag', DM22,
re.sub('sn2.fits', 'fits', string.split(img, '/')[-1]))
agnkey.util.updateheader(img, 0, {'dmag': [DM22, 'calibrated mag error']})
else:
agnkey.agnsqldef.updatevalue(_datatable, 'dmag', 9999,
re.sub('sn2.fits', 'fits', string.split(img, '/')[-1]))
agnkey.util.updateheader(img, 0, {'dmag': [9999, 'calibrated mag error']})
except:
print 'module mysqldef not found'
print _filter, col
else:
if dicti[_filter][img]['telescope'] in ['lsc', '1m0-04', '1m0-05', '1m0-06', '1m0-09']:
kk = agnkey.sites.extintion('ctio')
elif dicti[_filter][img]['telescope'] in ['elp', '1m0-08']:
kk = agnkey.sites.extintion('mcdonald')
elif dicti[_filter][img]['telescope'] in ['cpt', '1m0-12', '1m0-10', '1m0-13']:
kk = agnkey.sites.extintion('southafrica')
elif dicti[_filter][img]['telescope'] in ['ftn']:
kk = agnkey.sites.extintion('mauna')
elif dicti[_filter][img]['telescope'] in ['1m0-03', '1m0-11', 'coj', 'fts']:
kk = agnkey.sites.extintion('siding')
else:
print _filter, img, dicti[_filter][img]
sys.exit('problem with dicti')
Z1 = ''
for ww in dicti[_filter][img].keys():
if 'ZP' + filters1[_filter].upper() == ww[0:3] and float(
string.split(dicti[_filter][img][ww])[1]) < 99:
Z1 = float(string.split(dicti[_filter][img][ww])[1])
C1 = float(string.split(dicti[_filter][img][ww])[2])
break
# mag0=dicti[_filter][img][namemag[_typemag][0]]+2.5*math.log10(dicti[_filter][img]['exptime'])-kk[filters1[_filter]]*dicti[_filter][img]['airmass']
mag0 = dicti[_filter][img][namemag[_typemag][0]] - kk[filters1[_filter]] * dicti[_filter][img][
'airmass']
dmag0 = dicti[_filter][img][namemag[_typemag][1]]
if Z1 and mag0 < 99:
M1 = mag0 + Z1
agnkey.agnsqldef.updatevalue(_datatable, 'mag', M1,
re.sub('sn2.fits', 'fits', string.split(img, '/')[-1]))
if _typemag == 'fit':
agnkey.agnsqldef.updatevalue(_datatable, 'magtype', 2,
re.sub('sn2.fits', 'fits', string.split(img, '/')[-1]))
elif _typemag == 'ph':
agnkey.agnsqldef.updatevalue(_datatable, 'magtype', 3,
re.sub('sn2.fits', 'fits', string.split(img, '/')[-1]))
agnkey.util.updateheader(img, 0, {'mag': [M1, 'calibrated mag']})
else:
print 'no other filters with calibration in ' + _filter + ' band'
print img, _filter, mag0, dmag0, Z1, C1
agnkey.agnsqldef.updatevalue(_datatable, 'mag', 9999,
re.sub('sn2.fits', 'fits', string.split(img, '/')[-1]))
agnkey.util.updateheader(img, 0, {'mag': [9999, 'calibrated mag']})
# try:
# except:
print 'module mysqldef not found'
else:
print '\n### warning: no measurement in sn2 files'
|
svalenti/agnkey
|
trunk/bin/agnmag.py
|
Python
|
mit
| 19,750
| 0.005266
|
"""
Model to hold edx-video-pipeline configurations.
"""
from __future__ import absolute_import
from config_models.models import ConfigurationModel
from django.contrib.auth import get_user_model
from django.db import models
from django.utils.translation import ugettext_lazy as _
from opaque_keys.edx.django.models import CourseKeyField
import six
class VideoPipelineIntegration(ConfigurationModel):
"""
Manages configuration for connecting to the edx-video-pipeline service and using its API.
.. no_pii:
"""
client_name = models.CharField(
max_length=100,
default='VEDA-Prod',
null=False,
blank=False,
help_text=_('Oauth client name of video pipeline service.')
)
api_url = models.URLField(
verbose_name=_('Internal API URL'),
help_text=_('edx-video-pipeline API URL.')
)
service_username = models.CharField(
max_length=100,
default='veda_service_user',
null=False,
blank=False,
help_text=_('Username created for Video Pipeline Integration, e.g. veda_service_user.')
)
def get_service_user(self):
# NOTE: We load the user model here to avoid issues at startup time that result from the hacks
# in lms/startup.py.
User = get_user_model() # pylint: disable=invalid-name
return User.objects.get(username=self.service_username)
class VideoUploadsEnabledByDefault(ConfigurationModel):
"""
    Enables the "video uploads enabled by default" feature across the platform.
.. no_pii:
"""
# this field overrides course-specific settings
enabled_for_all_courses = models.BooleanField(default=False)
@classmethod
def feature_enabled(cls, course_id):
"""
Looks at the currently active configuration model to determine whether
the VideoUploadsEnabledByDefault feature is available.
If the feature flag is not enabled, the feature is not available.
If the flag is enabled for all the courses, feature is available.
If the flag is enabled and the provided course_id is for a course
with CourseVideoUploadsEnabledByDefault enabled, then the
feature is available.
Arguments:
course_id (CourseKey): course id for whom feature will be checked.
"""
if not cls.is_enabled():
return False
elif not cls.current().enabled_for_all_courses:
feature = (CourseVideoUploadsEnabledByDefault.objects
.filter(course_id=course_id)
.order_by('-change_date')
.first())
return feature.enabled if feature else False
return True
def __unicode__(self):
current_model = VideoUploadsEnabledByDefault.current()
return u"VideoUploadsEnabledByDefault: enabled {is_enabled}".format(
is_enabled=current_model.is_enabled()
)
class CourseVideoUploadsEnabledByDefault(ConfigurationModel):
"""
    Enables the "video uploads enabled by default" feature for a specific course. The global
    feature flag must be enabled for this to take effect.
.. no_pii:
"""
KEY_FIELDS = ('course_id',)
course_id = CourseKeyField(max_length=255, db_index=True)
def __unicode__(self):
not_en = "Not "
if self.enabled:
not_en = ""
return u"Course '{course_key}': Video Uploads {not_enabled}Enabled by default.".format(
course_key=six.text_type(self.course_id),
not_enabled=not_en
)
|
jolyonb/edx-platform
|
openedx/core/djangoapps/video_pipeline/models.py
|
Python
|
agpl-3.0
| 3,579
| 0.001397
|
"""
Django settings for SimpleMooc project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import dj_database_url
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#f3*&^_56z9tj4=l%7+0gzg17o(sw&%(use@zt+_k@=y(ke2f5'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# libs
'taggit',
# apps
'SimpleMooc.core',
'SimpleMooc.courses',
'SimpleMooc.accounts',
'SimpleMooc.forum',
)
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS as TCP
TEMPLATE_CONTEXT_PROCESSORS = TCP + (
'django.core.context_processors.request',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'SimpleMooc.urls'
WSGI_APPLICATION = 'SimpleMooc.wsgi.application'
# Database
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, "SimpleMooc", "media")
MEDIA_URL = "/media/"
# Email
# EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
DEFAULT_FROM_EMAIL = "Nome <anderson.bcc.uag@gmail.com>"
EMAIL_USE_TLS = True
EMAIL_HOST = "smtp.gmail.com"
EMAIL_HOST_USER = "anderson.bcc.uag@gmail.com"
EMAIL_HOST_PASSWORD = "123"
EMAIL_PORT = "587"
CONTACT_EMAIL = "anderson.adss.hotmail@gmail.com"
# auth
LOGIN_URL = "accounts:login"
LOGIN_REDIRECT_URL = "core:home"
LOGOUT_URL = "accounts:logout"
AUTH_USER_MODEL = "accounts.User"
# Heroku settings
DATABASES = {
'default': dj_database_url.config(),
}
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
STATICFILES_DIRS = (os.path.join(BASE_DIR, "SimpleMooc", "core", "static/"),)
try:
from SimpleMooc.local_settings import *
except ImportError:
pass
|
andersondss/LearningDjango
|
udemy/SimpleMooc/SimpleMooc/settings.py
|
Python
|
mit
| 3,174
| 0.000315
|
"""
This module is used to generate graphs that show the interaction between authors either through multiple edges or
through edge weights. There is an edge from one author to another if the former sent a message to the latter. These
graphs depict thread-wise interaction of the authors for the entire mailing list and these interactions are labelled
in chronological order to help identify the flow of messages across authors.
"""
import json
from util.read_utils import *
def add_to_multigraph(graph_obj, discussion_graph, json_data, nbunch, label_prefix=''):
"""
"""
i = 0
for node in sorted(nbunch):
node_attr = json_data[node]
if node_attr['Cc'] is None:
addr_list = node_attr['To']
else:
addr_list = node_attr['To'] | node_attr['Cc']
for to_address in addr_list:
graph_obj.add_edge(node_attr['From'], to_address, label=label_prefix+str(i))
succ_nbunch = [int(x) for x in discussion_graph.successors(node)]
if succ_nbunch is not None:
add_to_multigraph(graph_obj, discussion_graph, json_data, succ_nbunch, label_prefix+str(i)+'.')
i += 1
def author_interaction_multigraph(discussion_graph, json_data, limit=10):
"""
"""
niter = 0
for conn_subgraph in nx.weakly_connected_component_subgraphs(discussion_graph):
interaction_graph = nx.MultiDiGraph()
origin = min(int(x) for x in conn_subgraph.nodes())
add_to_multigraph(interaction_graph, discussion_graph, json_data, [origin])
# print(json_data[origin])
g1 = nx.to_agraph(interaction_graph)
g1.draw("author_multi/"+str(origin)+'.png', prog='circo')
niter += 1
if limit == niter and limit > 0:
break
def add_to_weighted_graph(graph_obj, discussion_graph, json_data, nbunch, node_enum=list()):
"""
"""
for node in sorted(nbunch):
node_attr = json_data[node]
if node_attr['Cc'] is None:
addr_list = node_attr['To']
else:
addr_list = node_attr['To'] | node_attr['Cc']
if node_attr['From'] not in node_enum:
node_enum.append(node_attr['From'])
from_node = node_enum.index(node_attr['From'])
for to_address in addr_list:
if to_address not in node_enum:
node_enum.append(to_address)
to_node = node_enum.index(to_address)
if not graph_obj.has_edge(from_node, to_node):
graph_obj.add_edge(from_node, to_node, label=1)
else:
graph_obj[from_node][to_node]['label'] += 1
succ_nbunch = [int(x) for x in discussion_graph.successors(node)]
if succ_nbunch is not None:
add_to_weighted_graph(graph_obj, discussion_graph, json_data, succ_nbunch, node_enum)
def author_interaction_weighted_graph(discussion_graph, json_data, limit=10):
"""
"""
niter = 0
for conn_subgraph in nx.weakly_connected_component_subgraphs(discussion_graph):
interaction_graph = nx.DiGraph()
origin = min(int(x) for x in conn_subgraph.nodes())
add_to_weighted_graph(interaction_graph, discussion_graph, json_data, [origin], [])
# print(json_data[origin])
g1 = nx.to_agraph(interaction_graph)
g1.draw("author_weighted/"+str(origin)+'.png', prog='circo')
niter += 1
if limit == niter and limit > 0:
break
def weighted_multigraph():
# Time limit can be specified here in the form of a timestamp in one of the identifiable formats and all messages
# that have arrived after this timestamp will be ignored.
time_limit = None
# If true, then messages that belong to threads that have only a single author are ignored.
ignore_lat = True
if time_limit is None:
time_limit = time.strftime("%a, %d %b %Y %H:%M:%S %z")
msgs_before_time = set()
time_limit = get_datetime_object(time_limit)
print("All messages before", time_limit, "are being considered.")
discussion_graph = nx.DiGraph()
email_re = re.compile(r'[\w\.-]+@[\w\.-]+')
json_data = dict()
# Add nodes into NetworkX graph by reading from CSV file
if not ignore_lat:
with open("graph_nodes.csv", "r") as node_file:
for pair in node_file:
node = pair.split(';', 2)
if get_datetime_object(node[2].strip()) < time_limit:
node[0] = int(node[0])
msgs_before_time.add(node[0])
from_addr = email_re.search(node[1].strip())
from_addr = from_addr.group(0) if from_addr is not None else node[1].strip()
discussion_graph.add_node(node[0], time=node[2].strip(), color="#ffffff", style='bold', sender=from_addr)
node_file.close()
print("Nodes added.")
# Add edges into NetworkX graph by reading from CSV file
with open("graph_edges.csv", "r") as edge_file:
for pair in edge_file:
edge = pair.split(';')
edge[0] = int(edge[0])
edge[1] = int(edge[1])
if edge[0] in msgs_before_time and edge[1] in msgs_before_time:
discussion_graph.add_edge(*edge)
edge_file.close()
print("Edges added.")
else:
lone_author_threads = get_lone_author_threads(False)
# Add nodes into NetworkX graph only if they are not a part of a thread that has only a single author
with open("graph_nodes.csv", "r") as node_file:
for pair in node_file:
node = pair.split(';', 2)
node[0] = int(node[0])
if get_datetime_object(node[2].strip()) < time_limit and node[0] not in lone_author_threads:
msgs_before_time.add(node[0])
from_addr = email_re.search(node[1].strip())
from_addr = from_addr.group(0) if from_addr is not None else node[1].strip()
discussion_graph.add_node(node[0], time=node[2].strip(), color="#ffffff", style='bold', sender=from_addr)
node_file.close()
print("Nodes added.")
# Add edges into NetworkX graph only if they are not a part of a thread that has only a single author
with open("graph_edges.csv", "r") as edge_file:
for pair in edge_file:
edge = pair.split(';')
edge[0] = int(edge[0])
edge[1] = int(edge[1])
if edge[0] not in lone_author_threads and edge[1] not in lone_author_threads:
if edge[0] in msgs_before_time and edge[1] in msgs_before_time:
discussion_graph.add_edge(*edge)
edge_file.close()
print("Edges added.")
with open('clean_data.json', 'r') as json_file:
for chunk in lines_per_n(json_file, 9):
json_obj = json.loads(chunk)
# print("\nFrom", json_obj['From'], "\nTo", json_obj['To'], "\nCc", json_obj['Cc'])
from_addr = email_re.search(json_obj['From'])
json_obj['From'] = from_addr.group(0) if from_addr is not None else json_obj['From']
json_obj['To'] = set(email_re.findall(json_obj['To']))
json_obj['Cc'] = set(email_re.findall(json_obj['Cc'])) if json_obj['Cc'] is not None else None
# print("\nFrom", json_obj['From'], "\nTo", json_obj['To'], "\nCc", json_obj['Cc'])
json_data[json_obj['Message-ID']] = json_obj
print("JSON data loaded.")
author_interaction_weighted_graph(discussion_graph, json_data, limit=20)
author_interaction_multigraph(discussion_graph, json_data, limit=20)
|
yashpungaliya/MailingListParser
|
lib/analysis/author/graph/interaction.py
|
Python
|
gpl-3.0
| 7,719
| 0.003627
|
#!/usr/bin/python
# encoding: utf-8
import sys
from gist import create_workflow, set_github_token
from workflow import Workflow, web
from workflow.background import run_in_background, is_running
def main(wf):
arg = wf.args[0]
if len(arg) > 0:
token = wf.args[0]
set_github_token(wf, token)
if __name__ == '__main__':
wf = create_workflow()
sys.exit(wf.run(main))
|
danielecook/gist-alfred
|
set_info.py
|
Python
|
mit
| 400
| 0
|
"""scons.Node.Alias
Alias nodes.
This creates a hash of global Aliases (dummy targets).
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Node/Alias.py 2014/07/05 09:42:21 garyo"
import collections
import SCons.Errors
import SCons.Node
import SCons.Util
class AliasNameSpace(collections.UserDict):
def Alias(self, name, **kw):
if isinstance(name, SCons.Node.Alias.Alias):
return name
try:
a = self[name]
except KeyError:
a = SCons.Node.Alias.Alias(name, **kw)
self[name] = a
return a
def lookup(self, name, **kw):
try:
return self[name]
except KeyError:
return None
class AliasNodeInfo(SCons.Node.NodeInfoBase):
current_version_id = 1
field_list = ['csig']
def str_to_node(self, s):
return default_ans.Alias(s)
class AliasBuildInfo(SCons.Node.BuildInfoBase):
current_version_id = 1
class Alias(SCons.Node.Node):
NodeInfo = AliasNodeInfo
BuildInfo = AliasBuildInfo
def __init__(self, name):
SCons.Node.Node.__init__(self)
self.name = name
def str_for_display(self):
return '"' + self.__str__() + '"'
def __str__(self):
return self.name
def make_ready(self):
self.get_csig()
really_build = SCons.Node.Node.build
is_up_to_date = SCons.Node.Node.children_are_up_to_date
def is_under(self, dir):
# Make Alias nodes get built regardless of
# what directory scons was run from. Alias nodes
# are outside the filesystem:
return 1
def get_contents(self):
"""The contents of an alias is the concatenation
of the content signatures of all its sources."""
childsigs = [n.get_csig() for n in self.children()]
return ''.join(childsigs)
def sconsign(self):
"""An Alias is not recorded in .sconsign files"""
pass
#
#
#
def changed_since_last_build(self, target, prev_ni):
cur_csig = self.get_csig()
try:
return cur_csig != prev_ni.csig
except AttributeError:
return 1
def build(self):
"""A "builder" for aliases."""
pass
def convert(self):
try: del self.builder
except AttributeError: pass
self.reset_executor()
self.build = self.really_build
def get_csig(self):
"""
Generate a node's content signature, the digested signature
of its content.
node - the node
cache - alternate node to use for the signature cache
returns - the content signature
"""
try:
return self.ninfo.csig
except AttributeError:
pass
contents = self.get_contents()
csig = SCons.Util.MD5signature(contents)
self.get_ninfo().csig = csig
return csig
default_ans = AliasNameSpace()
SCons.Node.arg2nodes_lookups.append(default_ans.lookup)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
dezelin/scons
|
scons-local/SCons/Node/Alias.py
|
Python
|
mit
| 4,197
| 0.001906
|
def f(x):
"""
Returns
-------
object
"""
return 42
|
smmribeiro/intellij-community
|
python/testData/intentions/returnTypeInNewNumpyDocString_after.py
|
Python
|
apache-2.0
| 75
| 0.013333
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
clean_html,
int_or_none,
js_to_json,
parse_iso8601,
)
class NetzkinoIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?netzkino\.de/\#!/(?P<category>[^/]+)/(?P<id>[^/]+)'
_TEST = {
'url': 'http://www.netzkino.de/#!/scifikino/rakete-zum-mond',
'md5': '92a3f8b76f8d7220acce5377ea5d4873',
'info_dict': {
'id': 'rakete-zum-mond',
'ext': 'mp4',
'title': 'Rakete zum Mond (Endstation Mond, Destination Moon)',
'comments': 'mincount:3',
'description': 'md5:1eddeacc7e62d5a25a2d1a7290c64a28',
'upload_date': '20120813',
'thumbnail': r're:https?://.*\.jpg$',
'timestamp': 1344858571,
'age_limit': 12,
},
'params': {
'skip_download': 'Download only works from Germany',
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
category_id = mobj.group('category')
video_id = mobj.group('id')
api_url = 'http://api.netzkino.de.simplecache.net/capi-2.0a/categories/%s.json?d=www' % category_id
api_info = self._download_json(api_url, video_id)
info = next(
p for p in api_info['posts'] if p['slug'] == video_id)
custom_fields = info['custom_fields']
production_js = self._download_webpage(
'http://www.netzkino.de/beta/dist/production.min.js', video_id,
note='Downloading player code')
avo_js = self._search_regex(
r'var urlTemplate=(\{.*?"\})',
production_js, 'URL templates')
templates = self._parse_json(
avo_js, video_id, transform_source=js_to_json)
suffix = {
'hds': '.mp4/manifest.f4m',
'hls': '.mp4/master.m3u8',
'pmd': '.mp4',
}
film_fn = custom_fields['Streaming'][0]
formats = [{
'format_id': key,
'ext': 'mp4',
'url': tpl.replace('{}', film_fn) + suffix[key],
} for key, tpl in templates.items()]
self._sort_formats(formats)
comments = [{
'timestamp': parse_iso8601(c.get('date'), delimiter=' '),
'id': c['id'],
'author': c['name'],
'html': c['content'],
'parent': 'root' if c.get('parent', 0) == 0 else c['parent'],
} for c in info.get('comments', [])]
return {
'id': video_id,
'formats': formats,
'comments': comments,
'title': info['title'],
'age_limit': int_or_none(custom_fields.get('FSK')[0]),
'timestamp': parse_iso8601(info.get('date'), delimiter=' '),
'description': clean_html(info.get('content')),
'thumbnail': info.get('thumbnail'),
'playlist_title': api_info.get('title'),
'playlist_id': category_id,
}
|
valmynd/MediaFetcher
|
src/plugins/youtube_dl/youtube_dl/extractor/netzkino.py
|
Python
|
gpl-3.0
| 2,537
| 0.028774
|
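The core of NetzkinoIE._real_extract above is a template substitution: each player URL template carries a '{}' placeholder that is replaced by the film slug and then suffixed per protocol. A standalone sketch with made-up template URLs (the real ones are scraped from production.min.js):
# Made-up templates standing in for the ones parsed out of production.min.js.
templates = {
    'hls': 'https://cdn.example.invalid/hls/{}',
    'pmd': 'https://cdn.example.invalid/pmd/{}',
}
suffix = {
    'hds': '.mp4/manifest.f4m',
    'hls': '.mp4/master.m3u8',
    'pmd': '.mp4',
}
film_fn = 'rakete-zum-mond'

formats = [{
    'format_id': key,
    'ext': 'mp4',
    'url': tpl.replace('{}', film_fn) + suffix[key],
} for key, tpl in templates.items()]
# formats[0]['url'] -> 'https://cdn.example.invalid/hls/rakete-zum-mond.mp4/master.m3u8'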
"""Auto-generated file, do not edit by hand. BL metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_BL = PhoneMetadata(id='BL', country_code=590, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='(?:590|(?:69|80)\\d|976)\\d{6}', possible_length=(9,)),
fixed_line=PhoneNumberDesc(national_number_pattern='590(?:2[7-9]|5[12]|87)\\d{4}', example_number='590271234', possible_length=(9,)),
mobile=PhoneNumberDesc(national_number_pattern='69(?:0\\d\\d|1(?:2[2-9]|3[0-5]))\\d{4}', example_number='690001234', possible_length=(9,)),
toll_free=PhoneNumberDesc(national_number_pattern='80[0-5]\\d{6}', example_number='800012345', possible_length=(9,)),
voip=PhoneNumberDesc(national_number_pattern='976[01]\\d{5}', example_number='976012345', possible_length=(9,)),
national_prefix='0',
national_prefix_for_parsing='0',
mobile_number_portable_region=True)
|
daviddrysdale/python-phonenumbers
|
python/phonenumbers/data/region_BL.py
|
Python
|
apache-2.0
| 959
| 0.007299
|
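This generated region metadata is normally consumed through the library's public API rather than imported directly. A small sketch, using the example mobile number listed in the metadata above:
import phonenumbers

# '+590 690 00 12 34' matches the BL mobile pattern above; country code 590 is
# shared by several French Caribbean territories, so validity is also checked per region.
num = phonenumbers.parse("+590690001234", None)
print(phonenumbers.is_valid_number(num))                   # True
print(phonenumbers.is_valid_number_for_region(num, "BL"))  # True
print(phonenumbers.format_number(num, phonenumbers.PhoneNumberFormat.E164))  # '+590690001234'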
# Copyright 2014: Intel Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.benchmark.scenarios.neutron import network
from tests.unit import test
NEUTRON_NETWORKS = "rally.benchmark.scenarios.neutron.network.NeutronNetworks"
class NeutronNetworksTestCase(test.TestCase):
@mock.patch(NEUTRON_NETWORKS + "._list_networks")
@mock.patch(NEUTRON_NETWORKS + "._create_network")
def test_create_and_list_networks(self, mock_create, mock_list):
neutron_scenario = network.NeutronNetworks()
# Default options
network_create_args = {}
neutron_scenario.create_and_list_networks(
network_create_args=network_create_args)
mock_create.assert_called_once_with(network_create_args)
mock_list.assert_called_once_with()
mock_create.reset_mock()
mock_list.reset_mock()
# Explicit network name is specified
network_create_args = {"name": "given-name"}
neutron_scenario.create_and_list_networks(
network_create_args=network_create_args)
mock_create.assert_called_once_with(network_create_args)
mock_list.assert_called_once_with()
@mock.patch(NEUTRON_NETWORKS + "._update_network")
@mock.patch(NEUTRON_NETWORKS + "._create_network", return_value={
"network": {
"id": "network-id",
"name": "network-name",
"admin_state_up": False
}
})
def test_create_and_update_networks(self,
mock_create_network,
mock_update_network):
scenario = network.NeutronNetworks()
network_update_args = {"name": "_updated", "admin_state_up": True}
# Default options
scenario.create_and_update_networks(
network_update_args=network_update_args)
mock_create_network.assert_called_once_with({})
mock_update_network.assert_has_calls(
[mock.call(mock_create_network.return_value, network_update_args)])
mock_create_network.reset_mock()
mock_update_network.reset_mock()
# Explicit network name is specified
network_create_args = {"name": "network-name", "admin_state_up": False}
scenario.create_and_update_networks(
network_create_args=network_create_args,
network_update_args=network_update_args)
mock_create_network.assert_called_once_with(network_create_args)
mock_update_network.assert_has_calls(
[mock.call(mock_create_network.return_value, network_update_args)])
@mock.patch(NEUTRON_NETWORKS + "._delete_network")
@mock.patch(NEUTRON_NETWORKS + "._create_network")
def test_create_and_delete_networks(self, mock_create, mock_delete):
neutron_scenario = network.NeutronNetworks()
# Default options
network_create_args = {}
neutron_scenario.create_and_delete_networks()
mock_create.assert_called_once_with(network_create_args)
self.assertEqual(1, mock_delete.call_count)
mock_create.reset_mock()
mock_delete.reset_mock()
        # Explicit network name is specified
network_create_args = {"name": "given-name"}
neutron_scenario.create_and_delete_networks(
network_create_args=network_create_args)
mock_create.assert_called_once_with(network_create_args)
self.assertEqual(1, mock_delete.call_count)
@mock.patch(NEUTRON_NETWORKS + "._list_subnets")
@mock.patch(NEUTRON_NETWORKS + "._create_network_and_subnets")
def test_create_and_list_subnets(self,
mock_create_network_and_subnets,
mock_list):
scenario = network.NeutronNetworks()
subnets_per_network = 4
subnet_cidr_start = "default_cidr"
mock_create_network_and_subnets.reset_mock()
mock_list.reset_mock()
# Default options
scenario.create_and_list_subnets(
subnets_per_network=subnets_per_network,
subnet_cidr_start=subnet_cidr_start)
mock_create_network_and_subnets.assert_has_calls(
[mock.call({}, {}, subnets_per_network,
subnet_cidr_start)])
mock_list.assert_called_once_with()
mock_create_network_and_subnets.reset_mock()
mock_list.reset_mock()
# Custom options
scenario.create_and_list_subnets(
subnet_create_args={"allocation_pools": []},
subnet_cidr_start="custom_cidr",
subnets_per_network=subnets_per_network)
mock_create_network_and_subnets.assert_has_calls(
[mock.call({}, {"allocation_pools": []},
subnets_per_network, "custom_cidr")])
mock_list.assert_called_once_with()
@mock.patch(NEUTRON_NETWORKS + "._update_subnet")
@mock.patch(NEUTRON_NETWORKS + "._create_network_and_subnets")
def test_create_and_update_subnets(self,
mock_create_network_and_subnets,
mock_update_subnet):
scenario = network.NeutronNetworks()
subnets_per_network = 1
subnet_cidr_start = "default_cidr"
net = {
"network": {
"id": "network-id"
}
}
subnet = {
"subnet": {
"name": "subnet-name",
"id": "subnet-id",
"enable_dhcp": False
}
}
mock_create_network_and_subnets.return_value = (net, [subnet])
subnet_update_args = {"name": "_updated", "enable_dhcp": True}
mock_create_network_and_subnets.reset_mock()
mock_update_subnet.reset_mock()
# Default options
scenario.create_and_update_subnets(
subnet_update_args=subnet_update_args,
subnet_cidr_start=subnet_cidr_start,
subnets_per_network=subnets_per_network)
mock_create_network_and_subnets.assert_has_calls(
[mock.call({}, {}, subnets_per_network, subnet_cidr_start)])
mock_update_subnet.assert_has_calls(
[mock.call(subnet, subnet_update_args)])
mock_create_network_and_subnets.reset_mock()
mock_update_subnet.reset_mock()
# Custom options
subnet_cidr_start = "custom_cidr"
scenario.create_and_update_subnets(
subnet_update_args=subnet_update_args,
subnet_create_args={"allocation_pools": []},
subnet_cidr_start=subnet_cidr_start,
subnets_per_network=subnets_per_network)
mock_create_network_and_subnets.assert_has_calls(
[mock.call({}, {"allocation_pools": []}, subnets_per_network,
subnet_cidr_start)])
mock_update_subnet.assert_has_calls(
[mock.call(subnet, subnet_update_args)])
@mock.patch(NEUTRON_NETWORKS + "._delete_subnet")
@mock.patch(NEUTRON_NETWORKS + "._create_network_and_subnets")
def test_create_and_delete_subnets(self,
mock_create_network_and_subnets,
mock_delete):
scenario = network.NeutronNetworks()
net = {
"network": {
"id": "network-id"
}
}
subnet = {
"subnet": {
"name": "subnet-name",
"id": "subnet-id",
"enable_dhcp": False
}
}
mock_create_network_and_subnets.return_value = (net, [subnet])
subnets_per_network = 1
subnet_cidr_start = "default_cidr"
mock_create_network_and_subnets.reset_mock()
mock_delete.reset_mock()
# Default options
scenario.create_and_delete_subnets(
subnets_per_network=subnets_per_network,
subnet_cidr_start=subnet_cidr_start)
mock_create_network_and_subnets.assert_has_calls(
[mock.call({}, {}, subnets_per_network,
subnet_cidr_start)])
mock_delete.assert_has_calls([mock.call(subnet)])
mock_create_network_and_subnets.reset_mock()
mock_delete.reset_mock()
# Custom options
subnet_cidr_start = "custom_cidr"
scenario.create_and_delete_subnets(
subnet_create_args={"allocation_pools": []},
subnet_cidr_start="custom_cidr",
subnets_per_network=subnets_per_network)
mock_create_network_and_subnets.assert_has_calls(
[mock.call({}, {"allocation_pools": []}, subnets_per_network,
subnet_cidr_start)])
mock_delete.assert_has_calls([mock.call(subnet)])
@mock.patch(NEUTRON_NETWORKS + "._list_routers")
@mock.patch(NEUTRON_NETWORKS + "._create_router")
@mock.patch(NEUTRON_NETWORKS + "._create_network_and_subnets")
@mock.patch(NEUTRON_NETWORKS + ".clients")
def test_create_and_list_routers(self,
mock_clients,
mock_create_network_and_subnets,
mock_create_router,
mock_list):
scenario = network.NeutronNetworks()
subnets_per_network = 1
subnet_cidr_start = "default_cidr"
net = {
"network": {
"id": "network-id"
}
}
subnet = {
"subnet": {
"name": "subnet-name",
"id": "subnet-id",
"enable_dhcp": False
}
}
mock_create_network_and_subnets.return_value = (net, [subnet])
mock_clients("neutron").add_interface_router = mock.Mock()
router = {
"router": {
"name": "router-name",
"id": "router-id"
}
}
mock_create_router.return_value = router
# Default options
scenario.create_and_list_routers(
subnet_cidr_start=subnet_cidr_start,
subnets_per_network=subnets_per_network)
mock_create_network_and_subnets.assert_has_calls(
[mock.call({}, {}, subnets_per_network, subnet_cidr_start)])
mock_create_router.assert_has_calls(
[mock.call({})] * subnets_per_network)
mock_clients("neutron").add_interface_router.assert_has_calls(
[mock.call(router["router"]["id"],
{"subnet_id": subnet["subnet"]["id"]})
] * subnets_per_network)
mock_create_network_and_subnets.reset_mock()
mock_create_router.reset_mock()
mock_clients("neutron").add_interface_router.reset_mock()
mock_list.reset_mock()
# Custom options
subnet_cidr_start = "custom_cidr"
subnet_create_args = {"allocation_pools": []}
router_create_args = {"admin_state_up": False}
scenario.create_and_list_routers(
subnet_create_args=subnet_create_args,
subnet_cidr_start="custom_cidr",
subnets_per_network=subnets_per_network,
router_create_args=router_create_args)
mock_create_network_and_subnets.assert_has_calls(
[mock.call({}, subnet_create_args, subnets_per_network,
subnet_cidr_start)])
mock_create_router.assert_has_calls(
[mock.call(router_create_args)] * subnets_per_network)
mock_clients("neutron").add_interface_router.assert_has_calls(
[mock.call(router["router"]["id"],
{"subnet_id": subnet["subnet"]["id"]})
] * subnets_per_network)
mock_list.assert_called_once_with()
@mock.patch(NEUTRON_NETWORKS + "._update_router")
@mock.patch(NEUTRON_NETWORKS + "._create_router")
@mock.patch(NEUTRON_NETWORKS + "._create_network_and_subnets")
@mock.patch(NEUTRON_NETWORKS + ".clients")
def test_create_and_update_routers(self,
mock_clients,
mock_create_network_and_subnets,
mock_create_router,
mock_update_router):
scenario = network.NeutronNetworks()
subnets_per_network = 1
subnet_cidr_start = "default_cidr"
net = {
"network": {
"id": "network-id"
}
}
subnet = {
"subnet": {
"name": "subnet-name",
"id": "subnet-id",
"enable_dhcp": False
}
}
router = {
"router": {
"name": "router-name",
"id": "router-id"
}
}
router_update_args = {
"name": "_updated",
"admin_state_up": False
}
mock_create_router.return_value = router
mock_create_network_and_subnets.return_value = (net, [subnet])
mock_clients("neutron").add_interface_router = mock.Mock()
# Default options
scenario.create_and_update_routers(
router_update_args=router_update_args,
subnet_cidr_start=subnet_cidr_start,
subnets_per_network=subnets_per_network)
mock_create_network_and_subnets.assert_has_calls(
[mock.call({}, {}, subnets_per_network, subnet_cidr_start)])
mock_create_router.assert_has_calls(
[mock.call({})] * subnets_per_network)
mock_clients("neutron").add_interface_router.assert_has_calls(
[mock.call(router["router"]["id"],
{"subnet_id": subnet["subnet"]["id"]})
] * subnets_per_network)
mock_update_router.assert_has_calls(
[mock.call(router, router_update_args)
] * subnets_per_network)
mock_create_network_and_subnets.reset_mock()
mock_create_router.reset_mock()
mock_clients("neutron").add_interface_router.reset_mock()
mock_update_router.reset_mock()
# Custom options
subnet_cidr_start = "custom_cidr"
subnet_create_args = {"allocation_pools": []}
router_create_args = {"admin_state_up": False}
scenario.create_and_update_routers(
router_update_args=router_update_args,
subnet_create_args=subnet_create_args,
subnet_cidr_start="custom_cidr",
subnets_per_network=subnets_per_network,
router_create_args=router_create_args)
mock_create_network_and_subnets.assert_has_calls(
[mock.call({}, subnet_create_args, subnets_per_network,
subnet_cidr_start)])
mock_create_router.assert_has_calls(
[mock.call(router_create_args)] * subnets_per_network)
mock_clients("neutron").add_interface_router.assert_has_calls(
[mock.call(router["router"]["id"],
{"subnet_id": subnet["subnet"]["id"]})
] * subnets_per_network)
mock_update_router.assert_has_calls(
[mock.call(router, router_update_args)
] * subnets_per_network)
@mock.patch(NEUTRON_NETWORKS + "._delete_router")
@mock.patch(NEUTRON_NETWORKS + "._create_router")
@mock.patch(NEUTRON_NETWORKS + "._create_network_and_subnets")
@mock.patch(NEUTRON_NETWORKS + ".clients")
def test_create_and_delete_routers(self,
mock_clients,
mock_create_network_and_subnets,
mock_create_router,
mock_delete_router):
scenario = network.NeutronNetworks()
subnets_per_network = 1
subnet_cidr_start = "default_cidr"
net = {
"network": {
"id": "network-id"
}
}
subnet = {
"subnet": {
"name": "subnet-name",
"id": "subnet-id",
"enable_dhcp": False
}
}
router = {
"router": {
"name": "router-name",
"id": "router-id"
}
}
mock_create_router.return_value = router
mock_create_network_and_subnets.return_value = (net, [subnet])
mock_clients("neutron").add_interface_router = mock.Mock()
# Default options
scenario.create_and_delete_routers(
subnet_cidr_start=subnet_cidr_start,
subnets_per_network=subnets_per_network)
mock_create_network_and_subnets.assert_has_calls(
[mock.call({}, {}, subnets_per_network, subnet_cidr_start)])
mock_create_router.assert_has_calls(
[mock.call({})] * subnets_per_network)
mock_clients("neutron").add_interface_router.assert_has_calls(
[mock.call(router["router"]["id"],
{"subnet_id": subnet["subnet"]["id"]})
] * subnets_per_network)
mock_delete_router.assert_has_calls(
[mock.call(router)] * subnets_per_network)
mock_create_network_and_subnets.reset_mock()
mock_create_router.reset_mock()
mock_clients("neutron").add_interface_router.reset_mock()
mock_delete_router.reset_mock()
# Custom options
subnet_cidr_start = "custom_cidr"
subnet_create_args = {"allocation_pools": []}
router_create_args = {"admin_state_up": False}
scenario.create_and_delete_routers(
subnet_create_args=subnet_create_args,
subnet_cidr_start="custom_cidr",
subnets_per_network=subnets_per_network,
router_create_args=router_create_args)
mock_create_network_and_subnets.assert_has_calls(
[mock.call({}, subnet_create_args, subnets_per_network,
subnet_cidr_start)])
mock_create_router.assert_has_calls(
[mock.call(router_create_args)] * subnets_per_network)
mock_clients("neutron").add_interface_router.assert_has_calls(
[mock.call(router["router"]["id"],
{"subnet_id": subnet["subnet"]["id"]})
] * subnets_per_network)
mock_delete_router.assert_has_calls(
[mock.call(router)] * subnets_per_network)
@mock.patch(NEUTRON_NETWORKS + "._generate_random_name")
@mock.patch(NEUTRON_NETWORKS + "._list_ports")
@mock.patch(NEUTRON_NETWORKS + "._create_port")
@mock.patch(NEUTRON_NETWORKS + "._create_network")
def test_create_and_list_ports(self,
mock_create_network,
mock_create_port,
mock_list,
mock_random_name):
scenario = network.NeutronNetworks()
mock_random_name.return_value = "random-name"
net = {"network": {"id": "fake-id"}}
mock_create_network.return_value = net
ports_per_network = 10
self.assertRaises(TypeError, scenario.create_and_list_ports)
mock_create_network.reset_mock()
# Defaults
scenario.create_and_list_ports(ports_per_network=ports_per_network)
mock_create_network.assert_called_once_with({})
self.assertEqual(mock_create_port.mock_calls,
[mock.call(net, {})] * ports_per_network)
mock_list.assert_called_once_with()
mock_create_network.reset_mock()
mock_create_port.reset_mock()
mock_list.reset_mock()
# Custom options
scenario.create_and_list_ports(
network_create_args={"name": "given-name"},
port_create_args={"allocation_pools": []},
ports_per_network=ports_per_network)
mock_create_network.assert_called_once_with({"name": "given-name"})
self.assertEqual(
mock_create_port.mock_calls,
[mock.call(net, {"allocation_pools": []})] * ports_per_network)
mock_list.assert_called_once_with()
@mock.patch(NEUTRON_NETWORKS + "._generate_random_name")
@mock.patch(NEUTRON_NETWORKS + "._update_port")
@mock.patch(NEUTRON_NETWORKS + "._create_port", return_value={
"port": {
"name": "port-name",
"id": "port-id",
"admin_state_up": True
}
})
@mock.patch(NEUTRON_NETWORKS + "._create_network", return_value={
"network": {
"id": "fake-id"
}
})
def test_create_and_update_ports(self,
mock_create_network,
mock_create_port,
mock_update_port,
mock_random_name):
scenario = network.NeutronNetworks()
mock_random_name.return_value = "random-name"
ports_per_network = 10
port_update_args = {
"name": "_updated",
"admin_state_up": False
}
# Defaults
scenario.create_and_update_ports(
port_update_args=port_update_args,
ports_per_network=ports_per_network)
mock_create_network.assert_called_once_with({})
mock_create_port.assert_has_calls(
[mock.call({"network": {"id": "fake-id"}},
{})] * ports_per_network)
mock_update_port.assert_has_calls(
[mock.call(mock_create_port.return_value, port_update_args)
] * ports_per_network)
mock_create_network.reset_mock()
mock_create_port.reset_mock()
mock_update_port.reset_mock()
# Custom options
scenario.create_and_update_ports(
port_update_args=port_update_args,
network_create_args={"name": "given-name"},
port_create_args={"allocation_pools": []},
ports_per_network=ports_per_network)
mock_create_network.assert_called_once_with({"name": "given-name"})
mock_create_port.assert_has_calls(
[mock.call({"network": {"id": "fake-id"}},
{"allocation_pools": []})] * ports_per_network)
mock_update_port.assert_has_calls(
[mock.call(mock_create_port.return_value, port_update_args)
] * ports_per_network)
@mock.patch(NEUTRON_NETWORKS + "._generate_random_name")
@mock.patch(NEUTRON_NETWORKS + "._delete_port")
@mock.patch(NEUTRON_NETWORKS + "._create_port")
@mock.patch(NEUTRON_NETWORKS + "._create_network")
def test_create_and_delete_ports(self,
mock_create_network,
mock_create_port,
mock_delete,
mock_random_name):
scenario = network.NeutronNetworks()
mock_random_name.return_value = "random-name"
net = {"network": {"id": "fake-id"}}
mock_create_network.return_value = net
ports_per_network = 10
self.assertRaises(TypeError, scenario.create_and_delete_ports)
mock_create_network.reset_mock()
# Default options
scenario.create_and_delete_ports(ports_per_network=ports_per_network)
mock_create_network.assert_called_once_with({})
self.assertEqual(mock_create_port.mock_calls,
[mock.call(net, {})] * ports_per_network)
self.assertEqual(mock_delete.mock_calls,
[mock.call(mock_create_port())] * ports_per_network)
mock_create_network.reset_mock()
mock_create_port.reset_mock()
mock_delete.reset_mock()
# Custom options
scenario.create_and_delete_ports(
network_create_args={"name": "given-name"},
port_create_args={"allocation_pools": []},
ports_per_network=ports_per_network)
mock_create_network.assert_called_once_with({"name": "given-name"})
self.assertEqual(
mock_create_port.mock_calls,
[mock.call(net, {"allocation_pools": []})] * ports_per_network)
self.assertEqual(mock_delete.mock_calls,
[mock.call(mock_create_port())] * ports_per_network)
|
pandeyop/rally
|
tests/unit/benchmark/scenarios/neutron/test_network.py
|
Python
|
apache-2.0
| 24,839
| 0
|
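Every test above follows the same shape: patch the scenario's private helpers, call the public scenario method, then assert on the recorded calls. A stripped-down, self-contained version of that pattern (class and method names here are invented for the sketch, and the standard-library unittest.mock stands in for the external mock package):
from unittest import mock

class Scenario(object):
    def _create_network(self, args):
        raise NotImplementedError   # replaced by the patch in the test below
    def create_and_list_networks(self, network_create_args=None):
        self._create_network(network_create_args or {})

with mock.patch.object(Scenario, "_create_network") as mock_create:
    # Explicit create args
    Scenario().create_and_list_networks(network_create_args={"name": "given-name"})
    mock_create.assert_called_once_with({"name": "given-name"})
    mock_create.reset_mock()
    # Default options
    Scenario().create_and_list_networks()
    mock_create.assert_called_once_with({})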
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class Chassis100ChassisActions(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
Chassis100ChassisActions - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'oem': 'object',
'chassis_reset': 'Chassis100Reset'
}
self.attribute_map = {
'oem': 'Oem',
'chassis_reset': '#Chassis.Reset'
}
self._oem = None
self._chassis_reset = None
@property
def oem(self):
"""
Gets the oem of this Chassis100ChassisActions.
:return: The oem of this Chassis100ChassisActions.
:rtype: object
"""
return self._oem
@oem.setter
def oem(self, oem):
"""
Sets the oem of this Chassis100ChassisActions.
:param oem: The oem of this Chassis100ChassisActions.
:type: object
"""
self._oem = oem
@property
def chassis_reset(self):
"""
Gets the chassis_reset of this Chassis100ChassisActions.
:return: The chassis_reset of this Chassis100ChassisActions.
:rtype: Chassis100Reset
"""
return self._chassis_reset
@chassis_reset.setter
def chassis_reset(self, chassis_reset):
"""
Sets the chassis_reset of this Chassis100ChassisActions.
:param chassis_reset: The chassis_reset of this Chassis100ChassisActions.
:type: Chassis100Reset
"""
self._chassis_reset = chassis_reset
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
jlongever/redfish-client-python
|
on_http_redfish_1_0/models/chassis_1_0_0_chassis_actions.py
|
Python
|
apache-2.0
| 3,731
| 0.001072
|
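A rough usage sketch for the generated model above; it assumes the package from the listed path is importable as shown, and the attribute values are made up:
from on_http_redfish_1_0.models.chassis_1_0_0_chassis_actions import (
    Chassis100ChassisActions,
)

actions = Chassis100ChassisActions()
actions.oem = {'vendor': 'example'}   # swagger type 'object', so a plain dict is fine
print(actions.to_dict())              # {'oem': {'vendor': 'example'}, 'chassis_reset': None}
print(actions.to_str())               # pformat of the same dict
print(actions == Chassis100ChassisActions())   # False: __eq__ compares instance __dict__s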
###############################################################################
# This file is part of openWNS (open Wireless Network Simulator)
# _____________________________________________________________________________
#
# Copyright (C) 2004-2007
# Chair of Communication Networks (ComNets)
# Kopernikusstr. 16, D-52074 Aachen, Germany
# phone: ++49-241-80-27910,
# fax: ++49-241-80-22242
# email: info@openwns.org
# www: http://www.openwns.org
# _____________________________________________________________________________
#
# openWNS is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License version 2 as published by the
# Free Software Foundation;
#
# openWNS is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#from Cleanup import *
#from Documentation import *
#from ForEach import *
#from Install import *
#from Lint import *
#from Missing import *
#from Replay import *
#from SanityCheck import *
#from Testing import *
#from Update import *
#from Upgrade import *
|
creasyw/IMTAphy
|
wnsbase/playground/builtins/__init__.py
|
Python
|
gpl-2.0
| 1,457
| 0.00755
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import six
from six.moves import zip_longest
import copy
import re
from types import GeneratorType
from collections import Hashable
from unittest import TestCase, main
import numpy as np
import numpy.testing as npt
import pandas as pd
from skbio import Sequence
from skbio.util import assert_data_frame_almost_equal
from skbio.sequence._sequence import (_single_index_to_slice, _is_single_index,
_as_slice_if_single_index)
class SequenceSubclass(Sequence):
"""Used for testing purposes."""
pass
class TestSequence(TestCase):
def setUp(self):
self.lowercase_seq = Sequence('AAAAaaaa', lowercase='key')
self.sequence_kinds = frozenset([
str, Sequence, lambda s: np.fromstring(s, dtype='|S1'),
lambda s: np.fromstring(s, dtype=np.uint8)])
def empty_generator():
raise StopIteration()
yield
self.getitem_empty_indices = [
[],
(),
{},
empty_generator(),
# ndarray of implicit float dtype
np.array([]),
np.array([], dtype=int)]
def test_init_default_parameters(self):
seq = Sequence('.ABC123xyz-')
npt.assert_equal(seq.values, np.array('.ABC123xyz-', dtype='c'))
self.assertEqual('.ABC123xyz-', str(seq))
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertFalse(seq.has_positional_metadata())
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame(index=np.arange(11)))
def test_init_nondefault_parameters(self):
seq = Sequence('.ABC123xyz-',
metadata={'id': 'foo', 'description': 'bar baz'},
positional_metadata={'quality': range(11)})
npt.assert_equal(seq.values, np.array('.ABC123xyz-', dtype='c'))
self.assertEqual('.ABC123xyz-', str(seq))
self.assertTrue(seq.has_metadata())
self.assertEqual(seq.metadata, {'id': 'foo', 'description': 'bar baz'})
self.assertTrue(seq.has_positional_metadata())
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'quality': range(11)}, index=np.arange(11)))
def test_init_handles_missing_metadata_efficiently(self):
seq = Sequence('ACGT')
# metadata attributes should be None and not initialized to a "missing"
# representation
self.assertIsNone(seq._metadata)
self.assertIsNone(seq._positional_metadata)
# initializing from an existing Sequence object should handle metadata
# attributes efficiently on both objects
new_seq = Sequence(seq)
self.assertIsNone(seq._metadata)
self.assertIsNone(seq._positional_metadata)
self.assertIsNone(new_seq._metadata)
self.assertIsNone(new_seq._positional_metadata)
self.assertFalse(seq.has_metadata())
self.assertFalse(seq.has_positional_metadata())
self.assertFalse(new_seq.has_metadata())
self.assertFalse(new_seq.has_positional_metadata())
def test_init_empty_sequence(self):
# Test constructing an empty sequence using each supported input type.
for s in (b'', # bytes
u'', # unicode
np.array('', dtype='c'), # char vector
np.fromstring('', dtype=np.uint8), # byte vec
Sequence('')): # another Sequence object
seq = Sequence(s)
self.assertIsInstance(seq.values, np.ndarray)
self.assertEqual(seq.values.dtype, '|S1')
self.assertEqual(seq.values.shape, (0, ))
npt.assert_equal(seq.values, np.array('', dtype='c'))
self.assertEqual(str(seq), '')
self.assertEqual(len(seq), 0)
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertFalse(seq.has_positional_metadata())
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame(index=np.arange(0)))
def test_init_single_character_sequence(self):
for s in (b'A',
u'A',
np.array('A', dtype='c'),
np.fromstring('A', dtype=np.uint8),
Sequence('A')):
seq = Sequence(s)
self.assertIsInstance(seq.values, np.ndarray)
self.assertEqual(seq.values.dtype, '|S1')
self.assertEqual(seq.values.shape, (1,))
npt.assert_equal(seq.values, np.array('A', dtype='c'))
self.assertEqual(str(seq), 'A')
self.assertEqual(len(seq), 1)
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertFalse(seq.has_positional_metadata())
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame(index=np.arange(1)))
def test_init_multiple_character_sequence(self):
for s in (b'.ABC\t123 xyz-',
u'.ABC\t123 xyz-',
np.array('.ABC\t123 xyz-', dtype='c'),
np.fromstring('.ABC\t123 xyz-', dtype=np.uint8),
Sequence('.ABC\t123 xyz-')):
seq = Sequence(s)
self.assertIsInstance(seq.values, np.ndarray)
self.assertEqual(seq.values.dtype, '|S1')
self.assertEqual(seq.values.shape, (14,))
npt.assert_equal(seq.values,
np.array('.ABC\t123 xyz-', dtype='c'))
self.assertEqual(str(seq), '.ABC\t123 xyz-')
self.assertEqual(len(seq), 14)
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertFalse(seq.has_positional_metadata())
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame(index=np.arange(14)))
def test_init_from_sequence_object(self):
# We're testing this in its simplest form in other tests. This test
# exercises more complicated cases of building a sequence from another
# sequence.
# just the sequence, no other metadata
seq = Sequence('ACGT')
self.assertEqual(Sequence(seq), seq)
# sequence with metadata should have everything propagated
seq = Sequence('ACGT',
metadata={'id': 'foo', 'description': 'bar baz'},
positional_metadata={'quality': range(4)})
self.assertEqual(Sequence(seq), seq)
# should be able to override metadata
self.assertEqual(
Sequence(seq, metadata={'id': 'abc', 'description': '123'},
positional_metadata={'quality': [42] * 4}),
Sequence('ACGT', metadata={'id': 'abc', 'description': '123'},
positional_metadata={'quality': [42] * 4}))
# subclasses work too
seq = SequenceSubclass('ACGT',
metadata={'id': 'foo',
'description': 'bar baz'},
positional_metadata={'quality': range(4)})
self.assertEqual(
Sequence(seq),
Sequence('ACGT', metadata={'id': 'foo', 'description': 'bar baz'},
positional_metadata={'quality': range(4)}))
def test_init_from_contiguous_sequence_bytes_view(self):
bytes = np.array([65, 42, 66, 42, 65], dtype=np.uint8)
view = bytes[:3]
seq = Sequence(view)
# sequence should be what we'd expect
self.assertEqual(seq, Sequence('A*B'))
# we shouldn't own the memory because no copy should have been made
self.assertFalse(seq._owns_bytes)
# can't mutate view because it isn't writeable anymore
with self.assertRaises(ValueError):
view[1] = 100
# sequence shouldn't have changed
self.assertEqual(seq, Sequence('A*B'))
# mutate bytes (*not* the view)
bytes[0] = 99
# Sequence changed because we are only able to make the view read-only,
# not its source (bytes). This is somewhat inconsistent behavior that
# is (to the best of our knowledge) outside our control.
self.assertEqual(seq, Sequence('c*B'))
def test_init_from_noncontiguous_sequence_bytes_view(self):
bytes = np.array([65, 42, 66, 42, 65], dtype=np.uint8)
view = bytes[::2]
seq = Sequence(view)
# sequence should be what we'd expect
self.assertEqual(seq, Sequence('ABA'))
# we should own the memory because a copy should have been made
self.assertTrue(seq._owns_bytes)
# mutate bytes and its view
bytes[0] = 99
view[1] = 100
# sequence shouldn't have changed
self.assertEqual(seq, Sequence('ABA'))
def test_init_no_copy_of_sequence(self):
bytes = np.array([65, 66, 65], dtype=np.uint8)
seq = Sequence(bytes)
# should share the same memory
self.assertIs(seq._bytes, bytes)
# shouldn't be able to mutate the Sequence object's internals by
# mutating the shared memory
with self.assertRaises(ValueError):
bytes[1] = 42
def test_init_empty_metadata(self):
for empty in None, {}:
seq = Sequence('', metadata=empty)
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
def test_init_empty_metadata_key(self):
seq = Sequence('', metadata={'': ''})
self.assertTrue(seq.has_metadata())
self.assertEqual(seq.metadata, {'': ''})
def test_init_empty_metadata_item(self):
seq = Sequence('', metadata={'foo': ''})
self.assertTrue(seq.has_metadata())
self.assertEqual(seq.metadata, {'foo': ''})
def test_init_single_character_metadata_item(self):
seq = Sequence('', metadata={'foo': 'z'})
self.assertTrue(seq.has_metadata())
self.assertEqual(seq.metadata, {'foo': 'z'})
def test_init_multiple_character_metadata_item(self):
seq = Sequence('', metadata={'foo': '\nabc\tdef G123'})
self.assertTrue(seq.has_metadata())
self.assertEqual(seq.metadata, {'foo': '\nabc\tdef G123'})
def test_init_metadata_multiple_keys(self):
seq = Sequence('', metadata={'foo': 'abc', 42: {'nested': 'metadata'}})
self.assertTrue(seq.has_metadata())
self.assertEqual(seq.metadata,
{'foo': 'abc', 42: {'nested': 'metadata'}})
def test_init_empty_positional_metadata(self):
# empty seq with missing/empty positional metadata
for empty in None, {}, pd.DataFrame():
seq = Sequence('', positional_metadata=empty)
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertFalse(seq.has_positional_metadata())
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame(index=np.arange(0)))
# non-empty seq with missing positional metadata
seq = Sequence('xyz', positional_metadata=None)
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertFalse(seq.has_positional_metadata())
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame(index=np.arange(3)))
def test_init_empty_positional_metadata_item(self):
for item in ([], (), np.array([])):
seq = Sequence('', positional_metadata={'foo': item})
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertTrue(seq.has_positional_metadata())
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'foo': item}, index=np.arange(0)))
def test_init_single_positional_metadata_item(self):
for item in ([2], (2, ), np.array([2])):
seq = Sequence('G', positional_metadata={'foo': item})
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertTrue(seq.has_positional_metadata())
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'foo': item}, index=np.arange(1)))
def test_init_multiple_positional_metadata_item(self):
for item in ([0, 42, 42, 1, 0, 8, 100, 0, 0],
(0, 42, 42, 1, 0, 8, 100, 0, 0),
np.array([0, 42, 42, 1, 0, 8, 100, 0, 0])):
seq = Sequence('G' * 9, positional_metadata={'foo': item})
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertTrue(seq.has_positional_metadata())
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'foo': item}, index=np.arange(9)))
def test_init_positional_metadata_multiple_columns(self):
seq = Sequence('^' * 5,
positional_metadata={'foo': np.arange(5),
'bar': np.arange(5)[::-1]})
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertTrue(seq.has_positional_metadata())
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'foo': np.arange(5),
'bar': np.arange(5)[::-1]}, index=np.arange(5)))
def test_init_positional_metadata_with_custom_index(self):
df = pd.DataFrame({'foo': np.arange(5), 'bar': np.arange(5)[::-1]},
index=['a', 'b', 'c', 'd', 'e'])
seq = Sequence('^' * 5, positional_metadata=df)
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertTrue(seq.has_positional_metadata())
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'foo': np.arange(5),
'bar': np.arange(5)[::-1]}, index=np.arange(5)))
def test_init_invalid_sequence(self):
# invalid dtype (numpy.ndarray input)
with self.assertRaises(TypeError):
# int64
Sequence(np.array([1, 2, 3]))
with self.assertRaises(TypeError):
# |S21
Sequence(np.array([1, "23", 3]))
with self.assertRaises(TypeError):
# object
Sequence(np.array([1, {}, ()]))
# invalid input type (non-numpy.ndarray input)
with six.assertRaisesRegex(self, TypeError, 'tuple'):
Sequence(('a', 'b', 'c'))
with six.assertRaisesRegex(self, TypeError, 'list'):
Sequence(['a', 'b', 'c'])
with six.assertRaisesRegex(self, TypeError, 'set'):
Sequence({'a', 'b', 'c'})
with six.assertRaisesRegex(self, TypeError, 'dict'):
Sequence({'a': 42, 'b': 43, 'c': 44})
with six.assertRaisesRegex(self, TypeError, 'int'):
Sequence(42)
with six.assertRaisesRegex(self, TypeError, 'float'):
Sequence(4.2)
with six.assertRaisesRegex(self, TypeError, 'int64'):
Sequence(np.int_(50))
with six.assertRaisesRegex(self, TypeError, 'float64'):
Sequence(np.float_(50))
with six.assertRaisesRegex(self, TypeError, 'Foo'):
class Foo(object):
pass
Sequence(Foo())
# out of ASCII range
with self.assertRaises(UnicodeEncodeError):
Sequence(u'abc\u1F30')
def test_init_invalid_metadata(self):
for md in (0, 'a', ('f', 'o', 'o'), np.array([]), pd.DataFrame()):
with six.assertRaisesRegex(self, TypeError,
'metadata must be a dict'):
Sequence('abc', metadata=md)
def test_init_invalid_positional_metadata(self):
# not consumable by Pandas
with six.assertRaisesRegex(self, TypeError,
'Positional metadata invalid. Must be '
'consumable by pd.DataFrame. '
'Original pandas error message: '):
Sequence('ACGT', positional_metadata=2)
# 0 elements
with six.assertRaisesRegex(self, ValueError, '\(0\).*\(4\)'):
Sequence('ACGT', positional_metadata=[])
# not enough elements
with six.assertRaisesRegex(self, ValueError, '\(3\).*\(4\)'):
Sequence('ACGT', positional_metadata=[2, 3, 4])
# too many elements
with six.assertRaisesRegex(self, ValueError, '\(5\).*\(4\)'):
Sequence('ACGT', positional_metadata=[2, 3, 4, 5, 6])
# Series not enough rows
with six.assertRaisesRegex(self, ValueError, '\(3\).*\(4\)'):
Sequence('ACGT', positional_metadata=pd.Series(range(3)))
# Series too many rows
with six.assertRaisesRegex(self, ValueError, '\(5\).*\(4\)'):
Sequence('ACGT', positional_metadata=pd.Series(range(5)))
# DataFrame not enough rows
with six.assertRaisesRegex(self, ValueError, '\(3\).*\(4\)'):
Sequence('ACGT',
positional_metadata=pd.DataFrame({'quality': range(3)}))
# DataFrame too many rows
with six.assertRaisesRegex(self, ValueError, '\(5\).*\(4\)'):
Sequence('ACGT',
positional_metadata=pd.DataFrame({'quality': range(5)}))
def test_values_property(self):
# Property tests are only concerned with testing the interface
# provided by the property: that it can be accessed, can't be
# reassigned or mutated in place, and that the correct type is
# returned. More extensive testing of border cases (e.g., different
        # sequence lengths or input types, odd characters, etc.) is performed
# in Sequence.__init__ tests.
seq = Sequence('ACGT')
# should get back a numpy.ndarray of '|S1' dtype
self.assertIsInstance(seq.values, np.ndarray)
self.assertEqual(seq.values.dtype, '|S1')
npt.assert_equal(seq.values, np.array('ACGT', dtype='c'))
# test that we can't mutate the property
with self.assertRaises(ValueError):
seq.values[1] = 'A'
# test that we can't set the property
with self.assertRaises(AttributeError):
seq.values = np.array("GGGG", dtype='c')
def test_metadata_property_getter(self):
md = {'foo': 'bar'}
seq = Sequence('', metadata=md)
self.assertIsInstance(seq.metadata, dict)
self.assertEqual(seq.metadata, md)
self.assertIsNot(seq.metadata, md)
# update existing key
seq.metadata['foo'] = 'baz'
self.assertEqual(seq.metadata, {'foo': 'baz'})
# add new key
seq.metadata['foo2'] = 'bar2'
self.assertEqual(seq.metadata, {'foo': 'baz', 'foo2': 'bar2'})
def test_metadata_property_getter_missing(self):
seq = Sequence('ACGT')
self.assertIsNone(seq._metadata)
self.assertEqual(seq.metadata, {})
self.assertIsNotNone(seq._metadata)
def test_metadata_property_setter(self):
md = {'foo': 'bar'}
seq = Sequence('', metadata=md)
self.assertEqual(seq.metadata, md)
self.assertIsNot(seq.metadata, md)
new_md = {'bar': 'baz', 42: 42}
seq.metadata = new_md
self.assertEqual(seq.metadata, new_md)
self.assertIsNot(seq.metadata, new_md)
seq.metadata = {}
self.assertEqual(seq.metadata, {})
self.assertFalse(seq.has_metadata())
def test_metadata_property_setter_invalid_type(self):
seq = Sequence('abc', metadata={123: 456})
for md in (None, 0, 'a', ('f', 'o', 'o'), np.array([]),
pd.DataFrame()):
with six.assertRaisesRegex(self, TypeError,
'metadata must be a dict'):
seq.metadata = md
# object should still be usable and its original metadata shouldn't
# have changed
self.assertEqual(seq.metadata, {123: 456})
def test_metadata_property_deleter(self):
md = {'foo': 'bar'}
seq = Sequence('CAT', metadata=md)
self.assertTrue(seq.has_metadata())
self.assertEqual(seq.metadata, md)
self.assertIsNot(seq.metadata, md)
del seq.metadata
self.assertIsNone(seq._metadata)
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
# test deleting again
del seq.metadata
self.assertIsNone(seq._metadata)
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
# test deleting missing metadata immediately after instantiation
seq = Sequence('ACGT')
self.assertIsNone(seq._metadata)
del seq.metadata
self.assertIsNone(seq._metadata)
def test_metadata_property_shallow_copy(self):
md = {'key1': 'val1', 'key2': 'val2', 'key3': [1, 2]}
seq = Sequence('CAT', metadata=md)
self.assertTrue(seq.has_metadata())
self.assertEqual(seq.metadata, md)
self.assertIsNot(seq.metadata, md)
# updates to keys
seq.metadata['key1'] = 'new val'
self.assertEqual(seq.metadata,
{'key1': 'new val', 'key2': 'val2', 'key3': [1, 2]})
# original metadata untouched
self.assertEqual(md, {'key1': 'val1', 'key2': 'val2', 'key3': [1, 2]})
# updates to mutable value (by reference)
seq.metadata['key3'].append(3)
self.assertEqual(
seq.metadata,
{'key1': 'new val', 'key2': 'val2', 'key3': [1, 2, 3]})
# original metadata changed because we didn't deep copy
self.assertEqual(
md,
{'key1': 'val1', 'key2': 'val2', 'key3': [1, 2, 3]})
def test_positional_metadata_property_getter(self):
md = pd.DataFrame({'foo': [22, 22, 0]})
seq = Sequence('ACA', positional_metadata=md)
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame({'foo': [22, 22, 0]}))
self.assertIsNot(seq.positional_metadata, md)
# update existing column
seq.positional_metadata['foo'] = [42, 42, 43]
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame({'foo': [42, 42, 43]}))
# add new column
seq.positional_metadata['foo2'] = [True, False, True]
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'foo': [42, 42, 43],
'foo2': [True, False, True]}))
def test_positional_metadata_property_getter_missing(self):
seq = Sequence('ACGT')
self.assertIsNone(seq._positional_metadata)
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame(index=np.arange(4)))
self.assertIsNotNone(seq._positional_metadata)
def test_positional_metadata_property_setter(self):
md = pd.DataFrame({'foo': [22, 22, 0]})
seq = Sequence('ACA', positional_metadata=md)
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame({'foo': [22, 22, 0]}))
self.assertIsNot(seq.positional_metadata, md)
new_md = pd.DataFrame({'bar': np.arange(3)}, index=['a', 'b', 'c'])
seq.positional_metadata = new_md
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'bar': np.arange(3)}, index=np.arange(3)))
self.assertIsNot(seq.positional_metadata, new_md)
seq.positional_metadata = pd.DataFrame(index=np.arange(3))
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame(index=np.arange(3)))
self.assertFalse(seq.has_positional_metadata())
def test_positional_metadata_property_setter_invalid_type(self):
        # More extensive tests for invalid input are in the Sequence.__init__ tests
seq = Sequence('abc', positional_metadata={'foo': [1, 2, 42]})
# not consumable by Pandas
with six.assertRaisesRegex(self, TypeError,
'Positional metadata invalid. Must be '
'consumable by pd.DataFrame. '
'Original pandas error message: '):
seq.positional_metadata = 2
# object should still be usable and its original metadata shouldn't
# have changed
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame({'foo': [1, 2, 42]}))
# wrong length
with six.assertRaisesRegex(self, ValueError, '\(2\).*\(3\)'):
seq.positional_metadata = {'foo': [1, 2]}
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame({'foo': [1, 2, 42]}))
# None isn't valid when using setter (differs from constructor)
with six.assertRaisesRegex(self, ValueError, '\(0\).*\(3\)'):
seq.positional_metadata = None
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame({'foo': [1, 2, 42]}))
def test_positional_metadata_property_deleter(self):
md = pd.DataFrame({'foo': [22, 22, 0]})
seq = Sequence('ACA', positional_metadata=md)
self.assertTrue(seq.has_positional_metadata())
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame({'foo': [22, 22, 0]}))
self.assertIsNot(seq.positional_metadata, md)
del seq.positional_metadata
self.assertIsNone(seq._positional_metadata)
self.assertFalse(seq.has_positional_metadata())
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame(index=np.arange(3)))
# test deleting again
del seq.positional_metadata
self.assertIsNone(seq._positional_metadata)
self.assertFalse(seq.has_positional_metadata())
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame(index=np.arange(3)))
# test deleting missing positional metadata immediately after
# instantiation
seq = Sequence('ACGT')
self.assertIsNone(seq._positional_metadata)
del seq.positional_metadata
self.assertIsNone(seq._positional_metadata)
def test_positional_metadata_property_shallow_copy(self):
# define metadata as a DataFrame because this has the potential to have
# its underlying data shared
md = pd.DataFrame({'foo': [22, 22, 0]}, index=['a', 'b', 'c'])
seq = Sequence('ACA', positional_metadata=md)
self.assertTrue(seq.has_positional_metadata())
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'foo': [22, 22, 0]}, index=np.arange(3)))
self.assertIsNot(seq.positional_metadata, md)
# original metadata untouched
orig_md = pd.DataFrame({'foo': [22, 22, 0]}, index=['a', 'b', 'c'])
assert_data_frame_almost_equal(md, orig_md)
# change values of column (using same dtype)
seq.positional_metadata['foo'] = [42, 42, 42]
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'foo': [42, 42, 42]}, index=np.arange(3)))
# original metadata untouched
assert_data_frame_almost_equal(md, orig_md)
# change single value of underlying data
seq.positional_metadata.values[0][0] = 10
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'foo': [10, 42, 42]}, index=np.arange(3)))
# original metadata untouched
assert_data_frame_almost_equal(md, orig_md)
# create column of object dtype -- these aren't deep copied
md = pd.DataFrame({'obj': [[], [], []]}, index=['a', 'b', 'c'])
seq = Sequence('ACA', positional_metadata=md)
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'obj': [[], [], []]}, index=np.arange(3)))
# mutate list
seq.positional_metadata['obj'][0].append(42)
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'obj': [[42], [], []]}, index=np.arange(3)))
# original metadata changed because we didn't do a full deep copy
assert_data_frame_almost_equal(
md,
pd.DataFrame({'obj': [[42], [], []]}, index=['a', 'b', 'c']))
def test_positional_metadata_property_set_column_series(self):
seq_text = 'ACGTACGT'
l = len(seq_text)
seq = Sequence(seq_text, positional_metadata={'foo': range(l)})
seq.positional_metadata['bar'] = pd.Series(range(l-3))
# pandas.Series will be padded with NaN if too short
npt.assert_equal(seq.positional_metadata['bar'],
np.array(list(range(l-3)) + [np.NaN]*3))
seq.positional_metadata['baz'] = pd.Series(range(l+3))
# pandas.Series will be truncated if too long
npt.assert_equal(seq.positional_metadata['baz'],
np.array(range(l)))
def test_positional_metadata_property_set_column_array(self):
seq_text = 'ACGTACGT'
l = len(seq_text)
seq = Sequence(seq_text, positional_metadata={'foo': range(l)})
# array-like objects will fail if wrong size
for array_like in (np.array(range(l-1)), range(l-1),
np.array(range(l+1)), range(l+1)):
with six.assertRaisesRegex(self, ValueError,
"Length of values does not match "
"length of index"):
seq.positional_metadata['bar'] = array_like
def test_observed_chars_property(self):
self.assertEqual(Sequence('').observed_chars, set())
self.assertEqual(Sequence('x').observed_chars, {'x'})
self.assertEqual(Sequence('xYz').observed_chars, {'x', 'Y', 'z'})
self.assertEqual(Sequence('zzz').observed_chars, {'z'})
self.assertEqual(Sequence('xYzxxZz').observed_chars,
{'x', 'Y', 'z', 'Z'})
self.assertEqual(Sequence('\t ').observed_chars, {' ', '\t'})
self.assertEqual(
Sequence('aabbcc', metadata={'foo': 'bar'},
positional_metadata={'foo': range(6)}).observed_chars,
{'a', 'b', 'c'})
with self.assertRaises(AttributeError):
Sequence('ACGT').observed_chars = {'a', 'b', 'c'}
def test_eq_and_ne(self):
seq_a = Sequence("A")
seq_b = Sequence("B")
self.assertTrue(seq_a == seq_a)
self.assertTrue(Sequence("a") == Sequence("a"))
self.assertTrue(Sequence("a", metadata={'id': 'b'}) ==
Sequence("a", metadata={'id': 'b'}))
self.assertTrue(Sequence("a",
metadata={'id': 'b', 'description': 'c'}) ==
Sequence("a",
metadata={'id': 'b', 'description': 'c'}))
self.assertTrue(Sequence("a", metadata={'id': 'b', 'description': 'c'},
positional_metadata={'quality': [1]}) ==
Sequence("a", metadata={'id': 'b', 'description': 'c'},
positional_metadata={'quality': [1]}))
self.assertTrue(seq_a != seq_b)
self.assertTrue(SequenceSubclass("a") != Sequence("a"))
self.assertTrue(Sequence("a") != Sequence("b"))
self.assertTrue(Sequence("a") != Sequence("a", metadata={'id': 'b'}))
self.assertTrue(Sequence("a", metadata={'id': 'c'}) !=
Sequence("a",
metadata={'id': 'c', 'description': 't'}))
self.assertTrue(Sequence("a", positional_metadata={'quality': [1]}) !=
Sequence("a"))
self.assertTrue(Sequence("a", positional_metadata={'quality': [1]}) !=
Sequence("a", positional_metadata={'quality': [2]}))
self.assertTrue(Sequence("c", positional_metadata={'quality': [3]}) !=
Sequence("b", positional_metadata={'quality': [3]}))
self.assertTrue(Sequence("a", metadata={'id': 'b'}) !=
Sequence("c", metadata={'id': 'b'}))
def test_eq_sequences_without_metadata_compare_equal(self):
self.assertTrue(Sequence('') == Sequence(''))
self.assertTrue(Sequence('z') == Sequence('z'))
self.assertTrue(
Sequence('ACGT') == Sequence('ACGT'))
def test_eq_sequences_with_metadata_compare_equal(self):
seq1 = Sequence('ACGT', metadata={'id': 'foo', 'desc': 'abc'},
positional_metadata={'qual': [1, 2, 3, 4]})
seq2 = Sequence('ACGT', metadata={'id': 'foo', 'desc': 'abc'},
positional_metadata={'qual': [1, 2, 3, 4]})
self.assertTrue(seq1 == seq2)
# order shouldn't matter
self.assertTrue(seq2 == seq1)
def test_eq_sequences_from_different_sources_compare_equal(self):
# sequences that have the same data but are constructed from different
# types of data should compare equal
seq1 = Sequence('ACGT', metadata={'id': 'foo', 'desc': 'abc'},
positional_metadata={'quality': (1, 2, 3, 4)})
seq2 = Sequence(np.array([65, 67, 71, 84], dtype=np.uint8),
metadata={'id': 'foo', 'desc': 'abc'},
positional_metadata={'quality': np.array([1, 2, 3,
4])})
self.assertTrue(seq1 == seq2)
def test_eq_type_mismatch(self):
seq1 = Sequence('ACGT')
seq2 = SequenceSubclass('ACGT')
self.assertFalse(seq1 == seq2)
def test_eq_metadata_mismatch(self):
# both provided
seq1 = Sequence('ACGT', metadata={'id': 'foo'})
seq2 = Sequence('ACGT', metadata={'id': 'bar'})
self.assertFalse(seq1 == seq2)
# one provided
seq1 = Sequence('ACGT', metadata={'id': 'foo'})
seq2 = Sequence('ACGT')
self.assertFalse(seq1 == seq2)
def test_eq_positional_metadata_mismatch(self):
# both provided
seq1 = Sequence('ACGT', positional_metadata={'quality': [1, 2, 3, 4]})
seq2 = Sequence('ACGT', positional_metadata={'quality': [1, 2, 3, 5]})
self.assertFalse(seq1 == seq2)
# one provided
seq1 = Sequence('ACGT', positional_metadata={'quality': [1, 2, 3, 4]})
seq2 = Sequence('ACGT')
self.assertFalse(seq1 == seq2)
def test_eq_sequence_mismatch(self):
seq1 = Sequence('ACGT')
seq2 = Sequence('TGCA')
self.assertFalse(seq1 == seq2)
def test_eq_handles_missing_metadata_efficiently(self):
seq1 = Sequence('ACGT')
seq2 = Sequence('ACGT')
self.assertTrue(seq1 == seq2)
# metadata attributes should be None and not initialized to a "missing"
# representation
self.assertIsNone(seq1._metadata)
self.assertIsNone(seq1._positional_metadata)
self.assertIsNone(seq2._metadata)
self.assertIsNone(seq2._positional_metadata)
def test_getitem_gives_new_sequence(self):
seq = Sequence("Sequence string !1@2#3?.,")
self.assertFalse(seq is seq[:])
def test_getitem_with_int_has_positional_metadata(self):
s = "Sequence string !1@2#3?.,"
length = len(s)
seq = Sequence(s, metadata={'id': 'id', 'description': 'dsc'},
positional_metadata={'quality': np.arange(length)})
eseq = Sequence("S", {'id': 'id', 'description': 'dsc'},
positional_metadata={'quality': np.array([0])})
self.assertEqual(seq[0], eseq)
eseq = Sequence(",", metadata={'id': 'id', 'description': 'dsc'},
positional_metadata={'quality':
np.array([len(seq) - 1])})
self.assertEqual(seq[len(seq) - 1], eseq)
eseq = Sequence("t", metadata={'id': 'id', 'description': 'dsc'},
positional_metadata={'quality': [10]})
self.assertEqual(seq[10], eseq)
def test_single_index_to_slice(self):
a = [1, 2, 3, 4]
self.assertEqual(slice(0, 1), _single_index_to_slice(0))
self.assertEqual([1], a[_single_index_to_slice(0)])
self.assertEqual(slice(-1, None),
_single_index_to_slice(-1))
self.assertEqual([4], a[_single_index_to_slice(-1)])
def test_is_single_index(self):
self.assertTrue(_is_single_index(0))
self.assertFalse(_is_single_index(True))
self.assertFalse(_is_single_index(bool()))
self.assertFalse(_is_single_index('a'))
def test_as_slice_if_single_index(self):
self.assertEqual(slice(0, 1), _as_slice_if_single_index(0))
slice_obj = slice(2, 3)
self.assertIs(slice_obj,
_as_slice_if_single_index(slice_obj))
def test_slice_positional_metadata(self):
seq = Sequence('ABCDEFGHIJ',
positional_metadata={'foo': np.arange(10),
'bar': np.arange(100, 110)})
self.assertTrue(pd.DataFrame({'foo': [0], 'bar': [100]}).equals(
seq._slice_positional_metadata(0)))
self.assertTrue(pd.DataFrame({'foo': [0], 'bar': [100]}).equals(
seq._slice_positional_metadata(slice(0, 1))))
self.assertTrue(pd.DataFrame({'foo': [0, 1],
'bar': [100, 101]}).equals(
seq._slice_positional_metadata(slice(0, 2))))
self.assertTrue(pd.DataFrame(
{'foo': [9], 'bar': [109]}, index=[9]).equals(
seq._slice_positional_metadata(9)))
def test_getitem_with_int_no_positional_metadata(self):
seq = Sequence("Sequence string !1@2#3?.,",
metadata={'id': 'id2', 'description': 'no_qual'})
eseq = Sequence("t", metadata={'id': 'id2', 'description': 'no_qual'})
self.assertEqual(seq[10], eseq)
def test_getitem_with_slice_has_positional_metadata(self):
s = "0123456789abcdef"
length = len(s)
seq = Sequence(s, metadata={'id': 'id3', 'description': 'dsc3'},
positional_metadata={'quality': np.arange(length)})
eseq = Sequence("012", metadata={'id': 'id3', 'description': 'dsc3'},
positional_metadata={'quality': np.arange(3)})
self.assertEqual(seq[0:3], eseq)
self.assertEqual(seq[:3], eseq)
self.assertEqual(seq[:3:1], eseq)
eseq = Sequence("def", metadata={'id': 'id3', 'description': 'dsc3'},
positional_metadata={'quality': [13, 14, 15]})
self.assertEqual(seq[-3:], eseq)
self.assertEqual(seq[-3::1], eseq)
eseq = Sequence("02468ace",
metadata={'id': 'id3', 'description': 'dsc3'},
positional_metadata={'quality': [0, 2, 4, 6, 8, 10,
12, 14]})
self.assertEqual(seq[0:length:2], eseq)
self.assertEqual(seq[::2], eseq)
eseq = Sequence(s[::-1], metadata={'id': 'id3', 'description': 'dsc3'},
positional_metadata={'quality':
np.arange(length)[::-1]})
self.assertEqual(seq[length::-1], eseq)
self.assertEqual(seq[::-1], eseq)
eseq = Sequence('fdb97531',
metadata={'id': 'id3', 'description': 'dsc3'},
positional_metadata={'quality': [15, 13, 11, 9, 7, 5,
3, 1]})
self.assertEqual(seq[length::-2], eseq)
self.assertEqual(seq[::-2], eseq)
self.assertEqual(seq[0:500:], seq)
eseq = Sequence('', metadata={'id': 'id3', 'description': 'dsc3'},
positional_metadata={'quality':
np.array([], dtype=np.int64)})
self.assertEqual(seq[length:0], eseq)
self.assertEqual(seq[-length:0], eseq)
self.assertEqual(seq[1:0], eseq)
eseq = Sequence("0", metadata={'id': 'id3', 'description': 'dsc3'},
positional_metadata={'quality': [0]})
self.assertEqual(seq[0:1], eseq)
self.assertEqual(seq[0:1:1], eseq)
self.assertEqual(seq[-length::-1], eseq)
def test_getitem_with_slice_no_positional_metadata(self):
s = "0123456789abcdef"
length = len(s)
seq = Sequence(s, metadata={'id': 'id4', 'description': 'no_qual4'})
eseq = Sequence("02468ace",
metadata={'id': 'id4', 'description': 'no_qual4'})
self.assertEqual(seq[0:length:2], eseq)
self.assertEqual(seq[::2], eseq)
def test_getitem_with_tuple_of_mixed_with_positional_metadata(self):
s = "0123456789abcdef"
length = len(s)
seq = Sequence(s, metadata={'id': 'id5', 'description': 'dsc5'},
positional_metadata={'quality': np.arange(length)})
eseq = Sequence("00000", metadata={'id': 'id5', 'description': 'dsc5'},
positional_metadata={'quality': [0, 0, 0, 0, 0]})
self.assertEqual(seq[0, 0, 0, 0, 0], eseq)
self.assertEqual(seq[0, 0:1, 0, 0, 0], eseq)
self.assertEqual(seq[0, 0:1, 0, -length::-1, 0, 1:0], eseq)
self.assertEqual(seq[0:1, 0:1, 0:1, 0:1, 0:1], eseq)
self.assertEqual(seq[0:1, 0, 0, 0, 0], eseq)
eseq = Sequence("0123fed9",
metadata={'id': 'id5', 'description': 'dsc5'},
positional_metadata={'quality': [0, 1, 2, 3, 15, 14,
13, 9]})
self.assertEqual(seq[0, 1, 2, 3, 15, 14, 13, 9], eseq)
self.assertEqual(seq[0, 1, 2, 3, :-4:-1, 9], eseq)
self.assertEqual(seq[0:4, :-4:-1, 9, 1:0], eseq)
self.assertEqual(seq[0:4, :-4:-1, 9:10], eseq)
def test_getitem_with_tuple_of_mixed_no_positional_metadata(self):
seq = Sequence("0123456789abcdef",
metadata={'id': 'id6', 'description': 'no_qual6'})
eseq = Sequence("0123fed9",
metadata={'id': 'id6', 'description': 'no_qual6'})
self.assertEqual(seq[0, 1, 2, 3, 15, 14, 13, 9], eseq)
self.assertEqual(seq[0, 1, 2, 3, :-4:-1, 9], eseq)
self.assertEqual(seq[0:4, :-4:-1, 9], eseq)
self.assertEqual(seq[0:4, :-4:-1, 9:10], eseq)
def test_getitem_with_iterable_of_mixed_has_positional_metadata(self):
s = "0123456789abcdef"
length = len(s)
seq = Sequence(s, metadata={'id': 'id7', 'description': 'dsc7'},
positional_metadata={'quality': np.arange(length)})
def generator():
yield slice(0, 4)
yield slice(200, 400)
yield -1
yield slice(-2, -4, -1)
yield 9
eseq = Sequence("0123fed9",
metadata={'id': 'id7', 'description': 'dsc7'},
positional_metadata={'quality': [0, 1, 2, 3, 15, 14,
13, 9]})
self.assertEqual(seq[[0, 1, 2, 3, 15, 14, 13, 9]], eseq)
self.assertEqual(seq[generator()], eseq)
self.assertEqual(seq[[slice(0, 4), slice(None, -4, -1), 9]], eseq)
self.assertEqual(seq[
[slice(0, 4), slice(None, -4, -1), slice(9, 10)]], eseq)
def test_getitem_with_iterable_of_mixed_no_positional_metadata(self):
s = "0123456789abcdef"
seq = Sequence(s, metadata={'id': 'id7', 'description': 'dsc7'})
def generator():
yield slice(0, 4)
yield slice(200, 400)
yield slice(None, -4, -1)
yield 9
eseq = Sequence("0123fed9",
metadata={'id': 'id7', 'description': 'dsc7'})
self.assertEqual(seq[[0, 1, 2, 3, 15, 14, 13, 9]], eseq)
self.assertEqual(seq[generator()], eseq)
self.assertEqual(seq[[slice(0, 4), slice(None, -4, -1), 9]], eseq)
self.assertEqual(seq[
[slice(0, 4), slice(None, -4, -1), slice(9, 10)]], eseq)
def test_getitem_with_numpy_index_has_positional_metadata(self):
s = "0123456789abcdef"
length = len(s)
seq = Sequence(s, metadata={'id': 'id9', 'description': 'dsc9'},
positional_metadata={'quality': np.arange(length)})
eseq = Sequence("0123fed9",
metadata={'id': 'id9', 'description': 'dsc9'},
positional_metadata={'quality': [0, 1, 2, 3, 15, 14,
13, 9]})
self.assertEqual(seq[np.array([0, 1, 2, 3, 15, 14, 13, 9])], eseq)
def test_getitem_with_numpy_index_no_positional_metadata(self):
s = "0123456789abcdef"
seq = Sequence(s, metadata={'id': 'id10', 'description': 'dsc10'})
eseq = Sequence("0123fed9",
metadata={'id': 'id10', 'description': 'dsc10'})
self.assertEqual(seq[np.array([0, 1, 2, 3, 15, 14, 13, 9])], eseq)
def test_getitem_with_empty_indices_empty_seq_no_pos_metadata(self):
s = ""
seq = Sequence(s, metadata={'id': 'id10', 'description': 'dsc10'})
eseq = Sequence('', metadata={'id': 'id10', 'description': 'dsc10'})
tested = 0
for index in self.getitem_empty_indices:
tested += 1
self.assertEqual(seq[index], eseq)
self.assertEqual(tested, 6)
def test_getitem_with_empty_indices_non_empty_seq_no_pos_metadata(self):
s = "0123456789abcdef"
seq = Sequence(s, metadata={'id': 'id10', 'description': 'dsc10'})
eseq = Sequence('', metadata={'id': 'id10', 'description': 'dsc10'})
tested = 0
for index in self.getitem_empty_indices:
tested += 1
self.assertEqual(seq[index], eseq)
self.assertEqual(tested, 6)
def test_getitem_with_boolean_vector_has_qual(self):
s = "0123456789abcdef"
length = len(s)
seq = Sequence(s, metadata={'id': 'id11', 'description': 'dsc11'},
positional_metadata={'quality': np.arange(length)})
eseq = Sequence("13579bdf",
metadata={'id': 'id11', 'description': 'dsc11'},
positional_metadata={'quality': [1, 3, 5, 7, 9, 11,
13, 15]})
self.assertEqual(seq[np.array([False, True] * 8)], eseq)
self.assertEqual(seq[[False, True] * 8], eseq)
def test_getitem_with_boolean_vector_no_positional_metadata(self):
s = "0123456789abcdef"
seq = Sequence(s, metadata={'id': 'id11', 'description': 'dsc11'})
eseq = Sequence("13579bdf",
metadata={'id': 'id11', 'description': 'dsc11'})
self.assertEqual(seq[np.array([False, True] * 8)], eseq)
def test_getitem_with_invalid(self):
seq = Sequence("123456",
metadata={'id': 'idm', 'description': 'description'},
positional_metadata={'quality': [1, 2, 3, 4, 5, 6]})
with self.assertRaises(IndexError):
seq['not an index']
with self.assertRaises(IndexError):
seq[['1', '2']]
with self.assertRaises(IndexError):
seq[[1, slice(1, 2), 'a']]
with self.assertRaises(IndexError):
seq[[1, slice(1, 2), True]]
with self.assertRaises(IndexError):
seq[True]
with self.assertRaises(IndexError):
seq[np.array([True, False])]
with self.assertRaises(IndexError):
seq[999]
with self.assertRaises(IndexError):
seq[0, 0, 999]
# numpy 1.8.1 and 1.9.2 raise different error types
# (ValueError, IndexError).
with self.assertRaises(Exception):
seq[100 * [True, False, True]]
def test_getitem_handles_missing_metadata_efficiently(self):
# there are two paths in __getitem__ we need to test for efficient
# handling of missing metadata
# path 1: mixed types
seq = Sequence('ACGT')
subseq = seq[1, 2:4]
self.assertEqual(subseq, Sequence('CGT'))
# metadata attributes should be None and not initialized to a "missing"
# representation
self.assertIsNone(seq._metadata)
self.assertIsNone(seq._positional_metadata)
self.assertIsNone(subseq._metadata)
self.assertIsNone(subseq._positional_metadata)
# path 2: uniform types
seq = Sequence('ACGT')
subseq = seq[1:3]
self.assertEqual(subseq, Sequence('CG'))
self.assertIsNone(seq._metadata)
self.assertIsNone(seq._positional_metadata)
self.assertIsNone(subseq._metadata)
self.assertIsNone(subseq._positional_metadata)
def test_len(self):
self.assertEqual(len(Sequence("")), 0)
self.assertEqual(len(Sequence("a")), 1)
self.assertEqual(len(Sequence("abcdef")), 6)
def test_nonzero(self):
# blank
self.assertFalse(Sequence(""))
self.assertFalse(Sequence("",
metadata={'id': 'foo'},
positional_metadata={'quality': range(0)}))
# single
self.assertTrue(Sequence("A"))
self.assertTrue(Sequence("A",
metadata={'id': 'foo'},
positional_metadata={'quality': range(1)}))
# multi
self.assertTrue(Sequence("ACGT"))
self.assertTrue(Sequence("ACGT",
metadata={'id': 'foo'},
positional_metadata={'quality': range(4)}))
def test_contains(self):
seq = Sequence("#@ACGT,24.13**02")
tested = 0
for c in self.sequence_kinds:
tested += 1
self.assertTrue(c(',24') in seq)
self.assertTrue(c('*') in seq)
self.assertTrue(c('') in seq)
self.assertFalse(c("$") in seq)
self.assertFalse(c("AGT") in seq)
self.assertEqual(tested, 4)
def test_contains_sequence_subclass(self):
with self.assertRaises(TypeError):
SequenceSubclass("A") in Sequence("AAA")
self.assertTrue(SequenceSubclass("A").values in Sequence("AAA"))
def test_hash(self):
with self.assertRaises(TypeError):
hash(Sequence("ABCDEFG"))
self.assertNotIsInstance(Sequence("ABCDEFG"), Hashable)
def test_iter_has_positional_metadata(self):
tested = False
seq = Sequence("0123456789", metadata={'id': 'a', 'desc': 'b'},
positional_metadata={'qual': np.arange(10)})
for i, s in enumerate(seq):
tested = True
self.assertEqual(s, Sequence(str(i),
metadata={'id': 'a', 'desc': 'b'},
positional_metadata={'qual': [i]}))
self.assertTrue(tested)
def test_iter_no_positional_metadata(self):
tested = False
seq = Sequence("0123456789", metadata={'id': 'a', 'desc': 'b'})
for i, s in enumerate(seq):
tested = True
self.assertEqual(s, Sequence(str(i),
metadata={'id': 'a', 'desc': 'b'}))
self.assertTrue(tested)
def test_reversed_has_positional_metadata(self):
tested = False
seq = Sequence("0123456789", metadata={'id': 'a', 'desc': 'b'},
positional_metadata={'qual': np.arange(10)})
for i, s in enumerate(reversed(seq)):
tested = True
self.assertEqual(s, Sequence(str(9 - i),
metadata={'id': 'a', 'desc': 'b'},
positional_metadata={'qual':
[9 - i]}))
self.assertTrue(tested)
def test_reversed_no_positional_metadata(self):
tested = False
seq = Sequence("0123456789", metadata={'id': 'a', 'desc': 'b'})
for i, s in enumerate(reversed(seq)):
tested = True
self.assertEqual(s, Sequence(str(9 - i),
metadata={'id': 'a', 'desc': 'b'}))
self.assertTrue(tested)
def test_repr(self):
# basic sanity checks -- more extensive testing of formatting and
# special cases is performed in SequenceReprDoctests below. here we
# only test that pieces of the repr are present. these tests also
# exercise coverage for py2/3 since the doctests in
# SequenceReprDoctests only currently run in py2.
# minimal
obs = repr(Sequence(''))
self.assertEqual(obs.count('\n'), 4)
self.assertTrue(obs.startswith('Sequence'))
self.assertIn('length: 0', obs)
self.assertTrue(obs.endswith('-'))
# no metadata
obs = repr(Sequence('ACGT'))
self.assertEqual(obs.count('\n'), 5)
self.assertTrue(obs.startswith('Sequence'))
self.assertIn('length: 4', obs)
self.assertTrue(obs.endswith('0 ACGT'))
# metadata and positional metadata of mixed types
obs = repr(
Sequence(
'ACGT',
metadata={'foo': 'bar', u'bar': 33.33, None: True, False: {},
(1, 2): 3, 'acb' * 100: "'", 10: 11},
positional_metadata={'foo': range(4),
42: ['a', 'b', [], 'c']}))
self.assertEqual(obs.count('\n'), 16)
self.assertTrue(obs.startswith('Sequence'))
self.assertIn('None: True', obs)
self.assertIn('\'foo\': \'bar\'', obs)
self.assertIn('42: <dtype: object>', obs)
self.assertIn('\'foo\': <dtype: int64>', obs)
self.assertIn('length: 4', obs)
self.assertTrue(obs.endswith('0 ACGT'))
# sequence spanning > 5 lines
obs = repr(Sequence('A' * 301))
self.assertEqual(obs.count('\n'), 9)
self.assertTrue(obs.startswith('Sequence'))
self.assertIn('length: 301', obs)
self.assertIn('...', obs)
self.assertTrue(obs.endswith('300 A'))
def test_str(self):
self.assertEqual(str(Sequence("GATTACA")), "GATTACA")
self.assertEqual(str(Sequence("ACCGGTACC")), "ACCGGTACC")
self.assertEqual(str(Sequence("GREG")), "GREG")
self.assertEqual(
str(Sequence("ABC",
positional_metadata={'quality': [1, 2, 3]})),
"ABC")
self.assertIs(type(str(Sequence("A"))), str)
def test_to_default_behavior(self):
# minimal sequence, sequence with all optional attributes present, and
# a subclass of Sequence
for seq in (Sequence('ACGT'),
Sequence('ACGT', metadata={'id': 'foo', 'desc': 'bar'},
positional_metadata={'quality': range(4)}),
SequenceSubclass('ACGU', metadata={'id': 'rna seq'})):
to = seq._to()
self.assertEqual(seq, to)
self.assertIsNot(seq, to)
def test_to_update_single_attribute(self):
seq = Sequence('HE..--..LLO',
metadata={'id': 'hello', 'description': 'gapped hello'},
positional_metadata={'quality': range(11)})
to = seq._to(metadata={'id': 'new id'})
self.assertIsNot(seq, to)
self.assertNotEqual(seq, to)
self.assertEqual(
to,
Sequence('HE..--..LLO', metadata={'id': 'new id'},
positional_metadata={'quality': range(11)}))
# metadata shouldn't have changed on the original sequence
self.assertEqual(seq.metadata,
{'id': 'hello', 'description': 'gapped hello'})
def test_to_update_multiple_attributes(self):
seq = Sequence('HE..--..LLO',
metadata={'id': 'hello', 'description': 'gapped hello'},
positional_metadata={'quality': range(11)})
to = seq._to(metadata={'id': 'new id', 'description': 'new desc'},
positional_metadata={'quality': range(20, 25)},
sequence='ACGTA')
self.assertIsNot(seq, to)
self.assertNotEqual(seq, to)
# attributes should be what we specified in the _to call...
self.assertEqual(to.metadata['id'], 'new id')
npt.assert_array_equal(to.positional_metadata['quality'],
np.array([20, 21, 22, 23, 24]))
npt.assert_array_equal(to.values, np.array('ACGTA', dtype='c'))
self.assertEqual(to.metadata['description'], 'new desc')
# ...and shouldn't have changed on the original sequence
self.assertEqual(seq.metadata['id'], 'hello')
npt.assert_array_equal(seq.positional_metadata['quality'], range(11))
npt.assert_array_equal(seq.values, np.array('HE..--..LLO',
dtype='c'))
self.assertEqual(seq.metadata['description'], 'gapped hello')
def test_to_invalid_kwargs(self):
seq = Sequence('ACCGGTACC', metadata={'id': "test-seq",
'desc': "A test sequence"})
with self.assertRaises(TypeError):
seq._to(metadata={'id': 'bar'}, unrecognized_kwarg='baz')
def test_count(self):
def construct_char_array(s):
return np.fromstring(s, dtype='|S1')
def construct_uint8_array(s):
return np.fromstring(s, dtype=np.uint8)
seq = Sequence("1234567899876555")
tested = 0
for c in self.sequence_kinds:
tested += 1
self.assertEqual(seq.count(c('4')), 1)
self.assertEqual(seq.count(c('8')), 2)
self.assertEqual(seq.count(c('5')), 4)
self.assertEqual(seq.count(c('555')), 1)
self.assertEqual(seq.count(c('555'), 0, 4), 0)
self.assertEqual(seq.count(c('555'), start=0, end=4), 0)
self.assertEqual(seq.count(c('5'), start=10), 3)
self.assertEqual(seq.count(c('5'), end=10), 1)
with self.assertRaises(ValueError):
seq.count(c(''))
self.assertEqual(tested, 4)
def test_count_on_subclass(self):
with self.assertRaises(TypeError) as cm:
Sequence("abcd").count(SequenceSubclass("a"))
self.assertIn("Sequence", str(cm.exception))
self.assertIn("SequenceSubclass", str(cm.exception))
def test_lowercase_mungeable_key(self):
# NOTE: This test relies on Sequence._munge_to_index_array working
# properly. If the internal implementation of the lowercase method
# changes to no longer use _munge_to_index_array, this test may need
# to be updated to cover cases currently covered by
# _munge_to_index_array
self.assertEqual('AAAAaaaa', self.lowercase_seq.lowercase('key'))
def test_lowercase_array_key(self):
# NOTE: This test relies on Sequence._munge_to_index_array working
# properly. If the internal implementation of the lowercase method
# changes to no longer use _munge_to_index_array, this test may need
# to be updated to cover cases currently covered by
# _munge_to_index_array
self.assertEqual('aaAAaaaa',
self.lowercase_seq.lowercase(
np.array([True, True, False, False, True, True,
True, True])))
self.assertEqual('AaAAaAAA',
self.lowercase_seq.lowercase([1, 4]))
def test_distance(self):
tested = 0
for constructor in self.sequence_kinds:
tested += 1
seq1 = Sequence("abcdef")
seq2 = constructor("12bcef")
self.assertIsInstance(seq1.distance(seq1), float)
self.assertEqual(seq1.distance(seq2), 2.0/3.0)
self.assertEqual(tested, 4)
def test_distance_arbitrary_function(self):
def metric(x, y):
return len(x) ** 2 + len(y) ** 2
seq1 = Sequence("12345678")
seq2 = Sequence("1234")
result = seq1.distance(seq2, metric=metric)
self.assertIsInstance(result, float)
self.assertEqual(result, 80.0)
def test_distance_default_metric(self):
seq1 = Sequence("abcdef")
seq2 = Sequence("12bcef")
seq_wrong = Sequence("abcdefghijklmnop")
self.assertIsInstance(seq1.distance(seq1), float)
self.assertEqual(seq1.distance(seq1), 0.0)
self.assertEqual(seq1.distance(seq2), 2.0/3.0)
with self.assertRaises(ValueError):
seq1.distance(seq_wrong)
with self.assertRaises(ValueError):
seq_wrong.distance(seq1)
def test_distance_on_subclass(self):
seq1 = Sequence("abcdef")
seq2 = SequenceSubclass("12bcef")
with self.assertRaises(TypeError):
seq1.distance(seq2)
def test_matches(self):
tested = 0
for constructor in self.sequence_kinds:
tested += 1
seq1 = Sequence("AACCEEGG")
seq2 = constructor("ABCDEFGH")
expected = np.array([True, False] * 4)
npt.assert_equal(seq1.matches(seq2), expected)
self.assertEqual(tested, 4)
def test_matches_on_subclass(self):
seq1 = Sequence("AACCEEGG")
seq2 = SequenceSubclass("ABCDEFGH")
with self.assertRaises(TypeError):
seq1.matches(seq2)
def test_matches_unequal_length(self):
seq1 = Sequence("AACCEEGG")
seq2 = Sequence("TOOLONGTOCOMPARE")
with self.assertRaises(ValueError):
seq1.matches(seq2)
def test_mismatches(self):
tested = 0
for constructor in self.sequence_kinds:
tested += 1
seq1 = Sequence("AACCEEGG")
seq2 = constructor("ABCDEFGH")
expected = np.array([False, True] * 4)
npt.assert_equal(seq1.mismatches(seq2), expected)
self.assertEqual(tested, 4)
def test_mismatches_on_subclass(self):
seq1 = Sequence("AACCEEGG")
seq2 = SequenceSubclass("ABCDEFGH")
with self.assertRaises(TypeError):
seq1.mismatches(seq2)
def test_mismatches_unequal_length(self):
seq1 = Sequence("AACCEEGG")
seq2 = Sequence("TOOLONGTOCOMPARE")
with self.assertRaises(ValueError):
seq1.mismatches(seq2)
def test_mismatch_frequency(self):
seq1 = Sequence("AACCEEGG")
seq2 = Sequence("ABCDEFGH")
seq3 = Sequence("TTTTTTTT")
self.assertIs(type(seq1.mismatch_frequency(seq1)), int)
self.assertEqual(seq1.mismatch_frequency(seq1), 0)
self.assertEqual(seq1.mismatch_frequency(seq2), 4)
self.assertEqual(seq1.mismatch_frequency(seq3), 8)
def test_mismatch_frequency_relative(self):
seq1 = Sequence("AACCEEGG")
seq2 = Sequence("ABCDEFGH")
seq3 = Sequence("TTTTTTTT")
self.assertIs(type(seq1.mismatch_frequency(seq1, relative=True)),
float)
self.assertEqual(seq1.mismatch_frequency(seq1, relative=True), 0.0)
self.assertEqual(seq1.mismatch_frequency(seq2, relative=True), 0.5)
self.assertEqual(seq1.mismatch_frequency(seq3, relative=True), 1.0)
def test_mismatch_frequency_unequal_length(self):
seq1 = Sequence("AACCEEGG")
seq2 = Sequence("TOOLONGTOCOMPARE")
with self.assertRaises(ValueError):
seq1.mismatch_frequency(seq2)
    def test_mismatch_frequency_on_subclass(self):
seq1 = Sequence("AACCEEGG")
seq2 = SequenceSubclass("ABCDEFGH")
with self.assertRaises(TypeError):
seq1.mismatch_frequency(seq2)
def test_match_frequency(self):
seq1 = Sequence("AACCEEGG")
seq2 = Sequence("ABCDEFGH")
seq3 = Sequence("TTTTTTTT")
self.assertIs(type(seq1.match_frequency(seq1)), int)
self.assertEqual(seq1.match_frequency(seq1), 8)
self.assertEqual(seq1.match_frequency(seq2), 4)
self.assertEqual(seq1.match_frequency(seq3), 0)
def test_match_frequency_relative(self):
seq1 = Sequence("AACCEEGG")
seq2 = Sequence("ABCDEFGH")
seq3 = Sequence("TTTTTTTT")
self.assertIs(type(seq1.match_frequency(seq1, relative=True)),
float)
self.assertEqual(seq1.match_frequency(seq1, relative=True), 1.0)
self.assertEqual(seq1.match_frequency(seq2, relative=True), 0.5)
self.assertEqual(seq1.match_frequency(seq3, relative=True), 0.0)
def test_match_frequency_unequal_length(self):
seq1 = Sequence("AACCEEGG")
seq2 = Sequence("TOOLONGTOCOMPARE")
with self.assertRaises(ValueError):
seq1.match_frequency(seq2)
def test_match_frequency_on_subclass(self):
seq1 = Sequence("AACCEEGG")
seq2 = SequenceSubclass("ABCDEFGH")
with self.assertRaises(TypeError):
seq1.match_frequency(seq2)
def test_index(self):
tested = 0
for c in self.sequence_kinds:
tested += 1
seq = Sequence("ABCDEFG@@ABCDFOO")
self.assertEqual(seq.index(c("A")), 0)
self.assertEqual(seq.index(c("@")), 7)
self.assertEqual(seq.index(c("@@")), 7)
with self.assertRaises(ValueError):
seq.index("A", start=1, end=5)
self.assertEqual(tested, 4)
def test_index_on_subclass(self):
with self.assertRaises(TypeError):
Sequence("ABCDEFG").index(SequenceSubclass("A"))
self.assertEqual(
SequenceSubclass("ABCDEFG").index(SequenceSubclass("A")), 0)
def test_frequencies_empty_sequence(self):
seq = Sequence('')
self.assertEqual(seq.frequencies(), {})
self.assertEqual(seq.frequencies(relative=True), {})
self.assertEqual(seq.frequencies(chars=set()), {})
self.assertEqual(seq.frequencies(chars=set(), relative=True), {})
self.assertEqual(seq.frequencies(chars={'a', 'b'}), {'a': 0, 'b': 0})
# use npt.assert_equal to explicitly handle nan comparisons
npt.assert_equal(seq.frequencies(chars={'a', 'b'}, relative=True),
{'a': np.nan, 'b': np.nan})
def test_frequencies_observed_chars(self):
seq = Sequence('x')
self.assertEqual(seq.frequencies(), {'x': 1})
self.assertEqual(seq.frequencies(relative=True), {'x': 1.0})
seq = Sequence('xYz')
self.assertEqual(seq.frequencies(), {'x': 1, 'Y': 1, 'z': 1})
self.assertEqual(seq.frequencies(relative=True),
{'x': 1/3, 'Y': 1/3, 'z': 1/3})
seq = Sequence('zzz')
self.assertEqual(seq.frequencies(), {'z': 3})
self.assertEqual(seq.frequencies(relative=True), {'z': 1.0})
seq = Sequence('xYzxxZz')
self.assertEqual(seq.frequencies(), {'x': 3, 'Y': 1, 'Z': 1, 'z': 2})
self.assertEqual(seq.frequencies(relative=True),
{'x': 3/7, 'Y': 1/7, 'Z': 1/7, 'z': 2/7})
seq = Sequence('\t ')
self.assertEqual(seq.frequencies(), {'\t': 1, ' ': 3})
self.assertEqual(seq.frequencies(relative=True), {'\t': 1/4, ' ': 3/4})
seq = Sequence('aabbcc', metadata={'foo': 'bar'},
positional_metadata={'foo': range(6)})
self.assertEqual(seq.frequencies(), {'a': 2, 'b': 2, 'c': 2})
self.assertEqual(seq.frequencies(relative=True),
{'a': 2/6, 'b': 2/6, 'c': 2/6})
def test_frequencies_specified_chars(self):
seq = Sequence('abcbca')
self.assertEqual(seq.frequencies(chars=set()), {})
self.assertEqual(seq.frequencies(chars=set(), relative=True), {})
self.assertEqual(seq.frequencies(chars='a'), {'a': 2})
self.assertEqual(seq.frequencies(chars='a', relative=True), {'a': 2/6})
self.assertEqual(seq.frequencies(chars={'a'}), {'a': 2})
self.assertEqual(seq.frequencies(chars={'a'}, relative=True),
{'a': 2/6})
self.assertEqual(seq.frequencies(chars={'a', 'b'}), {'a': 2, 'b': 2})
self.assertEqual(seq.frequencies(chars={'a', 'b'}, relative=True),
{'a': 2/6, 'b': 2/6})
self.assertEqual(seq.frequencies(chars={'a', 'b', 'd'}),
{'a': 2, 'b': 2, 'd': 0})
self.assertEqual(seq.frequencies(chars={'a', 'b', 'd'}, relative=True),
{'a': 2/6, 'b': 2/6, 'd': 0.0})
self.assertEqual(seq.frequencies(chars={'x', 'y', 'z'}),
{'x': 0, 'y': 0, 'z': 0})
self.assertEqual(seq.frequencies(chars={'x', 'y', 'z'}, relative=True),
{'x': 0.0, 'y': 0.0, 'z': 0.0})
def test_frequencies_chars_varied_type(self):
seq = Sequence('zabczzzabcz')
# single character case (shortcut)
chars = b'z'
self.assertEqual(seq.frequencies(chars=chars), {b'z': 5})
self.assertEqual(seq.frequencies(chars=chars, relative=True),
{b'z': 5/11})
chars = u'z'
self.assertEqual(seq.frequencies(chars=chars), {u'z': 5})
self.assertEqual(seq.frequencies(chars=chars, relative=True),
{u'z': 5/11})
chars = np.fromstring('z', dtype='|S1')[0]
self.assertEqual(seq.frequencies(chars=chars), {b'z': 5})
self.assertEqual(seq.frequencies(chars=chars, relative=True),
{b'z': 5/11})
# set of characters, some present, some not
chars = {b'x', b'z'}
self.assertEqual(seq.frequencies(chars=chars), {b'x': 0, b'z': 5})
self.assertEqual(seq.frequencies(chars=chars, relative=True),
{b'x': 0.0, b'z': 5/11})
chars = {u'x', u'z'}
self.assertEqual(seq.frequencies(chars=chars), {u'x': 0, u'z': 5})
self.assertEqual(seq.frequencies(chars=chars, relative=True),
{u'x': 0.0, u'z': 5/11})
chars = {
np.fromstring('x', dtype='|S1')[0],
np.fromstring('z', dtype='|S1')[0]
}
self.assertEqual(seq.frequencies(chars=chars), {b'x': 0, b'z': 5})
self.assertEqual(seq.frequencies(chars=chars, relative=True),
{b'x': 0.0, b'z': 5/11})
def test_frequencies_equivalent_to_kmer_frequencies_k_of_1(self):
seq = Sequence('abcabc')
exp = {'a': 2, 'b': 2, 'c': 2}
self.assertEqual(seq.frequencies(chars=None), exp)
self.assertEqual(seq.kmer_frequencies(k=1), exp)
exp = {'a': 2/6, 'b': 2/6, 'c': 2/6}
self.assertEqual(seq.frequencies(chars=None, relative=True), exp)
self.assertEqual(seq.kmer_frequencies(k=1, relative=True), exp)
def test_frequencies_passing_observed_chars_equivalent_to_default(self):
seq = Sequence('abcabc')
exp = {'a': 2, 'b': 2, 'c': 2}
self.assertEqual(seq.frequencies(chars=None), exp)
self.assertEqual(seq.frequencies(chars=seq.observed_chars), exp)
exp = {'a': 2/6, 'b': 2/6, 'c': 2/6}
self.assertEqual(seq.frequencies(chars=None, relative=True), exp)
self.assertEqual(
seq.frequencies(chars=seq.observed_chars, relative=True),
exp)
def test_frequencies_invalid_chars(self):
seq = Sequence('abcabc')
with six.assertRaisesRegex(self, ValueError, '0 characters'):
seq.frequencies(chars='')
with six.assertRaisesRegex(self, ValueError, '0 characters'):
seq.frequencies(chars={''})
with six.assertRaisesRegex(self, ValueError, '2 characters'):
seq.frequencies(chars='ab')
with six.assertRaisesRegex(self, ValueError, '2 characters'):
seq.frequencies(chars={'b', 'ab'})
with six.assertRaisesRegex(self, TypeError, 'string.*NoneType'):
seq.frequencies(chars={'a', None})
with six.assertRaisesRegex(self, ValueError, 'outside the range'):
seq.frequencies(chars=u'\u1F30')
with six.assertRaisesRegex(self, ValueError, 'outside the range'):
seq.frequencies(chars={'c', u'\u1F30'})
with six.assertRaisesRegex(self, TypeError, 'set.*int'):
seq.frequencies(chars=42)
def _compare_kmers_results(self, observed, expected):
for obs, exp in zip_longest(observed, expected, fillvalue=None):
self.assertEqual(obs, exp)
def test_iter_kmers(self):
seq = Sequence('GATTACA', positional_metadata={'quality': range(7)})
expected = [
Sequence('G', positional_metadata={'quality': [0]}),
Sequence('A', positional_metadata={'quality': [1]}),
Sequence('T', positional_metadata={'quality': [2]}),
Sequence('T', positional_metadata={'quality': [3]}),
Sequence('A', positional_metadata={'quality': [4]}),
Sequence('C', positional_metadata={'quality': [5]}),
Sequence('A', positional_metadata={'quality': [6]})
]
self._compare_kmers_results(
seq.iter_kmers(1, overlap=False), expected)
expected = [
Sequence('GA', positional_metadata={'quality': [0, 1]}),
Sequence('TT', positional_metadata={'quality': [2, 3]}),
Sequence('AC', positional_metadata={'quality': [4, 5]})
]
self._compare_kmers_results(
seq.iter_kmers(2, overlap=False), expected)
expected = [
Sequence('GAT', positional_metadata={'quality': [0, 1, 2]}),
Sequence('TAC', positional_metadata={'quality': [3, 4, 5]})
]
self._compare_kmers_results(
seq.iter_kmers(3, overlap=False), expected)
expected = [
Sequence('GATTACA',
positional_metadata={'quality': [0, 1, 2, 3, 4, 5, 6]})
]
self._compare_kmers_results(
seq.iter_kmers(7, overlap=False), expected)
expected = []
self._compare_kmers_results(
seq.iter_kmers(8, overlap=False), expected)
self.assertIs(type(seq.iter_kmers(1)), GeneratorType)
def test_iter_kmers_no_positional_metadata(self):
seq = Sequence('GATTACA')
expected = [
Sequence('G'),
Sequence('A'),
Sequence('T'),
Sequence('T'),
Sequence('A'),
Sequence('C'),
Sequence('A')
]
self._compare_kmers_results(
seq.iter_kmers(1, overlap=False), expected)
expected = [
Sequence('GA'),
Sequence('TT'),
Sequence('AC')
]
self._compare_kmers_results(
seq.iter_kmers(2, overlap=False), expected)
expected = [
Sequence('GAT'),
Sequence('TAC')
]
self._compare_kmers_results(
seq.iter_kmers(3, overlap=False), expected)
expected = [
Sequence('GATTACA')
]
self._compare_kmers_results(
seq.iter_kmers(7, overlap=False), expected)
expected = []
self._compare_kmers_results(
seq.iter_kmers(8, overlap=False), expected)
self.assertIs(type(seq.iter_kmers(1)), GeneratorType)
def test_iter_kmers_with_overlap(self):
seq = Sequence('GATTACA', positional_metadata={'quality': range(7)})
expected = [
Sequence('G', positional_metadata={'quality': [0]}),
Sequence('A', positional_metadata={'quality': [1]}),
Sequence('T', positional_metadata={'quality': [2]}),
Sequence('T', positional_metadata={'quality': [3]}),
Sequence('A', positional_metadata={'quality': [4]}),
Sequence('C', positional_metadata={'quality': [5]}),
Sequence('A', positional_metadata={'quality': [6]})
]
self._compare_kmers_results(
seq.iter_kmers(1, overlap=True), expected)
expected = [
Sequence('GA', positional_metadata={'quality': [0, 1]}),
Sequence('AT', positional_metadata={'quality': [1, 2]}),
Sequence('TT', positional_metadata={'quality': [2, 3]}),
Sequence('TA', positional_metadata={'quality': [3, 4]}),
Sequence('AC', positional_metadata={'quality': [4, 5]}),
Sequence('CA', positional_metadata={'quality': [5, 6]})
]
self._compare_kmers_results(
seq.iter_kmers(2, overlap=True), expected)
expected = [
Sequence('GAT', positional_metadata={'quality': [0, 1, 2]}),
Sequence('ATT', positional_metadata={'quality': [1, 2, 3]}),
Sequence('TTA', positional_metadata={'quality': [2, 3, 4]}),
Sequence('TAC', positional_metadata={'quality': [3, 4, 5]}),
Sequence('ACA', positional_metadata={'quality': [4, 5, 6]})
]
self._compare_kmers_results(
seq.iter_kmers(3, overlap=True), expected)
expected = [
Sequence('GATTACA',
positional_metadata={'quality': [0, 1, 2, 3, 4, 5, 6]})
]
self._compare_kmers_results(
seq.iter_kmers(7, overlap=True), expected)
expected = []
self._compare_kmers_results(
seq.iter_kmers(8, overlap=True), expected)
self.assertIs(type(seq.iter_kmers(1)), GeneratorType)
def test_iter_kmers_with_overlap_no_positional_metadata(self):
seq = Sequence('GATTACA')
expected = [
Sequence('G'),
Sequence('A'),
Sequence('T'),
Sequence('T'),
Sequence('A'),
Sequence('C'),
Sequence('A')
]
self._compare_kmers_results(
seq.iter_kmers(1, overlap=True), expected)
expected = [
Sequence('GA'),
Sequence('AT'),
Sequence('TT'),
Sequence('TA'),
Sequence('AC'),
Sequence('CA')
]
self._compare_kmers_results(
seq.iter_kmers(2, overlap=True), expected)
expected = [
Sequence('GAT'),
Sequence('ATT'),
Sequence('TTA'),
Sequence('TAC'),
Sequence('ACA')
]
self._compare_kmers_results(
seq.iter_kmers(3, overlap=True), expected)
expected = [
Sequence('GATTACA')
]
self._compare_kmers_results(
seq.iter_kmers(7, overlap=True), expected)
expected = []
self._compare_kmers_results(
seq.iter_kmers(8, overlap=True), expected)
self.assertIs(type(seq.iter_kmers(1)), GeneratorType)
def test_iter_kmers_invalid_k(self):
seq = Sequence('GATTACA', positional_metadata={'quality': range(7)})
with self.assertRaises(ValueError):
list(seq.iter_kmers(0))
with self.assertRaises(ValueError):
list(seq.iter_kmers(-42))
def test_iter_kmers_invalid_k_no_positional_metadata(self):
seq = Sequence('GATTACA')
with self.assertRaises(ValueError):
list(seq.iter_kmers(0))
with self.assertRaises(ValueError):
list(seq.iter_kmers(-42))
def test_iter_kmers_different_sequences(self):
seq = Sequence('HE..--..LLO',
metadata={'id': 'hello', 'desc': 'gapped hello'},
positional_metadata={'quality': range(11)})
expected = [
Sequence('HE.', positional_metadata={'quality': [0, 1, 2]},
metadata={'id': 'hello', 'desc': 'gapped hello'}),
Sequence('.--', positional_metadata={'quality': [3, 4, 5]},
metadata={'id': 'hello', 'desc': 'gapped hello'}),
Sequence('..L', positional_metadata={'quality': [6, 7, 8]},
metadata={'id': 'hello', 'desc': 'gapped hello'})
]
self._compare_kmers_results(seq.iter_kmers(3, overlap=False), expected)
def test_iter_kmers_different_sequences_no_positional_metadata(self):
seq = Sequence('HE..--..LLO',
metadata={'id': 'hello', 'desc': 'gapped hello'})
expected = [
Sequence('HE.',
metadata={'id': 'hello', 'desc': 'gapped hello'}),
Sequence('.--',
metadata={'id': 'hello', 'desc': 'gapped hello'}),
Sequence('..L',
metadata={'id': 'hello', 'desc': 'gapped hello'})
]
self._compare_kmers_results(seq.iter_kmers(3, overlap=False), expected)
def test_kmer_frequencies_empty_sequence(self):
seq = Sequence('')
self.assertEqual(seq.kmer_frequencies(1), {})
self.assertEqual(seq.kmer_frequencies(1, overlap=False), {})
self.assertEqual(seq.kmer_frequencies(1, relative=True), {})
self.assertEqual(seq.kmer_frequencies(1, relative=True, overlap=False),
{})
def test_kmer_frequencies(self):
seq = Sequence('GATTACA', positional_metadata={'quality': range(7)})
# overlap = True
expected = {'G': 1, 'A': 3, 'T': 2, 'C': 1}
self.assertEqual(seq.kmer_frequencies(1, overlap=True), expected)
expected = {'GAT': 1, 'ATT': 1, 'TTA': 1, 'TAC': 1, 'ACA': 1}
self.assertEqual(seq.kmer_frequencies(3, overlap=True), expected)
expected = {}
self.assertEqual(seq.kmer_frequencies(8, overlap=True), expected)
# overlap = False
expected = {'GAT': 1, 'TAC': 1}
self.assertEqual(seq.kmer_frequencies(3, overlap=False), expected)
expected = {'GATTACA': 1}
self.assertEqual(seq.kmer_frequencies(7, overlap=False), expected)
expected = {}
self.assertEqual(seq.kmer_frequencies(8, overlap=False), expected)
def test_kmer_frequencies_relative(self):
seq = Sequence('GATTACA', positional_metadata={'quality': range(7)})
# overlap = True
expected = {'A': 3/7, 'C': 1/7, 'G': 1/7, 'T': 2/7}
self.assertEqual(seq.kmer_frequencies(1, overlap=True, relative=True),
expected)
expected = {'GAT': 1/5, 'ATT': 1/5, 'TTA': 1/5, 'TAC': 1/5, 'ACA': 1/5}
self.assertEqual(seq.kmer_frequencies(3, overlap=True, relative=True),
expected)
expected = {}
self.assertEqual(seq.kmer_frequencies(8, overlap=True, relative=True),
expected)
# overlap = False
expected = {'GAT': 1/2, 'TAC': 1/2}
self.assertEqual(seq.kmer_frequencies(3, overlap=False, relative=True),
expected)
expected = {'GATTACA': 1.0}
self.assertEqual(seq.kmer_frequencies(7, overlap=False, relative=True),
expected)
expected = {}
self.assertEqual(seq.kmer_frequencies(8, overlap=False, relative=True),
expected)
def test_kmer_frequencies_floating_point_precision(self):
# Test that a sequence having no variation in k-words yields a
# frequency of exactly 1.0. Note that it is important to use
# self.assertEqual here instead of self.assertAlmostEqual because we
# want to test for exactly 1.0. A previous implementation of
# Sequence.kmer_frequencies(relative=True) added (1 / num_words) for
# each occurrence of a k-word to compute the frequencies (see
# https://github.com/biocore/scikit-bio/issues/801). In certain cases,
# this yielded a frequency slightly less than 1.0 due to roundoff
# error. The test case here uses a sequence with 10 characters that are
# all identical and computes k-word frequencies with k=1. This test
# case exposes the roundoff error present in the previous
# implementation because there are 10 k-words (which are all
# identical), so 1/10 added 10 times yields a number slightly less than
# 1.0. This occurs because 1/10 cannot be represented exactly as a
# floating point number.
seq = Sequence('AAAAAAAAAA')
self.assertEqual(seq.kmer_frequencies(1, relative=True), {'A': 1.0})
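        # Added illustration (not part of the original test): the roundoff
        # failure mode described above is easy to reproduce directly.
        # Accumulating 0.1 ten times in IEEE-754 doubles drifts just below
        # 1.0, while a single division of the count by the number of
        # k-words stays exact.
        accumulated = sum(0.1 for _ in range(10))
        self.assertNotEqual(accumulated, 1.0)
        self.assertEqual(10 / 10, 1.0)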
def test_find_with_regex(self):
seq = Sequence('GATTACA', positional_metadata={'quality': range(7)})
pat = re.compile('(T+A)(CA)')
obs = list(seq.find_with_regex(pat))
exp = [slice(2, 5), slice(5, 7)]
self.assertEqual(obs, exp)
self.assertIs(type(seq.find_with_regex(pat)), GeneratorType)
def test_find_with_regex_string_as_input(self):
seq = Sequence('GATTACA', positional_metadata={'quality': range(7)})
pat = '(T+A)(CA)'
obs = list(seq.find_with_regex(pat))
exp = [slice(2, 5), slice(5, 7)]
self.assertEqual(obs, exp)
self.assertIs(type(seq.find_with_regex(pat)), GeneratorType)
def test_find_with_regex_no_groups(self):
seq = Sequence('GATTACA', positional_metadata={'quality': range(7)})
pat = re.compile('(FOO)')
self.assertEqual(list(seq.find_with_regex(pat)), [])
def test_find_with_regex_ignore_no_difference(self):
seq = Sequence('..ABCDEFG..')
pat = "([A-Z]+)"
exp = [slice(2, 9)]
self.assertEqual(list(seq.find_with_regex(pat)), exp)
obs = seq.find_with_regex(
pat, ignore=np.array([1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1],
dtype=bool))
self.assertEqual(list(obs), exp)
def test_find_with_regex_ignore(self):
obs = Sequence('A..A..BBAAB.A..AB..A.').find_with_regex(
"(A+)", ignore=np.array([0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1,
1, 0, 0, 1, 1, 0, 1], dtype=bool))
self.assertEqual(list(obs), [slice(0, 4), slice(8, 10), slice(12, 16),
slice(19, 20)])
def test_find_with_regex_ignore_index_array(self):
obs = Sequence('A..A..BBAAB.A..AB..A.').find_with_regex(
"(A+)", ignore=np.array([1, 2, 4, 5, 11, 13, 14, 17, 18, 20]))
self.assertEqual(list(obs), [slice(0, 4), slice(8, 10), slice(12, 16),
slice(19, 20)])
def test_iter_contiguous_index_array(self):
s = Sequence("0123456789abcdef")
for c in list, tuple, np.array, pd.Series:
exp = [Sequence("0123"), Sequence("89ab")]
obs = s.iter_contiguous(c([0, 1, 2, 3, 8, 9, 10, 11]))
self.assertEqual(list(obs), exp)
def test_iter_contiguous_boolean_vector(self):
s = Sequence("0123456789abcdef")
for c in list, tuple, np.array, pd.Series:
exp = [Sequence("0123"), Sequence("89ab")]
obs = s.iter_contiguous(c(([True] * 4 + [False] * 4) * 2))
self.assertEqual(list(obs), exp)
def test_iter_contiguous_iterable_slices(self):
def spaced_out():
yield slice(0, 4)
yield slice(8, 12)
def contiguous():
yield slice(0, 4)
yield slice(4, 8)
yield slice(12, 16)
s = Sequence("0123456789abcdef")
for c in (lambda x: x, list, tuple, lambda x: np.array(tuple(x)),
lambda x: pd.Series(tuple(x))):
exp = [Sequence("0123"), Sequence("89ab")]
obs = s.iter_contiguous(c(spaced_out()))
self.assertEqual(list(obs), exp)
exp = [Sequence("01234567"), Sequence("cdef")]
obs = s.iter_contiguous(c(contiguous()))
self.assertEqual(list(obs), exp)
def test_iter_contiguous_with_max_length(self):
s = Sequence("0123456789abcdef")
for c in list, tuple, np.array, pd.Series:
exp = [Sequence("234"), Sequence("678"), Sequence("abc")]
obs = s.iter_contiguous(c([True, False, True, True] * 4),
min_length=3)
self.assertEqual(list(obs), exp)
exp = [Sequence("0"), Sequence("234"), Sequence("678"),
Sequence("abc"), Sequence("ef")]
obs1 = list(s.iter_contiguous(c([True, False, True, True] * 4),
min_length=1))
obs2 = list(s.iter_contiguous(c([True, False, True, True] * 4)))
self.assertEqual(obs1, obs2)
self.assertEqual(obs1, exp)
def test_iter_contiguous_with_invert(self):
def spaced_out():
yield slice(0, 4)
yield slice(8, 12)
def contiguous():
yield slice(0, 4)
yield slice(4, 8)
yield slice(12, 16)
s = Sequence("0123456789abcdef")
for c in (lambda x: x, list, tuple, lambda x: np.array(tuple(x)),
lambda x: pd.Series(tuple(x))):
exp = [Sequence("4567"), Sequence("cdef")]
obs = s.iter_contiguous(c(spaced_out()), invert=True)
self.assertEqual(list(obs), exp)
exp = [Sequence("89ab")]
obs = s.iter_contiguous(c(contiguous()), invert=True)
self.assertEqual(list(obs), exp)
def test_has_metadata(self):
# truly missing
seq = Sequence('ACGT')
self.assertFalse(seq.has_metadata())
# metadata attribute should be None and not initialized to a "missing"
# representation
self.assertIsNone(seq._metadata)
# looks empty
seq = Sequence('ACGT', metadata={})
self.assertFalse(seq.has_metadata())
# metadata is present
seq = Sequence('ACGT', metadata={'foo': 42})
self.assertTrue(seq.has_metadata())
def test_has_positional_metadata(self):
# truly missing
seq = Sequence('ACGT')
self.assertFalse(seq.has_positional_metadata())
# positional metadata attribute should be None and not initialized to a
# "missing" representation
self.assertIsNone(seq._positional_metadata)
# looks empty
seq = Sequence('ACGT',
positional_metadata=pd.DataFrame(index=np.arange(4)))
self.assertFalse(seq.has_positional_metadata())
# positional metadata is present
seq = Sequence('ACGT', positional_metadata={'foo': [1, 2, 3, 4]})
self.assertTrue(seq.has_positional_metadata())
def test_copy_without_metadata(self):
# shallow vs deep copy with sequence only should be equivalent. thus,
# copy.copy, copy.deepcopy, and Sequence.copy(deep=True|False) should
# all be equivalent
for copy_method in (lambda seq: seq.copy(deep=False),
lambda seq: seq.copy(deep=True),
copy.copy, copy.deepcopy):
seq = Sequence('ACGT')
seq_copy = copy_method(seq)
self.assertEqual(seq_copy, seq)
self.assertIsNot(seq_copy, seq)
self.assertIsNot(seq_copy._bytes, seq._bytes)
# metadata attributes should be None and not initialized to a
# "missing" representation
self.assertIsNone(seq._metadata)
self.assertIsNone(seq._positional_metadata)
self.assertIsNone(seq_copy._metadata)
self.assertIsNone(seq_copy._positional_metadata)
def test_copy_with_metadata_shallow(self):
# copy.copy and Sequence.copy should behave identically
for copy_method in lambda seq: seq.copy(), copy.copy:
seq = Sequence('ACGT', metadata={'foo': [1]},
positional_metadata={'bar': [[], [], [], []],
'baz': [42, 42, 42, 42]})
seq_copy = copy_method(seq)
self.assertEqual(seq_copy, seq)
self.assertIsNot(seq_copy, seq)
self.assertIsNot(seq_copy._bytes, seq._bytes)
self.assertIsNot(seq_copy._metadata, seq._metadata)
self.assertIsNot(seq_copy._positional_metadata,
seq._positional_metadata)
self.assertIsNot(seq_copy._positional_metadata.values,
seq._positional_metadata.values)
self.assertIs(seq_copy._metadata['foo'], seq._metadata['foo'])
self.assertIs(seq_copy._positional_metadata.loc[0, 'bar'],
seq._positional_metadata.loc[0, 'bar'])
seq_copy.metadata['foo'].append(2)
seq_copy.metadata['foo2'] = 42
self.assertEqual(seq_copy.metadata, {'foo': [1, 2], 'foo2': 42})
self.assertEqual(seq.metadata, {'foo': [1, 2]})
seq_copy.positional_metadata.loc[0, 'bar'].append(1)
seq_copy.positional_metadata.loc[0, 'baz'] = 43
assert_data_frame_almost_equal(
seq_copy.positional_metadata,
pd.DataFrame({'bar': [[1], [], [], []],
'baz': [43, 42, 42, 42]}))
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'bar': [[1], [], [], []],
'baz': [42, 42, 42, 42]}))
def test_copy_with_metadata_deep(self):
# copy.deepcopy and Sequence.copy(deep=True) should behave identically
for copy_method in lambda seq: seq.copy(deep=True), copy.deepcopy:
seq = Sequence('ACGT', metadata={'foo': [1]},
positional_metadata={'bar': [[], [], [], []],
'baz': [42, 42, 42, 42]})
seq_copy = copy_method(seq)
self.assertEqual(seq_copy, seq)
self.assertIsNot(seq_copy, seq)
self.assertIsNot(seq_copy._bytes, seq._bytes)
self.assertIsNot(seq_copy._metadata, seq._metadata)
self.assertIsNot(seq_copy._positional_metadata,
seq._positional_metadata)
self.assertIsNot(seq_copy._positional_metadata.values,
seq._positional_metadata.values)
self.assertIsNot(seq_copy._metadata['foo'], seq._metadata['foo'])
self.assertIsNot(seq_copy._positional_metadata.loc[0, 'bar'],
seq._positional_metadata.loc[0, 'bar'])
seq_copy.metadata['foo'].append(2)
seq_copy.metadata['foo2'] = 42
self.assertEqual(seq_copy.metadata, {'foo': [1, 2], 'foo2': 42})
self.assertEqual(seq.metadata, {'foo': [1]})
seq_copy.positional_metadata.loc[0, 'bar'].append(1)
seq_copy.positional_metadata.loc[0, 'baz'] = 43
assert_data_frame_almost_equal(
seq_copy.positional_metadata,
pd.DataFrame({'bar': [[1], [], [], []],
'baz': [43, 42, 42, 42]}))
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'bar': [[], [], [], []],
'baz': [42, 42, 42, 42]}))
def test_deepcopy_memo_is_respected(self):
# basic test to ensure deepcopy's memo is passed through to recursive
# deepcopy calls
seq = Sequence('ACGT', metadata={'foo': 'bar'})
memo = {}
copy.deepcopy(seq, memo)
self.assertGreater(len(memo), 2)
def test_munge_to_index_array_valid_index_array(self):
s = Sequence('123456')
for c in list, tuple, np.array, pd.Series:
exp = np.array([1, 2, 3], dtype=int)
obs = s._munge_to_index_array(c([1, 2, 3]))
npt.assert_equal(obs, exp)
exp = np.array([1, 3, 5], dtype=int)
obs = s._munge_to_index_array(c([1, 3, 5]))
npt.assert_equal(obs, exp)
def test_munge_to_index_array_invalid_index_array(self):
s = Sequence("12345678")
for c in list, tuple, np.array, pd.Series:
with self.assertRaises(ValueError):
s._munge_to_index_array(c([3, 2, 1]))
with self.assertRaises(ValueError):
s._munge_to_index_array(c([5, 6, 7, 2]))
with self.assertRaises(ValueError):
s._munge_to_index_array(c([0, 1, 2, 1]))
def test_munge_to_index_array_valid_bool_array(self):
s = Sequence('123456')
for c in list, tuple, np.array, pd.Series:
exp = np.array([2, 3, 5], dtype=int)
obs = s._munge_to_index_array(
c([False, False, True, True, False, True]))
npt.assert_equal(obs, exp)
exp = np.array([], dtype=int)
obs = s._munge_to_index_array(
c([False] * 6))
npt.assert_equal(obs, exp)
exp = np.arange(6)
obs = s._munge_to_index_array(
c([True] * 6))
npt.assert_equal(obs, exp)
def test_munge_to_index_array_invalid_bool_array(self):
s = Sequence('123456')
for c in (list, tuple, lambda x: np.array(x, dtype=bool),
lambda x: pd.Series(x, dtype=bool)):
with self.assertRaises(ValueError):
s._munge_to_index_array(c([]))
with self.assertRaises(ValueError):
s._munge_to_index_array(c([True]))
with self.assertRaises(ValueError):
s._munge_to_index_array(c([True] * 10))
def test_munge_to_index_array_valid_iterable(self):
s = Sequence('')
def slices_only():
return (slice(i, i+1) for i in range(0, 10, 2))
def mixed():
return (slice(i, i+1) if i % 2 == 0 else i for i in range(10))
def unthinkable():
for i in range(10):
if i % 3 == 0:
yield slice(i, i+1)
elif i % 3 == 1:
yield i
else:
yield np.array([i], dtype=int)
for c in (lambda x: x, list, tuple, lambda x: np.array(tuple(x)),
lambda x: pd.Series(tuple(x))):
exp = np.arange(10, dtype=int)
obs = s._munge_to_index_array(c(mixed()))
npt.assert_equal(obs, exp)
exp = np.arange(10, dtype=int)
obs = s._munge_to_index_array(c(unthinkable()))
npt.assert_equal(obs, exp)
exp = np.arange(10, step=2, dtype=int)
obs = s._munge_to_index_array(c(slices_only()))
npt.assert_equal(obs, exp)
def test_munge_to_index_array_invalid_iterable(self):
s = Sequence('')
def bad1():
yield "r"
yield [1, 2, 3]
def bad2():
yield 1
yield 'str'
def bad3():
yield False
yield True
yield 2
def bad4():
yield np.array([False, True])
yield slice(2, 5)
for c in (lambda x: x, list, tuple, lambda x: np.array(tuple(x)),
lambda x: pd.Series(tuple(x))):
with self.assertRaises(TypeError):
s._munge_to_index_array(bad1())
with self.assertRaises(TypeError):
s._munge_to_index_array(bad2())
with self.assertRaises(TypeError):
s._munge_to_index_array(bad3())
with self.assertRaises(TypeError):
s._munge_to_index_array(bad4())
def test_munge_to_index_array_valid_string(self):
seq = Sequence('ACGTACGT',
positional_metadata={'introns': [False, True, True,
False, False, True,
False, False]})
npt.assert_equal(np.array([1, 2, 5]),
seq._munge_to_index_array('introns'))
seq.positional_metadata['exons'] = ~seq.positional_metadata['introns']
npt.assert_equal(np.array([0, 3, 4, 6, 7]),
seq._munge_to_index_array('exons'))
def test_munge_to_index_array_invalid_string(self):
seq_str = 'ACGT'
seq = Sequence(seq_str,
positional_metadata={'quality': range(len(seq_str))})
with six.assertRaisesRegex(self, ValueError,
"No positional metadata associated with "
"key 'introns'"):
seq._munge_to_index_array('introns')
with six.assertRaisesRegex(self, TypeError,
"Column 'quality' in positional metadata "
"does not correspond to a boolean "
"vector"):
seq._munge_to_index_array('quality')
def test_munge_to_bytestring_return_bytes(self):
seq = Sequence('')
m = 'dummy_method'
str_inputs = ('', 'a', 'acgt')
unicode_inputs = (u'', u'a', u'acgt')
byte_inputs = (b'', b'a', b'acgt')
seq_inputs = (Sequence(''), Sequence('a'), Sequence('acgt'))
all_inputs = str_inputs + unicode_inputs + byte_inputs + seq_inputs
all_expected = [b'', b'a', b'acgt'] * 4
for input_, expected in zip(all_inputs, all_expected):
observed = seq._munge_to_bytestring(input_, m)
self.assertEqual(observed, expected)
self.assertIs(type(observed), bytes)
def test_munge_to_bytestring_unicode_out_of_ascii_range(self):
seq = Sequence('')
all_inputs = (u'\x80', u'abc\x80', u'\x80abc')
for input_ in all_inputs:
with six.assertRaisesRegex(self, UnicodeEncodeError,
"'ascii' codec can't encode character"
".*in position.*: ordinal not in"
" range\(128\)"):
seq._munge_to_bytestring(input_, 'dummy_method')
# NOTE: this must be a *separate* class for doctests only (no unit tests). nose
# will not run the unit tests otherwise
#
# these doctests exercise the correct formatting of Sequence's repr in a
# variety of situations. they are more extensive than the unit tests above
# (TestSequence.test_repr) but are only currently run in py2. thus, they cannot
# be relied upon for coverage (the unit tests take care of this)
class SequenceReprDoctests(object):
r"""
>>> import pandas as pd
>>> from skbio import Sequence
Empty (minimal) sequence:
>>> Sequence('')
Sequence
-------------
Stats:
length: 0
-------------
Single character sequence:
>>> Sequence('G')
Sequence
-------------
Stats:
length: 1
-------------
0 G
Multicharacter sequence:
>>> Sequence('ACGT')
Sequence
-------------
Stats:
length: 4
-------------
0 ACGT
Full single line:
>>> Sequence('A' * 60)
Sequence
-------------------------------------------------------------------
Stats:
length: 60
-------------------------------------------------------------------
0 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
Full single line with 1 character overflow:
>>> Sequence('A' * 61)
Sequence
--------------------------------------------------------------------
Stats:
length: 61
--------------------------------------------------------------------
0 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
60 A
Two full lines:
>>> Sequence('T' * 120)
Sequence
--------------------------------------------------------------------
Stats:
length: 120
--------------------------------------------------------------------
0 TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT
60 TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT
Two full lines with 1 character overflow:
>>> Sequence('T' * 121)
Sequence
---------------------------------------------------------------------
Stats:
length: 121
---------------------------------------------------------------------
0 TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT
60 TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT
120 T
Five full lines (maximum amount of information):
>>> Sequence('A' * 300)
Sequence
---------------------------------------------------------------------
Stats:
length: 300
---------------------------------------------------------------------
0 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
60 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
120 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
180 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
240 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
Six lines starts "summarized" output:
>>> Sequence('A' * 301)
Sequence
---------------------------------------------------------------------
Stats:
length: 301
---------------------------------------------------------------------
0 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
60 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
...
240 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
300 A
A naive algorithm would assume the width of the first column (noting
position) based on the sequence's length alone. This can be off by one if
the last position (in the last line) has a shorter width than the width
calculated from the sequence's length. This test case ensures that only a
single space is inserted between position 99960 and the first sequence
chunk:
>>> Sequence('A' * 100000)
Sequence
-----------------------------------------------------------------------
Stats:
length: 100000
-----------------------------------------------------------------------
0 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
60 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
...
99900 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
99960 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
The largest sequence that can be displayed using six chunks per line:
>>> Sequence('A' * 100020)
Sequence
-----------------------------------------------------------------------
Stats:
length: 100020
-----------------------------------------------------------------------
0 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
60 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
...
99900 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
99960 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
A single character longer than the previous sequence causes the optimal
number of chunks per line to be 5:
>>> Sequence('A' * 100021)
Sequence
-------------------------------------------------------------
Stats:
length: 100021
-------------------------------------------------------------
0 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
50 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
...
99950 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
100000 AAAAAAAAAA AAAAAAAAAA A
Wide range of characters (locale-independent):
>>> import string
>>> Sequence((string.ascii_letters + string.punctuation + string.digits +
... 'a space') * 567)
Sequence
-----------------------------------------------------------------------
Stats:
length: 57267
-----------------------------------------------------------------------
0 abcdefghij klmnopqrst uvwxyzABCD EFGHIJKLMN OPQRSTUVWX YZ!"#$%&'(
60 )*+,-./:;< =>?@[\]^_` {|}~012345 6789a spac eabcdefghi jklmnopqrs
...
57180 opqrstuvwx yzABCDEFGH IJKLMNOPQR STUVWXYZ!" #$%&'()*+, -./:;<=>?@
57240 [\]^_`{|}~ 0123456789 a space
Supply horrendous metadata and positional metadata to exercise a variety of
metadata formatting cases and rules. Sorting should be by type, then by
value within each type (Python 3 doesn't allow sorting of mixed types):
>>> metadata = {
... # str key, str value
... 'abc': 'some description',
... # int value
... 'foo': 42,
... # unsupported type (dict) value
... 'bar': {},
... # int key, wrapped str (single line)
... 42: 'some words to test text wrapping and such... yada yada yada '
... 'yada yada yada yada yada.',
... # bool key, wrapped str (multi-line)
... True: 'abc ' * 34,
... # float key, truncated str (too long)
... 42.5: 'abc ' * 200,
... # unsupported type (tuple) key, unsupported type (list) value
... ('foo', 'bar'): [1, 2, 3],
... # bytes key, single long word that wraps
... b'long word': 'abc' * 30,
... # truncated key (too long), None value
... 'too long of a key name to display in repr': None,
... # wrapped bytes value (has b'' prefix)
... 'bytes wrapped value': b'abcd' * 25,
... # float value
... 0.1: 99.9999,
... # bool value
... 43: False,
... # None key, complex value
... None: complex(-1.0, 0.0),
... # nested quotes
... 10: '"\''
... }
>>> positional_metadata = pd.DataFrame.from_items([
... # str key, int list value
... ('foo', [1, 2, 3, 4]),
... # float key, float list value
... (42.5, [2.5, 3.0, 4.2, -0.00001]),
... # int key, object list value
... (42, [[], 4, 5, {}]),
... # truncated key (too long), bool list value
... ('abc' * 90, [True, False, False, True]),
... # None key
... (None, range(4))])
>>> Sequence('ACGT', metadata=metadata,
... positional_metadata=positional_metadata)
Sequence
-----------------------------------------------------------------------
Metadata:
None: (-1+0j)
True: 'abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc
abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc
abc abc abc abc '
b'long word': 'abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabca
bcabcabcabcabcabcabcabcabcabcabcabcabc'
0.1: 99.9999
42.5: <class 'str'>
10: '"\''
42: 'some words to test text wrapping and such... yada yada yada
yada yada yada yada yada.'
43: False
'abc': 'some description'
'bar': <class 'dict'>
'bytes wrapped value': b'abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdab
cdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd
abcdabcdabcdabcd'
'foo': 42
<class 'str'>: None
<class 'tuple'>: <class 'list'>
Positional metadata:
'foo': <dtype: int64>
42.5: <dtype: float64>
42: <dtype: object>
<class 'str'>: <dtype: bool>
None: <dtype: int64>
Stats:
length: 4
-----------------------------------------------------------------------
0 ACGT
"""
pass
if __name__ == "__main__":
main()
|
colinbrislawn/scikit-bio
|
skbio/sequence/tests/test_sequence.py
|
Python
|
bsd-3-clause
| 113,531
| 0.000352
|
# Copyright (C) 2011 Jeremy S. Sanders
# Email: Jeremy Sanders <jeremy@jeremysanders.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
##############################################################################
"""Routines to export the document."""
from __future__ import division
import os.path
import random
import math
import codecs
import re
from ..compat import crange
from .. import qtall as qt4
from .. import utils
try:
from . import emf_export
hasemf = True
except ImportError:
hasemf = False
from . import svg_export
from . import selftest_export
from . import painthelper
# number of inches in one metre (used to convert dpi to dots per metre)
m_inch = 39.370079
def _(text, disambiguation=None, context="Export"):
"""Translate text."""
return qt4.QCoreApplication.translate(context, text, disambiguation)
def scalePDFMediaBox(text, pagewidth, reqdsizes):
"""Take the PDF file text and adjust the page size.
pagewidth: full page width
reqdsizes: list of tuples of width, height
"""
outtext = b''
outidx = 0
for size, match in zip(
reqdsizes,
re.finditer(
br'^/MediaBox \[([0-9]+) ([0-9]+) ([0-9]+) ([0-9]+)\]$',
text, re.MULTILINE)):
box = [float(x) for x in match.groups()]
widthfactor = (box[2]-box[0])/pagewidth
newbox = ('/MediaBox [%i %i %i %i]' % (
int(box[0]),
int(math.floor(box[3]-widthfactor*size[1])),
int(math.ceil(box[0]+widthfactor*size[0])),
int(math.ceil(box[3]))
)).encode('ascii')
outtext += text[outidx:match.start()] + newbox
outidx = match.end()
outtext += text[outidx:]
return outtext
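# Hedged usage sketch (editorial addition, not part of the original module): the
# function above works on raw PDF bytes, so a call looks roughly like this, assuming
# a 595 pt wide page and a single requested size of 300 x 200 pt:
#
#   pdf_bytes = open('figure.pdf', 'rb').read()
#   pdf_bytes = scalePDFMediaBox(pdf_bytes, 595.0, [(300.0, 200.0)])
#
# Each '/MediaBox [...]' entry is shrunk so the visible box matches the requested
# width and height while staying anchored at the top-left corner of the page.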
def fixupPDFIndices(text):
"""Fixup index table in PDF.
    Basically, we need to find the start of each obj in the file.
    These indices are then placed in an xref table at the end.
    The index to the xref table is placed after a startxref.
"""
    # find occurrences of obj in string
indices = {}
for m in re.finditer(b'([0-9]+) 0 obj', text):
index = int(m.group(1))
indices[index] = m.start(0)
# build up xref block (note trailing spaces)
xref = [b'xref', ('0 %i' % (len(indices)+1)).encode('ascii'),
b'0000000000 65535 f ']
for i in crange(len(indices)):
xref.append( ('%010i %05i n ' % (indices[i+1], 0)).encode('ascii') )
xref.append(b'trailer\n')
xref = b'\n'.join(xref)
# replace current xref with this one
xref_match = re.search(b'^xref\n.*trailer\n', text, re.DOTALL | re.MULTILINE)
xref_index = xref_match.start(0)
text = text[:xref_index] + xref + text[xref_match.end(0):]
# put the correct index to the xref after startxref
startxref_re = re.compile(b'^startxref\n[0-9]+\n', re.DOTALL | re.MULTILINE)
text = startxref_re.sub( ('startxref\n%i\n' % xref_index).encode('ascii'),
text)
return text
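# Hedged illustration (editorial addition): this routine is normally chained with
# scalePDFMediaBox, because rewriting the MediaBox entries can shift the byte offsets
# that the xref table records:
#
#   text = scalePDFMediaBox(text, pagewidth, sizes)
#   text = fixupPDFIndices(text)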
def fixupPSBoundingBox(infname, outfname, pagewidth, size):
"""Make bounding box for EPS/PS match size given."""
with open(infname, 'rU') as fin:
with open(outfname, 'w') as fout:
for line in fin:
if line[:14] == '%%BoundingBox:':
# replace bounding box line by calculated one
parts = line.split()
widthfactor = float(parts[3]) / pagewidth
origheight = float(parts[4])
line = "%s %i %i %i %i\n" % (
parts[0], 0,
int(math.floor(origheight-widthfactor*size[1])),
int(math.ceil(widthfactor*size[0])),
int(math.ceil(origheight)) )
fout.write(line)
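# Hedged note (editorial addition): unlike the PDF helpers above, this routine streams
# the PostScript file line by line and only rewrites the '%%BoundingBox:' header, which
# is why it writes to a separate output file instead of patching the text in place.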
class Export(object):
"""Class to do the document exporting.
This is split from document to make that class cleaner.
"""
formats = [
(["bmp"], _("Windows bitmap")),
(["eps"], _("Encapsulated Postscript")),
(["ps"], _("Postscript")),
(["jpg"], _("Jpeg bitmap")),
(["pdf"], _("Portable Document Format")),
#(["pic"], _("QT Pic format")),
(["png"], _("Portable Network Graphics")),
(["svg"], _("Scalable Vector Graphics")),
(["tiff"], _("Tagged Image File Format bitmap")),
(["xpm"], _("X Pixmap")),
]
if hasemf:
formats.append( (["emf"], _("Windows Enhanced Metafile")) )
formats.sort()
def __init__(self, doc, filename, pagenumber, color=True, bitmapdpi=100,
antialias=True, quality=85, backcolor='#ffffff00',
pdfdpi=150, svgtextastext=False):
"""Initialise export class. Parameters are:
doc: document to write
filename: output filename
pagenumber: pagenumber to export or list of pages for some formats
color: use color or try to use monochrome
bitmapdpi: assume this dpi value when writing images
antialias: antialias text and lines when writing bitmaps
quality: compression factor for bitmaps
backcolor: background color default for bitmaps (default transparent).
pdfdpi: dpi for pdf and eps files
svgtextastext: write text in SVG as text, rather than curves
"""
self.doc = doc
self.filename = filename
self.pagenumber = pagenumber
self.color = color
self.bitmapdpi = bitmapdpi
self.antialias = antialias
self.quality = quality
self.backcolor = backcolor
self.pdfdpi = pdfdpi
self.svgtextastext = svgtextastext
def export(self):
"""Export the figure to the filename."""
ext = os.path.splitext(self.filename)[1].lower()
if ext in ('.eps', '.ps', '.pdf'):
self.exportPDFOrPS(ext)
elif ext in ('.png', '.jpg', '.jpeg', '.bmp', '.tiff', '.xpm'):
self.exportBitmap(ext)
elif ext == '.svg':
self.exportSVG()
elif ext == '.selftest':
self.exportSelfTest()
elif ext == '.pic':
self.exportPIC()
elif ext == '.emf' and hasemf:
self.exportEMF()
else:
raise RuntimeError("File type '%s' not supported" % ext)
def renderPage(self, page, size, dpi, painter):
"""Render page using paint helper to painter.
This first renders to the helper, then to the painter
"""
helper = painthelper.PaintHelper(size, dpi=dpi, directpaint=painter)
painter.setClipRect( qt4.QRectF(
qt4.QPointF(0,0), qt4.QPointF(*size)) )
painter.save()
self.doc.paintTo(helper, page)
painter.restore()
painter.end()
def getSinglePage(self):
"""Check single number of pages or throw exception,
else return page number."""
try:
if len(self.pagenumber) != 1:
raise RuntimeError(
'Can only export a single page in this format')
return self.pagenumber[0]
except TypeError:
return self.pagenumber
def exportBitmap(self, ext):
"""Export to a bitmap format."""
format = ext[1:] # setFormat() doesn't want the leading '.'
if format == 'jpeg':
format = 'jpg'
page = self.getSinglePage()
# get size for bitmap's dpi
dpi = self.bitmapdpi
size = self.doc.pageSize(page, dpi=(dpi,dpi))
# create real output image
backqcolor = utils.extendedColorToQColor(self.backcolor)
if format == 'png':
# transparent output
image = qt4.QImage(size[0], size[1],
qt4.QImage.Format_ARGB32_Premultiplied)
else:
# non transparent output
image = qt4.QImage(size[0], size[1],
qt4.QImage.Format_RGB32)
backqcolor.setAlpha(255)
image.setDotsPerMeterX(dpi*m_inch)
image.setDotsPerMeterY(dpi*m_inch)
if backqcolor.alpha() == 0:
image.fill(qt4.qRgba(0,0,0,0))
else:
image.fill(backqcolor.rgb())
# paint to the image
painter = painthelper.DirectPainter(image)
painter.setRenderHint(qt4.QPainter.Antialiasing, self.antialias)
painter.setRenderHint(qt4.QPainter.TextAntialiasing, self.antialias)
self.renderPage(page, size, (dpi,dpi), painter)
# write image to disk
writer = qt4.QImageWriter()
writer.setFormat(qt4.QByteArray(format))
writer.setFileName(self.filename)
# enable LZW compression for TIFFs
writer.setCompression(1)
try:
# try to enable optimal JPEG compression using new
# options added in Qt 5.5
writer.setOptimizedWrite(True)
writer.setProgressiveScanWrite(True)
except AttributeError:
pass
if format == 'png':
# min quality for png as it makes no difference to output
# and makes file size smaller
writer.setQuality(0)
else:
writer.setQuality(self.quality)
writer.write(image)
def exportPDFOrPS(self, ext):
"""Export to EPS or PDF format."""
# setup printer with requested parameters
printer = qt4.QPrinter()
printer.setResolution(self.pdfdpi)
printer.setFullPage(True)
printer.setColorMode(
qt4.QPrinter.Color if self.color else qt4.QPrinter.GrayScale)
printer.setOutputFormat(
qt4.QPrinter.PdfFormat if ext=='.pdf' else
qt4.QPrinter.PostScriptFormat)
printer.setOutputFileName(self.filename)
printer.setCreator('Veusz %s' % utils.version())
# convert page to list if necessary
try:
pages = list(self.pagenumber)
except TypeError:
pages = [self.pagenumber]
if len(pages) != 1 and ext == '.eps':
raise RuntimeError(
'Only single pages allowed for .eps. Use .ps instead.')
# render ranges and return size of each page
sizes = self.doc.printTo(printer, pages)
# We have to modify the page sizes or bounding boxes to match
# the document. This is copied to a temporary file.
tmpfile = "%s.tmp.%i" % (self.filename, random.randint(0,1000000))
if ext == '.eps' or ext == '.ps':
# only 1 size allowed for PS, so use maximum
maxsize = sizes[0]
for size in sizes[1:]:
maxsize = max(size[0], maxsize[0]), max(size[1], maxsize[1])
fixupPSBoundingBox(self.filename, tmpfile, printer.width(), maxsize)
elif ext == '.pdf':
# change pdf bounding box and correct pdf index
with open(self.filename, 'rb') as fin:
text = fin.read()
text = scalePDFMediaBox(text, printer.width(), sizes)
text = fixupPDFIndices(text)
with open(tmpfile, 'wb') as fout:
fout.write(text)
else:
raise RuntimeError('Invalid file type')
# replace original by temporary
os.remove(self.filename)
os.rename(tmpfile, self.filename)
def exportSVG(self):
"""Export document as SVG"""
page = self.getSinglePage()
dpi = svg_export.dpi * 1.
size = self.doc.pageSize(
page, dpi=(dpi,dpi), integer=False)
with codecs.open(self.filename, 'w', 'utf-8') as f:
paintdev = svg_export.SVGPaintDevice(
f, size[0]/dpi, size[1]/dpi, writetextastext=self.svgtextastext)
painter = painthelper.DirectPainter(paintdev)
self.renderPage(page, size, (dpi,dpi), painter)
def exportSelfTest(self):
"""Export document for testing"""
page = self.getSinglePage()
dpi = svg_export.dpi * 1.
size = width, height = self.doc.pageSize(
page, dpi=(dpi,dpi), integer=False)
f = open(self.filename, 'w')
paintdev = selftest_export.SelfTestPaintDevice(f, width/dpi, height/dpi)
painter = painthelper.DirectPainter(paintdev)
self.renderPage(page, size, (dpi,dpi), painter)
f.close()
def exportPIC(self):
"""Export document as Qt PIC"""
page = self.getSinglePage()
pic = qt4.QPicture()
painter = painthelper.DirectPainter(pic)
dpi = (pic.logicalDpiX(), pic.logicalDpiY())
size = self.doc.pageSize(page, dpi=dpi)
self.renderPage(page, size, dpi, painter)
pic.save(self.filename)
def exportEMF(self):
"""Export document as EMF."""
page = self.getSinglePage()
dpi = 90.
size = self.doc.pageSize(page, dpi=(dpi,dpi), integer=False)
paintdev = emf_export.EMFPaintDevice(size[0]/dpi, size[1]/dpi, dpi=dpi)
painter = painthelper.DirectPainter(paintdev)
self.renderPage(page, size, (dpi,dpi), painter)
paintdev.paintEngine().saveFile(self.filename)
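# Hedged usage sketch (editorial addition, not part of the original module): a typical
# export call, assuming `doc` is an open veusz Document with at least one page:
#
#   Export(doc, 'figure.pdf', pagenumber=[0], pdfdpi=150).export()
#
# The file extension chooses the backend in export(); raster formats additionally
# honour bitmapdpi, antialias, quality and backcolor.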
def printDialog(parentwindow, document, filename=None):
"""Open a print dialog and print document."""
if document.getNumberPages() == 0:
qt4.QMessageBox.warning(parentwindow, _("Error - Veusz"),
_("No pages to print"))
return
prnt = qt4.QPrinter(qt4.QPrinter.HighResolution)
prnt.setColorMode(qt4.QPrinter.Color)
prnt.setCreator(_('Veusz %s') % utils.version())
if filename:
prnt.setDocName(filename)
dialog = qt4.QPrintDialog(prnt, parentwindow)
dialog.setMinMax(1, document.getNumberPages())
if dialog.exec_():
# get page range
if dialog.printRange() == qt4.QAbstractPrintDialog.PageRange:
# page range
minval, maxval = dialog.fromPage(), dialog.toPage()
else:
# all pages
minval, maxval = 1, document.getNumberPages()
# pages are relative to zero
minval -= 1
maxval -= 1
# reverse or forward order
if prnt.pageOrder() == qt4.QPrinter.FirstPageFirst:
pages = list(crange(minval, maxval+1))
else:
pages = list(crange(maxval, minval-1, -1))
# if more copies are requested
pages *= prnt.numCopies()
# do the printing
document.printTo(prnt, pages)
|
KDB2/veusz
|
veusz/document/export.py
|
Python
|
gpl-2.0
| 15,098
| 0.002384
|
"""This module provides REST services for Layers"""
import cherrypy
from LmCommon.common.lmconstants import HTTPStatus
from LmWebServer.common.lmconstants import HTTPMethod
from LmWebServer.services.api.v2.base import LmService
from LmWebServer.services.common.access_control import check_user_permission
from LmWebServer.services.cp_tools.lm_format import lm_formatter
# .............................................................................
@cherrypy.expose
@cherrypy.popargs('path_layer_id')
class LayerService(LmService):
"""Class for layers service.
"""
# ................................
@lm_formatter
def GET(self, path_layer_id=None, after_time=None, alt_pred_code=None,
before_time=None, date_code=None, epsg_code=None, env_code=None,
env_type_id=None, gcm_code=None, layerType=None, limit=100,
offset=0, url_user=None, scenario_id=None, squid=None, **params):
"""GET request. Individual layer, count, or list.
"""
# Layer type:
# 0 - Anything
# 1 - Environmental layer
# 2 - ? (Not implemented yet)
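        # Hedged illustration (editorial addition): with the routing above, a
        # path_layer_id of None lists layers, the literal 'count' returns a count,
        # and a numeric id such as '123' fetches a single layer; any non-zero
        # layerType switches every branch to the environmental-layer variants below.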
if layerType is None or layerType == 0:
if path_layer_id is None:
return self._list_layers(
self.get_user_id(url_user=url_user), after_time=after_time,
before_time=before_time, epsg_code=epsg_code, limit=limit,
offset=offset, squid=squid)
if path_layer_id.lower() == 'count':
return self._count_layers(
self.get_user_id(url_user=url_user), after_time=after_time,
before_time=before_time, epsg_code=epsg_code, squid=squid)
return self._get_layer(path_layer_id, env_layer=False)
if path_layer_id is None:
return self._list_env_layers(
self.get_user_id(url_user=url_user), after_time=after_time,
alt_pred_code=alt_pred_code, before_time=before_time,
date_code=date_code, env_code=env_code,
env_type_id=env_type_id, epsg_code=epsg_code,
gcm_code=gcm_code, limit=limit, offset=offset,
scenario_id=scenario_id)
if path_layer_id.lower() == 'count':
return self._count_env_layers(
self.get_user_id(url_user=url_user), after_time=after_time,
alt_pred_code=alt_pred_code, before_time=before_time,
date_code=date_code, env_code=env_code,
env_type_id=env_type_id, epsg_code=epsg_code,
gcm_code=gcm_code, scenario_code=scenario_id)
return self._get_layer(path_layer_id, env_layer=True)
# ................................
def _count_env_layers(self, user_id, after_time=None, alt_pred_code=None,
before_time=None, date_code=None, env_code=None,
env_type_id=None, epsg_code=None, gcm_code=None,
scenario_code=None):
"""Count environmental layer objects matching the specified criteria
Args:
user_id: The user to list environmental layers for. Note that this
may not be the same user logged into the system
after_time: Return layers modified after this time (Modified Julian
Day)
alt_pred_code: Return layers with this alternate prediction code
before_time: Return layers modified before this time (Modified
Julian Day)
date_code: Return layers with this date code
env_code: Return layers with this environment code
env_type_id: Return layers with this environmental type
epsg_code: Return layers with this EPSG code
gcm_code: Return layers with this GCM code
            scenario_code: Return layers from this scenario
"""
layer_count = self.scribe.count_env_layers(
user_id=user_id, env_code=env_code, gcm_code=gcm_code,
alt_pred_code=alt_pred_code, date_code=date_code,
after_time=after_time, before_time=before_time, epsg=epsg_code,
env_type_id=env_type_id, scenario_code=scenario_code)
return {'count': layer_count}
# ................................
def _count_layers(self, user_id, after_time=None, before_time=None,
epsg_code=None, squid=None):
"""Return a count of layers matching the specified criteria
Args:
user_id: The user to list layers for. Note that this may not be
the same user that is logged into the system
after_time: List layers modified after this time (Modified Julian
Day)
before_time: List layers modified before this time (Modified Julian
Day)
epsg_code: Return layers that have this EPSG code
squid: Return layers with this species identifier
"""
layer_count = self.scribe.count_layers(
user_id=user_id, squid=squid, after_time=after_time,
before_time=before_time, epsg=epsg_code)
return {'count': layer_count}
# ................................
def _get_layer(self, path_layer_id, env_layer=False):
"""Attempt to get a layer
"""
try:
_ = int(path_layer_id)
except ValueError:
raise cherrypy.HTTPError(
HTTPStatus.BAD_REQUEST,
'{} is not a valid layer ID'.format(path_layer_id))
if env_layer:
lyr = self.scribe.get_env_layer(lyr_id=path_layer_id)
else:
lyr = self.scribe.get_layer(lyr_id=path_layer_id)
if lyr is None:
raise cherrypy.HTTPError(
HTTPStatus.NOT_FOUND,
                'Layer {} was not found'.format(path_layer_id))
if check_user_permission(self.get_user_id(), lyr, HTTPMethod.GET):
return lyr
raise cherrypy.HTTPError(
HTTPStatus.FORBIDDEN,
'User {} does not have permission to access layer {}'.format(
self.get_user_id(), path_layer_id))
# ................................
def _list_env_layers(self, user_id, after_time=None, alt_pred_code=None,
before_time=None, date_code=None, env_code=None,
env_type_id=None, epsg_code=None, gcm_code=None,
limit=100, offset=0, scenario_id=None):
"""List environmental layer objects matching the specified criteria
Args:
user_id: The user to list environmental layers for. Note that this
may not be the same user logged into the system
after_time: (optional) Return layers modified after this time
(Modified Julian Day)
alt_pred_code: (optional) Return layers with this alternate
prediction code
before_time: (optional) Return layers modified before this time
(Modified Julian Day)
date_code: (optional) Return layers with this date code
env_code: (optional) Return layers with this environment code
env_type_id: (optional) Return layers with this environmental type
epsg_code: (optional) Return layers with this EPSG code
gcm_code: (optional) Return layers with this GCM code
limit: (optional) Return this number of layers, at most
offset: (optional) Offset the returned layers by this number
scenario_id: (optional) Return layers from this scenario
"""
lyr_atoms = self.scribe.list_env_layers(
offset, limit, user_id=user_id, env_code=env_code,
gcm_code=gcm_code, alt_pred_code=alt_pred_code,
date_code=date_code, after_time=after_time,
before_time=before_time, epsg=epsg_code, env_type_id=env_type_id)
return lyr_atoms
# ................................
def _list_layers(self, user_id, after_time=None, before_time=None,
epsg_code=None, limit=100, offset=0, squid=None):
"""Return a list of layers matching the specified criteria
Args:
user_id: The user to list layers for. Note that this may not be
the same user that is logged into the system
after_time: List layers modified after this time (Modified Julian
Day)
before_time: List layers modified before this time (Modified Julian
Day)
epsg_code: Return layers that have this EPSG code
limit: Return this number of layers, at most
offset: Offset the returned layers by this number
squid: Return layers with this species identifier
"""
layer_atoms = self.scribe.list_layers(
offset, limit, user_id=user_id, squid=squid, after_time=after_time,
before_time=before_time, epsg=epsg_code)
return layer_atoms
|
lifemapper/core
|
LmWebServer/services/api/v2/layer.py
|
Python
|
gpl-3.0
| 9,222
| 0
|
from unittest import skipIf
from django.conf import settings
def skipIfDefaultUser(test_func):
"""
Skip a test if a default user model is in use.
"""
return skipIf(settings.AUTH_USER_MODEL == "auth.User", "Default user model in use")(
test_func
)
def skipIfCustomUser(test_func):
"""
Skip a test if a custom user model is in use.
"""
return skipIf(settings.AUTH_USER_MODEL != "auth.User", "Custom user model in use")(
test_func
)
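# Hedged usage sketch (editorial addition, not part of the original helpers): both
# decorators wrap an individual test, e.g.
#
#   @skipIfCustomUser
#   def test_profiler_records_request_user(self):
#       ...
#
# so the test only runs when the project uses the stock auth.User model.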
|
yunojuno/django-request-profiler
|
tests/utils.py
|
Python
|
mit
| 491
| 0.004073
|
""" Tests for OAuth Dispatch python API module. """
import unittest
from django.conf import settings
from django.http import HttpRequest
from django.test import TestCase
from oauth2_provider.models import AccessToken
from common.djangoapps.student.tests.factories import UserFactory
OAUTH_PROVIDER_ENABLED = settings.FEATURES.get('ENABLE_OAUTH2_PROVIDER')
if OAUTH_PROVIDER_ENABLED:
from openedx.core.djangoapps.oauth_dispatch import api
from openedx.core.djangoapps.oauth_dispatch.adapters import DOTAdapter
from openedx.core.djangoapps.oauth_dispatch.tests.constants import DUMMY_REDIRECT_URL
EXPECTED_DEFAULT_EXPIRES_IN = 36000
@unittest.skipUnless(OAUTH_PROVIDER_ENABLED, 'OAuth2 not enabled')
class TestOAuthDispatchAPI(TestCase):
""" Tests for oauth_dispatch's api.py module. """
def setUp(self):
super().setUp()
self.adapter = DOTAdapter()
self.user = UserFactory()
self.client = self.adapter.create_public_client(
name='public app',
user=self.user,
redirect_uri=DUMMY_REDIRECT_URL,
client_id='public-client-id',
)
def _assert_stored_token(self, stored_token_value, expected_token_user, expected_client):
stored_access_token = AccessToken.objects.get(token=stored_token_value)
assert stored_access_token.user.id == expected_token_user.id
assert stored_access_token.application.client_id == expected_client.client_id
assert stored_access_token.application.user.id == expected_client.user.id
def test_create_token_success(self):
token = api.create_dot_access_token(HttpRequest(), self.user, self.client)
assert token['access_token']
assert token['refresh_token']
self.assertDictContainsSubset(
{
'token_type': 'Bearer',
'expires_in': EXPECTED_DEFAULT_EXPIRES_IN,
'scope': '',
},
token,
)
self._assert_stored_token(token['access_token'], self.user, self.client)
def test_create_token_another_user(self):
another_user = UserFactory()
token = api.create_dot_access_token(HttpRequest(), another_user, self.client)
self._assert_stored_token(token['access_token'], another_user, self.client)
def test_create_token_overrides(self):
expires_in = 4800
token = api.create_dot_access_token(
HttpRequest(), self.user, self.client, expires_in=expires_in, scopes=['profile'],
)
self.assertDictContainsSubset({'scope': 'profile'}, token)
self.assertDictContainsSubset({'expires_in': expires_in}, token)
|
eduNEXT/edx-platform
|
openedx/core/djangoapps/oauth_dispatch/tests/test_api.py
|
Python
|
agpl-3.0
| 2,669
| 0.003372
|
# encoding: utf8
from __future__ import unicode_literals
from ..symbols import *
TAG_MAP = {
# Explanation of Unidic tags:
# https://www.gavo.t.u-tokyo.ac.jp/~mine/japanese/nlp+slp/UNIDIC_manual.pdf
# Universal Dependencies Mapping:
# http://universaldependencies.org/ja/overview/morphology.html
# http://universaldependencies.org/ja/pos/all.html
"記号,一般,*,*":{POS: PUNCT}, # this includes characters used to represent sounds like ドレミ
"記号,文字,*,*":{POS: PUNCT}, # this is for Greek and Latin characters used as sumbols, as in math
"感動詞,フィラー,*,*": {POS: INTJ},
"感動詞,一般,*,*": {POS: INTJ},
# this is specifically for unicode full-width space
"空白,*,*,*": {POS: X},
"形状詞,一般,*,*":{POS: ADJ},
"形状詞,タリ,*,*":{POS: ADJ},
"形状詞,助動詞語幹,*,*":{POS: ADJ},
"形容詞,一般,*,*":{POS: ADJ},
"形容詞,非自立可能,*,*":{POS: AUX}, # XXX ADJ if alone, AUX otherwise
"助詞,格助詞,*,*":{POS: ADP},
"助詞,係助詞,*,*":{POS: ADP},
"助詞,終助詞,*,*":{POS: PART},
"助詞,準体助詞,*,*":{POS: SCONJ}, # の as in 走るのが速い
"助詞,接続助詞,*,*":{POS: SCONJ}, # verb ending て
"助詞,副助詞,*,*":{POS: PART}, # ばかり, つつ after a verb
"助動詞,*,*,*":{POS: AUX},
"接続詞,*,*,*":{POS: SCONJ}, # XXX: might need refinement
"接頭辞,*,*,*":{POS: NOUN},
"接尾辞,形状詞的,*,*":{POS: ADJ}, # がち, チック
"接尾辞,形容詞的,*,*":{POS: ADJ}, # -らしい
"接尾辞,動詞的,*,*":{POS: NOUN}, # -じみ
"接尾辞,名詞的,サ変可能,*":{POS: NOUN}, # XXX see 名詞,普通名詞,サ変可能,*
"接尾辞,名詞的,一般,*":{POS: NOUN},
"接尾辞,名詞的,助数詞,*":{POS: NOUN},
"接尾辞,名詞的,副詞可能,*":{POS: NOUN}, # -後, -過ぎ
"代名詞,*,*,*":{POS: PRON},
"動詞,一般,*,*":{POS: VERB},
"動詞,非自立可能,*,*":{POS: VERB}, # XXX VERB if alone, AUX otherwise
"動詞,非自立可能,*,*,AUX":{POS: AUX},
"動詞,非自立可能,*,*,VERB":{POS: VERB},
"副詞,*,*,*":{POS: ADV},
"補助記号,AA,一般,*":{POS: SYM}, # text art
"補助記号,AA,顔文字,*":{POS: SYM}, # kaomoji
"補助記号,一般,*,*":{POS: SYM},
"補助記号,括弧開,*,*":{POS: PUNCT}, # open bracket
"補助記号,括弧閉,*,*":{POS: PUNCT}, # close bracket
"補助記号,句点,*,*":{POS: PUNCT}, # period or other EOS marker
"補助記号,読点,*,*":{POS: PUNCT}, # comma
"名詞,固有名詞,一般,*":{POS: PROPN}, # general proper noun
"名詞,固有名詞,人名,一般":{POS: PROPN}, # person's name
"名詞,固有名詞,人名,姓":{POS: PROPN}, # surname
"名詞,固有名詞,人名,名":{POS: PROPN}, # first name
"名詞,固有名詞,地名,一般":{POS: PROPN}, # place name
"名詞,固有名詞,地名,国":{POS: PROPN}, # country name
"名詞,助動詞語幹,*,*":{POS: AUX},
"名詞,数詞,*,*":{POS: NUM}, # includes Chinese numerals
"名詞,普通名詞,サ変可能,*":{POS: NOUN}, # XXX: sometimes VERB in UDv2; suru-verb noun
"名詞,普通名詞,サ変可能,*,NOUN":{POS: NOUN},
"名詞,普通名詞,サ変可能,*,VERB":{POS: VERB},
"名詞,普通名詞,サ変形状詞可能,*":{POS: NOUN}, # ex: 下手
"名詞,普通名詞,一般,*":{POS: NOUN},
"名詞,普通名詞,形状詞可能,*":{POS: NOUN}, # XXX: sometimes ADJ in UDv2
"名詞,普通名詞,形状詞可能,*,NOUN":{POS: NOUN},
"名詞,普通名詞,形状詞可能,*,ADJ":{POS: ADJ},
"名詞,普通名詞,助数詞可能,*":{POS: NOUN}, # counter / unit
"名詞,普通名詞,副詞可能,*":{POS: NOUN},
"連体詞,*,*,*":{POS: ADJ}, # XXX this has exceptions based on literal token
"連体詞,*,*,*,ADJ":{POS: ADJ},
"連体詞,*,*,*,PRON":{POS: PRON},
"連体詞,*,*,*,DET":{POS: DET},
}
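# Hedged usage sketch (editorial addition, not part of the original module): keys are
# comma-joined Unidic part-of-speech fields, so looking up the coarse UD tag is roughly
#
#   TAG_MAP["名詞,固有名詞,人名,姓"][POS]   # -> PROPN (surname)
#
# The longer five-field keys (e.g. "動詞,非自立可能,*,*,AUX") are disambiguated variants
# of the entries marked XXX above, resolved elsewhere based on the literal token.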
|
raphael0202/spaCy
|
spacy/ja/tag_map.py
|
Python
|
mit
| 4,024
| 0.03466
|
from unittest import TestCase
from pyrake.contrib.spidermiddleware.urllength import UrlLengthMiddleware
from pyrake.http import Response, Request
from pyrake.spider import Spider
class TestUrlLengthMiddleware(TestCase):
def test_process_spider_output(self):
res = Response('http://pyraketest.org')
short_url_req = Request('http://pyraketest.org/')
long_url_req = Request('http://pyraketest.org/this_is_a_long_url')
reqs = [short_url_req, long_url_req]
mw = UrlLengthMiddleware(maxlength=25)
spider = Spider('foo')
out = list(mw.process_spider_output(res, reqs, spider))
self.assertEquals(out, [short_url_req])
|
elkingtowa/pyrake
|
tests/test_spidermiddleware_urllength.py
|
Python
|
mit
| 685
| 0.00146
|
#!/usr/bin/python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the Priority Group Model."""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
from google.appengine.ext import db
from django.utils.translation import ugettext
from soc.models import linkable
class PriorityGroup(linkable.Linkable):
"""The PriorityGroup model.
"""
#: the priority of this group, 0 being lower than 1
priority = db.IntegerProperty(required=False, default=0)
    #: the human-readable name of this priority group
name = db.StringProperty(required=False)
|
jamslevy/gsoc
|
app/soc/models/priority_group.py
|
Python
|
apache-2.0
| 1,130
| 0.004425
|
# -*- coding: utf-8 -*-
#
# metasci documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 7 22:29:49 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# Determine if we're on Read the Docs server
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# On Read the Docs, we need to mock a few third-party modules so we don't get
# ImportErrors when building documentation
try:
from unittest.mock import MagicMock
except ImportError:
from mock import Mock as MagicMock
MOCK_MODULES = ['numpy', 'numpy.polynomial', 'numpy.polynomial.polynomial',
'h5py', 'pandas', 'opencg']
sys.modules.update((mod_name, MagicMock()) for mod_name in MOCK_MODULES)
import numpy as np
np.polynomial.Polynomial = MagicMock
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../sphinxext'))
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.mathjax',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinx_numfig',
'notebook_sphinxext']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'OpenMC'
copyright = u'2011-2016, Massachusetts Institute of Technology'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.8"
# The full version, including alpha/beta/rc tags.
release = "0.8.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'sphinx'
#pygments_style = 'friendly'
#pygments_style = 'bw'
#pygments_style = 'fruity'
#pygments_style = 'manni'
pygments_style = 'tango'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages
if not on_rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_logo = '_images/openmc_logo.png'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "OpenMC Documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
def setup(app):
app.add_stylesheet('theme_overrides.css')
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'openmcdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'openmc.tex', u'OpenMC Documentation',
u'Massachusetts Institute of Technology', 'manual'),
]
latex_elements = {
'preamble': r"""
\usepackage{enumitem}
\usepackage{amsfonts}
\usepackage{amsmath}
\setlistdepth{99}
\usepackage{tikz}
\usetikzlibrary{shapes,snakes,shadows,arrows,calc,decorations.markings,patterns,fit,matrix,spy}
\usepackage{fixltx2e}
\hypersetup{bookmarksdepth=3}
\setcounter{tocdepth}{2}
\numberwithin{equation}{section}
""",
'printindex': r""
}
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
#Autodocumentation Flags
#autodoc_member_order = "groupwise"
#autoclass_content = "both"
autosummary_generate = True
napoleon_use_ivar = True
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),
'matplotlib': ('http://matplotlib.org/', None)
}
|
samuelshaner/openmc
|
docs/source/conf.py
|
Python
|
mit
| 7,909
| 0.006448
|
import json
import shakedown
import dcos
from dcos import marathon
from enum import Enum
from tests.command import (
cassandra_api_url,
spin,
WAIT_TIME_IN_SECONDS
)
class PlanState(Enum):
ERROR = "ERROR"
WAITING = "WAITING"
PENDING = "PENDING"
IN_PROGRESS = "IN_PROGRESS"
COMPLETE = "COMPLETE"
def filter_phase(plan, phase_name):
for phase in plan['phases']:
if phase['name'] == phase_name:
return phase
return None
def get_phase_index(plan, phase_name):
idx = 0
for phase in plan['phases']:
if phase['name'] == phase_name:
return idx
else:
idx += 1
return -1
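# Hedged illustration (editorial addition, not part of the original module): both
# helpers expect the plan JSON returned by the service, e.g.
#
#   plan = {'phases': [{'name': 'Deploy'}, {'name': 'Backup'}]}
#   filter_phase(plan, 'Backup')     # -> {'name': 'Backup'}
#   get_phase_index(plan, 'Backup')  # -> 1 (or -1 when the phase is missing)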
counter = 0
def get_and_verify_plan(predicate=lambda r: True, wait_time=WAIT_TIME_IN_SECONDS):
global counter
plan_url = cassandra_api_url('plan')
def fn():
try:
return dcos.http.get(plan_url)
except dcos.errors.DCOSHTTPException as err:
return err.response
def success_predicate(result):
global counter
message = 'Request to {} failed'.format(plan_url)
try:
body = result.json()
except ValueError:
return False, message
if counter < 3:
counter += 1
pred_res = predicate(body)
if pred_res:
counter = 0
return pred_res, message
return spin(fn, success_predicate, wait_time=wait_time).json()
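# Hedged usage sketch (editorial addition): callers typically poll until the whole plan
# reaches a terminal state, assuming the plan document exposes a top-level 'status':
#
#   plan = get_and_verify_plan(
#       lambda p: p['status'] == PlanState.COMPLETE.value)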
def get_marathon_uri():
"""Gets URL to the Marathon instance"""
return '{}/marathon'.format(shakedown.dcos_url())
def get_marathon_client():
"""Gets a marathon client"""
return marathon.Client(get_marathon_uri())
def strip_meta(app):
app.pop('fetch')
app.pop('version')
app.pop('versionInfo')
return app
|
adobe-mds/dcos-cassandra-service
|
integration/tests/infinity_commons.py
|
Python
|
apache-2.0
| 1,791
| 0.002233
|
# Author: Martin Oehler <oehler@knopper.net> 2013
# License: GPL V2
from django.forms import ModelForm
from django.forms import Form
from django.forms import ModelChoiceField
from django.forms.widgets import RadioSelect
from django.forms.widgets import CheckboxSelectMultiple
from django.forms.widgets import TextInput
from django.forms.widgets import Textarea
from django.forms.widgets import DateInput
from django.contrib.admin import widgets
from linboweb.linboserver.models import partition
from linboweb.linboserver.models import partitionSelection
from linboweb.linboserver.models import os
from linboweb.linboserver.models import vm
from linboweb.linboserver.models import client
from linboweb.linboserver.models import clientGroup
from linboweb.linboserver.models import pxelinuxcfg
class partitionForm(ModelForm):
class Meta:
model = partition
class partitionSelectionForm(ModelForm):
class Meta:
model = partitionSelection
class osForm(ModelForm):
partitionselection = ModelChoiceField(queryset=partitionSelection.objects.all())
class Meta:
model = os
class vmForm(ModelForm):
class Meta:
model = vm
class clientForm(ModelForm):
pxelinuxconfiguration = ModelChoiceField(queryset=pxelinuxcfg.objects.all())
class Meta:
model = client
class clientGroupForm(ModelForm):
class Meta:
model = clientGroup
class pxelinuxcfgForm(ModelForm):
class Meta:
model = pxelinuxcfg
widgets = {
'configuration': Textarea(attrs={'cols': 80, 'rows': 40}),
}
|
MartinOehler/LINBO-ServerGUI
|
linboweb/linboserver/forms.py
|
Python
|
gpl-2.0
| 1,587
| 0.006931
|