column      type            range / values
repo_name   stringlengths   5 to 100
path        stringlengths   4 to 231
language    stringclasses   1 value
license     stringclasses   15 values
size        int64           6 to 947k
score       float64         0 to 0.34
prefix      stringlengths   0 to 8.16k
middle      stringlengths   3 to 512
suffix      stringlengths   0 to 8.17k
endlessm/chromium-browser
third_party/llvm/lldb/test/API/commands/command/script/import/rdar-12586188/fail212586188.py
Python
bsd-3-clause
77
0.012987
def f(x):
    return x + 1

raise ValueError("I do not want to be imported")
hrashk/sympy
sympy/printing/codeprinter.py
Python
bsd-3-clause
9,879
0.001417
from __future__ import print_function, division from sympy.core import C, Add, Mul, Pow, S from sympy.core.compatibility import default_sort_key from sympy.core.mul import _keep_coeff from sympy.printing.str import StrPrinter from sympy.printing.precedence import precedence class AssignmentError(Exception): """ Raised if an assignment variable for a loop is missing. """ pass class CodePrinter(StrPrinter): """ The base class for code-printing subclasses. """ _operators = { 'and': '&&', 'or': '||', 'not': '!', } def _doprint_a_piece(self, expr, assign_to=None): # Here we print an expression that may contain Indexed objects, they # correspond to arrays in the generated code. The low-level implementation # involves looping over array elements and possibly storing results in temporary # variables or accumulate it in the assign_to object. lhs_printed = self._print(assign_to) lines = [] # Setup loops over non-dummy indices -- all terms need these indices = self.get_expression_indices(expr, assign_to) openloop, closeloop = self._get_loop_opening_ending(indices) # Setup loops over dummy indices -- each term needs separate treatment from sympy.tensor import get_contraction_structure d = get_contraction_structure(expr) # terms with no summations first if None in d: text = CodePrinter.doprint(self, Add(*d[None])) else: # If all terms have summations we must initialize array to Zero text = CodePrinter.doprint(self, 0) # skip redundant assignments if text != lhs_printed: lines.extend(openloop) if assign_to is not None: text = self._get_statement("%s = %s" % (lhs_printed, text)) lines.append(text) lines.extend(closeloop) for dummies in d: # then terms with summations if isinstance(dummies, tuple): indices = self._sort_optimized(dummies, expr) openloop_d, closeloop_d = self._get_loop_opening_ending( indices) for term in d[dummies]: if term in d and not ([list(f.keys()) for f in d[term]] == [[None] for f in d[term]]): # If one factor in the term has it's own internal # contractions, those must be computed first. # (temporary variables?) raise NotImplementedError( "FIXME: no support for contractions in factor yet") else: # We need the lhs expression as an accumulator for # the loops, i.e # # for (int d=0; d < dim; d++){ # lhs[] = lhs[] + term[][d] # } ^.................. the accumulator # # We check if the expression already contains the # lhs, and raise an exception if it does, as that # syntax is currently undefined. FIXME: What would be # a good interpretation? if assign_to is None: raise AssignmentError( "need assignment variable for loops") if term.has(assign_to): raise ValueError("FIXME: lhs present in rhs,\ this is undefined in CCodePrinter") lines.extend(openloop) lines.extend(openloop_d) text = "%s = %s" % (lhs_printed, CodePrinter.doprint( self, assign_to + term)) lines.append(self._get_statement(text)) lines.extend(closeloop_d) lines.extend(closeloop) return lines def get_expression_indices(self, expr, assign_to): from sympy.tensor import get_indices, get_contraction_structure rinds, junk = get_indices(expr) linds, junk = get_indices(assign_to) # support broadcast of scalar if linds and not rinds: rinds = linds if rinds != linds: raise ValueError("lhs indices must match non-dummy" " rhs indices in %s" % expr) return self._sort_optimized(rinds, assign_to) def _sort_optimized(self, indices, expr): if not indices: return [] # determine optimized loop order by giving a score to each index # the index with the highest score are put in the innermost loop. 
score_table = {} for i in indices: score_table[i] = 0 arrays = expr.atoms(C.Indexed) for arr in arrays: for p, ind in enumerate(arr.indices): try: score_table[ind] += self._rate_index_position(p) except KeyError: pass return sorted(indices, key=lambda x: score_table[x]) def _print_NumberSymbol(self, expr): # A Number symbol that is not implemented here or with _printmethod # is registered and evaluated self._number_symbols.add((expr, self._print(expr.evalf(self._settings["precision"])))) return str(expr) def _print_Dummy(self, expr): # dummies must be printed as unique symbols return "%s_%i" % (expr.name, expr.dummy_index) # Dummy _print_Catalan = _print_NumberSymbol _print_EulerGamma = _print_NumberSymbol _print_GoldenRatio = _print_NumberSymbol def _print_And(self, expr): PREC = precedence(expr) return (" %s " % self._operat
ors['and']).join(self.parenthesize(a, PREC) for a in sorted(expr.args, key=default_sort_key)) def _print_Or(self, expr): PREC = precedence(expr) return (" %s " % self._operators['or']).join(self.parenthesize(a, PREC) for a in sorted(expr.args, key=default_sort_key)) def _print_Xor(self, expr): if self._operators.get('xor') is None: return self._print_not_supported(expr
) PREC = precedence(expr) return (" %s " % self._operators['xor']).join(self.parenthesize(a, PREC) for a in expr.args) def _print_Equivalent(self, expr): if self._operators.get('equivalent') is None: return self._print_not_supported(expr) PREC = precedence(expr) return (" %s " % self._operators['equivalent']).join(self.parenthesize(a, PREC) for a in expr.args) def _print_Not(self, expr): PREC = precedence(expr) return self._operators['not'] + self.parenthesize(expr.args[0], PREC) def _print_Mul(self, expr): prec = precedence(expr) c, e = expr.as_coeff_Mul() if c < 0: expr = _keep_coeff(-c, e) sign = "-" else: sign = "" a = [] # items in the numerator b = [] # items that are in the denominator (if any) if self.order not in ('old', 'none'): args = expr.as_ordered_factors() else: # use make_args in case expr was something like -x -> x args = Mul.make_args(expr) # Gather args for numerator/denominator for item in args: if item.is_commutative and item.is_Pow and item.exp.is_Rational and item.exp.is_negative: if item.exp != -1: b.append(Pow(item.base, -item.exp, evaluate=False)) else: b.append(Pow(item.base, -item.exp)) else: a.append(item) a = a or [S.One] a_str = [self.parenthesize(x, prec) for x in a] b_str = [self.parenthesize(x, prec) for x in b] if len(b) == 0: return sign + '*'.join(a_str) e
ajdelgados/Sofia
modules/main.py
Python
gpl-3.0
37,233
0.010102
#!/usr/bin/python # -*- coding: windows-1252 -*- import wxversion wxversion.select('2.8') import wx import wx.aui from id import * from model import * from graphic import * from sql import * from django import * import sqlite3 from xml.dom import minidom class MainFrame(wx.aui.AuiMDIParentFrame): def __init__(self, app, posx, posy, sizex, sizey): self.data = {} self.locale = wx.Locale() self.locale.AddCatalogLookupPathPrefix('./locale') if app.config.Read("language"): if app.config.Read("language") != 'English': idioma = app.config.Read("language") else: idioma = '' else: idioma = 'es_ES' app.config.Write("language", idioma) app.config.Flush() self.locale.AddCatalog(idioma) for key, value in language.iteritems(): if value == idioma: self.data["idioma"] = key self.translation = wx.GetTranslation self.app = app #--Iniciar el padre con las posiciones y titulo del Frame--# wx.aui.AuiMDIParentFrame.__init__(self, None, -1, self.translation(archivo[TITULO]), pos = (posx, posy), size = (sizex, sizey)) #--Imbuir el logo del CUC en la caja de control de la ventana--# ico = wx.Icon('images/mini_logo_cuc_trans.ico', wx.BITMAP_TYPE_ICO) self.SetIcon(ico) #--Inicializamos la libreria OGL de wxPython--# ogl.OGLInitialize() #--MENU--# #Menu de Archivo self.menuFile = wx.Menu() self.menuFile.Append(ID_CREAR_MODELO, self.translation(archivo[ID_CREAR_MODELO]), self.translation(archivoHelp[ID_CREAR_MODELO])) self.menuFile.Append(ID_ABRIR_MODELO, self.translation(archivo[ID_ABRIR_MODELO]), self.translation(archivoHelp[ID_ABRIR_MODELO])) self.menuFile.AppendSeparator() self.menuFile.Append(ID_GUARDAR_MODELO, self.translation(archivo[ID_GUARDAR_MODELO]), self.translation(archivoHelp[ID_GUARDAR_MODELO])) self.menuFile.Enable(ID_GUARDAR_MODELO, False) self.menuFile.Append(ID_GUARDAR_COMO_MODELO, self.translation(archivo[ID_GUARDAR_COMO_MODELO]), self.translation(archivoHelp[ID_GUARDAR_COMO_MODELO])) self.menuFile.Enable(ID_GUARDAR_COMO_MODELO, False) self.menuFile.Append(ID_EXPORTAR_MODELO, self.translation(archivo[ID_EXPORTAR_MODELO]), self.translation(archivoHelp[ID_EXPORTAR_MODELO])) self.menuFile.Enable(ID_EXPORTAR_MODELO, False) self.menuFile.AppendSeparator() self.menuFile.Append(ID_CERRAR_APLICACION, self.translation(archivo[ID_CERRAR_APLICACION]), self.translation(archivoHelp[ID_CERRAR_APLICACION])) #Menu Ver self.menuVer = wx.Menu() self.refrescar = self.menuVer.Append(ID_MENU_VER_REFRESCAR, self.translation(archivo[ID_MENU_VER_REFRESCAR]), self.translation(archivoHelp[ID_MENU_VER_REFRESCAR])) wx.EVT_MENU(self, ID_MENU_VER_REFRESCAR, self.Actualizar) self.menuVer.AppendSeparator() self.menuVerStandard = self.menuVer.Append(ID_MENU_VER_STANDARD, self.translation(archivo[ID_MENU_VER_STANDARD]), self.translation(archivoHelp[ID_MENU_VER_STANDARD]), kind=wx.ITEM_CHECK) self.menuVerIdef1x = self.menuVer.Append(ID_MENU_VER_IDF1X, self.translation(archivo[ID_MENU_VER_IDF1X]), self.translation(archivoHelp[ID_MENU_VER_IDF1X]), kind=wx.ITEM_CHECK) self.menuVer.AppendSeparator() self.menuVerNav = self.menuVer.Append(ID_MENU_VER_NAV, self.translation(archivo[ID_MENU_VER_NAV]), self.translation(archivoHelp[ID_MENU_VER_NAV]), kind=wx.ITEM_CHECK) self.menuVerCard = self.menuVer.Append(ID_MENU_VER_CARD, self.translation(archivo[ID_MENU_VER_CARD]), self.translation(archivoHelp[ID_MENU_VER_CARD]), kind=wx.ITEM_CHECK) self.menuVer.AppendSeparator() self.barraStatus = self.menuVer.Append(ID_MENU_VER_BARRA_ESTADO, self.translation(archivo[ID_MENU_VER_BARRA_ESTADO]), self.translation(archivoHelp[ID_MENU_VER_BARRA_ESTADO]), 
kind=wx.ITEM_CHECK) if app.tool: idf1x, standard, navegador = eval(app.tool) else: idf1x, standard, navegador = (True, True, True) app.config.Write("tool", str( (True, True, True) )) app.config.Flush() self.menuVer.Check(ID_MENU_VER_STANDARD, standard) self.menuVer.Check(ID_MENU_VER_IDF1X, idf1x) self.menuVer.Check(ID_MENU_VER_BARRA_ESTADO, True) self.menuVer.Enable(ID_MENU_VER_REFRESCAR, False) self.menuVer.Enable(ID_MENU_VER_NAV, False) self.menuVer.Enable(ID_MENU_VER_CARD, False) #Menu Herramientas self.menuTool = wx.Menu() self.menuTool.Append(ID_CREAR_ENTIDAD, self.translation(archivo[ID_CREAR_ENTIDAD]), self.translation(archivoHelp[ID_CREAR_ENTIDAD])) self.menuTool.Enable(ID_CREAR_ENTIDAD, False) self.menuTool.AppendSeparator() self.menuTool.Append(ID_RELACION_IDENTIF, self.translation(archivo[ID_RELACION_IDENTIF]), self.translation(archivoHelp[ID_RELACION_IDENTIF])) self.menuTool.Enable(ID_RELACION_IDENTIF, False) self.menuTool.Append(ID_RELACION_NO_IDENTIF, self.translation(archivo[ID_RELACION_NO_IDENTIF]), self.translation(archivoHelp[ID_RELACION_IDENTIF])) self.menuTool.Enable(ID_RELACION_NO_IDENTIF, False) self.menuTool.AppendSeparator() self.menuTool.Append(ID_GENERAR_SCRIPT, self.translation(archivo[ID_GENERAR_SCRIPT]), self.translation(archivoHelp[ID_GENERAR_SCRIPT])) self.menuTool.Enable(ID_GENERAR_SCRIPT, False) self.menuTool.Append(ID_GENERAR_SCRIPT_DJANGO, archivo[ID_GENERAR_SCRIPT_DJANGO], archivoHelp[ID_GENERAR_SCRIPT_DJANGO]) self.menuTool.Enable(ID_GENERAR_SCRIPT_DJANGO, False) #self.menuTool.Append(ID_GUARDAR_SCRIPT, "Guardar Script SQL", "Guarda el Script SQL del modelo para PostgreSQL") #Menu de Ayuda self.menuHelp = wx.Menu() #self.menuLanguage = wx.Menu() #self.menuLanguage.Append(ID_MENU_HELP_us_US, self.translation(archivo[ID_MENU_HELP_us_US]), self.translation(archivoHelp[ID_MENU_HELP_us_US]), kind=wx.ITEM_RADIO) #self.menuLanguage.Append(ID_MENU_HELP_es_ES, self.translation(archivo[ID_MENU_HELP_es_ES]), self.translation(archivoHelp[ID_MENU_HELP_es_ES]), kind=wx.ITEM_RADIO).Check(True) #self.menuLanguage.Append(ID_MENU_HELP_fr_FR, self.translation("frances"), kind=wx.ITEM_RADIO) #self.menuHelp.AppendMenu(ID_MENU_HELP_LANGUAGE, self.translation(archivo[ID_MENU_HELP_LANGUAGE]), self.menuLanguage) self.menuHelp.Append(ID_MENU_HELP_LANGUAGE, self.translation(archivo[ID_MENU_HELP_LANGUAGE]), self.translation(archivoHelp[ID_MENU_HELP_LANGUAGE])) self.menuHelp.Append(ID_MENU_HELP_AYUDA, self.translation(archivo[ID_MENU_HELP_AYUDA]), self.translation(archivoHelp[ID_MENU_HELP_AYUDA])) self.menuHelp.AppendSeparator() self.menuHelp.Append(ID_MENU_HELP_LOG, self.translation(archivo[ID_MENU_HELP_LOG]), self.translation(archi
voHelp[ID_MENU_HELP_LOG])) self.menuHelp.Enable(ID_MENU_HELP_LOG, False) self.menuHelp.AppendSeparator() self.menuHelp.Append(ID_MENU_HELP_ACERCA_DE, self.translation(ar
chivo[ID_MENU_HELP_ACERCA_DE]), self.translation(archivoHelp[ID_MENU_HELP_ACERCA_DE])) #--Se adicionan los menues a la barra de menu--# self.menuBar = wx.MenuBar() self.menuBar.Append(self.menuFile, self.translation(menuBar[0])) self.menuBar.Append(self.menuVer, self.translation(menuBar[1])) self.menuBar.Append(self.menuTool, self.translation(menuBar[2])) self.menuBar.Append(self.menuHelp, self.translation(menuBar[3])) #--Se adiciona la barra de menu al frame--# self.SetMenuBar(self.menuBar) if not posx: self.Centre() #--MENU ToolBar--# self._mgr = wx.aui.AuiManager() self._mgr.SetManagedWindow(self) #self.translationperspectives = [] self.n = 0 self.x = 0 self.toolBarIdef1x = wx.ToolBar(self, -1, wx.DefaultPosition, wx.DefaultSize, wx.TB_FLAT | wx.TB_NODIVIDER) self.toolBarIdef1x.SetToolBitmapSize((8, 8)) self.toolBarIdef1x.AddLabelTool(ID_PUNTERO_MOUSE, self.translation(archivo[ID_PUNTERO_MOUSE]), wx.Bitmap('images/Puntero.png')) self.toolBarIdef1x.AddLabelTool(ID_CREAR_ENTIDAD, self.translation(archivo[ID_CREAR_ENTIDAD]), wx.Bitmap('images/Entidad.png')) self.toolBarIdef1x.EnableTool(ID_CREAR_ENTIDAD, False) self.toolBarIdef1x.AddLabelTool(ID_RELACION_IDENTIF, self.translation(archivo[ID_RELACION_IDENTIF]), wx.Bitmap('images/R-identificadora.png'))
lovetox/gajim
src/search_window.py
Python
gpl-3.0
9,270
0.003344
# -*- coding: utf-8 -*- ## src/search_window.py ## ## Copyright (C) 2007 Stephan Erb <steve-e AT h3c.de> ## Copyright (C) 2007-2014 Yann Leboulanger <asterix AT lagaule.org> ## ## This file is part of Gajim. ## ## Gajim is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published ## by the Free Software Foundation; version 3 only. ## ## Gajim is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Gajim. If not, see <http://www.gnu.org/licenses/>. ## from gi.repository import GLib from gi.repository import Gtk from gi.repository import Gdk from common import gajim from common import dataforms from common import ged import gtkgui_helpers import dialogs import vcard import config import dataforms_widget class SearchWindow: def __init__(self, account, jid): """ Create new window """ # an account object self.account = account self.jid = jid # retrieving widgets from xml self.xml = gtkgui_helpers.get_gtk_builder('search_window.ui') self.window = self.xml.get_object('search_window') for name in ('label', 'progressbar', 'search_vbox', 'search_button', 'add_contact_button', 'information_button'): self.__dict__[name] = self.xml.get_object(name) self.search_button.set_sensitive(False) # displaying the window self.xml.connect_signals(self) self.window.show_all() self.request_form() self.pulse_id = GLib.timeout_add(80, self.pulse_callback) self.is_form = None # Is there a jid column in results ? if -1: no, else column number self.jid_column = -1 gajim.ged.register_event_handler('search-form-received', ged.GUI1, self._nec_search_form_received) gajim.ged.register_event_handler('search-result-received', ged.GUI1, self._nec_search_result_received) def request_form(self): gajim.connections[self.account].request_search_fields(self.jid) def pulse_callback(self): self.progressbar.pulse() return True def on_search_window_key_press_event(self, widget, event): if event.keyval == Gdk.KEY_Escape: self.window.destroy() def on_search_window_destroy(self, widget): if self.pulse_id: GLib.source_remove(self.pulse_id) del gajim.interface.instances[self.account]['search'][self.jid] gajim.ged.remove_event_handler('search-form-received', ged.GUI1, self._nec_search_form_received) gajim.ged.remove_event_handler('search-result-received', ged.GUI1, self._nec_search_result_received) def on_close_button_clicked(self, button): self.window.destroy() def on_search_button_clicked(self, button): if self.is_form: self.data_form_widget.data_form.type_ = 'submit' gajim.connections[self.account].send_search_form(self.jid, self.data_form_widget.data_form.get_purged(), True) else: infos = self.data_form_widget.get_infos() if 'instructions' in infos: del infos['instructions'] gajim.connections[self.account].send_search_form(self.jid, infos, False) self.search_vbox.remove(self.data_form_widget) self.progressbar.show() self.label.set_text(_('Waiting for results')) self.label.show() self.pulse_id = GLib.timeout_add(80, self.pulse_callback) self.search_button.hide() def on_add_contact_button_clicked(self, widget): (model, iter_) = self.result_treeview.get_selection().get_selected() if not iter_: return jid = model[iter_][self.jid_column] dialogs.AddNewContactWindow(self.account, jid) def on_information_button_clicked(self, 
widget): (model, iter_) = self.result_treeview.get_selection().get_selected() if not iter_: return jid = model[iter_][self.jid_column] if jid in gajim.interface.instances[self.account]['infos']: gajim.interface.instances[self.account]['infos'][jid].window.present() else: contact = gajim.contacts.create_contact(jid=jid, account=self.account) gajim.interface.instances[self.account]['infos'][jid] = \ vcard.VcardWindow(contact, self.account) def _nec_search_form_received(self, obj): if self.pulse_id: GLib.source_remove(self.pulse_id) self.progressbar.hide() self.label.hide() if obj.is_dataform: self.is_form = True self.data_form_widget = dataforms_widget.DataFormWidget() self.dataform = dataforms.ExtendForm(node=obj.data) self.data_form_widget.set_sensitive(True) try: self.data_form_widget.data_form = self.dataform except dataforms.Error: self.label.set_text(_('Error in received dataform')) self.label.show() return if self.data_form_widget.title: self.window.set_title('%s - Search - Gajim' % \ self.data_form_widget.title) else: self.is_form = False self.data_form_widget = config.FakeDataForm(obj.data) self.data_form_widget.show_all() self.search_vbox.pack_start(self.data_form_widget, True, True, 0) self.search_button.set_sensitive(True) def on_result_treeview_cursor_changed(self, treeview): if self.jid_column == -1: return (model, iter_) = treeview.get_selection().get_selected() if not iter_: return if model[iter_][self.jid_column]: self.add_contact_button.set_sensitive(True) self.information_button.set_sensitive(True) else: self.add_contact_button.set_sensitive(False) self.information_button.set_sensitive(False) def _nec_search_result_received(self, obj): if self.pulse_id: GLib.source_remove(self.pulse_id) self.progressbar.hide() self.label.hide() if not obj.is_dataform: if not obj.data: self.label.set_text(_('No result')) self.label.show() return # We suppose all items have the same fields sw = Gtk.ScrolledWindow() sw.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC) self.result_treeview = Gtk.TreeView() self.result_treeview.connect('cursor-changed', self.on_result_treeview_cursor_changed) sw.add(self.result_treeview) # Create model fieldtypes = [str]*len(obj.data[0]) model = Gtk.ListStore(*fieldtypes) # Copy data to model for item in obj.data: model.append(item.values()) # Create columns
counter = 0 for field in obj.data[0].keys(): self.result_treeview.append_column(Gtk.TreeViewColumn(field, Gtk.CellRendererText(), text=counter)) if field == 'jid': self.jid_column = counter counter += 1 self.result_treeview.set_model
(model) sw.show_all() self.search_vbox.pack_start(sw, True, True, 0) if self.jid_column > -1: self.add_contact_button.show() self.information_button.show() return self.dataform = dataforms.ExtendForm(node=obj.data) if len(self.dataform.items) == 0: # No result self.label.set_text(_('No result')) self.label.show() return self.data_form_widget.set_sensitive(Tr
mjirik/lisa
lisa/shape_model.py
Python
bsd-3-clause
8,801
0.011083
#! /usr/bin/env python # -*- coding: utf-8 -*- # vim:fenc=utf-8 # # Copyright © 2015 mjirik <mjirik@mjirik-Latitude-E6520> # # Distributed under terms of the MIT license. """ """ import numpy as np from loguru import logger # logger = logging.getLogger() import argparse from scipy import ndimage from . import qmisc class ShapeModel(): """ Cílem je dát dohromady vstupní data s různou velikostí a různou polohou objektu. Výstup je pak zapotřebí opět přizpůsobit libovolné velikosti a poloze objektu v obraze. Model je tvořen polem s velikostí definovanou v konstruktoru (self.shape). U modelu je potřeba brát v potaz polohu objektu. Ta je udávána pomocí crinfo. To je skupina polí s minimální a maximální hodnotou pro každou osu. Trénování je prováděno opakovaným voláním funkce train_one(). :param model_margin: stanovuje velikost okraje v modelu. Objekt bude ve výchozím nastavení vzdálen 0 px od každého okraje. """ def __init__(self, shape=[5, 5, 5]): """TODO: to be defined1. """ self.model = np.ones(shape) self.data_number = 0 self.model_margin = [0, 0, 0] pass def get_model(self, crinfo, image_shape): """ :param image_shape: Size of output image :param crinfo: Array with min and max index of object for each axis. [[minx, maxx], [miny, maxy], [minz, maxz]] """ # Průměrování mdl = self.model / self.data_number print(mdl.shape) print(crinfo) # mdl_res = imma.image.resize_to_shape(mdl, crinfo[0][] uncr = qmisc.uncrop(mdl, crinfo, image_shape, resize=True) return uncr def train_one(self, data,voxelSize_mm): """ Trenovani shape modelu data se vezmou a oriznou (jen jatra) na oriznuta data je aplikovo binarni otevreni - rychlejsi nez morphsnakes co vznikne je uhlazena cast ktera se odecte od puvodniho obrazu cimz vzniknou spicky orezany obraz se nasledne rozparceluje podle velikosti (shape) modelu pokud pocet voxelu v danem useku prekroci danou mez, je modelu prirazena nejaka hodnota. Meze jsou nasledujici: 0%-50% => 1 50%-75% => 2 75%-100% => 3 """ crinfo = qmisc.crinfo_from_specific_data(data, margin=self.model_margin) datacr = qmisc.crop(data, crinfo=crinfo) dataShape = self.model.shape datacrres = self.trainThresholdMap(datacr, voxelSize_mm, dataShape) self.model += datacrres self.data_number += 1 # Tady bude super kód pro trénování def train(self, data_arr): for data in data_arr: self.train_one(data) def objectThreshold(self,objekt,thresholds,values): ''' Objekt - 3d T/F pole thresholds = [0,0.5,0.75] zacina nulou values = [3,2,1] vrati hodnotu z values odpovidajici thresholds podle podilu True voxelu obsazenych v 3d poli zde napriklad 60% =>2, 80% => 1. 
''' bile = np.sum(objekt) velikost = objekt.shape velikostCelkem = 1.0 for x in velikost: velikostCelkem = velikostCelkem*x podil = bile/velikostCelkem #podil True voxelu #print podil #vybrani threshold final = 0 #vracena hodnota pomocny = 0 #pomocna promenna for threshold in thresholds: if(podil >= threshold ): final = values[pomocny] pomocny = pomocny+1 return final def rozdelData(self,crData,dataShape, nasobitel1=1,nasobitel2 = 2): ''' crData - vstupni data dataShape - velikost vraceneho pole volte 0<nasobitel1 < nasobitel2, vysvetleni nasleduje: rozdeli pole crData na casti vrati pole rozmeru dataShape vysledne hodnoty pole jsou urceny funkci objectThreshold(object,thresholds,values) intervaly prirazeni values [1-3] jsou nasledujici: [0-prumer*nasobitel1],[prumer*nasobitel1-prumer*nasobitel2],[prumer*nasobitel2 a vice] ''' 'vypocet prumerneho podilu bilych voxelu' bile = np.sum(crData) velikost = crData.shape velikostCelkem = 1.0 for x in velikost: velikostCelkem = velikostCelkem*x podil = bile/velikostCelkem #prumerny podil True voxelu thresholds = [0,nasobitel1*podil,nasobitel2*podil] values = [3,2,1] 'vybrani voxelu a vytvoreni objektu' velikostDat = crData.shape voxelySmer = [0,0,0] vysledek = np.zeros(dataShape) for poradi in range(3): voxelySmer[poradi] = velikostDat[poradi]/dataShape[poradi]
for x in range(dataShape[0]): for y in range(dataShape[1]): for z in range(dataShape[2]):
xStart = x * voxelySmer[0] xKonec = xStart + voxelySmer[0] yStart = y * voxelySmer[1] yKonec = yStart + voxelySmer[1] zStart = z * voxelySmer[2] zKonec = zStart + voxelySmer[2] objekt = crData[ int(xStart):int(xKonec), int(yStart):int(yKonec), int(zStart):int(zKonec) ] vysledek[x,y,z] = self.objectThreshold(objekt,thresholds,values) return vysledek def vytvorKouli3D(self,voxelSize_mm,polomer_mm): '''voxelSize:mm = [x,y,z], polomer_mm = r Vytvari kouli v 3d prostoru postupnym vytvarenim kruznic podel X (prvni) osy. Predpokladem spravnosti funkce je ze Y a Z osy maji stejne rozliseni funkce vyuziva pythagorovu vetu''' print('zahajeno vytvareni 3D objektu') x = voxelSize_mm[0] y = voxelSize_mm[1] z = voxelSize_mm[2] xVoxely = int(np.ceil(polomer_mm/x)) yVoxely = int(np.ceil(polomer_mm/y)) zVoxely = int( np.ceil(polomer_mm/z)) rozmery = [xVoxely*2+1,yVoxely*2+1,yVoxely*2+1] xStred = xVoxely konec = yVoxely*2+1 koule = np.zeros(rozmery) #pole kam bude ulozen vysledek for xR in range(xVoxely*2+1): if(xR == xStred): print('3D objekt z 50% vytvoren') c = polomer_mm #nejdelsi strana a = (xStred-xR )*x vnitrek = (c**2-a**2) b = 0.0 if(vnitrek > 0): b = np.sqrt((c**2-a**2))#pythagorova veta b je v mm rKruznice = float(b)/float(y) if(rKruznice == np.NAN): continue #print rKruznice #osetreni NAN kruznice = self.vytvoritTFKruznici(yVoxely,rKruznice) koule[xR,0:konec,0:konec] = kruznice[0:konec,0:konec] print('3D objekt uspesne vytvoren') return koule def vytvoritTFKruznici(self,polomerPole,polomerKruznice): '''vytvori 2d pole velikosti 2xpolomerPole+1 s kruznici o polomeru polomerKruznice uprostred ''' radius = polomerPole r2 = np.arange(-radius, radius+1)**2 dist2 = r2[:, None] + r2 vratit = (dist2 <= polomerKruznice**2).astype(np.int) return vratit def trainThresholdMap(self,data3d,voxelSize,dataShape): structure = self.vytvorKouli3D(voxelSize, 5) smoothed = ndimage.binary_opening(data3d, structure, 3) spicky = smoothed != data3d vysledek = self.rozdelData(spicky,dataShape) return vysledek def main(): # logger = logging.getLogger() logger.setLevel(logging.DEBUG) ch = logging.StreamHandler() logger.addHandler(ch) # create file handler which logs even debug messages # fh = logging.FileHandler('log.txt') # fh.setLevel(logging.DEBUG) # formatter = logging.Formatter( # '%(asctime)s - %(nam
Kemaweyan/django-content-gallery
content_gallery_testapp/testapp/migrations/0002_auto_20170618_1457.py
Python
bsd-3-clause
475
0.002105
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-06-18 14:57
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('testapp', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='cat',
            name='sex',
            field=models.CharField(choices=[('F', 'Female'), ('M', 'Male')], max_length=1),
        ),
    ]
DarioGT/OMS-PluginXML
org.modelsphere.sms/lib/jython-2.2.1/Lib/dospath.py
Python
gpl-3.0
10,452
0.003062
"""Common operations on DOS pathnames.""" import os import stat __all__ = ["normcase","isabs","join","splitdrive","split","splitext", "basename","dirname","commonprefix","getsize","getmtime", "getatime","islink","exists","isdir","isfile","ismount", "walk","expanduser","expandvars","normpath","abspath","realpath"] def normcase(s): """Normalize the case of a pathname. On MS-DOS it maps the pathname to lowercase, turns slashes into backslashes. Other normalizations (such as optimizing '../' away) are not allowed (this is done by normpath). Previously, this version mapped invalid consecutive characters to a single '_', but this has been removed. This functionality should possibly be added as a new function.""" return s.replace("/", "\\").lower() def isabs(s): """Return whether a path is absolute. Trivial in Posix, harder on the Mac or MS-DOS. For DOS it is absolute if it starts with a slash or backslash (current volume), or if a pathname after the volume letter and colon starts with a slash or backslash.""" s = splitdrive(s)[1] return s != '' and s[:1] in '/\\' def join(a, *p): """Join two (or more) paths.""" path = a for b in p: if isabs(b): path = b elif path == '' or path[-1:] in '/\\:': path = path + b else: path = path + "\\" + b return path def splitdrive(p): """Split a path into a drive specification (a drive letter followed by a colon) and path specification. It is always true that drivespec + pathspec == p.""" if p[1:2] == ':': return p[0:2], p[2:] return '', p def split(p): """Split a path into head (everything up to the last '/') and tail (the rest). After the trailing '/' is stripped, the invariant join(head, tail) == p holds. The resulting head won't end in '/' unless it is the root.""" d, p = splitdrive(p) # set i to index beyond p's last slash i = len(p) while i and p[i-1] not in '/\\': i = i - 1 head, tail = p[:i], p[i:] # now tail has no slashes # remove trailing slashes from head, unless it's all slashes head2 = head while head2 and head2[-1] in '/\\': head2 = head2[:-1] head = head2 or head return d + head, tail def splitext(p): """Split a path into root and extension. The extension is everything starting at the first dot in the last pathname component; the root is everything before that. It is always true that root + ext == p.""" root, ext = '', '' for c in p: if c in '/\\': root, ext = root + ext + c, '' elif c == '.' or ext: ext = ext + c else: root = root + c return root, ext def basename(p): """Return the tail (basename) part of a path.""" return split(p)[1] def dirname(p): """Return the head (dirname) part of a path.""" return split(p)[0] def commonprefix(m): """Return the longest prefix of all list elements.""" if not m: return '' prefix = m[0] for item in m: for i in range(len(prefix)): if prefix[:i+1] != item[:i+1]: prefix = prefix[:i] if i == 0: return '' break return prefix # Get size, mtime, atime of files. def getsize(filename): """Return the size of a file, reported by os.stat().""" st = os.stat(filename) return st[stat.ST_SIZE] def getmtime(filename): """Return the last modification time of a file, reported by os.stat().""" st = os.stat(filename) return st[stat.ST_MTIME] def getatime(filename): """Return the last access time of a file, reported by os.stat().""" st = os.stat(filename) return st[stat.ST_ATIME] def islink(path): """Is a path a symbolic link? This will always return false on systems where posix.lstat doesn't exist.""" return 0 def exists(path): """Does a path exist? 
This is false for dangling symbolic links.""" try: st = os.stat(path) except os.error: return 0 return 1 def isdir(path): """Is a path a dos directory?""" try: st = os.stat(path) except os.error: return 0 return stat.S_ISDIR(st[stat.ST_MODE]) def isfile(path): """Is a path a regular file?""" try: st = os.stat(path) except os.error: return 0 return stat.S_ISREG(st[stat.ST_MODE]) def ismount(path): """Is a path a mount point?""" # XXX This degenerates in: 'is this the root?' on DOS return isabs(splitdrive(path)[1]) def walk(top, func, arg): """Directory tree walk with callback function. For each directory in the directory tree rooted at top (including top itself, but excluding '.' and '..'), call func(arg, dirname, fnames). dirname is the name of the directory, and fnames a list of the names of the files and subdirectories in dirname (excluding '.' and '..'). func may modify the fnames list in-place (e.g. via del or slice assignment), and walk will only recurse into the subdirectories whose names remain in fnames; this can be used to implement a filter, or to impose a specific order of visiting. No semantics are defined for, or required of, arg, beyond that arg is always passed to func. It can be used, e.g., to pass a filename pattern, or a mutable object designed to accumulate statistics. Passing None for arg is common.""" try: names = os.listdir(top) except os.error: return func(arg, top, names) exceptions = ('.', '..') for name in names: if name not in exceptions: name = join(top, name) if isdir(name): walk(name, func, arg) def expanduser(path): """Expand paths beginning with '~' or '~user'. '~' means $HOME; '~user' means that user's home directory. If the path doesn't begin with '~', or if the user or $HOME is unknown, the path is returned unchanged (leaving error repor
ting to whatever function is called with the expanded path as argument). See also module 'glob' for expansion of *, ? and [...] in pathnames. (A function should also be defined to do full *sh-style envi
ronment variable expansion.)""" if path[:1] != '~': return path i, n = 1, len(path) while i < n and path[i] not in '/\\': i = i+1 if i == 1: if not os.environ.has_key('HOME'): return path userhome = os.environ['HOME'] else: return path return userhome + path[i:] def expandvars(path): """Expand paths containing shell variable substitutions. The following rules apply: - no expansion within single quotes - no escape character, except for '$$' which is translated into '$' - ${varname} is accepted. - varnames can be made out of letters, digits and the character '_'""" # XXX With COMMAND.COM you can use any characters in a variable name, # XXX except '^|<>='. if '$' not in path: return path import string varchars = string.ascii_letters + string.digits + "_-" res = '' index = 0 pathlen = len(path) while index < pathlen: c = path[index] if c == '\'': # no expansion within single quotes path = path[index + 1:] pathlen = len(path) try: index = path.index('\'') res = res + '\'' + path[:index + 1] except ValueError: res = res + path index = pathlen -1 elif c == '$': # variable or '$$' if path[index + 1:index + 2] == '$': res = res + c index = index + 1 elif path[index + 1:index + 2] == '{': path = path[
blrm/openshift-tools
scripts/monitoring/cron-send-filesystem-metrics.py
Python
apache-2.0
5,045
0.006938
#!/usr/bin/env python ''' Command to send dynamic filesystem information to Zagg ''' # vim: expandtab:tabstop=4:shiftwidth=4 # # Copyright 2015 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # #This is not a module, but pylint thinks it is. This is a command. #pylint: disable=invalid-name,import-error import argparse import re from openshift_tools.monitoring.metric_sender import MetricSender from openshift_tools.monitoring import pminfo def parse_args(): """ parse the args from the cli """ parser = argparse.ArgumentParser(description='Disk metric sender') parser.add_argument('-v', '--verbose', action='store_true', default=None, help='Verbose?') parser.add_argument('--debug', action='store_true', default=None, help='Debug?') parser.add_argument('--filter-pod-pv', action='store_true', default=None, help="Filter out OpenShift Pod PV mounts") parser.add_argument('--force-send-zeros', action='store_true', default=None, help="Send 0% full for mounts, useful for clearing existing bad alerts") return parser.parse_args() def filter_out_key_name_chars(metric_dict, filesystem_filter): """ Simple filter to elimate unnecessary characters in the key name """ filtered_dict = {k.replace(filesystem_filter, ''):v for (k, v) in metric_dict.iteritems() } return filtered_dict def filter_out_container_root(metric_dict): """ Simple filter to remove the container root FS info """ container_root_regex = r'^/dev/mapper/docker-\d+:\d+-\d+-[0-9a-f]+$' filtered_dict = {k: v for (k, v) in metric_dict.iteritems() if not re.match(container_root_regex, k) } return filtered_dict def filter_out_customer_pv_filesystems(metric_dict): """ Remove customer PVs from list """ r = re.compile("^/dev/(?:xvd[a-z]{2}|nvme(?:[2-9].*|\d{2,}.*))$") # filter out xvda{2} (???) 
and nvme devices past 2 return { k:v for (k, v) in metric_dict.iteritems() if not r.match(k) } def zero_mount_percentages(metric_dict): """ Make all mounts report 0% used """ return { k:0 for (k, v) in metric_dict.iteritems() } def main(): """ Main function to run the check """ args = parse_args() metric_sender = MetricSender(verbose=args.verbose, debug=args.debug) filesys_full_metric = ['filesys.full'] filesys_inode_derived_metrics = {'filesys.inodes.pused' : 'filesys.usedfiles / (filesys.usedfiles + filesys.freefiles) * 100' } discovery_key_fs = 'disc.filesys' item_prototype_macro_fs = '#OSO_FILESYS' item_prototype_key_full = 'disc.filesys.full' item_prototype_key_inode = 'disc.filesys.inodes.pused' # Get the disk space filesys_full_metrics = pminfo.get_metrics(filesys_full_metric) filtered_filesys_metrics = filter_out_key_name_chars(filesys_full_metrics, 'filesys.full.') filtered_filesys_metrics = filter_out_container_root(filtered_filesys_metrics) if args.filter_pod_pv: filtered_filesys_metrics = filter_out_customer_pv_filesystems(filtered_filesys_metrics) if args.force_send_zeros: filtered_filesys_metrics = zero_mount_percentages(filtered_filesys_metrics) metric_sender.add_dynamic_metric(discovery_key_fs, item_prototype_macro_fs, filtered_filesys_metrics.keys()) for filesys_name, filesys_full in filtered_filesys_metrics.iteritems(): metric_sender.add_metric({'%s[%s]' % (item_prototype_key_full, filesys_name): filesys_full}) # Get filesytem inode metrics filesys_inode_metrics = pminfo.get_metrics(derived_metrics=filesys_inode_derived_metrics) filtered_filesys_inode_metrics = filter_out_key_name_chars(filesys_inode_metrics, 'filesys.in
odes.pused.') filtered_filesys_inode_metrics = filter_out_container_root(filtered_filesys_inode_metrics) if args.filter_pod_pv: filtered_filesys_inode_metrics = filter_out_customer_pv_filesystems(filtered_filesys_inode_metrics) i
f args.force_send_zeros: filtered_filesys_inode_metrics = zero_mount_percentages(filtered_filesys_inode_metrics) for filesys_name, filesys_inodes in filtered_filesys_inode_metrics.iteritems(): metric_sender.add_metric({'%s[%s]' % (item_prototype_key_inode, filesys_name): filesys_inodes}) metric_sender.send_metrics() if __name__ == '__main__': main()
tensorflow/io
tests/test_dicom.py
Python
apache-2.0
7,159
0.000559
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy of # the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. # ============================================================================== """Tests for DICOM.""" import os import numpy as np import pytest import tensorflow as tf import tensorflow_io as tfio # The DICOM sample files must be downloaded befor running the tests # # To download the DICOM samples: # $ bash dicom_samples.sh download # $ bash dicom_samples.sh extract # # To remopve the DICOM samples: # $ bash dicom_samples.sh clean_dcm # # To remopve all the downloaded files: # $ bash dicom_samples.sh clean_all def test_dicom_input(): """test_dicom_input""" _ = tfio.image.decode_dicom_data _ = tfio.image.decode_dicom_image _ = tfio.image.dicom_tags @pytest.mark.parametrize( "fname, exp_shape", [ ("OT-MONO2-8-colon.dcm", (1, 512, 512, 1)), ("CR-MONO1-10-chest.dcm", (1, 440, 440, 1)), ("CT-MONO2-16-ort.dcm", (1, 512, 512, 1)), ("MR-MONO2-16-head.dcm", (1, 256, 256, 1)), ("US-RGB-8-epicard.dcm", (1, 480, 640, 3)), ("CT-MONO2-8-abdo.dcm", (1, 512, 512, 1)), ("MR-MONO2-16-knee.dcm", (1, 256, 256, 1)), ("OT-MONO2-8-hip.dcm", (1, 512, 512, 1)), ("US-RGB-8-esopecho.dcm", (1, 120, 256, 3)), ("CT-MONO2-16-ankle.dcm", (1, 512, 512, 1)), ("MR-MONO2-12-an2.dcm", (1, 256, 256, 1)), ("MR-MONO2-8-16x-heart.dcm", (16, 256, 256, 1)), ("OT-PAL-8-face.dcm", (1, 480, 640, 3)), ("XA-MONO2-8-12x-catheter.dcm", (12, 512, 512, 1)), ("CT-MONO2-16-brain.dcm", (1, 512, 512, 1)), ("NM-MONO2-16-13x-heart.dcm", (13, 64, 64, 1)), ("US-MONO2-8-8x-execho.dcm", (8, 120, 128, 1)), ("CT-MONO2-16-chest.dcm", (1, 400, 512, 1)), ("MR-MONO2-12-shoulder.dcm", (1, 1024, 1024, 1)), ("OT-MONO2-8-a7.dcm", (1, 512, 512, 1)), ("US-PAL-8-10x-echo.dcm", (10, 430, 600, 3)), ("TOSHIBA_J2K_OpenJPEGv2Regression.dcm", (1, 512, 512, 1)), ], ) def test_decode_dicom_image(fname, exp_shape): """test_decode_dicom_image""" dcm_path = os.path.join( os.path.dirname(os.path.abspath(__file__)), "test_dicom", fname ) file_contents = tf.io.read_file(filename=dcm_path) dcm_image = tfio.image.decode_dicom_image( contents=file_contents, dtype=tf.float
32, on_error="strict", scale="auto", color_dim=True, ) assert dcm_image.numpy().shape == exp_shape @pytest.mark.parametrize( "fname, tag, exp_value", [ ( "OT-MONO2-8-colon.dcm", tfio.image.dicom_tags.StudyInstanceUID, b"1.3.46.670589.17.1.7.1.1.16", ), ("OT-MONO2-8-colon.dcm", tfio.image.
dicom_tags.Rows, b"512"), ("OT-MONO2-8-colon.dcm", tfio.image.dicom_tags.Columns, b"512"), ("OT-MONO2-8-colon.dcm", tfio.image.dicom_tags.SamplesperPixel, b"1"), ( "US-PAL-8-10x-echo.dcm", tfio.image.dicom_tags.StudyInstanceUID, b"999.999.3859744", ), ( "US-PAL-8-10x-echo.dcm", tfio.image.dicom_tags.SeriesInstanceUID, b"999.999.94827453", ), ("US-PAL-8-10x-echo.dcm", tfio.image.dicom_tags.NumberofFrames, b"10"), ("US-PAL-8-10x-echo.dcm", tfio.image.dicom_tags.Rows, b"430"), ("US-PAL-8-10x-echo.dcm", tfio.image.dicom_tags.Columns, b"600"), ], ) def test_decode_dicom_data(fname, tag, exp_value): """test_decode_dicom_data""" dcm_path = os.path.join( os.path.dirname(os.path.abspath(__file__)), "test_dicom", fname ) file_contents = tf.io.read_file(filename=dcm_path) dcm_data = tfio.image.decode_dicom_data(contents=file_contents, tags=tag) assert dcm_data.numpy() == exp_value def test_dicom_image_shape(): """test_decode_dicom_image""" dcm_path = os.path.join( os.path.dirname(os.path.abspath(__file__)), "test_dicom", "US-PAL-8-10x-echo.dcm", ) dataset = tf.data.Dataset.from_tensor_slices([dcm_path]) dataset = dataset.map(tf.io.read_file) dataset = dataset.map(lambda e: tfio.image.decode_dicom_image(e, dtype=tf.uint16)) dataset = dataset.map(lambda e: tf.image.resize(e, (224, 224))) def test_dicom_image_concurrency(): """test_decode_dicom_image_currency""" @tf.function def preprocess(dcm_content): tags = tfio.image.decode_dicom_data( dcm_content, tags=[tfio.image.dicom_tags.PatientsName] ) tf.print(tags) image = tfio.image.decode_dicom_image(dcm_content, dtype=tf.float32) return image dcm_path = os.path.join( os.path.dirname(os.path.abspath(__file__)), "test_dicom", "TOSHIBA_J2K_OpenJPEGv2Regression.dcm", ) dataset = ( tf.data.Dataset.from_tensor_slices([dcm_path]) .repeat() .map(tf.io.read_file) .map(preprocess, num_parallel_calls=8) .take(200) ) for i, item in enumerate(dataset): print(tf.shape(item), i) assert np.array_equal(tf.shape(item), [1, 512, 512, 1]) dcm_path = os.path.join( os.path.dirname(os.path.abspath(__file__)), "test_dicom", "US-PAL-8-10x-echo.dcm", ) dataset = ( tf.data.Dataset.from_tensor_slices([dcm_path]) .repeat() .map(tf.io.read_file) .map(preprocess, num_parallel_calls=8) .take(200) ) for i, item in enumerate(dataset): print(tf.shape(item), i) assert np.array_equal(tf.shape(item), [10, 430, 600, 3]) def test_dicom_sequence(): """test_decode_dicom_sequence""" dcm_path = os.path.join( os.path.dirname(os.path.abspath(__file__)), "test_dicom", "2.25.304589190180579357564631626197663875025.dcm", ) dcm_content = tf.io.read_file(filename=dcm_path) tags = tfio.image.decode_dicom_data( dcm_content, tags=["[0x0008,0x1115][0][0x0008,0x1140][0][0x0008,0x1155]"] ) assert np.array_equal(tags, [b"2.25.211904290918469145111906856660599393535"]) dcm_path = os.path.join( os.path.dirname(os.path.abspath(__file__)), "test_dicom", "US-PAL-8-10x-echo.dcm", ) dcm_content = tf.io.read_file(filename=dcm_path) tags = tfio.image.decode_dicom_data(dcm_content, tags=["[0x0020,0x000E]"]) assert np.array_equal(tags, [b"999.999.94827453"]) tags = tfio.image.decode_dicom_data(dcm_content, tags=["0x0020,0x000e"]) assert np.array_equal(tags, [b"999.999.94827453"]) if __name__ == "__main__": test.main()
mjsottile/PyOpinionGame
driver_alpha_tau_study.py
Python
gpl-3.0
2,715
0.003315
# test driver to verify that new version of code works

import opiniongame.config as og_cfg
import opiniongame.IO as og_io
import opiniongame.coupling as og_coupling
import opiniongame.state as og_state
import opiniongame.opinions as og_opinions
import opiniongame.adjacency as og_adj
import opiniongame.selection as og_select
import opiniongame.potentials as og_pot
import opiniongame.core as og_core
import opiniongame.stopping as og_stop

import numpy as np

#
# process command line
#
cmdline = og_cfg.CmdLineArguments()
cmdline.printOut()

#
# load configuration
#
# TODO: add option to generate defaults and save to file
# TODO: interpret args to get filename if specified on cmd line
config = og_cfg.staticParameters()
config.readFromFile('staticParameters.cfg')
config.threshold = 0.01
config.printOut()

#
# seed PRNG: must do this before any random numbers are
# ever sampled during default generation
#
print("SEEDING PRNG: "+str(config.startingseed))
np.random.seed(config.startingseed)

state = og_state.WorldState.fromCmdlineArguments(cmdline, config)

#
# run
#
tau_list = np.arange(0.45, 0.9, 0.01)
alpha_list = np.arange(0.05, 0.25, 0.01)
numalphas = len(alpha_list)
numtaus = len(tau_list)
numvars = 3
resultMatrix = np.zeros((numalphas, numtaus, numvars))

for (i, alpha) in enumerate(alpha_list):
    config.learning_rate = alpha
    print("")
    for (j, tau) in enumerate(tau_list):
        print((alpha, tau))

        #
        # functions for use by the simulation engine
        #
        ufuncs = og_cfg.UserFunctions(og_select.FastPairSelection,
                                      og_stop.totalChangeStop,
                                      og_pot.createTent(tau))

        polarized = 0
        notPolarized = 0
        aveIters = 0
        for k in range(100):
            state = og_core.run_until_convergence(config, state, ufuncs)
            results = og_opinions.isPolarized(state.history[-1], 0.05)
            for result in results:
                if result:
                    polarized += 1
                else:
                    notPolarized += 1
            aveIters += state.iterCount
            state.reset()
            state.initialOpinions = og_opinions.initialize_opinions(config.popSize, config.ntopics)

        # maybe you want to do Consensus and nonConsensus. Finding consensus is easier!
        # assuming pop_size = 20, ten people at 1, nine people at 0 and one person
        # at 0.5 will be polarization, but, still ...
        resultMatrix[i][j][0] = polarized
        resultMatrix[i][j][1] = notPolarized
        resultMatrix[i][j][2] = aveIters/100.0

rdict = {}
rdict['results'] = resultMatrix
og_io.saveMatrix('output.mat', rdict)
kfoss/neon
neon/backends/tests/test_cc2_tensor.py
Python
apache-2.0
3,502
0
#!/usr/bin/env python # ---------------------------------------------------------------------------- # Copyright 2014 Nervana Systems Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ---------------------------------------------------------------------------- from nose.plugins.attrib import attr from nose.tools import nottest import numpy as np from neon.util.testing import assert_tensor_equal @attr('cuda') class TestGPUTensor(object): def setup(self): from neon.backends.cc2 i
mport GPUTensor self.gpt = GPUTensor def test_empty_creation(self): tns = self.gpt([]) expected_shape = (0, ) while len(expected_shape) < tns._min_dims: expected_shape += (1, ) assert tns.shape == expected_shape def test_1d_creation(self): tns = self.gpt([1, 2, 3, 4
]) expected_shape = (4, ) while len(expected_shape) < tns._min_dims: expected_shape += (1, ) assert tns.shape == expected_shape def test_2d_creation(self): tns = self.gpt([[1, 2], [3, 4]]) expected_shape = (2, 2) while len(expected_shape) < tns._min_dims: expected_shape += (1, ) assert tns.shape == expected_shape def test_2d_ndarray_creation(self): tns = self.gpt(np.array([[1.5, 2.5], [3.3, 9.2], [0.111111, 5]])) assert tns.shape == (3, 2) @nottest # TODO: add >2 dimension support to cudanet def test_higher_dim_creation(self): shapes = ((1, 1, 1), (1, 2, 3, 4), (1, 2, 3, 4, 5, 6, 7)) for shape in shapes: tns = self.gpt(np.empty(shape)) assert tns.shape == shape def test_str(self): tns = self.gpt([[1, 2], [3, 4]]) assert str(tns) == "[[ 1. 2.]\n [ 3. 4.]]" def test_scalar_slicing(self): tns = self.gpt([[1, 2], [3, 4]]) res = tns[1, 0] assert res.shape == (1, 1) assert_tensor_equal(res, self.gpt([[3]])) def test_range_slicing(self): tns = self.gpt([[1, 2], [3, 4]]) res = tns[0:2, 0] assert res.shape == (2, 1) assert_tensor_equal(res, self.gpt([1, 3])) @nottest # TODO: add scalar assignment to self.gpt class def test_scalar_slice_assignment(self): tns = self.gpt([[1, 2], [3, 4]]) tns[1, 0] = 9 assert_tensor_equal(tns, self.gpt([[1, 2], [9, 4]])) def test_asnumpyarray(self): tns = self.gpt([[1, 2], [3, 4]]) res = tns.asnumpyarray() assert isinstance(res, np.ndarray) assert_tensor_equal(res, np.array([[1, 2], [3, 4]])) @nottest # TODO: fix this for self.gpt def test_transpose(self): tns = self.gpt([[1, 2], [3, 4]]) res = tns.transpose() assert_tensor_equal(res, self.gpt([[1, 3], [2, 4]])) def test_fill(self): tns = self.gpt([[1, 2], [3, 4]]) tns.fill(-9.5) assert_tensor_equal(tns, self.gpt([[-9.5, -9.5], [-9.5, -9.5]]))
thomasleese/smartbot-old
smartbot/utils/web.py
Python
mit
1,353
0
import lxml

import requests


def requests_session():
    """
    Get a suitable requests session for use in SmartBot.

    In particular, this sets the `User-Agent` header to the value of
    'SmartBot'.
    """
    session = requests.Session()
    session.headers.update({"User-Agent": "SmartBot"})
    return session


def _check_content_type(response, content_type="text/html"):
    return response.headers.get("Content-Type", "").startswith(content_type)


def get_title(url):
    """Get the title of a website."""
    try:
        page = requests_session().get(url, timeout=5, stream=True)
        if page.status_code == 200 and _check_content_type(page):
            try:
                tree = lxml.html.fromstring(page.text)
            except ValueError:  # lxml seems to have issues with unicode
                tree = lxml.html.fromstring(page.content)
            title = tree.cssselect("title")[0].text_content()
            return title.strip().replace("\n", "").replace("\r", "")
    except requests.exceptions.Timeout:
        return "Timeout!"
    except IndexError:  # no title element
        return "No title."


def sprunge(data):
    """Upload the data to `sprunge.us` (a popular plain-text paste bin)."""
    payload = {"sprunge": data}
    page = requests_session().post("http://sprunge.us", data=payload)
    return page.text
lootr/netzob
netzob/src/netzob/Model/Vocabulary/Domain/Parser/VariableParserPath.py
Python
gpl-3.0
5,916
0.007277
#-*- coding: utf-8 -*- #+---------------------------------------------------------------------------+ #| 01001110 01100101 01110100 01111010 01101111 01100010 | #| | #| Netzob : Inferring communication protocols | #+---------------------------------------------------------------------------+ #| Copyright (C) 2011-2017 Georges Bossert and Frédéric Guihéry | #| This program is free software: you can redistribute it and/or modify | #| it under the terms of the GNU General Public License as published by | #| the Free Software Foundation, either version 3 of the License, or | #| (at your option) any later version. | #| | #| This program is distributed in the hope that it will be useful, | #| but WITHOUT ANY WARRANTY; without even the implied warranty of | #| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | #| GNU General Public License for more details. | #| | #| You should have received a copy of the GNU General Public License | #| along with this program. If not, see <http://www.gnu.org/licenses/>. | #+---------------------------------------------------------------------------+ #| @url : http://www.netzob.org | #| @contact : contact@netzob.org | #| @sponsors : Amossys, http://www.amossys.fr | #| Supélec, http://www.rennes.supelec.fr/ren/rd/cidre/ | #+---------------------------------------------------------------------------+ #+---------------------------------------------------------------------------+
#| File contributors : | #| - Georges Bossert <georges.bossert (a) supelec.fr> | #| - Frédéric Guihéry <frederic.guihery (a) amossys.fr> | #+------------------------------------------------
---------------------------+ #+---------------------------------------------------------------------------+ #| Standard library imports | #+---------------------------------------------------------------------------+ import uuid #+---------------------------------------------------------------------------+ #| Related third party imports | #+---------------------------------------------------------------------------+ #+---------------------------------------------------------------------------+ #| Local application imports | #+---------------------------------------------------------------------------+ from netzob.Common.Utils.Decorators import NetzobLogger from netzob.Common.Utils.Decorators import typeCheck from netzob.Model.Vocabulary.Domain.Variables.AbstractVariable import AbstractVariable from netzob.Model.Vocabulary.Domain.Parser.VariableParserResult import VariableParserResult @NetzobLogger class VariableParserPath(object): """This class denotes one parsing result of a variable against a specified content """ def __init__(self, variableParser, consumedData, remainingData, originalVariableParserPath=None): self.name = str(uuid.uuid4()) self.consumedData = consumedData self.remainingData = remainingData self.variableParser = variableParser self.memory = self.variableParser.memory.duplicate() self.originalVariableParserPath = originalVariableParserPath self.variableParserResults = [] if originalVariableParserPath is not None: self.variableParserResults.extend( originalVariableParserPath.variableParserResults) def getValueToParse(self, variable): """Returns the value that is assigned to the specified variable""" def createVariableParserResult(self, variable, parserResult, consumedData, remainedData): variableParserResult = VariableParserResult(variable, parserResult, consumedData, remainedData) if parserResult: self._logger.debug("New parser result attached to path {0}: {1}". format(self, variableParserResult)) self.remainingData = variableParserResult.remainedData if self.consumedData is None: self._logger.debug("consumed is none...") self.consumedData = variableParserResult.consumedData else: self.consumedData.extend(variableParserResult.consumedData) else: self._logger.debug("creation of an invalid parser result.") self.variableParserResults.append(variableParserResult) self._logger.debug( "After registering new VariablePathResult, Path is {0}".format( self)) def __str__(self): return "Path {0} (consumedData={1}, remainingData={2}".format( self.name, self.consumedData, self.remainingData) @property def consumedData(self): return self.__consumedData @consumedData.setter def consumedData(self, consumedData): self.__consumedData = consumedData @property def memory(self): return self.__memory @memory.setter def memory(self, memory): if memory is None: raise Exception("Memory cannot be None") self.__memory = memory
alexkolar/home-assistant
homeassistant/config.py
Python
mit
5,349
0
""" homeassistant.config ~~~~~~~~~~~~~~~~~~~~ Module to help with parsing and generating configuration files. """ import logging import os from homeassistant.exceptions import HomeAssistantError from homeassistant.const import ( CONF_LATITUDE, CONF_LONGITUDE, CONF_TEMPERATURE_UNIT, CONF_NAME, CONF_TIME_ZONE) import homeassistant.util.location as loc_util _LOGGER = logging.getLogger(__name__) YAML_CONFIG_FILE = 'configuration.yaml' CONFIG_DIR_NAME = '.homeassistant' DEFAULT_CONFIG = ( # Tuples (attribute, default, auto detect property, description) (CONF_NAME, 'Home', None, 'Name of the location where Home Assistant is ' 'running'), (CONF_LATITUDE, None, 'latitude', 'Location required to calculate the time' ' the sun rises and sets'), (CONF_LONGITUDE, None, 'longitude', None), (CONF_TEMPERATURE_UNIT, 'C', None, 'C for Celcius, F for Fahrenheit'), (CONF_TIME_ZONE, 'UTC', 'time_zone', 'Pick yours from here: http://en.wiki' 'pedia.org/wiki/List_of_tz_database_time_zones'), ) DEFAULT_COMPONENTS = { 'introduction': 'Show links to resources in log and frontend', 'frontend': 'Enables the frontend', 'discovery': 'Discover some devices automatically', 'conversation': 'Allows you to issue voice commands from the frontend', 'history': 'Enables support for tracking state changes over time.', 'logbook': 'View all events in a logbook', 'sun': 'Track the sun', } def get_default_config_dir(): """ Put together the default configuration directory based on OS. """ data_dir = os.getenv('APPDATA') if os.name == "nt" \ else os.path.expanduser('~') return os.path.join(data_dir, CONFIG_DIR_NAME) def ensure_config_exists(config_dir, detect_location=True): """ Ensures a config file exists in given config dir. Creating a default one if needed. Returns path to the config file. """ config_path = find_config_file(config_dir) if config_path is None: print("Unable to find configuration. Creating default one in", config_dir) config_path = create_default_config(config_dir, detect_location) return config_path def create_default_config(config_dir, detect_location=True): """ Creates a default configuration file in given config dir. Returns path to new config file if success, None if failed. """ config_path = os.path.join(config_dir, YAML_CONFIG_FILE) info = {attr: default for attr, default, *_ in DEFAULT_CONFIG} location_info = detect_location and loc_util.detect_location_info() if location_info: if location_info.use_fahrenheit: info[CONF_TEMPERATURE_UNIT] = 'F' for attr, default, prop, _ in DEFAULT_CONFIG: if prop is None: continue info[attr] = getattr(location_info, prop) or default # Writing files with YAML does not create the most human readable results # So we're hard coding a YAML template. try: with open(config_path, 'w') as config_file: config_file.write("homeassistant:\n") for attr, _, _, description in DEFAULT_CONFIG: if info[attr] is None: continue elif description: config_file.write(" # {}\n".format(description)) config_file.write(" {}: {}\n".format(attr, info[attr])) config_file.write("\n") for component, description in DEFAULT_COMPONENTS.items(): config_file.write("# {}\n".format(description)) config_file.write("{}:\n\n".format(component)) return config_path except IOError: print('Unable to create default configuration file', config_path) return None def find_config_file(config_dir): """ Looks in given directory for supported config files. 
""" config_path = os.path.join(config_dir, YAML_CONFIG_FILE) return config_path if os.path.isfile(config_path) else None def load_config_file(config_path): """ Loads given config file. """ return load_yaml_config_file(config_path) def load_yaml_config_file(config_path): """ Parse a YAML configuration file. """ import yaml def parse(fname): """ Parse a YAML file. """ try: with open(fname, encoding='utf-8') as conf_file: # If configuration file is empty YAML returns None # We convert that to an empty dict return yaml.load(conf_file) or {} except yaml.YAMLError: error = 'Error reading YAML config
uration fil
e {}'.format(fname) _LOGGER.exception(error) raise HomeAssistantError(error) def yaml_include(loader, node): """ Loads another YAML file and embeds it using the !include tag. Example: device_tracker: !include device_tracker.yaml """ fname = os.path.join(os.path.dirname(loader.name), node.value) return parse(fname) yaml.add_constructor('!include', yaml_include) conf_dict = parse(config_path) if not isinstance(conf_dict, dict): _LOGGER.error( 'The configuration file %s does not contain a dictionary', os.path.basename(config_path)) raise HomeAssistantError() return conf_dict
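A short aside on the !include mechanism registered above (this sketch is not part of the entry): the constructor resolves the included path relative to the file being parsed and splices the loaded document in place of the tag. The file names below are throwaway demo names, and FullLoader is used because recent PyYAML requires an explicit loader, unlike the older API the original code targets.

import os
import yaml

def yaml_include(loader, node):
    # Resolve the target relative to the file currently being parsed
    # and embed its parsed contents where the !include tag appeared.
    fname = os.path.join(os.path.dirname(loader.name), node.value)
    with open(fname, encoding='utf-8') as included:
        return yaml.load(included, Loader=yaml.FullLoader) or {}

yaml.add_constructor('!include', yaml_include, Loader=yaml.FullLoader)

# Throwaway files so the sketch runs on its own.
with open('demo_main.yaml', 'w', encoding='utf-8') as f:
    f.write('device_tracker: !include demo_tracker.yaml\n')
with open('demo_tracker.yaml', 'w', encoding='utf-8') as f:
    f.write('platform: demo\n')

with open('demo_main.yaml', encoding='utf-8') as f:
    print(yaml.load(f, Loader=yaml.FullLoader))
# -> {'device_tracker': {'platform': 'demo'}}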
jrversteegh/softsailor
deps/swig-2.0.4/Examples/test-suite/python/namespace_class_runme.py
Python
gpl-3.0
379
0.036939
from namespace_class import * try: p = Private1() error = 1 exc
ept: error = 0 if (error): raise RuntimeError, "Private1 is private" t
ry: p = Private2() error = 1 except: error = 0 if (error): raise RuntimeError, "Private2 is private" EulerT3D.toFrame(1,1,1) b = BooT_i() b = BooT_H() f = FooT_i() f.quack(1) f = FooT_d() f.moo(1) f = FooT_H() f.foo(Hi)
tpolasek/cmput410-project
BenHoboCo/BenHoboCo/settings.py
Python
gpl-2.0
2,810
0.003559
""" Django settings for BenHoboCo project. For more information on this file, see https://docs.djangoproject.com/en/1.6/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.6/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) GET_SOLO_TEMPLATE_TAG_NAME = 'get_solo' # Quick-start development settings - unsuitable for producti
on # See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'b&r86v3qyzx=d^8p8k4$c!#imhb+jys*$g@yxz8#vt83@r-va_' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True TEMPLATE_DEBUG = True # NOTE: Local server has to be in the first positi
on! ALLOWED_HOSTS = [ '127.0.0.1:8000', 'cs410.cs.ualberta.ca:41011', ] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'rest_framework', 'crispy_forms', 'solo', 'core', 'south', 'images', 'posts', 'authors', 'friends', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'BenHoboCo.urls' WSGI_APPLICATION = 'BenHoboCo.wsgi.application' # Database # https://docs.djangoproject.com/en/1.6/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.mysql', 'NAME':'helix', 'USER':'myuser', 'PASSWORD':'mypass', 'HOST':'leago.btrinh.com', 'PORT':'3306', } } CRISPY_TEMPLATE_PACK = 'bootstrap3' # Internationalization # https://docs.djangoproject.com/en/1.6/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = False # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.6/howto/static-files/ STATIC_URL = '/static/' STATIC_PATH = os.path.join( BASE_DIR, "static" ) STATICFILES_DIRS = ( STATIC_PATH, ) # Templates TEMPLATE_PATH = os.path.join( BASE_DIR, "templates") TEMPLATE_DIRS = ( TEMPLATE_PATH, ) TEMPLATE_CONTEXT_PROCESSORS = ( 'django.core.context_processors.request', 'django.contrib.auth.context_processors.auth', ) MEDIA_URL = '/media/' MEDIA_ROOT = os.path.join( BASE_DIR, 'media' ) LOGIN_URL = '/login/'
muccg/rdrf
rdrf/rdrf/migrations/0023_rdrfcontext_context_form_group.py
Python
agpl-3.0
534
0
# -*- coding: utf-8
-*- from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('rdrf', '0022_merge'), ] operations = [ migrations.AddField( model_name='rdrfcontext', name='context_form_group', field=models.ForeignKey(blank=True, to='rdrf.ContextFormGroup', null=True, on_delete=mod
els.SET_NULL), ), ]
rodrigosurita/GDAd
sdaps/model/sheet.py
Python
gpl-3.0
1,971
0.001522
# -*- coding: utf8 -*- # SDAPS - Scripts for data acquisition with paper based surveys # Copyright(C) 2008, Christoph Simon <post@christoph-s
imon.eu> # Copyright(C) 2008, Benjamin Berg <benjamin@sipsolutions.net> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY;
without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import buddy class Sheet(buddy.Object): def __init__(self): self.survey = None self.data = dict() self.images = list() self.survey_id = None self.questionnaire_id = None self.global_id = None self.valid = 1 self.quality = 1 def add_image(self, image): self.images.append(image) image.sheet = self def get_page_image(self, page): # Simply return the image for the requested page. # Note: We return the first one we find; this means in the error case # that a page exists twice, we return the first one. for image in self.images: if image.page_number == page and image.survey_id == self.survey.survey_id: return image return None class Image(buddy.Object): def __init__(self): self.sheet = None self.filename = str() self.tiff_page = 0 self.rotated = 0 self.raw_matrix = None self.page_number = None self.survey_id = None self.global_id = None self.questionnaire_id = None
devs1991/test_edx_docmode
venv/lib/python2.7/site-packages/networkx/algorithms/approximation/dominating_set.py
Python
agpl-3.0
2,985
0.00067
# -*- coding: utf-8 -*- """ ********************** Minimum Dominating Set ********************** A dominating set for a graph G = (V, E) is a subset D of V such that every vertex not in D is joined to at least one member of D by some edge. The domination number gamma(G) is the number of vertices in a smallest dominating set for G. Given a graph G = (V, E) find a minimum weight dominating set V'. http://en.wikipedia.org/wiki/Dominating_set This is reducible to the minimum set dom_set problem. """ # Copyright (C) 2011-2012 by # Nicholas Mancuso <nick.mancuso@gmail.com> # All rights reserved. # BSD license. import networkx as nx __all__ = ["min_weighted_dominating_set", "min_edge_dominating_set"] __author__ = """Nicholas Mancuso (nick.mancuso@gmail.com)""" def min_weighted_dominating_set(graph, weight=None): """Return minimum weight dominating set. Parameters ---------- graph : NetworkX graph Undirected graph weight : None or string,
optional (default = None) If None, every edge has weight/distance/weight 1. If a string, use this edge attribute as the edge weight. Any edge attribute not present defaults to 1. Returns ------- min_weight_dominating_set : set Returns a set of vertices whose weight sum is no more than 1 + log w(V) References ---------- .. [1] Vazirani, Vijay Approximation Algorithms (2001) """ if not graph: raise Va
lueError("Expected non-empty NetworkX graph!") # min cover = min dominating set dom_set = set([]) cost_func = dict((node, nd.get(weight, 1)) \ for node, nd in graph.nodes_iter(data=True)) vertices = set(graph) sets = dict((node, set([node]) | set(graph[node])) for node in graph) def _cost(subset): """ Our cost effectiveness function for sets given its weight """ cost = sum(cost_func[node] for node in subset) return cost / float(len(subset - dom_set)) while vertices: # find the most cost effective set, and the vertex that for that set dom_node, min_set = min(sets.items(), key=lambda x: (x[0], _cost(x[1]))) alpha = _cost(min_set) # reduce the cost for the rest for node in min_set - dom_set: cost_func[node] = alpha # add the node to the dominating set and reduce what we must cover dom_set.add(dom_node) del sets[dom_node] vertices = vertices - min_set return dom_set def min_edge_dominating_set(graph): """Return minimum weight dominating edge set. Parameters ---------- graph : NetworkX graph Undirected graph Returns ------- min_edge_dominating_set : set Returns a set of dominating edges whose size is no more than 2 * OPT. """ if not graph: raise ValueError("Expected non-empty NetworkX graph!") return nx.maximal_matching(graph)
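The docstring above describes the greedy, logarithmic-factor approximation for minimum weighted dominating sets. A minimal usage sketch (not from the repository), assuming a networkx installation that exposes this function under networkx.algorithms.approximation:

import networkx as nx
from networkx.algorithms.approximation import min_weighted_dominating_set

G = nx.path_graph(5)  # vertices 0-1-2-3-4 joined in a line
dom = min_weighted_dominating_set(G)

# By definition, every vertex outside the set has a neighbour inside it.
assert all(v in dom or any(u in dom for u in G[v]) for v in G)
print(dom)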
altermarkive/Coding-Interviews
algorithm-design/hackerrank/class_2_find_the_torsional_angle/test_class_2_find_the_torsional_angle.py
Python
mit
2,023
0
#!/usr/bin/env python3 # https://www.hackerrank.com/challenges/class-2-find-the-torsional-angle import io import math import sys import unittest class Vector: def __init__(self, x, y, z): self.x = x self.y = y self.z = z def subtract(self, other): x = self.x - other.x y = self.y - other.y z = self.z - other.z return Vector(x, y, z) def dot_product(self, other): return self.x * other.x + self.y * other.y + self.z * other.z def cross_product(self, other): zero = Vector(0, 0, 0) x = self.y * other.z - self.z * other.y y = self.z * other.x - self.x * other.z z = self.x * other.y - self.y * other.x return zero.subtract(Vector(x, y, z)) def value(self): xx = math.pow(self.x, 2) yy = math.pow(self.y, 2) zz = math.pow(self.z, 2) return math.sqrt(xx + yy + zz) def torsional_angle(a, b, c, d): ab = a.subtract(b) bc = b.subtract(c) cd = c.subtract(d) x = ab.cross_product(bc) y = bc.cross_product(cd) cosine = x.dot_product(y) / (x.value() * y.value()) return math.degrees(math.acos(cosine)) def main(): a = Vector(*tuple(map(float, input().strip().split()))) b = Vector(*tuple(map(float, input().strip().split()))) c = Vector(*tuple(map(float, input().strip().split()))) d = Vector(*tuple(map(float, input().strip().split()))) print('%.2f' % torsional_angle(a, b, c, d)) if __name__ == '__main__': # pragma: no cover main() class TestCode(unittest.TestCase): def generalized_test(self, which): s
ys.stdin = open(__file__.replace('.py', f'.{which}.in'), 'r') sys.stdout = io.StringIO() expected = open(__file__.replace('.py', f'.{which}.out'),
'r') main() self.assertEqual(sys.stdout.getvalue(), expected.read()) for handle in [sys.stdin, sys.stdout, expected]: handle.close() def test_0(self): self.generalized_test('0')
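As a quick sanity check of torsional_angle (hypothetical coordinates, not taken from the HackerRank fixtures): the four points below are chosen so the planes (a, b, c) and (b, c, d) are perpendicular, so the printed dihedral should be 90.00. It assumes Vector and torsional_angle from the script above are in scope.

a = Vector(0.0, 1.0, 0.0)
b = Vector(0.0, 0.0, 0.0)
c = Vector(1.0, 0.0, 0.0)
d = Vector(1.0, 0.0, 1.0)
print('%.2f' % torsional_angle(a, b, c, d))  # -> 90.00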
iblis-ms/conan_gbenchmark
conanfile.py
Python
mit
2,604
0.006528
# Author: Marcin Serwach # https://github.com/iblis-ms/conan
_gbenchmark from conans import ConanFile, CMake, tools imp
ort os import sys import shutil class GbenchmarkConan(ConanFile): name = 'GBenchmark' version = '1.3.0' license = 'MIT Licence' url = 'https://github.com/iblis-ms/conan_gbenchmark' description = 'Conan.io support for Google Benchmark' settings = ['os', 'compiler', 'build_type', 'arch', 'cppstd'] options = { 'BENCHMARK_ENABLE_TESTING': [True, False], 'BENCHMARK_ENABLE_LTO': [True, False] } default_options = ('BENCHMARK_ENABLE_TESTING=False', 'BENCHMARK_ENABLE_LTO=False' ) generators = 'cmake' source_root = 'benchmark-%s' % version exports = 'CMakeLists.txt' buildFolder = '_build' def source(self): zipFileName = "v%s.zip" % self.version tools.download("https://github.com/google/benchmark/archive/%s" % zipFileName, zipFileName) tools.unzip(zipFileName) def build(self): cmake = CMake(self) for (opt, val) in self.options.items(): if val is not None: cmake.definitions[opt] = 'ON' if val == "True" else 'OFF' if self.settings.compiler == 'clang' and str(self.settings.compiler.libcxx) == 'libc++': cmake.definitions['BENCHMARK_USE_LIBCXX'] = 'YES' if str(self.settings.compiler) in ['gcc', 'apple-clang', 'clang', 'sun-cc']: if str(self.settings.arch) in ['x86_64', 'sparcv9']: cmake.definitions['BENCHMARK_BUILD_32_BITS'] = 'OFF' elif str(self.settings.arch) in ['x86', 'sparc']: cmake.definitions['BENCHMARK_BUILD_32_BITS'] = 'YES' sys.stdout.write("cmake " + str(cmake.command_line) + "\n") cmake.configure(source_dir=self.build_folder, build_dir=self.buildFolder) cmake.build() def package(self): self.copy(pattern='*.h', dst='include', src='%s/include' % self.source_root, keep_path=True) self.copy(pattern='*.lib', dst='lib', src=os.path.join(self.buildFolder,'lib'), keep_path=False) self.copy(pattern='*.a', dst='lib', src=os.path.join(self.buildFolder,'lib'), keep_path=False) for docPatter in ['*.md', 'LICENSE', 'AUTHORS', 'CONTRIBUTORS']: self.copy(pattern=docPatter, dst='doc', src=self.source_root, keep_path=False) def package_info(self): self.cpp_info.libs = ['benchmark'] if self.settings.os == 'Windows': self.cpp_info.libs.extend(['Shlwapi'])
svram/fun-with-advanced-python
hackerNewsScraper/hackerNewsScraper/items.py
Python
gpl-3.0
317
0.003155
# -*- coding: utf-8 -*- # Define here the models for your scraped items # # See documentation in: # http://doc.scrapy.org/en/latest/topi
cs/items.html import scrapy class HackernewsscraperItem(scrapy.Item): # define the fields for your item here like: title = scrapy.Field() link = scrapy.Field()
quit9to5/Next
patient_management/doctor/forms.py
Python
gpl-2.0
308
0.00974
from django import forms from .models import doctor cla
ss ContactForm(forms.Form): message = forms.CharField() class SignUpForm(forms.ModelForm): class Meta: model = doctor fields = ['full_name', 'email'] class areaForm(forms.Form): messag = forms.CharField(req
uired=False)
nimiq/promptastic
segments/filesystem.py
Python
apache-2.0
916
0.001092
import os from segments import Segment, theme from utils import colors, glyphs class CurrentDir(Segment): bg = colors.background(theme.CURRENTDIR_BG) fg = colors.foreground(theme.CURRENTDIR_FG) def init(self, cwd): home = os.path.expanduser('~') self.text = cwd.replace(home, '~') class ReadOnly(Segment): bg = colors.background(theme.READONLY_BG) fg = colors.foreground(theme.READONLY_FG) def init(self, cwd): self.text = ' ' + glyphs.WRITE_ONLY + ' ' if os.access(cwd, os.W_OK): self.active = False class Venv(Segment)
: bg = colors.background(theme.VENV_BG) fg = colors.foreground(theme.VENV_FG) def init(self): env = os.
getenv('VIRTUAL_ENV') if env is None: self.active = False return env_name = os.path.basename(env) self.text = glyphs.VIRTUAL_ENV + ' ' + env_name
BrainIntensive/OnlineBrainIntensive
resources/HCP/pyxnat/pyxnat/core/__init__.py
Python
mit
233
0
import
os import sys from .interfaces import Interface from .search import SearchManager from .cache import CacheManager from .select import Select from .help import Inspector from .users import Users from .packages import
Packages
martbhell/wasthereannhlgamelastnight
src/lib/google/auth/crypt/_python_rsa.py
Python
mit
5,973
0.001005
# Copyright 2016 Google LLC # # Licensed under the Apache License, Version 2.0 (the "Lic
ense"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS O
F ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Pure-Python RSA cryptography implementation. Uses the ``rsa``, ``pyasn1`` and ``pyasn1_modules`` packages to parse PEM files storing PKCS#1 or PKCS#8 keys as well as certificates. There is no support for p12 files. """ from __future__ import absolute_import from pyasn1.codec.der import decoder from pyasn1_modules import pem from pyasn1_modules.rfc2459 import Certificate from pyasn1_modules.rfc5208 import PrivateKeyInfo import rsa import six from google.auth import _helpers from google.auth.crypt import base _POW2 = (128, 64, 32, 16, 8, 4, 2, 1) _CERTIFICATE_MARKER = b"-----BEGIN CERTIFICATE-----" _PKCS1_MARKER = ("-----BEGIN RSA PRIVATE KEY-----", "-----END RSA PRIVATE KEY-----") _PKCS8_MARKER = ("-----BEGIN PRIVATE KEY-----", "-----END PRIVATE KEY-----") _PKCS8_SPEC = PrivateKeyInfo() def _bit_list_to_bytes(bit_list): """Converts an iterable of 1s and 0s to bytes. Combines the list 8 at a time, treating each group of 8 bits as a single byte. Args: bit_list (Sequence): Sequence of 1s and 0s. Returns: bytes: The decoded bytes. """ num_bits = len(bit_list) byte_vals = bytearray() for start in six.moves.xrange(0, num_bits, 8): curr_bits = bit_list[start : start + 8] char_val = sum(val * digit for val, digit in six.moves.zip(_POW2, curr_bits)) byte_vals.append(char_val) return bytes(byte_vals) class RSAVerifier(base.Verifier): """Verifies RSA cryptographic signatures using public keys. Args: public_key (rsa.key.PublicKey): The public key used to verify signatures. """ def __init__(self, public_key): self._pubkey = public_key @_helpers.copy_docstring(base.Verifier) def verify(self, message, signature): message = _helpers.to_bytes(message) try: return rsa.pkcs1.verify(message, signature, self._pubkey) except (ValueError, rsa.pkcs1.VerificationError): return False @classmethod def from_string(cls, public_key): """Construct an Verifier instance from a public key or public certificate string. Args: public_key (Union[str, bytes]): The public key in PEM format or the x509 public key certificate. Returns: google.auth.crypt._python_rsa.RSAVerifier: The constructed verifier. Raises: ValueError: If the public_key can't be parsed. """ public_key = _helpers.to_bytes(public_key) is_x509_cert = _CERTIFICATE_MARKER in public_key # If this is a certificate, extract the public key info. if is_x509_cert: der = rsa.pem.load_pem(public_key, "CERTIFICATE") asn1_cert, remaining = decoder.decode(der, asn1Spec=Certificate()) if remaining != b"": raise ValueError("Unused bytes", remaining) cert_info = asn1_cert["tbsCertificate"]["subjectPublicKeyInfo"] key_bytes = _bit_list_to_bytes(cert_info["subjectPublicKey"]) pubkey = rsa.PublicKey.load_pkcs1(key_bytes, "DER") else: pubkey = rsa.PublicKey.load_pkcs1(public_key, "PEM") return cls(pubkey) class RSASigner(base.Signer, base.FromServiceAccountMixin): """Signs messages with an RSA private key. Args: private_key (rsa.key.PrivateKey): The private key to sign with. key_id (str): Optional key ID used to identify this private key. This can be useful to associate the private key with its associated public key or certificate. 
""" def __init__(self, private_key, key_id=None): self._key = private_key self._key_id = key_id @property @_helpers.copy_docstring(base.Signer) def key_id(self): return self._key_id @_helpers.copy_docstring(base.Signer) def sign(self, message): message = _helpers.to_bytes(message) return rsa.pkcs1.sign(message, self._key, "SHA-256") @classmethod def from_string(cls, key, key_id=None): """Construct an Signer instance from a private key in PEM format. Args: key (str): Private key in PEM format. key_id (str): An optional key id used to identify the private key. Returns: google.auth.crypt.Signer: The constructed signer. Raises: ValueError: If the key cannot be parsed as PKCS#1 or PKCS#8 in PEM format. """ key = _helpers.from_bytes(key) # PEM expects str in Python 3 marker_id, key_bytes = pem.readPemBlocksFromFile( six.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER ) # Key is in pkcs1 format. if marker_id == 0: private_key = rsa.key.PrivateKey.load_pkcs1(key_bytes, format="DER") # Key is in pkcs8. elif marker_id == 1: key_info, remaining = decoder.decode(key_bytes, asn1Spec=_PKCS8_SPEC) if remaining != b"": raise ValueError("Unused bytes", remaining) private_key_info = key_info.getComponentByName("privateKey") private_key = rsa.key.PrivateKey.load_pkcs1( private_key_info.asOctets(), format="DER" ) else: raise ValueError("No key could be detected.") return cls(private_key, key_id=key_id)
Takasudo/studyPython
deep/common/layers.py
Python
gpl-3.0
7,810
0.00516
# coding: utf-8 import numpy as np from common.functions import * from common.util import im2col, col2im class Relu: def __init__(self): self.mask = None def forward(self, x): self.mask = (x <= 0) out = x.copy() out[self.mask] = 0 return out def backward(self, dout): dout[self.mask] = 0 dx = dout return dx class Sigmoid: def __init__(self): self.out = None def forward(self, x): out = sigmoid(x) self.out = out return out def backward(self, dout): dx = dout * (1.0 - self.out) * self.out return dx class Affine: def __init__(self, W, b): self.W =W self.b = b self.x = None self.original_x_shape = None # 重み・バイアスパラメータの微分 self.dW = None self.db = None def forward(self, x): # テンソル対応 self.original_x_shape = x.shape x = x.reshape(x.shape[0], -1) self.x = x out = np.dot(self.x, self.W) + self.b return out def backward(self, dout): dx = np.dot(dout, self.W.T) self.dW = np.dot(self.x.T, dout) self.db = np.sum(dout, axis=0) dx = dx.reshape(*self.original_x_shape) # 入力データの形状に戻す(テンソル対応) return dx class SoftmaxWithLoss: def __init__(self): self.loss = None self.y = None # softmaxの出力 self.t = None # 教師データ def forward(self, x, t): self.t = t self.y = softmax(x) self.loss = cross_entropy_error(self.y, self.t) return self.loss def backward(self, dout=1): batch_size = self.t.shape[0] if self.t.size == self.y.size: # 教師データがone-hot-vectorの場合 dx = (self.y - self.t) / batch_size else: dx = self.y.copy() dx[np.arange(batch_size), self.t] -= 1 dx = dx / batch_size return dx class Dropout: """ http://arxiv.org/abs/1207.0580 """ def __init__(self, dropout_ratio=0.5): self.dropout_ratio = dropout_ratio self.mask = None def forward(self, x, train_flg=True): if train_flg: self.mask = np.random.rand(*x.shape) > self.dropout_ratio return x * self.mask else: return x * (1.0 - self.dropout_ratio) def backward(self, dout): return dout * self.mask class BatchNormalization: """ http://arxiv.org/abs/1502.03167 """ def __init__(self, gamma, beta, momentum=0.9, running_mean=None, running_var=None): self.gamma = gamma self.beta = beta self.momentum = momentum self.input_shape = None # Conv層の場合は4次元、全結合層の場合は2次元 # テスト時に使用する平均と分散 self.running_mean = running_mean self.running_var = running_var # backward時に使用する中間データ self.batch_size = None self.xc = None self.std = None self.dgamma = None self.dbeta = None def forward(self, x, train_flg=True): self.input_shape = x.shape if x.ndim != 2: N, C, H, W = x.shape x = x.transpose(1, 0, 2, 3).reshape(C, -1)
out = self.__forward(x, train_flg) return out.reshape(*self.input_shape) def __forward(self, x, train_flg): if self.running_mean is None: N, D = x.shape self.running_mean = np.zeros(D) self.running_var = np.zeros(D)
if train_flg: mu = x.mean(axis=0) xc = x - mu var = np.mean(xc**2, axis=0) std = np.sqrt(var + 10e-7) xn = xc / std self.batch_size = x.shape[0] self.xc = xc self.xn = xn self.std = std self.running_mean = self.momentum * self.running_mean + (1-self.momentum) * mu self.running_var = self.momentum * self.running_var + (1-self.momentum) * var else: xc = x - self.running_mean xn = xc / ((np.sqrt(self.running_var + 10e-7))) out = self.gamma * xn + self.beta return out def backward(self, dout): if dout.ndim != 2: N, C, H, W = dout.shape dout = dout.transpose(1, 0, 2, 3).reshape(C, -1) dx = self.__backward(dout) dx = dx.reshape(*self.input_shape) return dx def __backward(self, dout): dbeta = dout.sum(axis=0) dgamma = np.sum(self.xn * dout, axis=0) dxn = self.gamma * dout dxc = dxn / self.std dstd = -np.sum((dxn * self.xc) / (self.std * self.std), axis=0) dvar = 0.5 * dstd / self.std dxc += (2.0 / self.batch_size) * self.xc * dvar dmu = np.sum(dxc, axis=0) dx = dxc - dmu / self.batch_size self.dgamma = dgamma self.dbeta = dbeta return dx class Convolution: def __init__(self, W, b, stride=1, pad=0): self.W = W self.b = b self.stride = stride self.pad = pad # 中間データ(backward時に使用) self.x = None self.col = None self.col_W = None # 重み・バイアスパラメータの勾配 self.dW = None self.db = None def forward(self, x): FN, C, FH, FW = self.W.shape N, C, H, W = x.shape out_h = 1 + int((H + 2*self.pad - FH) / self.stride) out_w = 1 + int((W + 2*self.pad - FW) / self.stride) col = im2col(x, FH, FW, self.stride, self.pad) col_W = self.W.reshape(FN, -1).T out = np.dot(col, col_W) + self.b out = out.reshape(N, out_h, out_w, -1).transpose(0, 3, 1, 2) self.x = x self.col = col self.col_W = col_W return out def backward(self, dout): FN, C, FH, FW = self.W.shape dout = dout.transpose(0,2,3,1).reshape(-1, FN) self.db = np.sum(dout, axis=0) self.dW = np.dot(self.col.T, dout) self.dW = self.dW.transpose(1, 0).reshape(FN, C, FH, FW) dcol = np.dot(dout, self.col_W.T) dx = col2im(dcol, self.x.shape, FH, FW, self.stride, self.pad) return dx class Pooling: def __init__(self, pool_h, pool_w, stride=1, pad=0): self.pool_h = pool_h self.pool_w = pool_w self.stride = stride self.pad = pad self.x = None self.arg_max = None def forward(self, x): N, C, H, W = x.shape out_h = int(1 + (H - self.pool_h) / self.stride) out_w = int(1 + (W - self.pool_w) / self.stride) col = im2col(x, self.pool_h, self.pool_w, self.stride, self.pad) col = col.reshape(-1, self.pool_h*self.pool_w) arg_max = np.argmax(col, axis=1) out = np.max(col, axis=1) out = out.reshape(N, out_h, out_w, C).transpose(0, 3, 1, 2) self.x = x self.arg_max = arg_max return out def backward(self, dout): dout = dout.transpose(0, 2, 3, 1) pool_size = self.pool_h * self.pool_w dmax = np.zeros((dout.size, pool_size)) dmax[np.arange(self.arg_max.size), self.arg_max.flatten()] = dout.flatten() dmax = dmax.reshape(dout.shape + (pool_size,)) dcol = dmax.reshape(dmax.shape[0] * dmax.shape[1] * dmax.shape[2], -1) dx = col2im(dcol, self.x.shape, self.pool_h, self.pool_w, self.stride, self.pad) return dx
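A small forward/backward sketch for the Relu layer defined above (illustrative only, assuming the class is in scope; the module's other imports require the accompanying common package): the backward pass masks the incoming gradient exactly where the forward pass clipped the input.

import numpy as np

relu = Relu()
x = np.array([[-1.0, 2.0],
              [3.0, -4.0]])
out = relu.forward(x)                 # negative entries are clipped to 0
dx = relu.backward(np.ones_like(x))   # gradient is zeroed at the clipped positions

print(out)  # [[0. 2.] [3. 0.]]
print(dx)   # [[0. 1.] [1. 0.]]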
uwosh/uwosh.emergency.master
setup.py
Python
gpl-2.0
1,123
0.002671
from setuptools import setup, find_packages import os version = '0.5' setup(name='uwosh.emergency.master', version=version, description="", long_description=open("README.txt").read() + "\n" + open(os.path.join("docs", "HISTORY.txt")).read(), # Get more strings from http://www.python.org/pypi?%3Aaction=list_classifiers classifiers=[ "Framework :: Plone", "Programming Language :: Python", "Topic :: Software Development :: Libraries ::
Python Modules", ], keywords='', author='Nathan Van Gheem', author_email='vangheem@gmail.com', url='http://svn.plone.org/svn/plone/plone.example', license='GPL', packages=find_packages(exclude=['ez_setup']), namespace_packages=['uwosh', 'uwosh.emergency'], include_package_data=True, zip_safe=False, install_requires=[ 'setuptools', 'uwosh.simplee
mergency>=1.1', 'rsa' ], entry_points=""" # -*- Entry points: -*- [z3c.autoinclude.plugin] target = plone """, )
harsha5500/pytelegrambot
telegram/ForceReply.py
Python
gpl-3.0
353
0.005666
__author__ = 'harsha' class ForceRep
ly(object): def __init__(self, force_reply, selective): self.force_reply = force_reply self.selective = selective def get_force_reply(self): return self.force_reply def get_selective(self): return self.selective def __str__(self):
return str(self.__dict__)
TimZaman/DIGITS
digits/model/tasks/test_caffe_train.py
Python
bsd-3-clause
281
0.007117
# Copyright (c)
2014-2016, NVIDIA CORPORATION. All rights reserved. from __future__ import absolute_import from . import caffe_train from digits import test_utils def test_caffe_imports(): test_utils.skipIfNotFramework('caffe') import numpy import google.protobu
f
EderSantana/agnez
docs/conf.py
Python
bsd-3-clause
8,361
0.005382
#!/usr/bin/env python # -*- coding: utf-8 -*- # # agnez documentation build configuration file, created by # sphinx-quickstart on Tue Jul 9 22:26:36 2013. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os # If extensions (or modules to document with autodoc) are in another # directory, add these directories to sys.path here. If the directory is # relative to the documentation root, use os.path.abspath to make it # absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # Get the project root dir, which is the parent dir of this cwd = os.getcwd() project_root = os.path.dirname(cwd) # Insert the project root dir as the first element in the PYTHONPATH. # This lets us ensure that the source package is imported, and that its # version is used. sys.path.insert(0, project_root) import agnez # -- General configuration --------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Agnez' copyright = u'2015, Eder Santana' # The version info for the project you're documenting, acts as replacement # for |version| and |release|, also used in various other places throughout # the built documents. # # The short X.Y version. version = agnez.__version__ # The full version, including alpha/beta/rc tags. release = agnez.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two o
ptions for replacing |today|: either, you set today to # some non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files an
d # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built # documents. #keep_warnings = False # -- Options for HTML output ------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a # theme further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as # html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the # top of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon # of the docs. This file should be a Windows icon file (.ico) being # 16x16 or 32x32 pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) # here, relative to this directory. They are copied after the builtin # static files, so a file named "default.css" will overwrite the builtin # "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page # bottom, using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names # to template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. # Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. # Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages # will contain a <link> tag referring to it. The value of this option # must be the base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. 
htmlhelp_basename = 'agnezdoc' # -- Options for LaTeX output ------------------------------------------ latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'agnez.tex', u'Agnez Documentation', u'Eder Santana', 'manual'), ] # The name of an image file (relative to this directory) to place at # the top of the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings # are parts, not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output ------------------------------------ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'agnez', u'Agnez Documentation', [u'Eder Santana'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ---------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'agnez', u'Agnez Documentation', u'Eder Santana', 'agnez', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to disp
eranroz/hewiktionary_checker
upload.py
Python
mit
8,269
0.000605
#!/usr/bin/python # -*- coding: utf-8 -*- """ Script to upload images to wikipedia. Arguments: -keep Keep the filename as is -filename Target filename without the namespace prefix -noverify Do not ask for verification of the upload description if one is given -abortonwarn: Abort upload on the specified warning type. If no warning type is specified, aborts on any warning. -ignorewarn: Ignores specified upload warnings. If no warning type is specified, ignores all warnings. Use with caution -chunked: Upload the file in chunks (more overhead, but restartable). If no value is specified the chunk size is 1 MiB. The value must be a number which can be preceded by a suffix. The units are: No suffix: Bytes 'k': Kilobytes (1000 B)
'M': Megabytes (1000000 B) 'Ki': Kibibytes (1024 B) 'Mi': Mebibytes (1024x1024 B) The suffixes are cas
e insensitive. -always Don't ask the user anything. This will imply -keep and -noverify and require that either -abortonwarn or -ignorewarn is defined for all. It will also require a valid file name and description. It'll only overwrite files if -ignorewarn includes the 'exists' warning. -recursive When the filename is a directory it also uploads the files from the subdirectories. -summary Pick a custom edit summary for the bot. It is possible to combine -abortonwarn and -ignorewarn so that if the specific warning is given it won't apply the general one but more specific one. So if it should ignore specific warnings and abort on the rest it's possible by defining no warning for -abortonwarn and the specific warnings for -ignorewarn. The order does not matter. If both are unspecific or a warning is specified by both, it'll prefer aborting. If any other arguments are given, the first is either URL, filename or directory to upload, and the rest is a proposed description to go with the upload. If none of these are given, the user is asked for the directory, file or URL to upload. The bot will then upload the image to the wiki. The script will ask for the location of an image(s), if not given as a parameter, and for a description. """ # # (C) Rob W.W. Hooft, Andre Engels 2003-2004 # (C) Pywikibot team, 2003-2017 # # Distributed under the terms of the MIT license. # from __future__ import absolute_import, unicode_literals import math import os import re import pywikibot from pywikibot.bot import suggest_help from pywikibot.specialbots import UploadRobot from datetime import date from pywikibot import config def main(*args): """ Process command line arguments and invoke bot. If args is an empty list, sys.argv is used. @param args: command line arguments @type args: list of unicode """ url = u'' description = [] summary = None keepFilename = False always = False useFilename = None verifyDescription = True aborts = set() ignorewarn = set() chunk_size = 0 chunk_size_regex = r'^-chunked(?::(\d+(?:\.\d+)?)[ \t]*(k|ki|m|mi)?b?)?$' chunk_size_regex = re.compile(chunk_size_regex, re.I) recursive = False # process all global bot args # returns a list of non-global args, i.e. args for upload.py for arg in pywikibot.handle_args(args): if arg: if arg == '-always': keepFilename = True always = True verifyDescription = False elif arg == '-recursive': recursive = True elif arg.startswith('-keep'): keepFilename = True elif arg.startswith('-filename:'): useFilename = arg[10:] elif arg.startswith('-summary'): summary = arg[9:] elif arg.startswith('-noverify'): verifyDescription = False elif arg.startswith('-abortonwarn'): if len(arg) > len('-abortonwarn:') and aborts is not True: aborts.add(arg[len('-abortonwarn:'):]) else: aborts = True elif arg.startswith('-ignorewarn'): if len(arg) > len('-ignorewarn:') and ignorewarn is not True: ignorewarn.add(arg[len('-ignorewarn:'):]) else: ignorewarn = True elif arg.startswith('-chunked'): match = chunk_size_regex.match(arg) if match: if match.group(1): # number was in there base = float(match.group(1)) if match.group(2): # suffix too suffix = match.group(2).lower() if suffix == "k": suffix = 1000 elif suffix == "m": suffix = 1000000 elif suffix == "ki": suffix = 1 << 10 elif suffix == "mi": suffix = 1 << 20 else: pass # huh? 
else: suffix = 1 chunk_size = math.trunc(base * suffix) else: chunk_size = 1 << 20 # default to 1 MiB else: pywikibot.error('Chunk size parameter is not valid.') elif url == u'': url = arg else: description.append(arg) description = u' '.join(description) # curly barckets need to double in formatted string description = """=={{{{int:filedesc}}}}== {{{{Information |description={{{{en|1=Native Israeli pronunciation of this Hebrew word}}}} |date={0} |source={{{{own}}}} |author=[[User:{1}|{1}]] |permission= |other versions= }}}} =={{{{int:license-header}}}}== {{{{self|cc-zero}}}} [[Category:Hebrew pronunciation]]""".format(date.today(),config.usernames['commons']['commons']) while not ("://" in url or os.path.exists(url)): if not url: error = 'No input filename given.' else: error = 'Invalid input filename given.' if not always: error += ' Try again.' if always: url = None break else: pywikibot.output(error) url = pywikibot.input(u'URL, file or directory where files are now:') if always and ((aborts is not True and ignorewarn is not True) or not description or url is None): additional = '' missing = [] if url is None: missing += ['filename'] additional = error + ' ' if description is None: missing += ['description'] if aborts is not True and ignorewarn is not True: additional += ('Either -ignorewarn or -abortonwarn must be ' 'defined for all codes. ') additional += 'Unable to run in -always mode' suggest_help(missing_parameters=missing, additional_text=additional) return False if os.path.isdir(url): file_list = [] for directory_info in os.walk(url): if not recursive: # Do not visit any subdirectories directory_info[1][:] = [] for dir_file in directory_info[2]: file_list.append(os.path.join(directory_info[0], dir_file)) url = file_list else: url = [url] bot = UploadRobot(url, description=description, useFilename=useFilename, keepFilename=keepFilename, verifyDescription=verifyDescription, aborts=aborts, ignoreWarning=ignorewarn, chunk_size=chunk_size, always=always, summary="bot upload", targetSite=pywi
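The -chunked suffix arithmetic described in the docstring, shown standalone with the same regular expression (this snippet is not part of the script): '2Mi' should come out as 2 * 1024 * 1024 bytes.

import math
import re

chunk_size_regex = re.compile(
    r'^-chunked(?::(\d+(?:\.\d+)?)[ \t]*(k|ki|m|mi)?b?)?$', re.I)

match = chunk_size_regex.match('-chunked:2Mi')
base = float(match.group(1))                       # 2.0
suffix = {'k': 1000, 'm': 1000000,
          'ki': 1 << 10, 'mi': 1 << 20}[match.group(2).lower()]
print(math.trunc(base * suffix))                   # -> 2097152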
cloudify-cosmo/cloudify-system-tests
cosmo_tester/test_suites/snapshots/inplace_restore_test.py
Python
apache-2.0
3,573
0
from time import sleep from os.path import join import pytest from cosmo_tester.framework.examples import get_example_deployment from cos
mo_tester.framework.test_hosts import Hosts, VM from cosmo_tester.test_suites.snapshots import ( create_snapshot, download_snapshot, restore_snapshot, upload_snapshot, ) @pytest.fixture(scope='function') def manager_and_vm(request, ssh_key, module_tmpdir, te
st_config, logger): hosts = Hosts(ssh_key, module_tmpdir, test_config, logger, request, 2) hosts.instances[0] = VM('master', test_config) hosts.instances[1] = VM('centos_7', test_config) manager, vm = hosts.instances passed = True try: hosts.create() yield hosts.instances except Exception: passed = False raise finally: hosts.destroy(passed=passed) @pytest.fixture(scope='function') def example(manager_and_vm, ssh_key, tmpdir, logger, test_config): manager, vm = manager_and_vm example = get_example_deployment( manager, ssh_key, logger, 'inplace_restore', test_config, vm) try: yield example finally: if example.installed: example.uninstall() def test_inplace_restore(manager_and_vm, example, module_tmpdir, logger): manager, vm = manager_and_vm snapshot_name = 'inplace_restore_snapshot_{0}'.format(manager.image_type) snapshot_path = join(str(module_tmpdir), snapshot_name) + '.zip' example.upload_and_verify_install() create_snapshot(manager, snapshot_name, logger) download_snapshot(manager, snapshot_path, snapshot_name, logger) # We need the certs to be the same for the 'new' manager otherwise an # inplace upgrade can't properly work manager.run_command('mkdir /tmp/ssl_backup') manager.run_command('cp /etc/cloudify/ssl/* /tmp/ssl_backup', use_sudo=True) manager.teardown() # The teardown doesn't properly clean up rabbitmq manager.run_command('pkill -f rabbitmq', use_sudo=True) manager.run_command('rm -rf /var/lib/rabbitmq', use_sudo=True) manager.install_config['rabbitmq'] = { 'ca_path': '/tmp/ssl_backup/cloudify_internal_ca_cert.pem', 'cert_path': '/tmp/ssl_backup/rabbitmq-cert.pem', 'key_path': '/tmp/ssl_backup/rabbitmq-key.pem', } manager.install_config['prometheus'] = { 'ca_path': '/tmp/ssl_backup/cloudify_internal_ca_cert.pem', 'cert_path': '/tmp/ssl_backup/monitoring_cert.pem', 'key_path': '/tmp/ssl_backup/monitoring_key.pem', } manager.install_config['ssl_inputs'] = { 'external_cert_path': '/tmp/ssl_backup/cloudify_external_cert.pem', 'external_key_path': '/tmp/ssl_backup/cloudify_external_key.pem', 'internal_cert_path': '/tmp/ssl_backup/cloudify_internal_cert.pem', 'internal_key_path': '/tmp/ssl_backup/cloudify_internal_key.pem', 'ca_cert_path': '/tmp/ssl_backup/cloudify_internal_ca_cert.pem', 'external_ca_cert_path': '/tmp/ssl_backup/cloudify_internal_ca_cert.pem', } manager.bootstrap() upload_snapshot(manager, snapshot_path, snapshot_name, logger) restore_snapshot(manager, snapshot_name, logger, admin_password=manager.mgr_password) manager.wait_for_manager() logger.info('Waiting 35 seconds for agents to reconnect. ' 'Agent reconnect retries are up to 30 seconds apart.') sleep(35) example.uninstall()
tldr-pages/tldr-python-client
setup.py
Python
mit
1,805
0
from pathlib import Path import re from setuptools import setup setup_dir = Path(__file__).resolve().parent version = re.search( r'__version__ = "(.*)"', Path(setup_dir, 'tldr.py').open().read() ) if version is None: raise SystemExit("Could not determine version to use") version = version.group(1) with open('requirements.txt') as f: required = f.read().splitlines() setup( name='tldr',
author='Felix Yan', author_email='felixonmars@gmail.com', url='https://github.com/tldr-pages/tldr-python-client', description='command line client for tldr', long_description=Path(setup_dir, 'README.md').open().read(), long_description_content_type='text/mark
down', license='MIT', py_modules=['tldr'], entry_points={ "console_scripts": [ "tldr = tldr:cli" ] }, data_files=[('share/man/man1', ['docs/man/tldr.1'])], install_requires=required, tests_require=[ 'pytest', 'pytest-runner', ], version=version, python_requires='~=3.6', classifiers=[ "Development Status :: 5 - Production/Stable", "License :: OSI Approved :: MIT License", "Environment :: Console", "Intended Audience :: End Users/Desktop", "Natural Language :: English", "Operating System :: POSIX :: Linux", "Operating System :: POSIX :: SunOS/Solaris", "Operating System :: MacOS :: MacOS X", "Operating System :: Microsoft :: Windows", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Topic :: Utilities", "Topic :: System" ] )
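A standalone illustration of the version-extraction idiom used in this setup script (not part of the package): the same regex applied to a small in-memory stand-in for tldr.py; the version string here is made up.

import re

fake_module_source = '__version__ = "3.1.0"\n'   # hypothetical contents of tldr.py
match = re.search(r'__version__ = "(.*)"', fake_module_source)
if match is None:
    raise SystemExit("Could not determine version to use")
print(match.group(1))   # -> 3.1.0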
iamlikeme/sections
tests/test_sections_Circle.py
Python
mit
810
0.020988
import unittest import sys sys
.path.insert(0, "..") from sections.sections import Circle import test_sections_generic as generic class TestPhysicalProperties(generic.TestPhysicalProperties, unittest.TestCase): @classmethod def setUpClass(cls): cls.sectclass = Circle cls.dimensions = dict(r=3.0) cls.rp = 5.0, 4.0 cls.A = 28.274333882308138 cls._I0 = 63.61725
123519331, 63.61725123519331, 0.0 cls._I = 63.61725123519331, 63.61725123519331, 0.0 cls._cog = 0.0, 0.0 def test_check_dimensions(self): self.assertRaises(ValueError, self.section.set_dimensions, r=-1) self.assertRaises(ValueError, self.section.set_dimensions, r=0) if __name__ == "__main__": unittest.main()
Silvian/samaritan
api/tests/integration/__init__.py
Python
gpl-3.0
2,353
0.00085
"""API integration tests factories.""" import factory from django_common.auth_backends import User from factory.django import DjangoModelFactory from samaritan.models import Address, ChurchRole, MembershipType, ChurchGroup, Member class UserFactory(DjangoModelFactory):
"""Factory for users.""" username = factory.Faker('name') class Meta: model = User class AddressFactory(DjangoModelFactory): """Factory for address.""" number = factory.Faker('word') street = factory.Faker('name') locality = factory.Faker('name') city = factory.Faker('name') post_code = factory.F
aker('word') class Meta: model = Address class RoleFactory(DjangoModelFactory): """Factory for Roles.""" name = factory.Faker('name') description = factory.Faker('text') class Meta: model = ChurchRole class GroupFactory(DjangoModelFactory): """Factory for Groups.""" name = factory.Faker('name') description = factory.Faker('text') class Meta: model = ChurchGroup @factory.post_generation def members(self, create, extracted, **kwargs): if create and extracted: for member in extracted: self.members.add(member) class MembershipTypeFactory(DjangoModelFactory): """Membership Type Factory.""" name = factory.Faker('name') description = factory.Faker('text') class Meta: model = MembershipType class MemberFactory(DjangoModelFactory): """Factory for Members.""" first_name = factory.Faker('name') last_name = factory.Faker('name') date_of_birth = factory.Faker('date_this_century') telephone = factory.Faker('random_int', min=0, max=99999999) address = factory.SubFactory(AddressFactory) email = factory.Faker('email') details = factory.Faker('text') is_baptised = factory.Faker('boolean') baptismal_date = factory.Faker('date_this_century') baptismal_place = factory.Faker('name') is_member = factory.Faker('boolean') membership_type = factory.SubFactory(MembershipTypeFactory) membership_date = factory.Faker('date_this_year') is_active = factory.Faker('boolean') notes = factory.Faker('text') church_role = factory.SubFactory(RoleFactory) gdpr = factory.Faker('boolean') class Meta: model = Member
naritotakizawa/ngo
tests/project2/project/settings.py
Python
mit
941
0
"""ユーザー設定用モジュール.""" import os DEBUG = True BASE_DIR = os.path.dirna
me(os.path.dirname(os.path.abspath(__file__))) INSTALLED_APPS = [ 'app1', 'app2', ] ROOT_URLCONF = 'project.urls' WSGI_APPLICATION = [ # 'wsgiref.validate.validator', 'ngo.wsgi.RedirectApp', 'ngo.wsgi.WSGIHandler', ] """ They are loaded in the following order: app = None app = WSGIHandler(None) app = RedirectAp
p(app) app = validator(app) """ # TEMPLATES = ('ngo.backends.Ngo', []) """ TEMPLATES = ( 'ngo.backends.Ngo', [os.path.join(BASE_DIR, 'template'), os.path.join(BASE_DIR, 'template2')] ) """ TEMPLATES = ('ngo.backends.Jinja2', []) STATICFILES_DIRS = None """ STATICFILES_DIRS = [ os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'static2') ] """ STATIC_URL = 'static' MEDIA_ROOT = os.path.join(BASE_DIR, 'media') MEDIA_URL = 'media'
ahmeier/jhbuild
jhbuild/utils/systeminstall.py
Python
gpl-2.0
14,543
0.002888
# jhbuild - a tool to ease building collections of source packages # Copyright (C) 2011 Colin Walters <walters@verbum.org> # # systeminstall.py - Use system-specific means to acquire dependencies # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA import os import sys import logging import shlex import subprocess import pipes from StringIO import StringIO import cmds def get_installed_pkgconfigs(config): """Returns a dictionary mapping pkg-config names to their current versions on the system.""" pkgversions = {} try: proc = subprocess.Popen(['pkg-config', '--list-all'], stdout=subprocess.PIPE, env=config.get_original_environment(), close_fds=True) stdout = proc.communicate()[0] proc.wait() pkgs = [] for line in StringIO(stdout): pkg, rest = line.split(None, 1) pkgs.append(pkg) # We have to rather inefficiently repeatedly fork to work around # broken pkg-config installations - if any package has a missing # dependency pkg-config will fail entirely. for pkg in pkgs: args = ['pkg-config', '--modversion'] args.append(pkg) proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, env=config.get_original_environment()) stdout = proc.communicate()[0] proc.wait() pkgversions[pkg] = stdout.strip() except OSError: # pkg-config not installed pass return pkgversions def systemdependencies_met(module_name, sysdeps, config): '''Returns True of the system dependencies are met for module_name''' def get_c_include_search_paths(config): '''returns a list of C include paths (-I) from the environment and the user's config''' def extract_path_from_cflags(args): '''extract the C include paths (-I) from a list of arguments (args) Returns a list of paths''' itr = iter(args.split()) paths = [] if os.name == 'nt': # shlex.split doesn't handle sep '\' on Windows import string shell_split = string.split else: shell_split = shlex.split try: while True: arg = itr.next() if arg.strip() in ['-I', '-isystem']: # extract paths handling quotes and multiple paths paths += shell_split(itr.next())[0].split(os.pathsep) elif arg.startswith('-I'): paths += shell_split(arg[2:])[0].split(os.pathsep) except StopIteration: pass return paths try: multiarch = subprocess.check_output(['gcc', '-print-multiarch']).strip() except: multiarch = None # search /usr/include and its multiarch subdir (if any) by default paths = [ os.path.join(os.sep, 'usr', 'include')] if multiarch: paths += [ os.path.join(paths[0], multiarch) ] paths += extract_path_from_cflags(os.environ.get('CPPFLAGS', '')) # check include paths incorrectly configured in CFLAGS, CXXFLAGS paths += extract_path_from_cflags(os.environ.get('CFLAGS', '')) paths += extract_path_from_cflags(os.environ.get('CXXFLAGS', '')) # check include paths incorrectly configured in makeargs paths += extract_path_from_cflags(config.makeargs) paths += extract_path_from_cflags(config.module_autogenargs.get (module_name, '')) 
paths += extract_path_from_cflags(config.module_makeargs.get (module_name, '')) paths = list(set(paths)) # remove duplicates return paths c_include_search_paths = None for dep_type, value in sysdeps: if dep_type.lower() == 'path': if os.path.split(value)[0]: if not os.path.isfile(value) and not os.access(value, os.X_OK): return False else: found = False for path in os.environ.get('PATH', '').split(os.pathsep): filename = os.path.join(path, value) if (os.path.isfile(filename) and os.access(filename, os.X_OK)): found = True break if not found: return False elif dep_type.lower() == 'c_include': if c_include_search_paths is None: c_include_search_paths = get_c_include_search_paths(config) found = False for path in c_include_search_paths: filename = os.path.join(path, value) if os.path.isfile(filename): found = True break if not found: return False return True class SystemInstall(object): def __init__(self): if cmds.has_command('pkexec'): self._root_command_prefix_args = ['pkexec'] elif cmds.has_command('sudo'): self._root_command_prefix_args = ['sudo'] else: raise SystemExit, _('No suitable root privilege command found; you should install "pkexec"') def install(self, pkgconfig_ids): """Takes a list of pkg-config identifiers and uses a system-specific method to install them.""" raise NotImplementedError() @classmethod def find_best(cls): global _classes for possible_cls in _classes: if possible_cls.detect(): return possible_cls() # PackageKit dbus interface contains bitfield constants which # aren't introspectable PK_PROVIDES_ANY = 1 PK_FILTER_ENUM_NOT_INSTALLED = 1 << 3 PK_FILTER_ENUM_NEWEST = 1 << 16 PK_FILTER_ENUM_ARCH = 1 << 18 # NOTE: This class is unfinished class PKSystemInstall(SystemInstall): def __init__(self): SystemInstall.__init__(self) self._loop = None # PackageKit 0.8.1 has API breaks in the D-BUS interf
ace, for now # we try to support both it and older PackageKit self._using_pk_0_8_1 = None self._sysbus = None self._pkdbus = None def _on_pk_message(self, msgtype, msg): logging.info(_('PackageKit: %s' % (msg,))) def _on_pk_error(self, msgtype, msg): log
ging.error(_('PackageKit: %s' % (msg,))) def _get_new_transaction(self): if self._loop is None: import glib self._loop = glib.MainLoop() if self._sysbus is None: import dbus.glib import dbus self._dbus = dbus self._sysbus = dbus.SystemBus() if self._pkdbus is None: self._pkdbus = dbus.Interface(self._sysbus.get_object('org.freedesktop.PackageKit', '/org/freedesktop/PackageKit'), 'org.freedesktop.PackageKit') if self._using_pk_0_8_1 is None: try: txn_path = self._pkdbus.CreateTransaction() txn = self._sysbus.get_object('org.freedesktop.PackageKit', txn_path) self._using_pk_0_8_1 = True except dbus.exceptions.DBusException: tid = self._pkdbus.GetTid() txn = self._sysbus.get_object('org.freedesktop.PackageKit', tid)
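A standalone sketch of the get_installed_pkgconfigs approach (not jhbuild code, and it assumes pkg-config is available on PATH): list all known packages once, then query versions one at a time so a single broken package cannot make the whole query fail.

import subprocess

listing = subprocess.check_output(['pkg-config', '--list-all']).decode()
pkgs = [line.split(None, 1)[0] for line in listing.splitlines() if line.strip()]

versions = {}
for pkg in pkgs[:5]:  # just a handful, to keep the demo quick
    try:
        out = subprocess.check_output(['pkg-config', '--modversion', pkg])
        versions[pkg] = out.decode().strip()
    except subprocess.CalledProcessError:
        pass  # skip packages with broken dependency chains

print(versions)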
chengduoZH/Paddle
python/paddle/fluid/tests/unittests/test_sampling_id_op.py
Python
apache-2.0
2,626
0
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from op_test import OpTest import paddle.fluid.core as core import paddle.fluid as fluid from paddle.fl
uid.
op import Operator class TestSamplingIdOp(OpTest): def setUp(self): self.op_type = "sampling_id" self.use_mkldnn = False self.init_kernel_type() self.X = np.random.random((100, 10)).astype('float32') self.inputs = {"X": self.X} self.Y = np.random.random(100).astype('int64') self.outputs = {'Out': self.Y} self.attrs = {'max': 1.0, 'min': 0.0, 'seed': 1} def test_check_output(self): self.check_output_customized(self.verify_output) y1 = self.out self.check_output_customized(self.verify_output) y2 = self.out # check dtype assert y1.dtype == np.int64 assert y2.dtype == np.int64 # check output is index ids of inputs inputs_ids = np.arange(self.X.shape[1]) assert np.isin(y1, inputs_ids).all() assert np.isin(y2, inputs_ids).all() self.assertTrue(np.array_equal(y1, y2)) self.assertEqual(len(y1), len(self.Y)) def verify_output(self, outs): out = np.array(outs[0]) self.out = out def init_kernel_type(self): pass class TestSamplingIdShape(unittest.TestCase): def test_shape(self): x = fluid.layers.data(name='x', shape=[3], dtype='float32') output = fluid.layers.sampling_id(x) place = fluid.CPUPlace() exe = fluid.Executor(place=place) exe.run(fluid.default_startup_program()) feed = { 'x': np.array( [[0.2, 0.3, 0.5], [0.2, 0.3, 0.4]], dtype='float32') } output_np = exe.run(feed=feed, fetch_list=[output])[0] self.assertEqual(output.shape[0], -1) self.assertEqual(len(output.shape), 1) self.assertEqual(output_np.shape[0], 2) self.assertEqual(len(output_np.shape), 1) if __name__ == "__main__": unittest.main()
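A hedged NumPy reference for what the test above appears to exercise, assuming sampling_id draws one column index per row by inverse transform sampling over the row values with a uniform draw in [min, max); this semantics is an assumption inferred from the op attributes, not taken from the Paddle sources:
import numpy as np

def reference_sampling_id(x, seed=1, low=0.0, high=1.0):
    # Draw u ~ U[low, high) per row and return the first column whose running
    # sum exceeds u (index 0 if no column does).
    rng = np.random.RandomState(seed)
    u = rng.uniform(low, high, size=x.shape[0])
    cdf = np.cumsum(x, axis=1)
    return np.argmax(cdf > u[:, None], axis=1).astype('int64')

ids = reference_sampling_id(np.random.random((100, 10)).astype('float32'))
assert np.isin(ids, np.arange(10)).all()  # same index-membership check the test performs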
ctrlaltdel/neutrinator
vendor/keystoneauth1/tests/unit/k2k_fixtures.py
Python
gpl-3.0
5,454
0
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. UNSCOPED_TOKEN_HEADER = 'UNSCOPED_TOKEN' UNSCOPED_TOKEN = { "token": { "issued_at": "2014-06-09T09:48:59.643406Z", "extras": {}, "methods": ["token"], "expires_at": "2014-06-09T10:48:59.643375Z", "user": { "OS-FEDERATION": { "identity_provider": { "id": "testshib" }, "protocol": { "id": "saml2" }, "groups": [ {"id": "1764fa5cf69a49a4918131de5ce4af9a"} ] }, "id": "testhib%20user", "name": "testhib user" } } } SAML_ENCODING = "<?xml version='1.0' encoding='UTF-8'?>" TOKEN_SAML_RESPONSE = """ <ns2:Response Destination="http://beta.example.com/Shibboleth.sso/POST/ECP" ID="8c21de08d2f2435c9acf13e72c982846" IssueInstant="2015-03-25T14:43:21Z" Version="2.0"> <saml:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity"> http://keystone.idp/v3/OS-FEDERATION/saml2/idp </saml:Issuer> <ns2:Status> <ns2:StatusCode Value="urn:oasis:names:tc:SAML:2.0:status:Success"/> </ns2:Status> <saml:Assertion ID="a5f02efb0bff4044b294b4583c7dfc5d" IssueInstant="2015-03-25T14:43:21Z" Version="2.0"> <saml:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity"> http://keystone.idp/v3/OS-FEDERATION/saml2/idp</saml:Issuer> <xmldsig:Signature> <xmldsig:SignedInfo> <xmldsig:CanonicalizationMethod Algorith
m="http://www.w3.org/2001/10/xml-exc-c14n#"/> <xmldsig:SignatureMethod Algorithm="http://www.w3.org/2000/09/xmldsig#rsa-sha1"/> <xmldsig:Reference URI="#a5f02efb0bff4044b294b4583c7dfc5d"> <xmldsig:Transforms> <xmldsig:Transform Alg
orithm="http://www.w3.org/2000/09/xmldsig# enveloped-signature"/> <xmldsig:Transform Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"/> </xmldsig:Transforms> <xmldsig:DigestMethod Algorithm="http://www.w3.org/2000/09/xmldsig#sha1"/> <xmldsig:DigestValue> 0KH2CxdkfzU+6eiRhTC+mbObUKI= </xmldsig:DigestValue> </xmldsig:Reference> </xmldsig:SignedInfo> <xmldsig:SignatureValue> m2jh5gDvX/1k+4uKtbb08CHp2b9UWsLw </xmldsig:SignatureValue> <xmldsig:KeyInfo> <xmldsig:X509Data> <xmldsig:X509Certificate>...</xmldsig:X509Certificate> </xmldsig:X509Data> </xmldsig:KeyInfo> </xmldsig:Signature> <saml:Subject> <saml:NameID>admin</saml:NameID> <saml:SubjectConfirmation Method="urn:oasis:names:tc:SAML:2.0:cm:bearer"> <saml:SubjectConfirmationData NotOnOrAfter="2015-03-25T15:43:21.172385Z" Recipient="http://beta.example.com/Shibboleth.sso/POST/ECP"/> </saml:SubjectConfirmation> </saml:Subject> <saml:AuthnStatement AuthnInstant="2015-03-25T14:43:21Z" SessionIndex="9790eb729858456f8a33b7a11f0a637e" SessionNotOnOrAfter="2015-03-25T15:43:21.172385Z"> <saml:AuthnContext> <saml:AuthnContextClassRef> urn:oasis:names:tc:SAML:2.0:ac:classes:Password </saml:AuthnContextClassRef> <saml:AuthenticatingAuthority> http://keystone.idp/v3/OS-FEDERATION/saml2/idp </saml:AuthenticatingAuthority> </saml:AuthnContext> </saml:AuthnStatement> <saml:AttributeStatement> <saml:Attribute Name="openstack_user" NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:uri"> <saml:AttributeValue xsi:type="xs:string">admin</saml:AttributeValue> </saml:Attribute> <saml:Attribute Name="openstack_roles" NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:uri"> <saml:AttributeValue xsi:type="xs:string">admin</saml:AttributeValue> </saml:Attribute> <saml:Attribute Name="openstack_project" NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:uri"> <saml:AttributeValue xsi:type="xs:string">admin</saml:AttributeValue> </saml:Attribute> </saml:AttributeStatement> </saml:Assertion> </ns2:Response> """ TOKEN_BASED_SAML = ''.join([SAML_ENCODING, TOKEN_SAML_RESPONSE]) ECP_ENVELOPE = """ <ns0:Envelope xmlns:ns0="http://schemas.xmlsoap.org/soap/envelope/" xmlns:ns1="urn:oasis:names:tc:SAML:2.0:profiles:SSO:ecp" xmlns:ns2="urn:oasis:names:tc:SAML:2.0:protocol" xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" xmlns:xmldsig="http://www.w3.org/2000/09/xmldsig#" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"> <ns0:Header> <ns1:RelayState ns0:actor="http://schemas.xmlsoap.org/soap/actor/next" ns0:mustUnderstand="1"> ss:mem:1ddfe8b0f58341a5a840d2e8717b0737 </ns1:RelayState> </ns0:Header> <ns0:Body> {0} </ns0:Body> </ns0:Envelope> """.format(TOKEN_SAML_RESPONSE) TOKEN_BASED_ECP = ''.join([SAML_ENCODING, ECP_ENVELOPE])
inukaze/maestro
Solución/CodeCombat/Python/2.Bosque_Aislado/005 - If-stravaganza.py
Python
gpl-2.0
386
0.002604
# https://codecombat.com/play/level/if-stravaganza? # # You must Buy & Equip: # 1. Simple Wristwatch # 2. Programmaticon II # # Defeat the ogres from inside their own camp! while True:
enemy = hero.findNearestEnemy() # Use an if statement to check whether an enemy exists
# Attack the enemy if it exists: if enemy: hero.attack(enemy)

google/rekall
rekall-core/rekall/plugins/response/forensic_artifacts.py
Python
gpl-2.0
37,683
0.001115
# Rekall Memory Forensics # Copyright 2016 Google Inc. All Rights Reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or (at # your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # """This module implements plugins related to forensic artifacts. https://github.com/ForensicArtifacts """ from future import standard_library standard_library.install_aliases() from builtins import str from past.builtins import basestring from builtins import object from future.utils import with_metaclass __author__ = "Michael Cohen <scudette@google.com>" import csv import datetime import json import platform import os import io import sys import zipfile import yaml from artifacts import definitions from artifacts import errors from rekall import plugin from rekall import obj from rekall_lib import yaml_utils from rekall.ui import text from rekall.ui import json_renderer from rekall.plugins.response import common from rekall_lib import registry class ArtifactResult(object): """Bundle all the results from an artifact.""" def __init__(self, artifact_name=None, result_type=None, fields=None): self.artifact_name = artifact_name self.result_type = result_type self.results = [] self.fields = fields or [] def __iter__(self): return iter(self.results) def add_result(self, **data): if data: self.results.append(data) def merge(self, other): self.results.extend(other) def as_dict(self): return dict(fields=self.fields, results=self.results, artifact_name=self.artifact_name, result_type=self.result_type) class BaseArtifactResultWriter(with_metaclass(registry.MetaclassRegistry, object)): """Writes the results of artifacts.""" __abstract = True def __init__(self, session=None, copy_files=False, create_timeline=False): self.session = session self.copy_files = copy_files self.create_timeline = create_timeline def write_result(self, result): """Writes the artifact result.""" def _create_timeline(self, artifact_result): """Create a new timeline result from the given result. We use the output format suitable for the timesketch tool: https://github.com/google/timesketch/wiki/UserGuideTimelineFromFile """ artifact_fields = artifact_result.fields fields = [ dict(name="message", type="unicode"), dict(name="timestamp", type="int"), dict(name="datetime", type="unicode"), dict(name="timestamp_desc", type="unicode"), ] + artifact_fields new_result = ArtifactResult( artifact_name=artifact_result.artifact_name, result_type="timeline", fields=fields) for field in artifact_fields: # This field is a timestamp - copy the entire row into the timeline. 
if field["type"] == "epoch": for row in artifact_result.results: new_row = row.copy() timestamp = row.get(field["name"]) if timestamp is None: continue new_row["timestamp"] = int(timestamp) new_row["datetime"] = datetime.datetime.utcfromtimestamp( timestamp).strftime("%Y-%m-%dT%H:%M:%S+00:00") new_row["timestamp_desc"] = artifact_result.artifact_name new_row["message"] = " ".join( str(row[field["name"]]) for field in artifact_fields if field["name"] in row) new_result.add_result(**new_row) return new_result def __enter__(self): return self def __exit__(self, unused_type, unused_value, unused_traceback): return class DirectoryBasedWriter(BaseArtifactResultWriter): na
me = "Directory" def __init__(self, output=None, **kwargs): super(DirectoryBasedWriter, self).__init__(**kwargs) self.dump_dir = output # Check if the directory already exists. if not os.path.isdir(self.dump_dir): raise plugin.PluginError("%s is not a directory" % self.
dump_dir) def write_file(self, result): """Writes a FileInformation object.""" for row in result.results: filename = row["filename"] with open(filename, "rb") as in_fd: with self.session.GetRenderer().open( directory=self.dump_dir, filename=filename, mode="wb") as out_fd: while 1: data = in_fd.read(1024*1024) if not data: break out_fd.write(data) def _write_csv_file(self, out_fd, result): fieldnames = [x["name"] for x in result.fields] writer = csv.DictWriter( out_fd, dialect="excel", fieldnames=fieldnames) writer.writeheader() for row in result.results: writer.writerow(row) def write_result(self, result): """Writes the artifact result.""" if self.copy_files and result.result_type == "file_information": try: self.write_file(result) except (IOError, OSError) as e: self.session.logging.warn("Unable to copy file: %s", e) with self.session.GetRenderer().open( directory=self.dump_dir, filename="artifacts/%s.json" % result.artifact_name, mode="wb") as out_fd: out_fd.write(json.dumps(result.as_dict(), sort_keys=True)) with self.session.GetRenderer().open( directory=self.dump_dir, filename="artifacts/%s.csv" % result.artifact_name, mode="wb") as out_fd: self._write_csv_file(out_fd, result) if self.create_timeline: with self.session.GetRenderer().open( directory=self.dump_dir, filename="artifacts/%s.timeline.csv" % result.artifact_name, mode="wb") as out_fd: self._write_csv_file(out_fd, self._create_timeline(result)) class ZipBasedWriter(BaseArtifactResultWriter): name = "Zip" def __init__(self, output=None, **kwargs): super(ZipBasedWriter, self).__init__(**kwargs) self.output = output def __enter__(self): self.out_fd = self.session.GetRenderer().open( filename=self.output, mode="wb").__enter__() self.outzip = zipfile.ZipFile(self.out_fd, mode="w", compression=zipfile.ZIP_DEFLATED) return self def __exit__(self, *args): self.outzip.close() self.out_fd.__exit__(*args) def _write_csv_file(self, out_fd, result): fieldnames = [x["name"] for x in result.fields] writer = csv.DictWriter( out_fd, dialect="excel", fieldnames=fieldnames) writer.writeheader() for row in result.results: writer.writerow(row) def write_file(self, result): for row in result.results: filename = row["filename"] self.outzip.write(filename) def write_result(self, result): """Writes the artifact result.""" if self.copy_files and result.result_type == "file_information": try: self.write
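The _create_timeline helper above targets the timesketch import format; a small sketch of how a single timeline row is derived from an epoch field, reusing the same strftime pattern as the method (the epoch value and artifact name are illustrative):
import datetime

epoch_value = 1462000000  # illustrative epoch timestamp taken from an artifact row
timeline_row = {
    "timestamp": int(epoch_value),
    "datetime": datetime.datetime.utcfromtimestamp(
        epoch_value).strftime("%Y-%m-%dT%H:%M:%S+00:00"),
    "timestamp_desc": "SomeArtifactName",  # the originating artifact, illustrative
    "message": "space-joined values of the remaining artifact fields",
}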
summanlp/gensim
docker/check_fast_version.py
Python
lgpl-2.1
283
0.003534
import sys try: from gensim.models.word2vec_inner import
FAST_VERSION print('FAST_VERSION ok ! Retrieved with value ', FAST_VERSION) sys.exit() except ImportError: print('Failed... fall back to p
lain numpy (20-80x slower training than the above)') sys.exit(-1)
Vijaysai005/KProject
vijay/DBSCAN/clustering/db/generate_data.py
Python
gpl-3.0
5,801
0.038786
# usr/bin/env python # -*- coding: utf-8 -*- """ Created on Thu Jul 20 13:15:05 2017 @author: Vijayasai S """ # Use python3 from haversine import distance from datetime import datetime from dateutil import tz import my_dbscan as mydb import alert_update as au from pymongo import MongoClient import pandas as pd import time from_zone = tz.tzutc() to_zone = tz.tzlocal() def _connect_mongo(host, port, username, password, db): """ A util for making a connection to mongo """ if username and password: mongo_uri = 'mongodb://%s:%s@%s:%s/%s' % (username, password, host, port, db) conn = MongoClient(mongo_uri) else: conn = MongoClient(host, port) return conn[db] def read_mongo(db, collection, query={}, host='localhost', port=27017, username=None, password=None, no_id=True): """ Read from Mongo and Store into DataFrame """ # Connect to MongoDB db = _connect_mongo(host=host, port=port, username=username, password=password, db=db) # Make a query to the specific DB and Collection cursor = db[collection].find(query) # Expand the cursor and construct the DataFrame df = pd.DataFrame(list(cursor)) # Delete the _id if no_id: del df['_id'] return df def Generate_data(get_col, set_col1, set_col2, time_delay, year, month, startday, endday, starthr, endhr, startmin, endmin): id_dist = [] ; item_id_dist = [] main_curr_rank = {} ; tot_rank_curr = {} count = 0 client = MongoClient('localhost', 27017) db = client.maximus_db for day in range(startday,endday+1): for hr in range(starthr,endhr+1): for mins in range(startmin,endmin+1,time_delay): try: #set_col1.drop() #set_col2.drop() mins_next = mins + time_delay hr_next = hr if time_delay + mins > 59: mins_next = (time_delay + mins) - 60 hr_next += 1 if hr_next > 23: hr_next = 0 day += 1 #print (hr,mins) items = get_col.find({"$and" :[{"packettimestamp":{"$gte":datetime(year,month,day,hr,mins,0)}},{"packettimestamp":{"$lte":datetime(year,month,day,hr_next,mins_next,0)}}]},{"unit_id":1,"latitude":1,"longitude":1,"_id":0}).sort([("packettimestamp", -1)]) utc = datetime(year,month,day,hr,mins) utc = utc.replace(tzinfo=from_zone) # Convert time zone ist = utc.astimezone(to_zone) data = [] ; item_id = [] for item in items: if item["unit_id"] not in item_id: item_id.append(item["unit_id"]) data.append(item) if item["unit_id"] not in item_id_dist: item_id_dist.append(item["unit_id"]) id_dist.append(item) u_id = [ids["unit_id"] for ids in id_dist] if count > 0: rank_curr = {} ; lat_curr = {} ; long_curr = {} for item in item_id: if item in u_id: for i in range(len(id_dist)): if item == id_dist[i]["unit_id"]: for j in range(len(data)): if item == data[j]["unit_id"]: dist = distance(id_dist[i]["latitude"],data[j]["latitude"],id_dist[i]["longitude"],data[j]["longitude"]) id_dist[i]["latitude"] = data[j]["latitude"] id_dist[i]["longitude"] = data[j]["longitude"] rank_curr[item] = dist lat_curr[item] = id_dist[i]["latitude"] long_curr[item] = id_dist[i]["longitude"] try: tot_rank_curr[item] = dist + main_curr_rank[item] main_curr_rank[item] = dist + main_curr_rank[item] except Exception: tot_rank_curr[item] = dist main_curr_rank[item] = dist #print (item, dist) rank_current_sorted = sorted(rank_curr.values(), reverse=True) tot_rank_current_sorted = sorted(tot_rank_curr.values(), reverse=True) #rank,r_id,dist_rank = [],[],[] for item in item_id: if rank_curr
[item] in rank_current_sorted: set_col1.insert([{"latitude":lat_curr[item], "longitude":long_curr[item], "distance_by_interval":
rank_curr[item], "unit_id":item, "rank":rank_current_sorted.index(rank_curr[item])+1,"timestamp":ist}]) set_col2.insert([{"latitude":lat_curr[item], "longitude":long_curr[item], "distance_by_interval":tot_rank_curr[item], "unit_id":item, "rank":tot_rank_current_sorted.index(tot_rank_curr[item])+1,"timestamp":ist}]) ########################################################################## # CREATING CLUSTERS AND SAVING IT IN DATABASE # ########################################################################## table_to_read_1 = "tapola_rank_15_total" eps = 5.0 # in KM ride_id = None coll_1 = db.tapola_rank_15_manual_clustering df_1 = read_mongo("maximus_db", table_to_read_1, {"$and" :[{"timestamp":{"$gte":datetime(year,month,day,hr,mins,0)}},{"timestamp":{"$lte":datetime(year,month,day,hr_next,mins_next,0)}}], "ride_id":ride_id}) mydb.manual_DBSCAN(df_1, coll_1, eps) print (ist) print ("Creating cluster using manual dbscan algorithm") ########################################################################## # CREATING ALERTS AND SAVING IT IN DATABASE # ########################################################################## table_to_read_2 = "tapola_rank_15_manual_clustering" df_2 = read_mongo("maximus_db", table_to_read_2, {"$and" :[{"timestamp":{"$gte":datetime(year,month,day,hr,mins,0)}},{"timestamp":{"$lte":datetime(year,month,day,hr_next,mins_next,0)}}], "ride_id":ride_id}) coll_2 = db.tapola_rank_15_manual_clus_alert au.Generate_alert(df_2, coll_2) print ("Generating alert and saving in the database\n") time.sleep(1) count += 1 except KeyError: pass return
OCA/operating-unit
stock_operating_unit/model/stock_warehouse.py
Python
agpl-3.0
2,156
0.000464
# © 2019 ForgeFlow S.L. # © 2019 Serpent Consulting Services Pvt. Ltd. # License LGPL-3.0 or later (https://www.gnu.org/licenses/lgpl.html). from odoo import _, api, fields, models from odoo.exceptions import UserError class StockWarehouse(models.Model): _inherit = "stock.warehouse" def _default_operating_unit(self): if self.company_id: company = self.company_id else: company = self.env.company for ou in self.env.user.operating_unit_ids: if company == self.company_id: self.operating_unit_id = ou operating_unit_id = fields.Many2one( comodel_name="operating.unit", string="Operating Unit", default=_default_operating_unit, ) @api.constrains("operating_unit_id", "company_id") def _check_company_operating_unit(self): for rec in self: if ( rec.operating_unit_id and rec.company_id and rec.operating_unit_id and rec.company_id != rec.operating_unit_id.company_id ): raise UserError( _( "Configuration error. The Company in the Stock Warehouse" " and in the Operating Unit must be the same." ) ) class StockWarehouseOrderPoint(models.Model): _inherit = "stock.warehouse.orderpoint" @api.constrains( "warehouse_id", "location_id", "location_id.operating_unit_id", "warehouse_id.operating_unit_id", ) def _check_location(self): for rec in self:
if ( rec.warehouse_id.operating_unit_id and rec.warehouse_id and rec.l
ocation_id and rec.warehouse_id.operating_unit_id != rec.location_id.operating_unit_id ): raise UserError( _( "Configuration Error. The Operating Unit of the " "Warehouse and the Location must be the same. " ) )
DavidLP/home-assistant
homeassistant/components/rainmachine/__init__.py
Python
apache-2.0
14,628
0
"""Support for RainMachine devices.""" import asyncio import logging from datetime import timedelta import voluptuous as vol from homeassistant.config_entries import SOURCE_IMPORT from homeassistant.const import ( ATTR_ATTRIBUTION, CONF_BINARY_SENSORS, CONF_IP_ADDRESS, CONF_PASSWORD, CONF_PORT, CONF_SCAN_INTERVAL, CONF_SENSORS, CONF_SSL, CONF_MONITORED_CONDITIONS, CONF_SWITCHES) from homeassistant.exceptions import ConfigEntryNotReady from homeassistant.helpers import aiohttp_client, config_validation as cv from homeassistant.helpers.dispatcher import async_dispatcher_send from homeassistant.helpers.entity import Entity from homeassistant.helpers.event import async_track_time_interval from homeassistant.helpers.service import verify_domain_control from .config_flow import configured_instances from .const import ( DATA_CLIENT, DEFAULT_PORT, DEFAULT_SCAN_INTERVAL, DEFAULT_SSL, DOMAIN, PROVISION_SETTINGS, RESTRICTIONS_CURRENT, RESTRICTIONS_UNIVERSAL) _LOGGER = logging.getLogger(__name__) DATA_LISTENER = 'listener' PROGRAM_UPDATE_TOPIC = '{0}_program_update'.format(DOMAIN) SENSOR_UPDATE_TOPIC = '{0}_data_update'.format(DOMAIN) ZONE_UPDATE_TOPIC = '{0}_zone_update'.format(DOMAIN) CONF_CONTROLLERS = 'controllers' CONF_PROGRAM_ID = 'program_id' CONF_SECONDS = 'seconds' CONF_ZONE_ID = 'zone_id' CONF_ZONE_RUN_TIME = 'zone_run_time' DEFAULT_ATTRIBUTION = 'Data provided by Green Electronics LLC' DEFAULT_ICON = 'mdi:water' DEFAULT_ZONE_RUN = 60 * 10 TYPE_FLOW_SENSOR = 'flow_sensor' TYPE_FLOW_SENSOR_CLICK_M3 = 'flow_sensor_clicks_cubic_meter' TYPE_FLOW_SENSOR_CONSUMED_LITERS = 'flow_sensor_consumed_liters' TYPE_FLOW_SENSOR_START_INDEX = 'flow_sensor_start_index' TYPE_FLOW_SENSOR_WATERING_CLICKS = 'flow_sensor_watering_clicks' TYPE_FREEZE = 'freeze' TYPE_FREEZE_PROTECTION = 'freeze_protection' TYPE_FREEZE_TEMP = 'freeze_protect_temp' TYPE_HOT_DAYS = 'extra_water_on_hot_days' TYPE_HOURLY = 'hourly' TYPE_MONTH = 'month' TYPE_RAINDELAY = 'raindelay' TYPE_RAINSENSOR = 'rainsensor' TYPE_WEEKDAY = 'weekday' BINARY_SENSORS = { TYPE_FLOW_SENSOR: ('Flow Sensor', 'mdi:water-pump'), TYPE_FREEZE: ('Freeze Restrictions', 'mdi:canc
el'), TYPE_FREEZE_PROTECTION: ('Freeze Protection', 'mdi:weather-snowy'), TYPE_HOT_DAYS: ('Extra Water on Hot Days', 'mdi:thermometer-lines'), TYPE_HOURLY: ('Hourly Restrictions', 'mdi:cancel'), TYPE_MONT
H: ('Month Restrictions', 'mdi:cancel'), TYPE_RAINDELAY: ('Rain Delay Restrictions', 'mdi:cancel'), TYPE_RAINSENSOR: ('Rain Sensor Restrictions', 'mdi:cancel'), TYPE_WEEKDAY: ('Weekday Restrictions', 'mdi:cancel'), } SENSORS = { TYPE_FLOW_SENSOR_CLICK_M3: ( 'Flow Sensor Clicks', 'mdi:water-pump', 'clicks/m^3'), TYPE_FLOW_SENSOR_CONSUMED_LITERS: ( 'Flow Sensor Consumed Liters', 'mdi:water-pump', 'liter'), TYPE_FLOW_SENSOR_START_INDEX: ( 'Flow Sensor Start Index', 'mdi:water-pump', None), TYPE_FLOW_SENSOR_WATERING_CLICKS: ( 'Flow Sensor Clicks', 'mdi:water-pump', 'clicks'), TYPE_FREEZE_TEMP: ('Freeze Protect Temperature', 'mdi:thermometer', '°C'), } BINARY_SENSOR_SCHEMA = vol.Schema({ vol.Optional(CONF_MONITORED_CONDITIONS, default=list(BINARY_SENSORS)): vol.All(cv.ensure_list, [vol.In(BINARY_SENSORS)]) }) SENSOR_SCHEMA = vol.Schema({ vol.Optional(CONF_MONITORED_CONDITIONS, default=list(SENSORS)): vol.All(cv.ensure_list, [vol.In(SENSORS)]) }) SERVICE_ALTER_PROGRAM = vol.Schema({ vol.Required(CONF_PROGRAM_ID): cv.positive_int, }) SERVICE_ALTER_ZONE = vol.Schema({ vol.Required(CONF_ZONE_ID): cv.positive_int, }) SERVICE_PAUSE_WATERING = vol.Schema({ vol.Required(CONF_SECONDS): cv.positive_int, }) SERVICE_START_PROGRAM_SCHEMA = vol.Schema({ vol.Required(CONF_PROGRAM_ID): cv.positive_int, }) SERVICE_START_ZONE_SCHEMA = vol.Schema({ vol.Required(CONF_ZONE_ID): cv.positive_int, vol.Optional(CONF_ZONE_RUN_TIME, default=DEFAULT_ZONE_RUN): cv.positive_int, }) SERVICE_STOP_PROGRAM_SCHEMA = vol.Schema({ vol.Required(CONF_PROGRAM_ID): cv.positive_int, }) SERVICE_STOP_ZONE_SCHEMA = vol.Schema({ vol.Required(CONF_ZONE_ID): cv.positive_int, }) SWITCH_SCHEMA = vol.Schema({vol.Optional(CONF_ZONE_RUN_TIME): cv.positive_int}) CONTROLLER_SCHEMA = vol.Schema({ vol.Required(CONF_IP_ADDRESS): cv.string, vol.Required(CONF_PASSWORD): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean, vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL): cv.time_period, vol.Optional(CONF_BINARY_SENSORS, default={}): BINARY_SENSOR_SCHEMA, vol.Optional(CONF_SENSORS, default={}): SENSOR_SCHEMA, vol.Optional(CONF_SWITCHES, default={}): SWITCH_SCHEMA, }) CONFIG_SCHEMA = vol.Schema({ DOMAIN: vol.Schema({ vol.Required(CONF_CONTROLLERS): vol.All(cv.ensure_list, [CONTROLLER_SCHEMA]), }), }, extra=vol.ALLOW_EXTRA) async def async_setup(hass, config): """Set up the RainMachine component.""" hass.data[DOMAIN] = {} hass.data[DOMAIN][DATA_CLIENT] = {} hass.data[DOMAIN][DATA_LISTENER] = {} if DOMAIN not in config: return True conf = config[DOMAIN] for controller in conf[CONF_CONTROLLERS]: if controller[CONF_IP_ADDRESS] in configured_instances(hass): continue hass.async_create_task( hass.config_entries.flow.async_init( DOMAIN, context={'source': SOURCE_IMPORT}, data=controller)) return True async def async_setup_entry(hass, config_entry): """Set up RainMachine as config entry.""" from regenmaschine import login from regenmaschine.errors import RainMachineError _verify_domain_control = verify_domain_control(hass, DOMAIN) websession = aiohttp_client.async_get_clientsession(hass) try: client = await login( config_entry.data[CONF_IP_ADDRESS], config_entry.data[CONF_PASSWORD], websession, port=config_entry.data[CONF_PORT], ssl=config_entry.data[CONF_SSL]) rainmachine = RainMachine( client, config_entry.data.get(CONF_BINARY_SENSORS, {}).get( CONF_MONITORED_CONDITIONS, list(BINARY_SENSORS)), config_entry.data.get(CONF_SENSORS, {}).get( CONF_MONITORED_CONDITIONS, 
list(SENSORS)), config_entry.data.get(CONF_ZONE_RUN_TIME, DEFAULT_ZONE_RUN)) await rainmachine.async_update() except RainMachineError as err: _LOGGER.error('An error occurred: %s', err) raise ConfigEntryNotReady hass.data[DOMAIN][DATA_CLIENT][config_entry.entry_id] = rainmachine for component in ('binary_sensor', 'sensor', 'switch'): hass.async_create_task( hass.config_entries.async_forward_entry_setup( config_entry, component)) async def refresh(event_time): """Refresh RainMachine sensor data.""" _LOGGER.debug('Updating RainMachine sensor data') await rainmachine.async_update() async_dispatcher_send(hass, SENSOR_UPDATE_TOPIC) hass.data[DOMAIN][DATA_LISTENER][ config_entry.entry_id] = async_track_time_interval( hass, refresh, timedelta(seconds=config_entry.data[CONF_SCAN_INTERVAL])) @_verify_domain_control async def disable_program(call): """Disable a program.""" await rainmachine.client.programs.disable( call.data[CONF_PROGRAM_ID]) async_dispatcher_send(hass, PROGRAM_UPDATE_TOPIC) @_verify_domain_control async def disable_zone(call): """Disable a zone.""" await rainmachine.client.zones.disable(call.data[CONF_ZONE_ID]) async_dispatcher_send(hass, ZONE_UPDATE_TOPIC) @_verify_domain_control async def enable_program(call): """Enable a program.""" await rainmachine.client.programs.enable(call.data[CONF_PROGRAM_ID]) async_dispatcher_send(hass, PROGRAM_UPDATE_TOPIC) @_verify_domain_control async def enable_zone
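CONTROLLER_SCHEMA above is a voluptuous schema, so it can be exercised directly; a sketch assuming the Home Assistant constants resolve to the usual 'ip_address' and 'password' keys (the values themselves are made up):
validated = CONTROLLER_SCHEMA({
    'ip_address': '192.168.1.50',  # illustrative
    'password': 'hunter2',         # illustrative
})
# Omitted optional keys come back filled with their declared defaults,
# e.g. validated['port'] == DEFAULT_PORT and validated['ssl'] == DEFAULT_SSL.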
gaoce/TimeVis
tests/test_api.py
Python
mit
1,185
0.001688
import unittest import os import os.path import json # The folder holding the test data data_path = os.path.dirname(__file__) # Set the temporary config for testing os.environ['TIMEVIS_CONFIG'] = os.path.join(data_path, 'config.py') import timevis class TestExperiment

(unittest.TestCase): def setUp(self): self.app = timevis.app.test_client() self.url = '/api/v2/experiment' def test_post(self
): name = os.path.join(data_path, 'post_exp.json') with open(name) as file: obj = json.load(file) resp = self.app.post(self.url, data=json.dumps(obj), content_type='application/json') self.assertIsNotNone(resp.data) def test_get(self): resp = self.app.get(self.url) self.assertIsNotNone(resp.data) def test_put(self): name = os.path.join(data_path, 'put_exp.json') with open(name) as file: obj = json.load(file) resp = self.app.put(self.url, data=json.dumps(obj), content_type='application/json') self.assertIsNotNone(resp.data) if __name__ == '__main__': unittest.main()
rjspiers/qgis-batch-save-layers
resources.py
Python
gpl-2.0
4,288
0.001166
# -*- coding: utf-8 -*- # Resource object code # # Created: Sun 21. Feb 22:22:07 2016 # by: The Resource Compiler for PyQt (Qt v4.8.5) # # WARNING! All changes made in this file will be lost! from PyQt4 import QtCore qt_resource_data = "\ \x00\x00\x02\xf9\ \x89\ \x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\ \x00\x00\x24\x00\x00\x00\x24\x08\x06\x00\x00\x00\xe1\x00\x98\x98\ \x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\ \xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\ \x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\ \xe0\x02\x15\x16\x0a\x03\xbc\xda\x23\x1d\x00\x00\x02\x86\x49\x44\ \x41\x54\x58\xc3\xed\x96\x4d\x48\x54\x51\x18\x86\x9f\x7b\x38\x77\ \xfe\xcc\x66\xb4\xd4\x85\x84\xa4\x44\x85\x8b\x1a\xc8\xc8\x08\x04\ \xa3\xa0\xa0\x36\x31\x42\x8b\xa0\x76\x45\x8b\xa4\x55\xfb\x68\xe3\ \x6e\x5a\xb5\x09\x72\x35\x38\xe0\x46\x83\xa0\x8d\x18\xd1\xb4\x30\ \x66\xa9\x26\x39\x3a\x15\x24\x42\x51\xa6\xe1\xfd\x99\xd3\xc2\x9a\ \x4a\x67\xbc\xf7\x72\xef\x8c\x2e\x7a\x97\x87\x7b\xbe\xfb\x9e\xf7\ \x7c\xdf\xc3\xd1\x72\xf9\x19\xf5\x70\x78\x0c\x21\x43\x68\xf6\x0f\ \x34\x55\xc2\x8b\x14\x02\x25\xa3\x94\x2c\x83\x3b\xd7\x2f\x73\xea\ \xf8\x11\x0d\x1f\x92\xe9\xe1\x31\x9a\x3a\x4f\x20\xcc\x15\xfa\x3b\ \xe1\x50\xd7\x41\x4f\x05\xe6\xe6\x17\x98\x78\x07\xb6\xbe\x87\xf4\ \xf0\x38\x7e\x25\x4b\x80\xd4\x43\xa8\x75\x8b\x8e\x03\x1d\xb4\xb7\ \xb7\x7b\x2a\x60\x18\x06\xcc\x2d\x22\xf5\x10\xb6\x52\xfe\x0d\x6d\ \x5e\xc8\xe7\xf3\x64\xb3\x59\xc7\x8d\x42\x08\x52\xa9\x14\xf1\x78\ \x9c\x20\xb5\xc5\x50\x32\x99\x24\x99\x4c\xba\x2e\x50\x28\x14\x6a\ \x6b\xe8\x7f\x42\x5e\x13\xba\x71\xeb\x2e\xee\xb0\x30\x43\x18\xb8\ \x36\xf8\x40\xf9\xc1\x82\x63\x42\xb7\xef\x3f\xae\x2b\x16\xca\x86\ \x94\x90\xcc\x2f\x14\xb1\x6d\xfb\x9f\x0f\xea\x8d\x85\xb2\x21\x11\ \x6d\xe6\xc5\xfb\xaf\xa8\xc5\x4f\xdb\x6e\xa8\x75\xd3\xff\xb9\x32\ \x4d\x43\x8b\x24\x70\xe2\x7e\xad\x9b\x5e\x7a\x9d\x82\xfa\x25\x04\ \xa8\xd5\x65\x9a\x8d\x02\x4d\x4d\x89\xf2\xda\xd2\x4e\x26\xa4\x6c\ \x83\xd4\xa5\x73\x34\xee\x8d\xb3\x6e\x98\x00\xe4\x66\x47\x77\x2e\ \xa1\x8d\x56\xd2\x78\x3a\x31\xc5\xe8\xf3\x1c\x00\x2d\xad\x2d\xdb\ \x26\xf4\xb6\xb8\x5c\x95\x53\x4f\xc6\x5f\x7b\xe6\x94\xeb\x1e\xaa\ \x86\x85\x74\x66\x32\x50\x4e\xb9\x36\x54\x0d\x0b\x41\x73\xaa\xa2\ \xa1\x86\x58\x84\xd6\x7d\xf1\x5f\x91\x7a\xc3\x82\xdf\x1e\x93\xaa\ \x54\x02\xa5\x40\xdb\xf8\x95\x69\x5a\xf4\xf5\x74\xd3\xd7\xd3\x0d\ \xc0\xbd\xf4\x88\xa7\x13\xfb\x9d\x42\x79\xb6\xf7\x18\x93\x53\x6f\ \x08\xc5\x1a\x11\xe6\x2a\x23\xa3\x33\x48\x5d\xaf\xd8\xf7\x6e\xb0\ \xe0\x3b\xa1\x9b\x57\x2f\x6c\x7b\x
0b\x03\x83\x43\xca\x0b\x16\x7c\ \x27\xe4\x95\xd4\x4e\x58\xf0\x9d\x10\x01\xab\xee\x09\x79\xe5\x94\ \x93\x16\x8b\x1f\x41\xe8\xfe\x0c\x55\xc2\x82\xdb\xe7\xcb\x96\x16\ \x10\x21\xb4\x58\xc2\xbd\x21\xd7\x58\x70\xc9\x29\xdf\xcf\x0f\x2f\ \x58\x08\x42\x7e\x0f\xc4\xc
0\xe0\x90\x6a\x3b\x7c\xba\x2a\xa7\x00\ \x56\xbe\xaf\xa1\x2a\x3c\x5f\x2d\xd3\xe0\x73\xa4\x0b\x11\xdb\xbf\ \xc1\xb4\xd9\x57\xc1\x1e\xaf\x12\xa7\xc2\x21\x9d\x68\x24\x8c\x94\ \x5b\x7f\x35\x3d\x3d\x4d\xe6\xe5\x87\xda\x8e\xfd\x66\x4e\x5d\x39\ \xdf\xcb\xc0\xc5\x33\xae\xf7\x0b\x76\x99\x76\x9d\x21\x59\x8b\xa2\ \x7f\x73\x2a\x16\x0d\xd7\xd7\x90\x13\xa7\x7e\xf7\x95\x73\x21\x85\ \xa6\x02\x18\xfb\x47\x99\x67\x6a\x72\x6a\xb6\xcc\xa9\x36\xf9\x65\ \x13\xa7\xaa\xcb\xb2\x2c\x96\xcc\x04\x25\xbd\x01\x63\xed\x1b\xfd\ \x27\x8f\xf2\x13\x0c\xc0\x8b\x69\x94\xd1\x9d\xcc\x00\x00\x00\x00\ \x49\x45\x4e\x44\xae\x42\x60\x82\ " qt_resource_name = "\ \x00\x07\ \x07\x3b\xe0\xb3\ \x00\x70\ \x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\ \x00\x0f\ \x0f\x12\xef\x33\ \x00\x42\ \x00\x61\x00\x74\x00\x63\x00\x68\x00\x53\x00\x61\x00\x76\x00\x65\x00\x4c\x00\x61\x00\x79\x00\x65\x00\x72\x00\x73\ \x00\x08\ \x0a\x61\x5a\xa7\ \x00\x69\ \x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\ " qt_resource_struct = "\ \x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\ \x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\ \x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\ \x00\x00\x00\x38\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\ " def qInitResources(): QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data) def qCleanupResources(): QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data) qInitResources()
argentumproject/electrum-arg
plugins/digitalbitbox/digitalbitbox.py
Python
mit
20,399
0.008285
# ---------------------------------------------------------------------------------- # Electrum plugin for the Digital Bitbox hardware wallet by Shift Devices AG # digitalbitbox.com # try: import electrum_arg as electrum from electrum_arg.bitcoin import TYPE_ADDRESS, var_int, msg_magic, Hash, verify_message, public_key_to_p2pkh, EncodeAES, DecodeAES from electrum_arg.i18n import _ from electrum_arg.keystore import Hardware_KeyStore from ..hw_wallet import HW_PluginBase from electrum_arg.util import print_error import time import hid import json import math import hashlib from ecdsa.ecdsa import generator_secp256k1 from ecdsa.util import sigencode_der DIGIBOX = True except ImportError as e: DIGIBOX = False # ---------------------------------------------------------------------------------- # USB HID interface # class DigitalBitbox_Client(): def __init__(self, hidDevice): self.dbb_hid = hidDevice self.opened = True self.password = None self.isInitialized = False self.setupRunning = False self.hidBufSize = 4096 def close(self): if self.opened: try: self.dbb_hid.close() except: pass self.opened = False def timeout(self, cutoff): pass def label(self): return " " def is_pairable(self): return True def is_initialized(self): return self.dbb_has_password() def is_paired(self): return self.password is not None def get_xpub(self, bip32_path): if self.check_device_dialog(): msg = '{"xpub":"' + bip32_path + '"}' reply = self.hid_send_encrypt(msg) return reply['xpub'] return None def dbb_has_password(self): reply = self.hid_send_plain('{"ping":""}') if 'ping' not in reply: raise Exception('Device communication error. Please unplug and replug your Digital Bitbox.') if reply['ping'] == 'password': return True return False def stretch_key(self, key): import pbkdf2, hmac return pbkdf2.PBKDF2(key, 'Digital Bitbox', iterations = 20480, macmodule = hmac, digestmodule = hashlib.sha512).read(64).encode('hex') def backup_password_dialog(self): msg = _("Enter the password used when the backup was created:") while True: password = self.handler.get_passphrase(msg, False) if password is None: return None if len(password) < 4: msg = _("Password must have at least 4 characters.\r\n\r\nEnter password:") elif len(password) > 64: msg = _("Password must have less than 64 characters.\r\n\r\nEnter password:") else: return str(password) def password_dialog(self, msg): while True: password = self.handler.get_passphrase(msg, False) if password is None: return False if len(password) < 4: msg = _("Password must have at least 4 characters.\r\n\r\nEnter password:") elif len(password) > 64: msg = _("Password must have less than 64 characters.\r\n\r\nEnter password:") else: self.password = str(password) return True def check_device_dialog(self): # Set password if fresh device if self.password is None and not self.dbb_has_password(): if not self.setupRunning: return False # A fresh device cannot connect to an existing wallet msg = _("An uninitialized Digital Bitbox is detected. 
" \ "Enter a new password below.\r\n\r\n REMEMBER THE PASSWORD!\r\n\r\n" \ "You cannot access your coins or a backup without the password.\r\n" \ "A backup is saved automatically when generating a new wallet.") if self.password_dialog(msg): reply = self.hid_send_plain('{"password":"' + self.password + '"}') else: return False # Get password from user if not yet set msg = _("Enter your Digital Bitbox password:") while self.password is None: if not self.password_dialog(msg): return False reply = self.hid_send_encrypt('{"led":"blink"}') if 'error' in reply: self.password = None if reply['error']['code'] == 109: msg = _("Incorrect password e
ntered.\r\n\r\n" \ + reply['error']['message'] + "\r\n\r\n" \
"Enter your Digital Bitbox password:") else: # Should never occur msg = _("Unexpected error occurred.\r\n\r\n" \ + reply['error']['message'] + "\r\n\r\n" \ "Enter your Digital Bitbox password:") # Initialize device if not yet initialized if not self.setupRunning: self.isInitialized = True # Wallet exists. Electrum code later checks if the device matches the wallet elif not self.isInitialized: reply = self.hid_send_encrypt('{"device":"info"}') if reply['device']['id'] <> "": self.recover_or_erase_dialog() # Already seeded else: self.seed_device_dialog() # Seed if not initialized return self.isInitialized def recover_or_erase_dialog(self): msg = _("The Digital Bitbox is already seeded. Choose an option:\n") choices = [ (_("Create a wallet using the current seed")), (_("Load a wallet from the micro SD card (the current seed is overwritten)")), (_("Erase the Digital Bitbox")) ] try: reply = self.handler.win.query_choice(msg, choices) except Exception: return # Back button pushed if reply == 2: self.dbb_erase() elif reply == 1: if not self.dbb_load_backup(): return else: pass # Use existing seed self.isInitialized = True def seed_device_dialog(self): msg = _("Choose how to initialize your Digital Bitbox:\n") choices = [ (_("Generate a new random wallet")), (_("Load a wallet from the micro SD card")) ] try: reply = self.handler.win.query_choice(msg, choices) except Exception: return # Back button pushed if reply == 0: self.dbb_generate_wallet() else: if not self.dbb_load_backup(show_msg=False): return self.isInitialized = True def dbb_generate_wallet(self): key = self.stretch_key(self.password) filename = "Electrum-" + time.strftime("%Y-%m-%d-%H-%M-%S") + ".pdf" msg = '{"seed":{"source": "create", "key": "%s", "filename": "%s", "entropy": "%s"}}' % (key, filename, 'Digital Bitbox Electrum Plugin') reply = self.hid_send_encrypt(msg) if 'error' in reply: raise Exception(reply['error']['message']) def dbb_erase(self): self.handler.show_message(_("Are you sure you want to erase the Digital Bitbox?\r\n\r\n" \ "To continue, touch the Digital Bitbox's light for 3 seconds.\r\n\r\n" \ "To cancel, briefly touch the light or wait for the timeout.")) hid_reply = self.hid_send_encrypt('{"reset":"__ERASE__"}') self.handler.clear_dialog() if 'error' in hid_reply: raise Exception(hid_reply['error']['message']) else: self.password = None raise Exception('Device erased') def dbb_load_backup(self, show_msg=True): backups = self.hid_send_encrypt('{"backup":"list"}') if 'error' in backups: raise Exception(backups['error']['message']) try:
gdimitris/ChessPuzzlerBackend
Application/app_configuration.py
Python
mit
368
0.008152
__author__ = 'dimitris' import os # Flask Configurat
ion basedir = os.path.abspath(os.path.dirname(__file__)) SECRET_KEY = 'knaskndfknasdfiaosifoaignaosdnfoasodfnaodgnas' PREFERRED_URL_SCHEME = 'https' #SqlAlchemy Configuration DB_NAME = 'puzzles.db' SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, DB_NAME)
#Cache Configuration CACHE_TYPE = 'simple'
fidelram/goatools
setup.py
Python
bsd-2-clause
889
0
#!/usr/bin/env python # -*- coding: UTF-8 -*- from setuptools import setup from glob import glob classifiers = [ 'Development Status :: 4 - Beta', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: BSD License', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Topic :: Scientific/Engineering :: Bio-Informatics', ] exec(open("goatools/version.py").read()) setup( name="goatools", version=__version__, author='Haibao Tang', author_email='tanghaibao@gmail.com', packages=['goatools'], scripts=glob('scripts/*.py'), license='BSD', classifiers=classifiers, url='http://github.com/tanghaibao/goatools', description="Py
thon scripts to find enrichment of GO terms", long_description=open("README.rst").read(), install_requires=['fisher', 'xlsxwriter', 'st
atsmodels'] )
kalafut/go-ledger
importer_test.py
Python
mit
1,021
0.001959
from StringIO import StringIO import textwrap import import
er def t
est_import_csv(): current = StringIO(textwrap.dedent('''\ status,qty,type,transaction_date,posting_date,description,amount A,,,2016/11/02,,This is a test,$4.53 ''')) new = StringIO(textwrap.dedent('''\ "Trans Date", "Summary", "Amount" 5/2/2007, Regal Theaters, $15.99 11/2/2016, This is a test , $4.53 5/2/2007, Regal Theaters, $15.99 ''')) mapping = { 'Trans Date': 'transaction_date', 'Summary': 'description', 'Amount': 'amount' } importer.save_csv(current, new, mapping, '%m/%d/%Y') lines = current.getvalue().splitlines() assert lines[0].rstrip() == 'status,qty,type,transaction_date,posting_date,description,amount' assert lines[1].rstrip() == 'N,2,,2007/05/02,,Regal Theaters,$15.99' assert lines[2].rstrip() == 'A,,,2016/11/02,,This is a test,$4.53' assert len(lines) == 3
CubicERP/odoo
addons/product/product.py
Python
agpl-3.0
67,910
0.006685
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import math import re import time from _common import ceiling from openerp import SUPERUSER_ID from openerp import tools from openerp.osv import osv, fields, expression from openerp.tools.translate import _ from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT import psycopg2 import openerp.addons.decimal_precision as dp from openerp.tools.float_utils import float_round, float_compare def ean_checksum(eancode): """returns the checksum of an ean string of length 13, returns -1 if the string has the wrong length""" if len(eancode) != 13: return -1 oddsum=0 evensum=0 total=0 eanvalue=eancode reversevalue = eanvalue[::-1] finalean=reversevalue[1:] for i in range(len(finalean)): if i % 2 == 0: oddsum += int(finalean[i]) else: evensum += int(finalean[i]) total=(oddsum * 3) + evensum check = int(10 - math.ceil(total % 10.0)) %10 return check def check_ean(eancode): """returns True if eancode is a valid ean13 string, or null""" if not eancode: return True if len(eancode) != 13: return False try: int(eancode) except: return False return ean_checksum(eancode) == int(eancode[-1]) def sanitize_ean13(ean13): """Creates and returns a valid ean13 from an invalid one""" if not ean13: return "0000000000000" ean13 = re.sub("[A-Za-z]","0",ean13); ean13 = re.sub("[^0-9]","",ean13); ean13 = ean13[:13] if len(ean13) < 13: ean13 = ean13 + '0' * (13-len(ean13)) return ean13[:-1] + str(ean_checksum(ean13)) #---------------------------------------------------------- # UOM #---------------------------------------------------------- class product_uom_categ(osv.osv): _name = 'product.uom.categ' _description = 'Product uom categ' _columns = { 'name': fields.char('Name', required=True, translate=True), } class product_uom(osv.osv): _name = 'product.uom' _description = 'Product Unit of Measure' def _compute_factor_inv(self, factor): return factor and (1.0 / factor) or 0.0 def _factor_inv(self, cursor, user, ids, name, arg, context=None): res = {} for uom in self.browse(cursor, user, ids, context=context): res[uom.id] = self._compute_factor_inv(uom.factor) return res def _factor_inv_write(self, cursor, user, id, name, value, arg, context=None): return self.write(cursor, user, id, {'factor': self._compute_factor_inv(value)}, context=context) def name_create(self, cr, uid, name, context=None): """ The UoM category and factor are required, so we'll have to add temporary values for imported UoMs """ uom_categ = self.pool.get('product.uom.categ') # look for the category based on the english name, i.e. no context on purpose! 
# TODO: should find a way to have it translated but not created until actually used categ_misc = 'Unsorted/Imported Units' categ_id = uom_categ.search(cr, uid, [('name', '=', categ_misc)]) if categ_id: categ_id = categ_id[0] else:
categ_id, _ = uom_categ.name_create(cr, uid, categ_misc) uom_id = self.create(cr, uid, {self._rec_name: name, 'categor
y_id': categ_id, 'factor': 1}) return self.name_get(cr, uid, [uom_id], context=context)[0] def create(self, cr, uid, data, context=None): if 'factor_inv' in data: if data['factor_inv'] != 1: data['factor'] = self._compute_factor_inv(data['factor_inv']) del(data['factor_inv']) return super(product_uom, self).create(cr, uid, data, context) _order = "name" _columns = { 'name': fields.char('Unit of Measure', required=True, translate=True), 'category_id': fields.many2one('product.uom.categ', 'Unit of Measure Category', required=True, ondelete='cascade', help="Conversion between Units of Measure can only occur if they belong to the same category. The conversion will be made based on the ratios."), 'factor': fields.float('Ratio', required=True, digits=0, # force NUMERIC with unlimited precision help='How much bigger or smaller this unit is compared to the reference Unit of Measure for this category:\n'\ '1 * (reference unit) = ratio * (this unit)'), 'factor_inv': fields.function(_factor_inv, digits=0, # force NUMERIC with unlimited precision fnct_inv=_factor_inv_write, string='Bigger Ratio', help='How many times this Unit of Measure is bigger than the reference Unit of Measure in this category:\n'\ '1 * (this unit) = ratio * (reference unit)', required=True), 'rounding': fields.float('Rounding Precision', digits=0, required=True, help="The computed quantity will be a multiple of this value. "\ "Use 1.0 for a Unit of Measure that cannot be further split, such as a piece."), 'active': fields.boolean('Active', help="By unchecking the active field you can disable a unit of measure without deleting it."), 'uom_type': fields.selection([('bigger','Bigger than the reference Unit of Measure'), ('reference','Reference Unit of Measure for this category'), ('smaller','Smaller than the reference Unit of Measure')],'Type', required=1), } _defaults = { 'active': 1, 'rounding': 0.01, 'factor': 1, 'uom_type': 'reference', 'factor': 1.0, } _sql_constraints = [ ('factor_gt_zero', 'CHECK (factor!=0)', 'The conversion ratio for a unit of measure cannot be 0!') ] def _compute_qty(self, cr, uid, from_uom_id, qty, to_uom_id=False, round=True, rounding_method='UP'): if not from_uom_id or not qty or not to_uom_id: return qty uoms = self.browse(cr, uid, [from_uom_id, to_uom_id]) if uoms[0].id == from_uom_id: from_unit, to_unit = uoms[0], uoms[-1] else: from_unit, to_unit = uoms[-1], uoms[0] return self._compute_qty_obj(cr, uid, from_unit, qty, to_unit, round=round, rounding_method=rounding_method) def _compute_qty_obj(self, cr, uid, from_unit, qty, to_unit, round=True, rounding_method='UP', context=None): if context is None: context = {} if from_unit.category_id.id != to_unit.category_id.id: if context.get('raise-exception', True): raise osv.except_osv(_('Error!'), _('Conversion from Product UoM %s to Default UoM %s is not possible as they both belong to different Category!.') % (from_unit.name,to_unit.name,)) else: return qty amount = qty/from_unit.factor if to_unit: amount = amount * to_unit.factor if round: amount = float_round(amount, precision_rounding=to_unit.rounding, rounding_method=rounding_method) return amount def _compute_price(self, cr
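A short sketch exercising the EAN-13 helpers defined at the top of the module; '4006381333931' is a well-formed code whose check digit matches, and sanitize_ean13 is shown repairing a malformed input (the exact repaired string is not asserted):
assert check_ean("4006381333931")   # checksum digit verifies
fixed = sanitize_ean13("12345")     # non-digits stripped, zero-padded to 13 digits,
                                    # with the check digit recomputed
assert len(fixed) == 13 and check_ean(fixed)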
Daniel-Hoerauf/group-assistant
assistant/weatherTest.py
Python
gpl-3.0
433
0.027714
#!/usr/bin/env python import sys from weather import Weather def main(args): weather = Weather() location = weather.lookup_by_location(args[1]) condition = location.forecast()[0] if condition: return condition['text'] + ' with high of ' + condition['high'] + ' and low of ' +
condition['low'] else: return "City not
found. It's probably raining meatballs. Please try again." if __name__ == '__main__': main(sys.argv)
suutari-ai/shoop
shuup/front/views/misc.py
Python
agpl-3.0
730
0
# -*- coding: utf-8 -*- # This file is part of Shuup. # # Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved. # # This source code is licensed under the OSL-3.0 license found in the # L
ICENSE file in the root directory of this source tree. from django.http import HttpResponseRedirect from shuup import
configuration def toggle_all_seeing(request): return_url = request.META["HTTP_REFERER"] if not request.user.is_superuser: return HttpResponseRedirect(return_url) all_seeing_key = "is_all_seeing:%d" % request.user.pk is_all_seeing = not configuration.get(None, all_seeing_key, False) configuration.set(None, all_seeing_key, is_all_seeing) return HttpResponseRedirect(return_url)
yliu120/dbsystem
HW2/dbsys-hw2/Query/Plan.py
Python
apache-2.0
10,860
0.01105
import sys from collections import deque from Catalog.Schema import DBSchema from Query.Operators.TableScan import TableScan from Query.Operators.Select import Select from Query.Operators.Project import Project from Query.Operators.Union import Union from Query.Operators.Join import Join from Query.Operators.GroupBy import GroupBy from Query.Operators.Sort import Sort class Plan: """ A data structure implementing query plans. Query plans are tree data structures whose nodes are objects inheriting from the Query.Operator class. Our Query.Plan class tracks the root of the plan tree, and provides basic accessors such as the ability to retrieve the relations accessed by the query, the query's output schema, and plan pretty printing facilities. Plan instances delegate their iterator to the root operator, enabling direct iteration over query results. Plan instances should use the 'prepare' method prior to iteration (as done with Database.processQuery), to initialize all operators contained in the plan. """ def __init__(self, **kwargs): other = kwargs.get("other", None) if other: self.fromOther(other) elif "root" in kwargs: self.root = kwargs["root"] else: raise ValueError("No root operator specified for query plan") def fromOther(self): self.root = other.root # Returns the root operator in the query plan def root(self): return self.root # Returns the query result schema. def schema(self): return self.root.schema() # Returns the relations used by the query. def relations(self): return [op.relationId() for (_,op) in self.flatten() if isinstance(op, TableScan)] # Pre-order depth-first flattening of the query tree. def flatten(self): if self.root: result = [] queue = deque([(0, self.root)]) while queue: (depth, operator) = queue.popleft() children = operator.inputs() result.append((depth, operator)) if children: queue.extendleft([(depth+1, c) for c in children]) return result # Plan preparation and execution # Returns a prepared plan, where every operator has filled in # internal parameters necessary for processing data. def prepare(self, database): if self.root: for (_, operator) in self.flatten(): operator.prepare(database) return self else: raise ValueError("Invalid query plan") # Iterator abstraction for query processing. # Thus, we can use: "for page in plan: ..." def __iter__(self): return iter(self.root) # Plan and statistics information. # Returns a description for the entire query plan, based on the # description of each individual operator. def explain(self): if self.root: planDesc = [] indent = ' ' * 2 for (depth, operator) in self.flatten(): planDesc.append(indent * depth + operator.explain()) return '\n'.join(planDesc) # Returns the cost of this query plan. Each operator should determine # its own local cost added to the cost of its children. def cost(self): return self.root.cost() # Plan I/O, e.g., for query shipping. def pack(self): raise NotImplementedError def unpack(self): raise NotImplementedError class PlanBuilder: """ A query plan builder class that can be used for LINQ-like construction of queries. A plan builder consists of an operator field, as the running root of the query tree. Each method returns a plan builder instance, that can be used to further operators compose with additional builder methods. A plan builder yields a Query.Plan instance through its finalize() method. 
>>> import Database >>> db = Database.Database() >>> db.createRelation('employee', [('id', 'int'), ('age', 'int')]) >>> schema = db.relationSchema('employee') # Populate relation >>> for tup in [schema.pack(schema.instantiate(i, 2*i+20)) for i in range(20)]: ... _ = db.insertTuple(schema.name, tup) ... ### SELECT * FROM Employee WHERE age < 30 >>> query1 = db.query().fromTable('employee').where("age < 30").finalize() >>> query1.relations() ['employee'] >>> print(query1.explain()) # doctest: +ELLIPSIS Select[...,cost=...](predicate='age < 30') TableScan[...,cost=...](employee) >>> [schema.unpack(tup).age for page in db.processQuery(query1) for tup in page[1]] [20, 22, 24, 26, 28] ### SELECT eid FROM Employee WHERE age < 30 >>> query2 = db.query().fromTable('employee').where("age < 30").select({'id': ('id', 'int')}).finalize() >>> print(query2.explain()) # doctest: +ELLIPSIS Project[...,cost=...](projections={'id': ('id', 'int')}) Select[...,cost=...](predicate='age < 30') TableScan[...,cost=...](employee) >>> [query2.schema().unpack(tup).id for page in db.processQuery(query2) for tup in page[1]] [0, 1, 2, 3, 4] ### SELECT * FROM Employee UNION ALL Employee >>> query3 = db.query().fromTable('employee').union(db.query().fromTable('employee')).finalize() >>> print(query3.explain()) # doctest: +ELLIPSIS UnionAll[...,cost=...] TableScan[...,cost=...](employee) TableScan[...,cost=...](employee) >>> [query3.schema().unpack(tup).id for page in db.processQuery(query3) for tup in page[1]] # doctest:+ELLIPSIS [0, 1, 2, ..., 19, 0, 1, 2, ..., 19] ### SELECT * FROM Employee E1 JOIN Employee E2 ON E1.id = E2.id >>> e2schema = schema.rename('employee2', {'id':'id2', 'age':'age2'}) >>> query4 = db.query().fromTable('employee').join( \ db.query().fromTable('employee'), \ rhsSchema=e2schema, \ method='block-nested-loops', expr='id == id2').finalize() >>> print(query4.explain()) # doctest: +ELLIPSIS BNLJoin[...,cost=...](expr='id == id2') TableScan[...,cost=...](employee) TableScan[...,cost=...](employee) >>> q4results = [query4.schema().unpack(tup) for page in db.processQuery(query4) for tup in page[1]] >>> [(tup.id, tup.id2) for tup in q4results] # doctest:+ELLIPSIS [(0, 0), (1, 1), (2, 2), ..., (18, 18), (19, 19)] ### Hash join test with the same query. ### SELECT * FROM Employee E1 JOIN Employee E2 ON E1.id = E2.id >>> e2schema = schema.rename('employee2', {'id':'id2', 'age':'age2'}) >>> keySchema = DBSchema('employeeKey', [('id', 'int')]) >>> keySchema2 = DBSchema('employeeKey2', [('id2', 'i
nt')]) >>> query5 = db.query().fromTable('employee').join( \ db.query().fromTable('employee'), \ rhsSchema=e2schema, \ method='hash',
\ lhsHashFn='hash(id) % 4', lhsKeySchema=keySchema, \ rhsHashFn='hash(id2) % 4', rhsKeySchema=keySchema2, \ ).finalize() >>> print(query5.explain()) # doctest: +ELLIPSIS HashJoin[...,cost=...](lhsKeySchema=employeeKey[(id,int)],rhsKeySchema=employeeKey2[(id2,int)],lhsHashFn='hash(id) % 4',rhsHashFn='hash(id2) % 4') TableScan[...,cost=...](employee) TableScan[...,cost=...](employee) >>> q5results = [query5.schema().unpack(tup) for page in db.processQuery(query5) for tup in page[1]] >>> [(tup.id, tup.id2) for tup in q5results] # doctest:+ELLIPSIS [(0, 0), (1, 1), (2, 2), ..., (18, 18), (19, 19)] >>> sorted([(tup.id, tup.id2) for tup in q5results]) # doctest:+ELLIPSIS [(0, 0), (1, 1), (2, 2), ..., (18, 18), (19, 19)] ### Group by aggregate query ### SELECT id, max(age) FROM Employee GROUP BY id >>> aggMinMaxSchema = DBSchema('minmax', [('minAge', 'int'), ('maxAge','int')]) >>> query6 = db.query().fromTable('employee').groupBy( \ groupSchema=keySchema, \ aggSchema=aggMinMaxSchema, \ groupExpr=(lambda e: e.id), \ aggExprs=[(sys.maxsize, lambda acc, e: min(acc, e.age), lambda x: x), \ (0, lambda acc, e: max(acc, e.age), lambda x: x)], \ groupHashFn=(lambda gbVal: hash(gbVal[0]) % 2) \ ).finalize() >>> print(query6.explain()) # doctest: +ELLIPSIS GroupBy[...,cost=...](groupSchema=employeeKey[(id,int)], aggSchema=minmax[(minAge,int),(maxAge,int)]) TableScan[...,cost=...](employee) >>> q6results = [query6.schema().unpack
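Plan.flatten above does a pre-order walk of the operator tree with a deque, pushing children back onto the front of the queue. A minimal standalone sketch of the same idea, using a hypothetical Node class instead of the repo's Query.Operator hierarchy:

from collections import deque

class Node:
    """Hypothetical stand-in for a plan operator that exposes its child inputs."""
    def __init__(self, name, children=()):
        self.name = name
        self.children = list(children)

def flatten(root):
    """Return (depth, node) pairs in pre-order, depth-first order."""
    result = []
    queue = deque([(0, root)])
    while queue:
        depth, node = queue.popleft()
        result.append((depth, node))
        # Children go to the front of the deque so the walk stays depth-first;
        # reversed() keeps them in left-to-right order after extendleft().
        queue.extendleft((depth + 1, c) for c in reversed(node.children))
    return result

if __name__ == "__main__":
    tree = Node("project", [Node("select", [Node("tablescan")])])
    print([(d, n.name) for d, n in flatten(tree)])  # [(0, 'project'), (1, 'select'), (2, 'tablescan')]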
kedz/cuttsum
trec2015/sbin/reports/raw-stream-count.py
Python
apache-2.0
7,738
0.005557
import streamcorpus as sc import cuttsum.events import cuttsum.corpora from cuttsum.trecdata import SCChunkResource from cuttsum.pipeline import ArticlesResource, DedupedArticlesResource import os import pandas as pd from datetime import datetime from collections import defaultdict import matplotlib.pylab as plt plt.style.use('ggplot') pd.set_option('display.max_rows', 500) pd.set_option('display.width', 200) import locale locale.setlocale(locale.LC_ALL, 'en_US.UTF8') def format_int(x): return locale.format("%d", x, grouping=True) def epoch(dt): return int((dt - datetime(1970, 1, 1)).total_seconds()) chunk_res = SCChunkResource() articles_res = ArticlesResource() ded_articles_res = DedupedArticlesResource() data = [] event2ids = defaultdict(set) fltr_event2ids = defaultdict(set) for event in cuttsum.events.get_events(): corpus = cuttsum.corpora.get_raw_corpus(event) hours = event.list_event_hours() hour2ded = defaultdict(int) hour2ded_fltr = defaultdict(int) ded_df = ded_articles_res.get_stats_df(event, corpus, "goose", .8) if ded_df is not None: if event.query_num > 25: for ids in ded_df["stream ids"].apply(eval).tolist(): for id1 in ids: event2ids[event.fs_name()].add(id1) for _, row in ded_df.iterrows(): dt = datetime.utcfromtimestamp(row["earliest"]) hour = datetime(dt.year, dt.month, dt.day, dt.hour) hour2ded[hour] += 1 if row["match"] == True: hour2ded_fltr[hour] += 1 hour2goose = defaultdict(int) for hour in hours: path = articles_res.get_chunk_path(event, "goose", hour, corpus) if path is None: continue #print path fname = os.path.split(path)[1] num_goose = int(fname.split("-")[0]) hour2goose[hour] = num_goose # goose_df = articles_res.get_stats_df(event, "goose") # if goose_df is not None: # for _, row in goose_df.iterrows(): # dt = datetime.utcfromtimestamp(row["hour"]) # hour = datetime(dt.year, dt.month, dt.day, dt.hour) # hour2goose[hour] = row["goose articles"] for hour in hours: raw_chunks = chunk_res.get_chunks_for_hour(hour, corpus, event) num_raw_si = 0 for chunk in raw_chunks: fname = os.path.split(chunk)[1] num_raw_si += int(fname.split("-")[1]) #num_fltr_si = len(articles_res.get_si(event, corpus, "goose", hour)) data.append({ "event": event.query_id, "title": event.title, "hour": hour, "raw articles": num_raw_si, "goose articles": hour2goose[hour], "deduped articles": hour2ded[hour], "deduped match articles": hour2ded_fltr[hour], }) for event in cuttsum.events.get_events(): if event.query_num < 26: continue corpus = cuttsum.corpora.FilteredTS2015() hours = event.list_event_hours() hour2ded = defaultdict(int) hour2ded_fltr = defaultdict(int) ded_df = ded_articles_res.get_stats_df(event, corpus, "goose", .8) if ded_df is not None: for ids in ded_df["stream ids"].apply(eval).tolist(): for id1 in ids: fltr_event2ids[event.fs_name()].add(id1) for _, row in ded_df.iterrows(): dt = datetime.utcfromtimestamp(row["earliest"]) hour = datetime(dt.year, dt.month, dt.day, dt.hour) hour2ded[hour] += 1 if row["match"] == True: hour2ded_fltr[hour] += 1 hour2goose = defaultdict(int) for hour in hours: path = articles_res.get_chunk_path(event, "goose", hour, corpus) if path is None: continue print path fname = os.path.split(path)[1] num_goose = int(fname.split("-")[0]) hour2goose[hour] = num_goose # goose_df = articles_res.get_stats_df(event, "goose") # if goose_df is not None: # for _, row in goose_df.iterrows(): # dt = datetime.utcfromtimestamp(row["hour"]) # hour = datetime(dt.year, dt.month, dt.day, dt.hour) # hour2goose[hour] = row["goose articles"] for hour in hours: print hour 
raw_chunks = chunk_res.get_chunks_for_hour(hour, corpus, event) num_raw_si = 0 for chunk in raw_chunks: fname = os.path.split(chunk)[1] #num_raw_si += int(fname.split("-")[1]) with sc.Chunk(path=chunk, mode="rb", message=corpus.sc_msg()) as c: for si in c: num_raw_si += 1 #num_fltr_si = len(articles_res.get_si(event, corpus, "goose", hour)) data.append({ "event": event.query_id + " (filtered)", "title": event.title, "hour": hour, "raw articles": num_raw_si, "goose articles": hour2goose[hour], "deduped articles": hour2ded[hour], "deduped match articles": hour2ded_fltr[hour], }) df = pd.DataFrame(data) cols = ["raw articles", "goose articles", "deduped articles", "deduped match articles"] df_sum = df.groupby("event")[cols].sum() df_sum["raw articles"] = df_sum["raw articles"].apply(format_int) df_sum["goose articles"] = df_sum["goose articles"].apply(format_int) df_sum["deduped articles"] = df_sum["deduped articles"].apply(format_int) df_sum["deduped match articles"] = df_sum["deduped match articles"].apply(format_int) print df_sum print coverage = [] for event in cuttsum.events.get_events(): if event.query_num < 26: continue isect = event2ids[event.fs_name()].intersection(fltr_event2ids[event.fs_name()]) n_isect = len(isect) n_unfltr = max(len(event2ids[event.fs_name()]), 1) n_fltr = max(len(fltr_event2ids[event.fs_name()]), 1) print event.fs_name()
print n_isect, float(n_isect) / n_fltr, float(n_isect) / n_unfltr coverage.append({ "event": event.query_id, "intersection": n_isect, "isect/n_2015F":
float(n_isect) / n_fltr, "isect/n_2014": float(n_isect) / n_unfltr, }) df = pd.DataFrame(coverage) df_u = df.mean() df_u["event"] = "mean" print pd.concat([df, df_u.to_frame().T]).set_index("event") exit() with open("article_count.tex", "w") as f: f.write(df_sum.to_latex()) import os if not os.path.exists("plots"): os.makedirs("plots") import cuttsum.judgements ndf = cuttsum.judgements.get_merged_dataframe() for (event, title), group in df.groupby(["event", "title"]): matches = ndf[ndf["query id"] == event] #fig = plt.figure() group = group.set_index(["hour"]) #ax = group[["goose articles", "deduped articles", "deduped match articles"]].plot() linex = epoch(group.index[10]) ax = plt.plot(group.index, group["goose articles"], label="goose") ax = plt.plot(group.index, group["deduped articles"], label="dedupe") ax = plt.plot(group.index, group["deduped match articles"], label="dedupe qmatch") for nugget, ngroup in matches.groupby("nugget id"): times = ngroup["update id"].apply(lambda x: datetime.utcfromtimestamp(int(x.split("-")[0]))) #ngroup = ngroup.sort("timestamp") times.sort() times = times.reset_index(drop=True) if len(times) == 0: continue plt.plot_date( (times[0], times[0]), (0, plt.ylim()[1]), '--', color="black", linewidth=.5, alpha=.5) plt.gcf().autofmt_xdate() plt.gcf().suptitle(title) plt.gcf().savefig(os.path.join("plots", "{}-stream.png".format(event))) plt.close("all")
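The script above appends one dict per event/hour to a list, builds a pandas DataFrame, and sums the count columns per event before pretty-printing. A self-contained sketch of that aggregation pattern with made-up numbers (not the TREC stream data):

import pandas as pd

rows = [
    {"event": "E1", "hour": 0, "raw articles": 1200, "goose articles": 40},
    {"event": "E1", "hour": 1, "raw articles": 900, "goose articles": 25},
    {"event": "E2", "hour": 0, "raw articles": 30000, "goose articles": 80},
]
df = pd.DataFrame(rows)

cols = ["raw articles", "goose articles"]
totals = df.groupby("event")[cols].sum()

# Comma-grouped integers, similar in spirit to the locale-based format_int() above.
print(totals["raw articles"].apply("{:,}".format))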
goors/flask-microblog
UserModel.py
Python
apache-2.0
2,758
0.005801
from flask import session from appconfig import * class UserModel: def __init__(self): from models import Tag from models import Post from models import User self.Tag = Tag.Tag self.Post = Post.Post self.User = User.User def login(self, email, password): user = self.User.query.filter_by(Email = email).first() if user and user.check_password(password): session['email'] = user.Email session['nick'] = user.Nick session['Id'] = user.Id return True return False def register(self, email, password, nick, role, id = None): from models import db if id: u = self.User.query.filter_by(Id=id).first() u.Email = email u.Role = role u.set_password(password) u.Nick = nick subject = "You account is updated" else: u = self.User(nick, email, role, password) db.session.add(u) subject = "Account is created" db.session.commit() body = "<p>Hello "+nick+", </p> <p>Your login details for "+URL+" :</p> <p>Username: "+email+" <br />Password: "+password+"</p>" self.send_email(subject, email, body, nick) return u.Id def list(self): users = self.User.query.all() if users: return users return False def getUser(self, id): user = self.User.query.filter_by(Id=id).first()
if user: return user return False def send_email(self, subject, recip
ients, html_body, nick): import mandrill try: mandrill_client = mandrill.Mandrill('ajQ8I81AVELYSYn--6xbmw') message = { 'from_email': ADMINS[0], 'from_name': 'Blog admin', 'headers': {'Reply-To': ADMINS[0]}, 'html': html_body, 'important': True, 'subject': subject, 'to': [{'email': recipients, 'name': nick, 'type': 'to'}], } result = mandrill_client.messages.send(message=message, async=False) ''' [{'_id': 'abc123abc123abc123abc123abc123', 'email': 'recipient.email@example.com', 'reject_reason': 'hard-bounce', 'status': 'sent'}] ''' except mandrill.Error, e: # Mandrill errors are thrown as exceptions print 'A mandrill error occurred: %s - %s' % (e.__class__, e) # A mandrill error occurred: <class 'mandrill.UnknownSubaccountError'> - No subaccount exists with the id 'customer-123' raise
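login() above depends on a check_password() method of the User model, which is not part of this snippet. One common way to back such a method is Werkzeug's password-hashing helpers; the class below is only an illustration of that pattern, not the blog's actual model:

from werkzeug.security import generate_password_hash, check_password_hash

class DemoUser:
    """Stores a salted hash instead of the plain-text password."""
    def __init__(self, email, password):
        self.email = email
        self.pw_hash = generate_password_hash(password)

    def check_password(self, password):
        return check_password_hash(self.pw_hash, password)

u = DemoUser("me@example.com", "s3cret")
print(u.check_password("s3cret"))  # True
print(u.check_password("wrong"))   # False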
lem9/weblate
weblate/accounts/captcha.py
Python
gpl-3.0
4,975
0
# -*- coding: utf-8 -*- # # Copyright © 2012 - 2017 Michal Čihař <michal@cihar.com> # # This file is part of Weblate <https://weblate.org/> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. # """Simple mathematical captcha.""" from __future__ import unicode_literals import ast from base64 import b64encode, b64decode import hashlib import operator from random import SystemRandom import time from django.conf import settings TIMEDELTA = 600 # Supported operators OPERATORS = { ast.Add: operator.add, ast.Sub: operator.sub, ast.Mult: operator.mul, } class MathCaptcha(object): """Simple match captcha object.""" operators = ('+', '-', '*') operators_display = { '+': '<i class="fa fa-plus"></i>', '-': '<i class="fa fa-minus"></i>', '*': '<i class="fa fa-times"></i>', } interval = (1, 10) def __init__(self, question=None, timestamp=None): if question is None: self.question = self.generate_question() else: self.question = question if timestamp is None: self.timestamp = time.time() else: self.timestamp = timestamp def generate_question(self): """Generate random question.""" generator = SystemRandom() operation = generator.choice(self.operators) first = generator.randint(self.interval[0], self.interval[1]) second = generator.randint(self.interval[0], self.interval[1]) # We don't want negative answers if operation == '-': first += self.interval[1] return ' '.join(( str(first), operation, str(second) )) @staticmethod def from_hash(hashed): """Create object from hash.""" question, timestamp = unhash_question(hashed) return MathCaptcha(question, tim
estamp) @property def hashed(self): """Return hashed question.""" return hash_question(self.question, self.timestamp) def validate(self, answer): """Validate answer.""" return ( self.result == answer and self.timestamp + TIMEDELTA > time.time() ) @property def result(self): """Return result.""" return eval_expr(self.question) @property def display(self): """Get unicode for display."""
parts = self.question.split() return ' '.join(( parts[0], self.operators_display[parts[1]], parts[2], )) def format_timestamp(timestamp): """Format timestamp in a form usable in captcha.""" return '{0:>010x}'.format(int(timestamp)) def checksum_question(question, timestamp): """Return checksum for a question.""" challenge = ''.join((settings.SECRET_KEY, question, timestamp)) sha = hashlib.sha1(challenge.encode('utf-8')) return sha.hexdigest() def hash_question(question, timestamp): """Hashe question so that it can be later verified.""" timestamp = format_timestamp(timestamp) hexsha = checksum_question(question, timestamp) return ''.join(( hexsha, timestamp, b64encode(question.encode('utf-8')).decode('ascii') )) def unhash_question(question): """Unhashe question, verifying its content.""" if len(question) < 40: raise ValueError('Invalid data') hexsha = question[:40] timestamp = question[40:50] try: question = b64decode(question[50:]).decode('utf-8') except (TypeError, UnicodeError): raise ValueError('Invalid encoding') if hexsha != checksum_question(question, timestamp): raise ValueError('Tampered question!') return question, int(timestamp, 16) def eval_expr(expr): """Evaluate arithmetic expression used in Captcha. >>> eval_expr('2+6') 8 >>> eval_expr('2*6') 12 """ return eval_node(ast.parse(expr).body[0].value) def eval_node(node): """Evaluate single AST node.""" if isinstance(node, ast.Num): # number return node.n elif isinstance(node, ast.operator): # operator return OPERATORS[type(node)] elif isinstance(node, ast.BinOp): # binary operation return eval_node(node.op)( eval_node(node.left), eval_node(node.right) ) else: raise ValueError(node)
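hash_question() above packs three fixed-width pieces into one token: a 40-character SHA-1 hex digest over secret+question+timestamp, a 10-character hex timestamp, and the base64-encoded question, which is exactly what unhash_question() slices apart. A standalone sketch of that layout, with a hard-coded stand-in for settings.SECRET_KEY:

import hashlib
import time
from base64 import b64encode, b64decode

SECRET = "not-a-real-secret"  # stand-in for Django's settings.SECRET_KEY

def pack(question, timestamp):
    ts = '{0:>010x}'.format(int(timestamp))
    digest = hashlib.sha1((SECRET + question + ts).encode('utf-8')).hexdigest()
    return digest + ts + b64encode(question.encode('utf-8')).decode('ascii')

def unpack(token):
    digest, ts, encoded = token[:40], token[40:50], token[50:]
    question = b64decode(encoded).decode('utf-8')
    if digest != hashlib.sha1((SECRET + question + ts).encode('utf-8')).hexdigest():
        raise ValueError('Tampered question!')
    return question, int(ts, 16)

token = pack('3 + 4', time.time())
print(unpack(token))  # ('3 + 4', <timestamp>)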
hortonworks/hortonworks-sandbox
apps/help/src/help/tests.py
Python
apache-2.0
1,364
0.008065
#!/usr/bin/env python # Licensed to Cloudera, Inc. under one # or more contributor license agreements. See t
he NOTICE file # distributed with this work for additional information # regarding
copyright ownership. Cloudera, Inc. licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Tests for Help from nose.tools import assert_true, assert_equal from desktop.lib.django_test_util import make_logged_in_client def test_about(): c = make_logged_in_client(username="test", is_superuser=True) # Test default output response = c.get('/help/') assert_true("Welcome to Hue!" in response.content) # Test default to index.md response = c.get("/help/about/") response2 = c.get("/help/about/index.html") assert_equal(response.content, response2.content) # Test index at the bottom assert_true('href="/help/desktop' in response.content)
andela-akhenda/maisha-goals
app/__init__.py
Python
mit
1,333
0
import os from flask import Flask, request, g from flask_sqlalchemy import SQLAlchemy from .decorators import json db = SQLAlchemy() def create_app(config_name): """ Create the usual Flask application instance.""" app = Flask(__name__) # Apply configuration cfg = os.path.join(os.getcwd(), 'config', config_name + '.py') app.config.from_pyfile(cfg) # initialize extensions db.init_app(app) # register blueprints from .api_v1 import api as api_blueprint app.register_blueprint(api_blueprint, url_prefix='/api/v1') # authentic
ation token route from .auth import auth from .models import User @app.route('/api/v1', methods=['GET']) @json def api_index(): return { "message": "Welcome to Maisha Goals. Register a new " " user or login to get started"} @app.route('/auth/register', methods=['POST']) @json def register_user(): u = User
() u.import_data(request.json) db.session.add(u) db.session.commit() return { 'message': 'Your account has been successfully created' }, 201, {'Location': u.get_url()} @app.route('/auth/login') @auth.login_required @json def login_user(): return {'token': g.user.generate_auth_token()} return app
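create_app() above is the usual Flask application-factory pattern: create the app, load a per-environment config file, initialise extensions, register blueprints, then return the app. A stripped-down sketch of the same shape (hypothetical config path, no database or blueprints):

from flask import Flask

def create_app(config_name):
    """Minimal application factory; config files are assumed to live under ./config."""
    app = Flask(__name__)
    app.config.from_pyfile('config/{}.py'.format(config_name), silent=True)

    @app.route('/')
    def index():
        return 'Welcome'

    return app

if __name__ == '__main__':
    create_app('development').run(debug=True)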
the-fascinator/fascinator-portal
src/main/config/portal/default/default/scripts/maintenance.py
Python
gpl-2.0
729
0.005487
from com.googlecode.fascinator.api.indexer import SearchRequest from com.googlecode.fascinator.common.solr import SolrResult from com.googlecode.fasc
inator.spring import ApplicationContextProvider from java.io import ByteArrayInputStream, ByteArrayOutputStream class MaintenanceData: def __init__(self): pass def __activate__(self, context): self.velocityContext = context self.response = self.velocityContext["response"] self.maintenanceModeService = ApplicationContextProvider.getApplicationContext().getBean("maintenanceModeService") if self.maintenanceModeService.isMai
ntanceMode() == False: self.response.sendRedirect(self.velocityContext["portalPath"]+"/home")
stephenlienharrell/roster-dns-management
test/credentials_test.py
Python
bsd-3-clause
4,275
0.010526
#!/usr/bin/python # Copyright (c) 2009, Purdue University # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # Redistributions in binary form must reproduce the above copyright notice, this # list of conditions and the following disclaimer in the documentation and/or # other materials provided with the distribution. # # Neither the name of the Purdue University nor the names of its contributors # may be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Test for Credential cache library.""" __copyright__ = 'Copyright (C) 2009, Purdue University' __license__ = 'BSD' __version__ = '#TRUNK#' import unittest import os import roster_core from roster_server import credentials CONFIG_FILE = 'test_data/roster.conf' # Example in test_data SCHEMA_FILE = '../roster-core/data/database_schema.sql' DATA_FILE = 'test_data/test_data.sql' class TestCredentialsLibrary(unittest.TestCase): def setUp(self): self.config_instance = roster_core.Config(file_name=CONFIG_FILE) self.cred_instance = credentials.CredCache(self.config_instance, u'sharrell') db_instance = self.config_instance.GetDb() db_instance.Creat
eRosterDatabase() data = open(DATA_FILE, 'r').read() db_instance.StartTransaction() db_instance.cursor.execute(data) db_instance.EndTransaction() db_instance.close() self.core_instance = roster_core.Core(u'sharrell', self.config_instance) def is_valid_uuid (self, uuid): """ TAKEN FROM THE BLUEZ MODULE is_valid_uuid (uuid) -> bool returns True if uuid is a valid 128-bit UUID. valid UUIDs are always
strings taking one of the following forms: XXXX XXXXXXXX XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX where each X is a hexadecimal digit (case insensitive) """ try: if len (uuid) == 4: if int (uuid, 16) < 0: return False elif len (uuid) == 8: if int (uuid, 16) < 0: return False elif len (uuid) == 36: pieces = uuid.split ("-") if len (pieces) != 5 or \ len (pieces[0]) != 8 or \ len (pieces[1]) != 4 or \ len (pieces[2]) != 4 or \ len (pieces[3]) != 4 or \ len (pieces[4]) != 12: return False [ int (p, 16) for p in pieces ] else: return False except ValueError: return False except TypeError: return False return True def testCredentials(self): self.assertTrue(self.cred_instance.Authenticate(u'sharrell', 'test')) cred_string = self.cred_instance.GetCredentials(u'sharrell', 'test', self.core_instance) self.assertEqual(self.cred_instance.CheckCredential(cred_string, u'sharrell', self.core_instance), u'') self.assertEqual(self.cred_instance.CheckCredential(u'test', u'sharrell', self.core_instance), None) if( __name__ == '__main__' ): unittest.main()
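is_valid_uuid() above hand-rolls the 4, 8, and 8-4-4-4-12 length checks; for the canonical 36-character form the standard library's uuid module can do the parsing. A small stdlib-only sketch (not part of the Roster test suite):

import uuid

def is_valid_uuid36(value):
    """Return True if value parses as a canonically formatted 128-bit UUID."""
    try:
        return str(uuid.UUID(value)) == value.lower()
    except (ValueError, AttributeError, TypeError):
        return False

print(is_valid_uuid36('12345678-1234-1234-1234-123456789abc'))  # True
print(is_valid_uuid36('not-a-uuid'))                            # False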
David-Wobrock/django-fake-database-backends
tests/test_project/test_app/migrations/0017_add_float.py
Python
mit
386
0
# Generated by Django 2.0.1 on 2018-01-21 14:23 from django.db import migrations, models class Migration(migrations.Migration):
dependencies = [ ('test_app', '0016_add_filepath'), ] operations = [ migrations.AddField( model_name='secondobject', name='floating', field=models.FloatField(default=0.0), ), ]
CoderDojoTC/python-minecraft
docs/conf.py
Python
mit
8,611
0.006503
# -*- coding: utf-8 -*- # # CoderDojo Twin Cities Python for Minecraft documentation build configuration file, created by # sphinx-quickstart on Fri Oct 24 00:52:04 2014. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = ['sphinx.ext.todo'] todo_include_todos = True # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'contents' # General information about the project.
project = u'CoderDojo Twin Cities Python for Minecraft' copyright = u'by multiple <a href="https://github.com/CoderDojoTC/python-minecraft/graphs/contri
butors">contributors</a>' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '1.0' # The full version, including alpha/beta/rc tags. release = '1.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. 
#html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. html_show_copyright = False # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'CoderDojoTwinCitiesPythonforMinecraftdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'CoderDojoTwinCitiesPythonforMinecraft.tex', u'CoderDojo Twin Cities Python for Minecraft Documentation', u'Mike McCallister', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'coderdojotwincitiespythonforminecraft', u'CoderDojo Twin Cities Python for Minecraft Documentation', [u'Mike McCallister'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'CoderDojoTwinCitiesPythonforMinecraft', u'CoderDojo Twin Cities Python for Minecraft Documentation', u'Mike McCallister', 'CoderDojoTwinCit
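Most of the conf.py above is sphinx-quickstart boilerplate with the defaults left commented out; the values the project actually sets condense to roughly the following (a summary, not a drop-in replacement for the file):

# Condensed view of the settings this conf.py overrides.
extensions = ['sphinx.ext.todo']
todo_include_todos = True

templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'contents'

project = u'CoderDojo Twin Cities Python for Minecraft'
version = release = '1.0'

exclude_patterns = ['_build']
pygments_style = 'sphinx'

html_theme = 'default'
html_static_path = ['_static']
html_show_copyright = False
htmlhelp_basename = 'CoderDojoTwinCitiesPythonforMinecraftdoc'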
mailfish/helena
gallery/urls.py
Python
apache-2.0
197
0.015228
from django.conf.url
s import patterns, url from .views import PhotoListView urlpatterns = patterns('',
url(r'^(?P<slug>[\w-]+)/$', PhotoListView.as_view(), name='image'), )
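The urls.py above uses the patterns() helper, which only exists on older Django releases (it was dropped in Django 1.10). On current Django the same route is typically written as a plain list; a sketch of the modern equivalent:

from django.urls import re_path

from .views import PhotoListView

urlpatterns = [
    re_path(r'^(?P<slug>[\w-]+)/$', PhotoListView.as_view(), name='image'),
]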
sinsai/Sahana_eden
controllers/default.py
Python
mit
13,802
0.007245
# -*- coding: utf-8 -*- """ Default Controllers @author: Fran Boon """ module = "default" # Options Menu (available in all Functions) #response.menu_options = [ #[T("About Sahana"), False, URL(r=request, f="about")], #] def call(): "Call an XMLRPC, JSONRPC or RSS service" # If webservices don't use sessions, avoid cluttering up the storage #session.forget() return service() def download(): "Download a file" return response.download(request, db) # Add newly-registered users to Person Registry & 'Authenticated' role auth.settings.register_onaccept = lambda form: auth.s3_register(form) _table_user = auth.settings.table_user _table_user.first_name.label = T("First Name") _table_user.first_name.comment = SPAN("*", _class="req") _table_user.last_name.label = T("Last Name") #_table_user.last_name.comment = SPAN("*", _class="req") _table_user.email.label = T("E-mail") _table_user.email.comment = SPAN("*", _class="req") _table_user.password.comment = SPAN("*", _class="req") #_table_user.password.label = T("Password") #_table_user.language.label = T("Language") _table_user.language.comment = DIV(_class="tooltip", _title="%s|%s" % (T("Language"), T("The language you wish the site to be displayed in."))) _table_user.language.represent = lambda opt: s3_languages.get(opt, UNKNOWN_OPT) # ----------------------------------------------------------------------------- def index(): """ Main Home Page """ title = T("Sahana Eden Disaster Management Platform") response.title = title # Menu Boxes #modules = deployment_settings.modules def menu_box( title, ci, fi ): """ Returns a menu_box linking to URL(ci, fi) """ return A( DIV(title, _class = "menu-box-r"), _class = "menu-box-l", _href = URL( r=request, c=ci, f=fi) ) div_arrow_1 = DIV(IMG(_src = "/%s/static/img/arrow_blue_right.png" % \ request.application), _class = "div_arrow") div_arrow_2 = DIV(IMG(_src = "/%s/static/img/arrow_blue_right.png" % \ request.application), _class = "div_arrow") div_sit = DIV( H3(T("SITUATION")), _class = "menu_div") if deployment_settings.has_module("irs"): div_sit.append(menu_box(T("Incidents"), "irs", "ireport")) if deployment_settings.has_module("assess"): div_sit.append(menu_box(T("Assessments"), "assess", "assess")) div_sit.append(menu_box(T("Organizations"), "org", "organisation")) div_dec = DIV( H3(T("DECISION")), _class = "menu_div") div_dec.append(menu_box(T("Map"), "gis", "index")) if deployment_settings.has_module("assess"): div_dec.append(menu_box(T("Gap Report"), "project", "gap_report")) div_dec.append(menu_box(T("Gap Map"), "project", "gap_map")) div_res = DIV(H3(T("RESPONSE")), _class = "menu_div", _id = "menu_div_response") if deployment_settings.has_module("req"): div_res.append(menu_box(T("Requests"), "req", "req")) if deployment_settings.has_module("project"): div_res.append(menu_box(T("Activities"), "project", "activity")) #div_additional = DIV(A(DIV(T("Mobile Assess."), # _class = "menu_box" # ), # _href = URL( r=request, c="assess", f= "mobile_basic_assess") # )) menu_boxes = DIV(div_sit, div_arrow_1, div_dec, div_arrow_2, div_res, #div_additional, ) # @ToDo: Replace this with an easily-customisable section on the homepage #settings = db(db.s3_setting.id == 1).select(limitby=(0, 1)).first() #if settings: # admin_name = settings.admin_name # admin_email = settings.admin_email # admin_tel = settings.admin_tel #else: # # db empty and prepopulate is false # admin_name = T("Sahana Administrator").xml(), # admin_email = "support@Not Set", # admin_tel = T("Not Set").xml(), # Login/Registration forms 
self_registration = deployment_settings.get_security_self_registration() registered = False login_form = None login_div = None register_form = None register_div = None if 2 not in session.s3.roles: # This user isn't yet logged-in if request.cookies.has_key("registered"): # This browser has logged-in before registered = True # Provide a login box on front page request.args = ["login"] auth.messages.submit_button = T("Login") login_form = auth() login_div = DIV(H3(T("Login")), P(XML("%s <b>%s</b> %s" % (T("Registered users can"), T("login"), T("to access the system"))))) if self_registration: # Provide a Registration box on front page request.args = ["register"] auth.messages.submit_button = T("Register") register_form = auth() register_div = DIV(H3(T("Register")), P(XML("%s <b>%s</b>" % (T("If you would like to help, then please"), T("sign-up now"))))) if session.s3.debug: validate_script = SCRIPT(_type="text/javascript", _src=URL(r=request, c="static", f="scripts/S3/jquery.validate.js")) else: validate_script = SCRIPT(_type="text/javascript", _src=URL(r=request, c="static", f="scripts/S3/jquery.validate.pack.js")) register_div.append(validate_script) if request.env.request_method == "POST": post_script = """ // Unhide register form $('#register_form').removeClass('hide'); // Hide login form $('#login_form').addClass('hide'); """ else: post_script = "" register_script = SCRIPT(""" $(document).ready(function() { // Change register/login links to avoid page reload, make back button work. $('#register-btn').attr('href', '#register'); $('#login-btn').attr('href', '#login'); %s // Redirect Register Button to unhide $('#register-btn').click(function() { // Unhide register form $('#register_form').removeClass('hide'); // Hide login form $('#login_form').addClass('hide'); }); // Redirect Login Button to unhide $('#login-btn').click(function() { // Hide register form $('#register_form').addClass('hide'); // U
nhide login form $('#login_form').removeClass('hide'); }); }); """ % pos
t_script) register_div.append(register_script) return dict(title = title, #modules=modules, menu_boxes=menu_boxes, #admin_name=admin_name, #admin_email=admin_email, #admin_tel=admin_tel, self_registration=self_registration, registered=registered, login_form=login_form, login_div=login_div, register_form=register_form, register_div=register_div ) # ----------------------------------------------------------------------------- def rapid(): """ Set/remove rapid data entry flag """ val = request.vars.get("val", True) if val == "0": val = False else: val = True session.s3.rapid_data_entry = val response.view = "xml.html" return dict(item=str(session.s3.rapid_data_entry)) # ----------------------------------------------------------------------------- def user(): "Auth functions based on arg. See gluon/tools.py" auth.settings
Comunitea/CMNT_004_15
project-addons/scheduled_shipment/wizard/schedule_wizard.py
Python
agpl-3.0
1,376
0.011628
from odoo import models, fields, api, _ from odoo.exceptions import ValidationError from datetime import datetime class StockScheduleWizard(models.TransientModel): _name = "stock.schedule.wiza
rd" scheduled_date = fields.Datetime('Scheduled shipping date') @api.multi def action_but
ton_schedule(self): if self.scheduled_date: date_now = str(datetime.now()) difference = datetime.strptime(date_now, '%Y-%m-%d %H:%M:%S.%f') - \ datetime.strptime(self.scheduled_date, '%Y-%m-%d %H:%M:%S') difference = difference.total_seconds() / float(60) if difference > 0: raise ValidationError(_("Scheduled date must be bigger than current date")) picking = self.env['stock.picking'].browse(self.env.context['parent_obj']) cron_id = self.env['queue.job'].search([('model_name','=','stock.picking'),('state','=','pending'),('record_ids','like',picking.id), ('method_name','=','make_picking_sync')]) if cron_id: if len(cron_id) > 1: cron_id = cron_id[0] if self.scheduled_date > cron_id.eta: cron_id.unlink() picking.sale_id.scheduled_date = self.scheduled_date picking.not_sync = True picking._process_picking_scheduled_time()
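The wizard above validates the new date by parsing both timestamps and checking the sign of the difference in seconds. The same future-date guard, stripped of the Odoo plumbing (plain datetime, hypothetical input):

from datetime import datetime

def assert_in_future(scheduled_str, fmt='%Y-%m-%d %H:%M:%S'):
    """Raise ValueError unless the scheduled timestamp lies after the current time."""
    scheduled = datetime.strptime(scheduled_str, fmt)
    if (datetime.now() - scheduled).total_seconds() > 0:
        raise ValueError("Scheduled date must be bigger than current date")

assert_in_future('2099-01-01 12:00:00')    # passes
# assert_in_future('2000-01-01 12:00:00')  # would raise ValueError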
Sebelino/PyUserInput
pymouse/mac.py
Python
lgpl-3.0
5,547
0.003966
#Copyright 2013 Paul Barton # #This program is free software: you can redistribute it and/or modify #it under the terms of the GNU General Public License as published by #the Free Software Foundation, either version 3 of the License, or #(at your option) any later version. # #This program is distributed in the hope that it will be useful, #but WITHOUT ANY WARRANTY; without even the implied warranty of #MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #GNU General Public License for more details. # #You should have received a copy of the GNU General Public License #along with this program. If not, see <http://www.gnu.org/licenses/>. import Quartz from AppKit import NSEvent, NSScreen from .base import PyMouseMeta, PyMouseEventMeta pressID = [None, Quartz.kCGEventLeftMouseDown, Quartz.kCGEventRightMouseDown, Quartz.kCGEventOtherMouseDown] releaseID = [None, Quartz.kCGEventLeftMouseUp, Quartz.kCGEventRightMouseUp, Quartz.kCGEventOtherMouseUp] class PyMouse(PyMouseMeta): def press(self, x, y, button=1): event = Quartz.CGEventCreateMouseEvent(None, pressID[button], (x, y), button - 1) Quartz.CGEventPost(Quartz.kCGHIDEventTap, event) def release(self, x, y, button=1): event = Quartz.CGEventCreateMouseEvent(None, releaseID[button], (x, y), button - 1) Quartz.CGEventPost(Quartz.kCGHIDEventTap, event) def move(self, x, y): move = Quartz.CGEventCreateMouseEvent(None, Quartz.kCGEventMouseMoved, (x, y), 0) Quartz.CGEventPost(Quartz.kCGHIDEventTap, move) def drag(self, x, y): drag = Quartz.CGEventCreateMouseEvent(None, Quartz.kCGEventLeftMouseDragged, (x, y), 0) Quartz.CGEventPost(Quartz.kCGHIDEventTap, drag) def position(self): loc = NSEvent.mouseLocation() return loc.x, Quartz.CGDisplayPixelsHigh(0) - loc.y def screen_size(self): return NSScreen.mainScreen().frame().size.width, NSScreen.mainScreen().frame().size.height def scroll(self, vertical=None, horizontal=None, depth=None): #Local submethod for generating Mac scroll events in one axis at a time def scroll_event(y_move=0, x_move=0, z_move=0, n=1): for _ in range(abs(n)): scrollWheelEvent = Quartz.CGEventCreateScrollWheelEvent( None, # No source Quartz.kCGScrollEventUnitLine, # Unit of measurement is lines 3, # Number of wheels(dimensions) y_move, x_move, z_move) Quartz.CGEventPost(Quartz.kCGHIDEventTap, scrollWheelEvent) #Execute vertical then horizontal then depth scrolling events if vertical is not None: vertical = int(vertical) if vertical == 0: # Do nothing with 0 distance pass elif vertical > 0: # Scroll up if positive scroll_event(y_move=1, n=vertical) else: # Scroll down if negative scroll_event(y_move=-1, n=abs(vertical)) if horizontal is not None: horizontal
= int(horizontal) if horizontal == 0: # Do nothing with 0 distance
pass elif horizontal > 0: # Scroll right if positive scroll_event(x_move=1, n=horizontal) else: # Scroll left if negative scroll_event(x_move=-1, n=abs(horizontal)) if depth is not None: depth = int(depth) if depth == 0: # Do nothing with 0 distance pass elif depth > 0: # Scroll "out" if positive scroll_event(z_move=1, n=depth) else: # Scroll "in" if negative scroll_event(z_move=-1, n=abs(depth)) class PyMouseEvent(PyMouseEventMeta): def run(self): tap = Quartz.CGEventTapCreate( Quartz.kCGSessionEventTap, Quartz.kCGHeadInsertEventTap, Quartz.kCGEventTapOptionDefault, Quartz.CGEventMaskBit(Quartz.kCGEventMouseMoved) | Quartz.CGEventMaskBit(Quartz.kCGEventLeftMouseDown) | Quartz.CGEventMaskBit(Quartz.kCGEventLeftMouseUp) | Quartz.CGEventMaskBit(Quartz.kCGEventRightMouseDown) | Quartz.CGEventMaskBit(Quartz.kCGEventRightMouseUp) | Quartz.CGEventMaskBit(Quartz.kCGEventOtherMouseDown) | Quartz.CGEventMaskBit(Quartz.kCGEventOtherMouseUp), self.handler, None) loopsource = Quartz.CFMachPortCreateRunLoopSource(None, tap, 0) loop = Quartz.CFRunLoopGetCurrent() Quartz.CFRunLoopAddSource(loop, loopsource, Quartz.kCFRunLoopDefaultMode) Quartz.CGEventTapEnable(tap, True) while self.state: Quartz.CFRunLoopRunInMode(Quartz.kCFRunLoopDefaultMode, 5, False) def handler(self, proxy, type, event, refcon): (x, y) = Quartz.CGEventGetLocation(event) if type in pressID: self.click(x, y, pressID.index(type), True) elif type in releaseID: self.click(x, y, releaseID.index(type), False) else: self.move(x, y) if self.capture: Quartz.CGEventSetType(event, Quartz.kCGEventNull) return event
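PyMouse above is PyUserInput's OS X backend; assuming the package and its pyobjc Quartz/AppKit bindings are installed on a Mac, basic usage is just calling the methods defined in this file:

# macOS only; requires PyUserInput with the Quartz/AppKit (pyobjc) bindings.
from pymouse import PyMouse

mouse = PyMouse()
width, height = mouse.screen_size()
mouse.move(width // 2, height // 2)        # jump to the centre of the screen
print(mouse.position())
mouse.press(width // 2, height // 2, 1)    # left button down...
mouse.release(width // 2, height // 2, 1)  # ...and up again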
kgblll/libresoft-gymkhana
libs/ChannelTemplate.py
Python
gpl-2.0
822
0.06691
# Copyright (C) # #
Author : from GIC.Channels.GenericChannel import * class ChannelTest (GenericChannel): # mandatory fields to work on LibreGeoSocial search engine MANDATORY_FIELDS = ["latitude", "longitude", "radius", "category"] CATEGORIES = [{"id" : "0", "name" : "all", "desc" : "All supported categories "}, {"id" : "1", "name" : "category1", "desc" : "Category for..."}, ] def __init__ (self): self.options = {} def get_categories(self): return self.CATEGORIES
def get_info(self): return "Channel description" def set_options(self, options): """ Fill self.options with the received dictionary regarding mandatory and optional fields of your channel """ return True, "" def process (self): """ Make the search and return the nodes """
michaelbrooks/uw-message-coding
message_coding/apps/dataset/migrations/0010_auto_20150619_2106.py
Python
mit
419
0
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migration
s.Migration): dependencies = [ ('dataset', '0009_remove_selection_model'), ] operations = [ migrations.AlterField( model_name='dataset', name='name', field=models.CharField
(default=b'', max_length=150), ), ]
Azure/azure-sdk-for-python
sdk/videoanalyzer/azure-mgmt-videoanalyzer/azure/mgmt/videoanalyzer/_version.py
Python
mit
488
0.004098
# coding=utf-8 # -----------------------------
--------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # ----------------------------------------------------
---------------------- VERSION = "1.0.0b3"
CanonicalLtd/subiquity
subiquitycore/netplan.py
Python
agpl-3.0
5,471
0
import copy import glob import fnmatch import os import logging import yaml log = logging.getLogger("subiquitycore.netplan") def _sanitize_inteface_config(iface_config): for ap, ap_config in iface_config.get('access-points', {}).items(): if 'password' in ap_config: ap_config['password'] = '<REDACTED>' def sanitize_interface_config(iface_config): iface_config = copy.deepcopy(iface_config) _sanitize_inteface_co
nfig(iface_config) return iface_config def sanitize_config(config): """Return a copy of config with passwords redacted.""" config = copy.deepcopy(config) interfaces = config.get('network', {}).get('wifis', {}).items() for iface, iface_config in interfaces: _sanitize_inteface_config(iface_config) return config class Config: """A NetplanConfig represents the network config for a system. Call parse_net
plan_config() with each piece of yaml config, and then call config_for_device to get the config that matches a particular network device, if any. """ def __init__(self): self.physical_devices = [] self.virtual_devices = [] self.config = {} def parse_netplan_config(self, config): try: self.config = config = yaml.safe_load(config) except yaml.ReaderError as e: log.info("could not parse config: %s", e) return network = config.get('network') if network is None: log.info("no 'network' key in config") return version = network.get("version") if version != 2: log.info("network has no/unexpected version %s", version) return for phys_key in 'ethernets', 'wifis': for dev, dev_config in network.get(phys_key, {}).items(): self.physical_devices.append(_PhysicalDevice(dev, dev_config)) for virt_key in 'bonds', 'vlans': for dev, dev_config in network.get(virt_key, {}).items(): self.virtual_devices.append(_VirtualDevice(dev, dev_config)) def config_for_device(self, link): if link.is_virtual: for dev in self.virtual_devices: if dev.name == link.name: return copy.deepcopy(dev.config) else: allowed_matches = ('macaddress',) match_key = 'match' for dev in self.physical_devices: if dev.matches_link(link): config = copy.deepcopy(dev.config) if match_key in config: match = {k: v for k, v in config[match_key].items() if k in allowed_matches} if match: config[match_key] = match else: del config[match_key] return config return {} def load_from_root(self, root): for path in configs_in_root(root): try: fp = open(path) except OSError: log.exception("opening %s failed", path) with fp: self.parse_netplan_config(fp.read()) class _PhysicalDevice: def __init__(self, name, config): match = config.get('match') if match is None: self.match_name = name self.match_mac = None self.match_driver = None else: self.match_name = match.get('name') self.match_mac = match.get('macaddress') self.match_driver = match.get('driver') self.config = config log.debug( "config for %s = %s" % ( name, sanitize_interface_config(self.config))) def matches_link(self, link): if self.match_name is not None: matches_name = fnmatch.fnmatch(link.name, self.match_name) else: matches_name = True if self.match_mac is not None: matches_mac = self.match_mac == link.hwaddr else: matches_mac = True if self.match_driver is not None: matches_driver = self.match_driver == link.driver else: matches_driver = True return matches_name and matches_mac and matches_driver class _VirtualDevice: def __init__(self, name, config): self.name = name self.config = config log.debug( "config for %s = %s" % ( name, sanitize_interface_config(self.config))) def configs_in_root(root, masked=False): """Return a list of all netplan configs under root. The list is ordered in increasing precedence. @param masked: if True, include config paths that are masked by the same basename in a different directory.""" if not os.path.isabs(root): root = os.path.abspath(root) wildcard = "*.yaml" dirs = {"lib": "0", "etc": "1", "run": "2"} rootlen = len(root) paths = [] for d in dirs: paths.extend(glob.glob(os.path.join(root, d, "netplan", wildcard))) def mykey(path): """returned key is basename + string-precidence based on dir.""" bname = os.path.basename(path) bdir = path[rootlen + 1] bdir = bdir[:bdir.find(os.path.sep)] return "%s/%s" % (bname, bdir) if not masked: paths = {os.path.basename(p): p for p in paths}.values() return sorted(paths, key=mykey)
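Config.parse_netplan_config() above walks the ethernets/wifis and bonds/vlans sections of a netplan YAML, and _PhysicalDevice matches interfaces by name glob, MAC address, or driver. A self-contained sketch of the parse-and-match idea using only PyYAML and fnmatch (sample YAML, not Subiquity's own classes):

import fnmatch
import yaml

SAMPLE = """
network:
  version: 2
  ethernets:
    eno*:
      match: {name: "eno*"}
      dhcp4: true
"""

config = yaml.safe_load(SAMPLE)
ethernets = config.get('network', {}).get('ethernets', {})

def config_for(link_name):
    for key, dev_config in ethernets.items():
        pattern = dev_config.get('match', {}).get('name', key)
        if fnmatch.fnmatch(link_name, pattern):
            return dev_config
    return {}

print(config_for('eno1'))   # {'match': {'name': 'eno*'}, 'dhcp4': True}
print(config_for('wlan0'))  # {}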
saullocastro/pyNastran
pyNastran/bdf/mesh_utils/bdf_equivalence.py
Python
lgpl-3.0
12,362
0.003074
from __future__ import print_function #from collections import defaultdict #from functools import reduce from six import iteritems, string_types, PY2 #from six.moves import zip, range import numpy as np from numpy import (array, unique, arange, searchsorted, setdiff1d, intersect1d, asarray) from numpy.linalg import norm import scipy from pyNastran.utils import integer_types from pyNastran.bdf.bdf import BDF def bdf_equivalence_nodes(bdf_filename, bdf_filename_out, tol, renumber_nodes=False, neq_max=4, xref=True, node_set=None, size=8, is_double=False, remove_collapsed_elements=False, avoid_collapsed_elements=False, crash_on_collapse=False, log=None, debug=True): """ Equivalences nodes; keeps the lower node id; creates two nodes with the same Parameters ---------- bdf_filename : str / BDF str : bdf file path BDF : a BDF model that is fully valid (see xref) bdf_filename_out : str a bdf_filename to write tol : float the spherical tolerance renumber_nodes : bool should the nodes be renumbered (default=False) neq_max : int the number of "close" points (default=4) xref bool: bool does the model need to be cross_referenced (default=True; only applies to model option) node_set : List[int] / (n, ) ndarray the list/array of nodes to consider (not supported with renumber_nodes=True) size : int; {8, 16}; default=8 the bdf write precision is_double : bool; default=False the field precision to write crash_on_collapse : bool; default=False stop if nodes have been collapsed False: blindly move on True: rereads the BDF which catches doubled nodes (temporary); in the future collapse=True won't need to double read; an alternative is to do Patran's method of avoiding collapse) remove_collapsed_elements : bool; default=False (unsupported) True : 1D/2D/3D elements will not be collapsed; CELASx/CDAMP/MPC/etc. are not considered False : no elements will be removed avoid_collapsed_elements : bool; default=False (unsupported) True : only collapses that don't break 1D/2D/3D elements will be considered; CELASx/CDAMP/MPC/etc. are considered False : element can be collapsed debug : bool bdf debugging log : logger(); default=None bdf logging Returns ------- model : BDF() The BDF model corresponding to bdf_filename_out .. warning:: I doubt SPOINTs/EPOINTs work correctly .. warning:: xref not fully implemented (assumes cid=0) .. todo:: node_set stil does work on the all the nodes in the big kdtree loop, which is very inefficient .. todo:: remove_collapsed_elements is not supported .. 
todo:: avoid_collapsed_elements is not supported """ if not isinstance(tol, float): tol = float(tol) nodes_xyz, model, nids, inew = _eq_nodes_setup( bdf_filename, tol, renumber_nodes=renumber_nodes, xref=xref, node_set=node_set, debug=debug) ieq, slots = _eq_nodes_build_tree(nodes_xyz, nids, tol, inew=inew, node_set=node_set, neq_max=neq_max)[1:] nid_pairs = _eq_nodes_find_pairs(nids, slots, ieq, node_set=node_set) _eq_nodes_final(nid_pairs, model, tol, node_set=node_set) if bdf_filename_out is not None: model.write_bdf(bdf_filename_out, size=size, is_double=is_double) if crash_on_collapse: # lazy way to make sure there aren't any collapsed nodes model2 = BDF(log=log, debug=debug) model2.read_bdf(bdf_filename_out) return model def _eq_nodes_setup(bdf_filename, tol, renumber_nodes=False, xref=True, node_set=None, debug=True): """helper function for `bdf_equivalence_nodes`""" if node_set is not None: if renumber_nodes: raise NotImplementedError('node_set is not None & renumber_nodes=True') #print(type(node_set)) #print('*node_set', node_set) assert len(node_set) > 0, node_set if isinstance(node_set, set): node_set = asarray(list(node_set), dtype='int32') else: node_set = asarray(node_set, dtype='int32') if isinstance(bdf_filename, string_types): xref = True model = BDF(debug=debug) model.read_bdf(bdf_filename, xref=True) else: model = bdf_filename model.cross_reference(xref=xref) coord_ids = model.coord_ids needs_get_position = True if coord_ids == [0] else False # quads / tris #nids_quads = [] #eids_quads = [] #nids_tris = [] #eids_tris = [] # map the node ids to the slot in the nids array renumber_nodes = False inode = 0 nid_map = {} if node_set is not None: if PY2: all_nids = array(model.nodes.keys(), dtype='int32') else: all_nids = array(list(model.nodes.keys()), dtype='int32') # B - A # these are all the nodes that are requested from node_set that are missing # thus len(diff_nodes) == 0 diff_nodes = setdiff1d(node_set, all_nids) if len(diff_nodes) != 0: msg = ('The following nodes cannot be found, but are included' ' in the reduced set; nids=%s' % diff_nodes) raise RuntimeError(msg) # A & B # the nodes to analyze are the union of all the nodes and the desired set # which is basically the same as: # nids = unique(node_set) nids = intersect1d(all_nids, node_set, assume_unique=True) # the new values if renumber_nodes: raise NotImplementedError('node_set is not None & renumber_nodes=True') else: for nid in all_nids: nid_map[inode] = nid inode += 1 #nids = array([node.nid for nid, node in sorted(iteritems(model.nodes)) #if nid in node_set], dtype='int32') else: if renumber_nodes: for nid, node in sorted(iteritems(model.nodes)): node.nid = inode + 1 nid_map[inode] = nid
inode += 1 nnodes = len(model.nodes) nids = arange(1, inode + 1, dtype='int32') assert nids[-1] == nnodes else: for nid, node in sorted(iteritems(model.nodes)): nid_map[inode] = nid inode += 1 nids = array([node.nid for n
id, node in sorted(iteritems(model.nodes))], dtype='int32') all_nids = nids if needs_get_position: nodes_xyz = array([model.nodes[nid].get_position() for nid in nids], dtype='float32') else: nodes_xyz = array([model.nodes[nid].xyz for nid in nids], dtype='float32') if node_set is not None: assert nodes_xyz.shape[0] == len(nids) if 0: # I forget entirely what this block of code is for, but my general # recollection was that it checked that all the nodes that were # referenced were included in the nids list. I'd rather break that # check in order to support nodes_set. # # It's also possible that it's here, so you only consider nodes that # are associated... # there is some set of points that are used on the elements that # will be considered. # # Presumably this is enough to capture all the node ids and NOT # spoints, but I doubt it... spoint_epoint_nid_set = set([]) for eid, element in sorted(iteritems(model.elements)): spoint_epoint_nid_set.update(element.node_ids) for eid, element in sorted(iteritems(model.masses)): spoint_epoint_nid_set.update(element.node_ids) if mod
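bdf_equivalence_nodes() above ultimately reduces to a nearest-neighbour search over node coordinates followed by collapsing pairs closer than tol, keeping the lower node id. The geometric core can be sketched directly with scipy's cKDTree, independent of the BDF data structures (made-up coordinates, not a Nastran model):

import numpy as np
from scipy.spatial import cKDTree

# Three points, the first two within the tolerance of each other.
nids = np.array([1, 2, 3], dtype='int32')
xyz = np.array([[0.0, 0.0, 0.0],
                [0.0, 0.0, 0.001],
                [5.0, 0.0, 0.0]], dtype='float32')

tol = 0.01
tree = cKDTree(xyz)
pairs = tree.query_pairs(r=tol)  # index pairs closer than tol

# Keep the lower node id of each coincident pair, as the equivalencing does.
nid_pairs = sorted((int(min(nids[i], nids[j])), int(max(nids[i], nids[j]))) for i, j in pairs)
print(nid_pairs)  # [(1, 2)]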
mbuesch/toprammer
libtoprammer/chips/microchip16/pic24f08kl201sip6.py
Python
gpl-2.0
2,151
0.034868
# # THIS FILE WAS AUTOGENERATED BY makeSip6.py # Do not edit this file manually. All changes will be lost. # """ # TOP2049 Open Source programming suite # # Microchip PIC24f08kl201 SIP6 # # Copyright (c) 2014 Pavel Stemberk <stemberk@gmail.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULA
R PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free S
oftware Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. """ from .microchip16_common import * from .configWords import klx0x_fuseDesc class Chip_Pic24f08kl201sip6(Chip_Microchip16_common): voltageVDD = 3.3 voltageVPP = 8 logicalFlashProgramMemorySize = 0x800000 logicalFlashConfigurationMemorySize = 0x800000 hasEEPROM = False def __init__(self): Chip_Microchip16_common.__init__(self, chipPackage = "DIP10", chipPinVCC = 9, chipPinsVPP = 10, chipPinGND = 8, signature=b"\x06\x4b", # flashPageSize (in number of 24bit words) flashPageSize=0x15FE // 2 + 2, # flashPageSize=0x40, flashPages=1, # eepromPageSize (in 16bit words) eepromPageSize=0, eepromPages=0, # all 7 words uses lowest byte only fuseBytes=2 * 9 ) self.configWordAddr = 0xF80000 # self.osccalBackupAddr = self.userIDLocationAddr + self.userIDLocationSize fuseDesc = klx0x_fuseDesc ChipDescription( Chip_Pic24f08kl201sip6, bitfile = "microchip16sip6", chipID="pic24f08kl201sip6", runtimeID = (0xDF05, 0x01), chipVendors="Microchip", description = "PIC24F08KL201 - ICD", packages = (("DIP10", ""), ), fuseDesc=fuseDesc, maintainer="Pavel Stemberk <stemberk@gmail.com>", )
cybermaniax/lvstop
src/main/python/ipvstop.py
Python
apache-2.0
332
0.009036
#!/usr/bin/env
python ''' Created on 18 Dec 2014 @author: ghalajko ''' from lvstop.screen import Screen from lvstop import loop
if __name__ == '__main__': with Screen() as scr: try: scr.main_loop(loop) except KeyboardInterrupt: pass except: raise
raspibo/Livello1
var/www/cgi-bin/writecsvlistsetsredis.py
Python
mit
1,795
0.018942
#!/usr/bin/env python3 # This file displays the "lists" redis key
# # First it checks that the key is present in the form # Needed for the html handling part in python import cgi import cgitb # Enable error reporting to the web/http server cgitb.enable() # My libraries mjl
(Json, Files), mhl (Html), flt (T w/ Redis) import mjl, mhl, flt import redis, subprocess # Parametri generali TestoPagina="Genera file \".csv\" dei valori di chiave \"lists\" Redis" DirBase="/var/www" ConfigFile=DirBase+"/conf/config.json" #ExecFile="/cgi-bin/<exefile>" # Redis "key" RedisKey = "*" # Tutte le chiavi # Form name/s FormName = "rkey" # Apro il database Redis con l'istruzione della mia libreria MyDB = flt.OpenDBFile(ConfigFile) # Start web page - Sono blocchi di html presenti nella libreria print (mhl.MyHtml()) print (mhl.MyHtmlHead()) # Scrivo il Titolo/Testo della pagina print ("<h1>","<center>",TestoPagina,"</center>","</h1>") #print ("<hr/>","<br/>") # Eventuale help/annotazione #print ("Non ho rinominato i campi e non sono stato a riordinare le voci.<br/>") form=cgi.FieldStorage() if FormName not in form: print ("<h2>ERRORE: Non e` stata passata la chiave Redis</h2>") else: RedisKey = cgi.escape(form[FormName].value) RedisKeyStart = cgi.escape(form["VStart"].value) RedisKeyStop = cgi.escape(form["VStop"].value) print ("La chiave viene passata come argomento ad un'altro programma, quindi l'unico feedback possibile e` 0 se e` andato a buon fine, o 1 se c'e` stato un'errore.</br></br>") print ("Comando eseguito:</br>/var/www/cgi-bin/setsVals2csv.py {0:s} {1:s} {2:s}</br></br>".format(RedisKey, RedisKeyStart, RedisKeyStop)) print (subprocess.call(['/var/www/cgi-bin/setsVals2csv.py', RedisKey, RedisKeyStart, RedisKeyStop])) # End web page print (mhl.MyHtmlBottom())
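# A small, hypothetical illustration of the pattern used above: the CGI script
# simply forwards the Redis key and range as positional arguments to another
# program via subprocess.call and reports its return code (the command below is
# a portable stand-in, not the real setsVals2csv.py).
import subprocess
import sys

return_code = subprocess.call([sys.executable, '-c', 'pass'])  # stand-in command
print(return_code)  # 0 on success, non-zero on failure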
svalenti/agnkey
trunk/bin/agnmag.py
Python
mit
19,750
0.005266
#!/usr/bin/env python description = ">> make final magnitude" usage = "%prog image [options] " import os import string import re import sys from optparse import OptionParser import time import math import agnkey import numpy as np if __name__ == "__main__": start_time = time.time() parser = OptionParser(usage=usage, description=description) parser.add_option("-i", "--interactive", action="store_true", dest='interactive', default=False, help='Interactive \t\t\t [%default]') parser.add_option("-e", "--exzp", dest="exzp", default='', type='str', help='external zero point from different field \t\t %default') parser.add_option("-t", "--typemag", dest="typemag", default='fit', type='str', help='type of magnitude fit,ph \t\t %default') parser.add_option("--datatable", dest="datatable", default='dataredulco', type='str', help='mysql table where stroe reduction info \t\t %default') parser.add_option("--calib", dest="calibration", default='sloan', type='str', help='calibration to (sloan,sloanprime,natural,apass) \t\t %default') parser.add_option("-s", "--system", dest="field", default='', type='str', help='photometric system [sloan, landolt] \t\t %default') option, args = parser.parse_args() if len(args) < 1: sys.argv.append('--help') _typemag = option.typemag if _typemag not in ['fit', 'ph']: sys.argv.append('--help') option, args = parser.parse_args() imglist = args[0] lista = agnkey.util.readlist(imglist) hdr = agnkey.util.readhdr(lista[0]) tel = agnkey.util.readkey3(hdr, 'telescop') filters = agnkey.sites.filterst(tel) filters1 = agnkey.sites.filterst1(tel) _datatable = option.datatable _exzp = option.exzp _calib = option.calibration _field = option.field _interactive = option.interactive typemag = 'PSFMAG1' typemagerr = 'PSFDMAG1' namemag = {'fit': ['PSFMAG1', 'PSFDMAG1'], 'ph': ['APMAG1', 'PSFDMAG1']} dicti0 = agnkey.util.makecatalogue(lista) dicti = {} for _filter in dicti0: for img in dicti0[_filter]: if dicti0[_filter][img][namemag[_typemag][0]] != 9999: if _filter not in dicti: dicti[_filter] = {} if img not in dicti[_filter]: dicti[_filter][img] = {} for key in dicti0[_filter][img].keys(): dicti[_filter][img][key] = dicti0[_filter][img][key] if len(dicti) > 0: allfilters = '' for fil in dicti: allfilters = allfilters + filters1[fil] if _interactive: print allfilters if _field == 'apass' or _calib == 'apass': queste0 = agnkey.agnloopdef.chosecolor(allfilters, False, 'apass') queste1 = agnkey.agnloopdef.chosecolor(allfilters, True, 'apass') else: queste0 = agnkey.agnloopdef.chosecolor(allfilters, False) queste1 = agnkey.agnloopdef.chosecolor(allfilters, True) if _exzp: lista2 = agnkey.util.readlist(_exzp) dicti2 = agnkey.util.makecatalogue(lista2) for _filter2 in dicti2: img2 = dicti2[_filter2].keys()[0] for jj in dicti2[_filter2][img2].keys(): if 'ZP' in jj: if _filter2 in dicti: for img in dicti[_filter2].keys(): dicti[_filter2][img][jj] = dicti2[_filter2][img2][jj] agnkey.util.updateheader(img, 0, {jj: [dicti2[_filter2][img2][jj], 'a b sa sb in y=a+bx']}) agnkey.util.updateheader(img, 0, {'CATALOG': [str(img2), 'catalogue source']}) print jj, dicti2[_filter2][img2][jj] for _filter in dicti: for img in dicti[_filter]:
if _interactive: print '\#### ', img # if dicti[_filter][img][namemag[_typemag][0]]!=9999: # start calibrating image 1 secondimage = [] jdvec = []
filtvec = [] colore = [] for ii in dicti[_filter][img].keys(): if 'ZP' in ii: # for each zero point available cc = ii[-2:] # color used for filt2 in dicti.keys(): if filt2 != _filter: for jj in dicti[filt2].keys(): for ll in dicti[filt2][jj].keys(): if 'ZP' in ll and ll[-2:] == cc: secondimage.append(jj) jdvec.append(dicti[filt2][jj]['MJD'] - dicti[_filter][img]['MJD']) filtvec.append(filt2) colore.append(cc) if len(secondimage) > 0: colorescelto = '' vv = queste1[agnkey.sites.filterst1(tel)[_filter]] if len(vv) > 0: if vv[0].upper() in colore: colorescelto = vv[0].upper() else: vv = queste0[agnkey.sites.filterst1(tel)[_filter]] if len(vv) > 0: if vv[0].upper() in colore: colorescelto = vv[0].upper() if colorescelto: print 'use ' + _filter + ' with color ' + colorescelto filtvec = np.compress(np.array(colore) == colorescelto, filtvec) jdvec = np.compress(np.array(colore) == colorescelto, jdvec) secondimage = np.compress(np.array(colore) == colorescelto, secondimage) colore = np.compress(np.array(colore) == colorescelto, colore) dicti[_filter][img]['secondimg'] = secondimage[np.argmin(jdvec)] dicti[_filter][img]['secondfilt'] = filtvec[np.argmin(jdvec)] _filter2 = dicti[_filter][img]['secondfilt'] img2 = dicti[_filter][img]['secondimg'] col = colore[np.argmin(jdvec)] if dicti[_filter][img]['telescope'] in ['lsc', '1m0-04', '1m0-05', '1m0-06', '1m0-09']: kk = agnkey.sites.extintion('ctio') elif dicti[_filter][img]['telescope'] in ['elp', '1m0-08']: kk = agnkey.sites.extintion('mcdonald') elif dicti[_filter][img]['telescope'] in ['cpt', '1m0-12', '1m0-10', '1m0-13']: kk = agnkey.sites.extintion('southafrica') elif dicti[_filter][img]['telescope'] in ['ftn']: kk = agnkey.sites.extintion('mauna') elif dicti[_filter][img]['telescope'] in ['1m0-03', '1m0-11', 'fts', 'coj']: kk = agnkey.sites.extintion('siding') else: print _filter, img, dicti[_filter][img] sys.exit('problem with dicti') if _interactive: print dicti[_filter][img]['airmass'] print kk[filters1[_filter]] print 2.5 * math.log10(dicti[_filter][img]['exptime']) print dicti[_filter][img][namemag[_typemag][0]] # instrumental mag corrected for exp time and airmass # mag0=dicti[_filter][img][namemag[_typemag][0]]+2.5*math.log10(dicti[_filter][img]['exptime'])-kk[filters1[_filter]]*dicti[_filter][img]['airmass'] mag0 = dicti[_filter][img][namemag[_typemag][0]] - kk[filters1[_filter]] * dicti[_filter][img][ 'airmass'] dmag0 = dicti[_filter][img][namemag[_
jolyonb/edx-platform
openedx/core/djangoapps/video_pipeline/models.py
Python
agpl-3.0
3,579
0.001397
""" Model to hold edx-video-pipeline configurations. """ from __future__ import absolute_import from config_models.models import ConfigurationModel from django.contrib.auth import get_user_model from django.db import models from django.utils.translation import ugettext_lazy as _ from opaque_keys.edx.django.models import CourseKeyField import six class VideoPipelineIntegration(ConfigurationModel): """ Manages configuration for connecting to the edx-video-pipeline service and using its API. .. no_pii: """ client_name = models.CharField( max_length=100, default='VEDA-Prod', null=False, blank=False, help_text=_('Oauth client name of video pipeline service.') ) api_url = models.URLField( verbose_name=_('Internal API URL'), help_text=_('edx-video-pipeline API URL.') ) service_username = models.CharField( max_length=100, default='veda_service_user', null=False, blank=False, help_text=_('Username created for Video Pipeline Integration, e.g. veda_service_user.') ) def get_service_user(self): # NOTE: We load the user model here to avoid issues at startup time that result from the hacks # in lms/startup.py. User = get_user_model() # pylint: disable=invalid-name return User.objects.get(username=self.service_username) class VideoUploadsEnabledByDefault(ConfigurationModel): """ Enables video uploads enabled By default feature across the platform. .. no_pii: """ # this field overrides course-specific settings enabled_for_all_courses = models.BooleanField(default=False) @classmethod def feature_enabled(cls, course_id): """ Looks at the currently active configuration model to determine whether the VideoUploadsEnabledByDefault feature is available. If the feature flag is not enabled, the feature is not available. If the flag is enabled for all the courses, feature is available. If the flag is enabled and the provided course_id is for a course with CourseVideoUploadsEnabledByDefault enabled, then the feature is available. Arguments: course_id (CourseKey): course id for whom feature will be checked. """ if not cls.is_enabled(): return False elif not cls.current().enabled_for_all_courses: feature = (CourseVideoUploadsEnabledByDefault.objects .filter(course_id=course_id) .order_by('-change_date') .first()) return feature.enabled if feature else False return True def __unicode__(self): current_model = VideoUploadsEnabledByDefault.current() return u"VideoUploadsEnabledByDefault: enabled {is_enabled}".format( is_enabled=current_model.is_enabled() ) class CourseVideoUploadsEnabledByDefault(ConfigurationModel): """ Enables video uploads enabled by default feature for a specific course. Its global feat
ure must be enabled for this to take effect. .. no_pii: """ KEY_FIELDS = ('course_id',) course_id = CourseKeyField(max_length=255, db_index=True) def __unicode__(self): not_en = "Not " if self.enabled: not_en = "" return u"Course '{course_key}': Video Uploads {not_enabled}Enabled by default.".format( course_key=six.text_type(self.course_id), no
t_enabled=not_en )
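# A framework-free sketch (names are illustrative) of the precedence implemented
# by VideoUploadsEnabledByDefault.feature_enabled above: the global switch is
# checked first, then the all-courses shortcut, then the per-course override.
def feature_enabled(globally_enabled, enabled_for_all_courses, course_overrides, course_id):
    if not globally_enabled:
        return False
    if enabled_for_all_courses:
        return True
    return course_overrides.get(course_id, False)

assert feature_enabled(True, False, {'course-v1:Demo': True}, 'course-v1:Demo') is True
assert feature_enabled(False, True, {}, 'course-v1:Demo') is False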
andersondss/LearningDjango
udemy/SimpleMooc/SimpleMooc/settings.py
Python
mit
3,174
0.000315
""" Django settings for SimpleMooc project. For more information on this file, see https://docs.djangoproject.com/en/1.6/topics/settings/ For the full list of s
ettings and their values, see https://docs.djangoproject.com/en/1.6/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os import dj_database_url BASE_DIR = os.path.dirname(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '#f3*&^_56
z9tj4=l%7+0gzg17o(sw&%(use@zt+_k@=y(ke2f5' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = False TEMPLATE_DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', # libs 'taggit', # apps 'SimpleMooc.core', 'SimpleMooc.courses', 'SimpleMooc.accounts', 'SimpleMooc.forum', ) from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS as TCP TEMPLATE_CONTEXT_PROCESSORS = TCP + ( 'django.core.context_processors.request', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'SimpleMooc.urls' WSGI_APPLICATION = 'SimpleMooc.wsgi.application' # Database # Internationalization # https://docs.djangoproject.com/en/1.6/topics/i18n/ LANGUAGE_CODE = 'pt-br' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.6/howto/static-files/ STATIC_URL = '/static/' MEDIA_ROOT = os.path.join(BASE_DIR, "SimpleMooc", "media") MEDIA_URL = "/media/" # Email # EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend" EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend" DEFAULT_FROM_EMAIL = "Nome <anderson.bcc.uag@gmail.com>" EMAIL_USE_TLS = True EMAIL_HOST = "smtp.gmail.com" EMAIL_HOST_USER = "anderson.bcc.uag@gmail.com" EMAIL_HOST_PASSWORD = "123" EMAIL_PORT = "587" CONTACT_EMAIL = "anderson.adss.hotmail@gmail.com" # auth LOGIN_URL = "accounts:login" LOGIN_REDIRECT_URL = "core:home" LOGOUT_URL = "accounts:logout" AUTH_USER_MODEL = "accounts.User" # Heroku settings DATABASES = { 'default': dj_database_url.config(), } # Honor the 'X-Forwarded-Proto' header for request.is_secure() SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') # Allow all host headers ALLOWED_HOSTS = ['*'] STATIC_ROOT = 'staticfiles' STATIC_URL = '/static/' STATICFILES_DIRS = (os.path.join(BASE_DIR, "SimpleMooc", "core", "static/"),) try: from SimpleMooc.local_settings import * except ImportError: pass
yashpungaliya/MailingListParser
lib/analysis/author/graph/interaction.py
Python
gpl-3.0
7,719
0.003627
""" This module is used to generate graphs that show the interaction between authors either through multiple edges or through edge weights. There is an edge from one author to another if the former sent a message to the latter. These graphs depict thread-wise interaction of the authors for the entire mailing list and these interactions are labelled in chronological order to help identify the flow of messages across authors. """ import json from util.read_utils import * def add_to_multigraph(graph_obj, discussion_graph, json_data, nbunch, label_prefix=''): """ """ i = 0 for node in sorted(nbunch): node_attr = json_data[node] if node_attr['Cc'] is None: addr_list = node_attr['To'] else: addr_list = node_attr['To'] | node_attr['Cc'] for to_address in addr_list: graph_obj.add_edge(node_attr['From'], to_address, label=label_prefix+str(i)) succ_nbunch = [int(x) for x in discussion_graph.successors(node)] if succ_nbunch is not None: add_to_multigraph(graph_obj, discussion_graph, json_data, succ_nbunch, label_prefix+str(i)+'.') i += 1 def author_interaction_multigraph(discussion_graph, json_data, limit=10): """ """ niter = 0 for conn_subgraph in nx.weakly_connected_component_subgraphs(discussion_graph): interaction_graph = nx.MultiDiGraph() origin = min(int(x) for x in conn_subgraph.nodes()) add_to_multigraph(interaction_graph, discussion_graph, json_data, [origin]) # print(json_data[origin]) g1 = nx.to_agraph(interaction_graph) g1.draw("author_multi/"+str(origin)+'.png', prog='circo') niter += 1 if limit == niter and limit > 0: break def add_to_weighted_graph(graph_obj, discussion_graph, json_data, nbunch, node_enum=list()): """ """ for node in sorted(nbunch): node_attr = json_data[node] if node_attr['Cc'] is None: addr_list = node_attr['To'] else: addr_list = node_attr['To'] | node_attr['Cc'] if node_attr['From'] not in node_enum: node_enum.append(node_attr['From']) from_node = node_enum.index(node_attr['From']) for to_address in addr_list: if to_address not in node_enum: node_enum.append(to_address) to_node = node_enum.index(to_address) if not graph_obj.has_edge(from_node, to_node): graph_obj.add_edge(from_node, to_node, label=1) else: graph_obj[from_node][to_node]['label'] += 1 succ_nbunch = [int(x) for x in discussion_graph.successors(node)] if succ_nbunch is not None: add_to_weighted_graph(graph_obj, discussion_graph, json_data, succ_nbunch, node_enum) def author_interaction_weighted_graph(discussion_graph, json_data, limit=10): """ """ niter = 0 for conn_subgraph in nx.weakly_connected_component_subgraphs(discussion_graph): interaction_graph = nx.DiGraph() origin = min(int(x) for x in conn_subgraph.nodes()) add_to_weighted_graph(interaction_graph, discussion_graph, json_data, [origin], []) # print(json_data[origin]) g1 = nx.to_agraph(interaction_graph) g1.draw("author_weighted/"+str(origin)+'.png', prog='circo') niter += 1 if limit == niter and limit > 0: break def weighted_multigraph(): # Time limit can be specified here in the form of a timestamp in one of the identifiable formats and all messages # that have arrived after this timestamp will be ignored. time_limit = None # If true, then messages that belong to threads that have only a single author are ignored. igno
re_lat = True if time_limit is None: time_limit = time.strftime("%a, %d %b %Y %H:%M:%S %z") msgs_before_time = set() time_limit = get_datetime_object(time_limit) print("All messages before", time_limit, "are being considered.") discussion_graph = nx.DiGraph() email_re = re.compile(r'[\w\.-]+@[\w\.-]+') json_data = dict() # Add nodes into N
etworkX graph by reading from CSV file if not ignore_lat: with open("graph_nodes.csv", "r") as node_file: for pair in node_file: node = pair.split(';', 2) if get_datetime_object(node[2].strip()) < time_limit: node[0] = int(node[0]) msgs_before_time.add(node[0]) from_addr = email_re.search(node[1].strip()) from_addr = from_addr.group(0) if from_addr is not None else node[1].strip() discussion_graph.add_node(node[0], time=node[2].strip(), color="#ffffff", style='bold', sender=from_addr) node_file.close() print("Nodes added.") # Add edges into NetworkX graph by reading from CSV file with open("graph_edges.csv", "r") as edge_file: for pair in edge_file: edge = pair.split(';') edge[0] = int(edge[0]) edge[1] = int(edge[1]) if edge[0] in msgs_before_time and edge[1] in msgs_before_time: discussion_graph.add_edge(*edge) edge_file.close() print("Edges added.") else: lone_author_threads = get_lone_author_threads(False) # Add nodes into NetworkX graph only if they are not a part of a thread that has only a single author with open("graph_nodes.csv", "r") as node_file: for pair in node_file: node = pair.split(';', 2) node[0] = int(node[0]) if get_datetime_object(node[2].strip()) < time_limit and node[0] not in lone_author_threads: msgs_before_time.add(node[0]) from_addr = email_re.search(node[1].strip()) from_addr = from_addr.group(0) if from_addr is not None else node[1].strip() discussion_graph.add_node(node[0], time=node[2].strip(), color="#ffffff", style='bold', sender=from_addr) node_file.close() print("Nodes added.") # Add edges into NetworkX graph only if they are not a part of a thread that has only a single author with open("graph_edges.csv", "r") as edge_file: for pair in edge_file: edge = pair.split(';') edge[0] = int(edge[0]) edge[1] = int(edge[1]) if edge[0] not in lone_author_threads and edge[1] not in lone_author_threads: if edge[0] in msgs_before_time and edge[1] in msgs_before_time: discussion_graph.add_edge(*edge) edge_file.close() print("Edges added.") with open('clean_data.json', 'r') as json_file: for chunk in lines_per_n(json_file, 9): json_obj = json.loads(chunk) # print("\nFrom", json_obj['From'], "\nTo", json_obj['To'], "\nCc", json_obj['Cc']) from_addr = email_re.search(json_obj['From']) json_obj['From'] = from_addr.group(0) if from_addr is not None else json_obj['From'] json_obj['To'] = set(email_re.findall(json_obj['To'])) json_obj['Cc'] = set(email_re.findall(json_obj['Cc'])) if json_obj['Cc'] is not None else None # print("\nFrom", json_obj['From'], "\nTo", json_obj['To'], "\nCc", json_obj['Cc']) json_data[json_obj['Message-ID']] = json_obj print("JSON data loaded.") author_interaction_weighted_graph(discussion_graph, json_data, limit=20) author_interaction_multigraph(discussion_graph, json_data, limit=20)
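# A short sketch of the edge-weight accumulation performed by
# add_to_weighted_graph() above, assuming networkx is available: one directed
# edge per sender/recipient pair, with 'label' counting messages on that edge.
import networkx as nx

graph = nx.DiGraph()
for sender, recipient in [('alice', 'bob'), ('alice', 'bob'), ('bob', 'carol')]:
    if not graph.has_edge(sender, recipient):
        graph.add_edge(sender, recipient, label=1)
    else:
        graph[sender][recipient]['label'] += 1

assert graph['alice']['bob']['label'] == 2
assert graph['bob']['carol']['label'] == 1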
danielecook/gist-alfred
set_info.py
Python
mit
400
0
#!/usr/b
in/python # encoding: utf-8 import sys from gist import create_workflow, set_github_token from workflow import Workflow, web from workflow.background import run_in_background, is_running def main(wf): arg = wf.args[0] if len(arg) > 0: token = wf.args[0] set_github_token(wf, token) if __name__ == '__main__': wf = creat
e_workflow() sys.exit(wf.run(main))
dezelin/scons
scons-local/SCons/Node/Alias.py
Python
mit
4,197
0.001906
"""scons.Node.Alias Alias nodes. This creates a hash of global Aliases (dummy targets). """ # # Copyright (c) 2001 - 2014 The SCons Foundation # # Permission is hereby granted, free of charge, to any person
obtaining # a copy of this software and associated documentation files (the # "Software"), to dea
l in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Node/Alias.py 2014/07/05 09:42:21 garyo" import collections import SCons.Errors import SCons.Node import SCons.Util class AliasNameSpace(collections.UserDict): def Alias(self, name, **kw): if isinstance(name, SCons.Node.Alias.Alias): return name try: a = self[name] except KeyError: a = SCons.Node.Alias.Alias(name, **kw) self[name] = a return a def lookup(self, name, **kw): try: return self[name] except KeyError: return None class AliasNodeInfo(SCons.Node.NodeInfoBase): current_version_id = 1 field_list = ['csig'] def str_to_node(self, s): return default_ans.Alias(s) class AliasBuildInfo(SCons.Node.BuildInfoBase): current_version_id = 1 class Alias(SCons.Node.Node): NodeInfo = AliasNodeInfo BuildInfo = AliasBuildInfo def __init__(self, name): SCons.Node.Node.__init__(self) self.name = name def str_for_display(self): return '"' + self.__str__() + '"' def __str__(self): return self.name def make_ready(self): self.get_csig() really_build = SCons.Node.Node.build is_up_to_date = SCons.Node.Node.children_are_up_to_date def is_under(self, dir): # Make Alias nodes get built regardless of # what directory scons was run from. Alias nodes # are outside the filesystem: return 1 def get_contents(self): """The contents of an alias is the concatenation of the content signatures of all its sources.""" childsigs = [n.get_csig() for n in self.children()] return ''.join(childsigs) def sconsign(self): """An Alias is not recorded in .sconsign files""" pass # # # def changed_since_last_build(self, target, prev_ni): cur_csig = self.get_csig() try: return cur_csig != prev_ni.csig except AttributeError: return 1 def build(self): """A "builder" for aliases.""" pass def convert(self): try: del self.builder except AttributeError: pass self.reset_executor() self.build = self.really_build def get_csig(self): """ Generate a node's content signature, the digested signature of its content. node - the node cache - alternate node to use for the signature cache returns - the content signature """ try: return self.ninfo.csig except AttributeError: pass contents = self.get_contents() csig = SCons.Util.MD5signature(contents) self.get_ninfo().csig = csig return csig default_ans = AliasNameSpace() SCons.Node.arg2nodes_lookups.append(default_ans.lookup) # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
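# The Alias content signature above reduces to an MD5 digest of the concatenated
# child signatures; a stand-alone equivalent with made-up values:
import hashlib

child_csigs = ['a1b2c3', 'd4e5f6']          # signatures of the alias' children
contents = ''.join(child_csigs)              # what Alias.get_contents() returns
csig = hashlib.md5(contents.encode('utf-8')).hexdigest()  # what Alias.get_csig() caches
print(csig)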
smmribeiro/intellij-community
python/testData/intentions/returnTypeInNewNumpyDocString_after.py
Python
apache-2.0
75
0.013333
def f(x):
""" Returns -
------ object """ return 42
valmynd/MediaFetcher
src/plugins/youtube_dl/youtube_dl/extractor/netzkino.py
Python
gpl-3.0
2,537
0.028774
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( clean_html, int_or_none, js_to_json, parse_iso8601, ) class NetzkinoIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?netzkino\.de/\#!/(?P<category>[^/]+)/(?P<id>[^/]+)' _TEST = { 'url': 'http://www.netzkino.de/#!/scifikino/rakete-zum-mond', 'md5': '92a3f8b76f8d7220acce5377ea5d4873', 'info_dict': { 'id': 'rakete-zum-mond', 'ext': 'mp4', 'title': 'Rakete zum Mond (Endstation Mond, Destination Moon)', 'comments': 'mincount:3', 'description': 'md5:1eddeacc7e62d5a25a2d1a7290c64a28', 'upload_date': '20120813', 'thumbnail': r're:https?://.*\.jpg$', 'timestamp': 1344858571, 'age_limit': 12, }, 'params': { 'skip_download': 'Download only works from Germany', } } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) category_id = mobj.group('category') video_id = mobj.group('id') api_url = 'http://api.netzkino.de.simplecache.net/capi-2.0a/categories/%s.json?d=www' % category_id api_info = self._download_json(api_url, video_id) info = next( p for p in api_info['posts'] if p['slug'] == video_id) custom_fields = info['custom_fields'] production_js = self._download_webpage( 'http://www.netzkino.de/beta/dist/production.min.js', video_id, note='Downloading player code') avo_js = self._search_regex( r'var urlTemplate=(\{.*?"\})', production_js, 'URL templates') templates = self._parse_json( avo_js, video_id, transform_source=js_to_json) suffix = { 'hds': '.mp4/manifest.f4m', 'hls': '.mp4/master.m3u8', 'pmd': '.mp4', } film_fn = custom_fields['Streaming'][0] formats = [{ 'format_id': key, 'ext': 'mp4', 'url': tpl.replace('{}', film_fn) + suffix[key], } for key, tpl in templates.items()] self._sort_formats(formats) comments = [{ 'timestamp': parse_iso8601(c.get('date'), delimiter=' '), 'id': c['id'], 'author': c['name'], 'html': c['content'], 'parent': 'root' if c.get('parent', 0) == 0 else c['parent'], } for c in info.get('comments', [])] return { 'id': video_id, 'formats': formats, 'comments': comments, 'title': i
nfo['title'], 'age_limit': int_or_none(custom_fields.get('FSK')[0]), 'timestamp': parse_iso8601(info.get('date'), delimiter=' '), 'description': clean_html(info.get('content')), 'thumbnail': info.get('t
humbnail'), 'playlist_title': api_info.get('title'), 'playlist_id': category_id, }
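# Sketch of how the extractor above expands its per-protocol URL templates into
# format dicts; the template URLs and file name below are made up.
templates = {
    'hls': 'https://cdn.example.com/{}',
    'pmd': 'https://pmd.example.com/{}',
}
suffix = {'hls': '.mp4/master.m3u8', 'pmd': '.mp4'}
film_fn = 'rakete-zum-mond'

formats = [{
    'format_id': key,
    'ext': 'mp4',
    'url': tpl.replace('{}', film_fn) + suffix[key],
} for key, tpl in templates.items()]
print(formats)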
daviddrysdale/python-phonenumbers
python/phonenumbers/data/region_BL.py
Python
apache-2.0
959
0.007299
"""Auto-generated file, do not edit by hand. BL metadata""" from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata PHONE_METADATA_BL = PhoneMetadata(id='BL', country_code=590, international_prefix='00', general_desc=PhoneNumberDesc(national_number_pattern='(?:590|(?:69|80)\\d|976)\\d{6}
', possible_length=(9,)), fixed_line=PhoneNumberDesc(national_number_pattern='590(?:2[7-9]|5[12]|87)\\d{4}', example_number='590271234', possible_length=(9,)), mobile=PhoneNumberDesc(national_number_pattern='69(?:0\\d\\d|1(?:2[2-9]|3[0-5]))\\d{4}', example_number='690001234', possible_length=(9,)), toll_free=PhoneNumberDesc(national_number_pattern='80[0-5]\\d{6}', example_number='800012345', possible_length=(9,)), voip=PhoneNumberD
esc(national_number_pattern='976[01]\\d{5}', example_number='976012345', possible_length=(9,)), national_prefix='0', national_prefix_for_parsing='0', mobile_number_portable_region=True)
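# The BL metadata above is essentially a bundle of regular expressions; checking
# a candidate national number against, say, the mobile pattern is a full match
# (both numbers below come from the example_number fields in the metadata).
import re

mobile_pattern = r'69(?:0\d\d|1(?:2[2-9]|3[0-5]))\d{4}'
assert re.fullmatch(mobile_pattern, '690001234') is not None   # example mobile number
assert re.fullmatch(mobile_pattern, '590271234') is None       # fixed-line example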
pandeyop/rally
tests/unit/benchmark/scenarios/neutron/test_network.py
Python
apache-2.0
24,839
0
# Copyright 2014: Intel Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from rally.benchmark.scenarios.neutron import network from tests.unit import test NEUTRON_NETWORKS = "rally.benchmark.scenarios.neutron.network.NeutronNetworks" class NeutronNetworksTestCase(test.TestCase): @mock.patch(NEUTRON_NETWORKS + "._list_networks") @mock.patch(NEUTRON_NETWORKS + "._create_network") def test_create_and_list_networks(self, mock_create, mock_list): neutron_scenario = network.NeutronNetworks() # Default options network_create_args = {} neutron_scenario.create_and_list_networks( network_create_args=network_create_args) mock_create.assert_called_once_with(network_create_args) mock_list.assert_called_once_with() mock_create.reset_mock() mock_list.reset_mock() # Explicit network name is specified network_create_args = {"name": "given-name"} neutron_scenario.create_and_list_networks( network_create_args=network_create_args) mock_create.assert_called_once_with(network_create_args) mock_list.assert_called_once_with() @mock.patch(NEUTRON_NETWORKS + "._update_network") @mock.patch(NEUTRON_NETWORKS + "._create_network", return_value={ "network": { "id": "network-id", "name": "network-name", "admin_state_up": False } }) def test_create_and_update_networks(self, mock_create_network, mock_update_network): scenario = network.NeutronNetworks() network_update_args = {"name": "_updated", "admin_state_up": True} # Default options scenario.create_and_update_networks( network_update_args=network_update_args) mock_c
reate_network.assert_called_once_with({}) mock_update_network.assert_has_calls( [mock.call(mock_create_network.return_value, network_update_args)]) mock_create_network.reset_mo
ck() mock_update_network.reset_mock() # Explicit network name is specified network_create_args = {"name": "network-name", "admin_state_up": False} scenario.create_and_update_networks( network_create_args=network_create_args, network_update_args=network_update_args) mock_create_network.assert_called_once_with(network_create_args) mock_update_network.assert_has_calls( [mock.call(mock_create_network.return_value, network_update_args)]) @mock.patch(NEUTRON_NETWORKS + "._delete_network") @mock.patch(NEUTRON_NETWORKS + "._create_network") def test_create_and_delete_networks(self, mock_create, mock_delete): neutron_scenario = network.NeutronNetworks() # Default options network_create_args = {} neutron_scenario.create_and_delete_networks() mock_create.assert_called_once_with(network_create_args) self.assertEqual(1, mock_delete.call_count) mock_create.reset_mock() mock_delete.reset_mock() # Explict network name is specified network_create_args = {"name": "given-name"} neutron_scenario.create_and_delete_networks( network_create_args=network_create_args) mock_create.assert_called_once_with(network_create_args) self.assertEqual(1, mock_delete.call_count) @mock.patch(NEUTRON_NETWORKS + "._list_subnets") @mock.patch(NEUTRON_NETWORKS + "._create_network_and_subnets") def test_create_and_list_subnets(self, mock_create_network_and_subnets, mock_list): scenario = network.NeutronNetworks() subnets_per_network = 4 subnet_cidr_start = "default_cidr" mock_create_network_and_subnets.reset_mock() mock_list.reset_mock() # Default options scenario.create_and_list_subnets( subnets_per_network=subnets_per_network, subnet_cidr_start=subnet_cidr_start) mock_create_network_and_subnets.assert_has_calls( [mock.call({}, {}, subnets_per_network, subnet_cidr_start)]) mock_list.assert_called_once_with() mock_create_network_and_subnets.reset_mock() mock_list.reset_mock() # Custom options scenario.create_and_list_subnets( subnet_create_args={"allocation_pools": []}, subnet_cidr_start="custom_cidr", subnets_per_network=subnets_per_network) mock_create_network_and_subnets.assert_has_calls( [mock.call({}, {"allocation_pools": []}, subnets_per_network, "custom_cidr")]) mock_list.assert_called_once_with() @mock.patch(NEUTRON_NETWORKS + "._update_subnet") @mock.patch(NEUTRON_NETWORKS + "._create_network_and_subnets") def test_create_and_update_subnets(self, mock_create_network_and_subnets, mock_update_subnet): scenario = network.NeutronNetworks() subnets_per_network = 1 subnet_cidr_start = "default_cidr" net = { "network": { "id": "network-id" } } subnet = { "subnet": { "name": "subnet-name", "id": "subnet-id", "enable_dhcp": False } } mock_create_network_and_subnets.return_value = (net, [subnet]) subnet_update_args = {"name": "_updated", "enable_dhcp": True} mock_create_network_and_subnets.reset_mock() mock_update_subnet.reset_mock() # Default options scenario.create_and_update_subnets( subnet_update_args=subnet_update_args, subnet_cidr_start=subnet_cidr_start, subnets_per_network=subnets_per_network) mock_create_network_and_subnets.assert_has_calls( [mock.call({}, {}, subnets_per_network, subnet_cidr_start)]) mock_update_subnet.assert_has_calls( [mock.call(subnet, subnet_update_args)]) mock_create_network_and_subnets.reset_mock() mock_update_subnet.reset_mock() # Custom options subnet_cidr_start = "custom_cidr" scenario.create_and_update_subnets( subnet_update_args=subnet_update_args, subnet_create_args={"allocation_pools": []}, subnet_cidr_start=subnet_cidr_start, subnets_per_network=subnets_per_network) 
mock_create_network_and_subnets.assert_has_calls( [mock.call({}, {"allocation_pools": []}, subnets_per_network, subnet_cidr_start)]) mock_update_subnet.assert_has_calls( [mock.call(subnet, subnet_update_args)]) @mock.patch(NEUTRON_NETWORKS + "._delete_subnet") @mock.patch(NEUTRON_NETWORKS + "._create_network_and_subnets") def test_create_and_delete_subnets(self, mock_create_network_and_subnets, mock_delete): scenario = network.NeutronNetworks() net = { "network": { "id": "network-id" } } subnet = { "subnet": { "name": "subnet-name", "id": "subnet-id", "enable_dhcp": False } } mock_create_network_and_subnets.return_value = (net,
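# A minimal illustration of the unittest.mock.patch pattern used throughout the
# tests above (the patch target here is a standard-library function, not rally).
from unittest import mock
import os

with mock.patch('os.listdir', return_value=['a.txt']) as mocked_listdir:
    assert os.listdir('/tmp') == ['a.txt']     # the patched function returns the canned value
mocked_listdir.assert_called_once_with('/tmp')  # the call and its arguments were recorded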
jlongever/redfish-client-python
on_http_redfish_1_0/models/chassis_1_0_0_chassis_actions.py
Python
apache-2.0
3,731
0.001072
# coding: utf-8 """ Copyright 2015 SmartBear Software Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Ref: https://github.com/swagger-api/swagger-codegen """ from pprint import pformat from six import i
teritems
class Chassis100ChassisActions(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ def __init__(self): """ Chassis100ChassisActions - a model defined in Swagger :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition. """ self.swagger_types = { 'oem': 'object', 'chassis_reset': 'Chassis100Reset' } self.attribute_map = { 'oem': 'Oem', 'chassis_reset': '#Chassis.Reset' } self._oem = None self._chassis_reset = None @property def oem(self): """ Gets the oem of this Chassis100ChassisActions. :return: The oem of this Chassis100ChassisActions. :rtype: object """ return self._oem @oem.setter def oem(self, oem): """ Sets the oem of this Chassis100ChassisActions. :param oem: The oem of this Chassis100ChassisActions. :type: object """ self._oem = oem @property def chassis_reset(self): """ Gets the chassis_reset of this Chassis100ChassisActions. :return: The chassis_reset of this Chassis100ChassisActions. :rtype: Chassis100Reset """ return self._chassis_reset @chassis_reset.setter def chassis_reset(self, chassis_reset): """ Sets the chassis_reset of this Chassis100ChassisActions. :param chassis_reset: The chassis_reset of this Chassis100ChassisActions. :type: Chassis100Reset """ self._chassis_reset = chassis_reset def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
creasyw/IMTAphy
wnsbase/playground/builtins/__init__.py
Python
gpl-2.0
1,457
0.00755
############################################################################### # This file is part of openWNS (open Wireless Network Simulator) # _____________________________________________________________________________ # # Copyright (C) 2004-2007 # Chair of Communication Networks (ComNets) # Kopernikusstr. 16, D-52074 Aachen, Germany # phone: ++49-241-80-27910, # fax: ++49-241-80-22242 # email: info@openwns.org # www: http://www.openwns.org # _____________________________________________________________________________ # # openWNS is free software; you can redistribute it and/or modify it under the # terms of the GNU Lesser General Public License version 2 as published by the # Free Software Foundation; # # openWNS is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more # details. # # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## #from Cleanup import * #from Documentation import * #from ForEach import * #from Install import * #
from Lint import * #from Missing import * #from Replay import * #from SanityCheck import * #from Testing import * #from Update import * #from Upgrade import *
colinbrislawn/scikit-bio
skbio/sequence/tests/test_sequence.py
Python
bsd-3-clause
113,531
0.000352
# ---------------------------------------------------------------------------- # Copyright (c) 2013--, scikit-bio development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. # ---------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function import six from six.moves import zip_longest import copy import re from types import GeneratorType from collections import Hashable from unittest import TestCase, main import numpy as np import numpy.testing as npt import pandas as pd from skbio import Sequence from skbio.util import assert_data_frame_almost_equal from skbio.sequence._sequence import (_single_index_to_slice, _is_single_index, _as_slice_if_single_index) class SequenceSubclass(Sequence): """Used for testing purposes.""" pass class TestSequence(TestCase): def setUp(self): self.lowercase_seq = Sequence('AAAAaaaa', lowercase='key') self.sequence_kinds = frozenset([ str, Sequence, lambda s: np.fromstring(s, dtype='|S1'), lambda s: np.fromstring(s, dtype=np.uint8)]) def empty_generator(): raise StopIteration() yield self.getitem_empty_indices = [ [], (), {}, empty_generator(), # ndarray of implicit float dtype np.array([]), np.array([], dtype=int)] def test_init_default_parameters(self): seq = Sequence('.ABC123xyz-') npt.assert_equal(seq.values, np.array('.ABC123xyz-', dtype='c')) self.assertEqual('.ABC123xyz-', str(seq)) self.assertFalse(seq.has_metadata()) self.assertEqual(seq.metadata, {}) self.assertFalse(seq.has_positional_metadata()) assert_data_frame_almost_equal(seq.positional_metadata, pd.DataFrame(index=np.arange(11))) def test_init_nondefault_parameters(self): seq = Sequence('.ABC123xyz-', metadata={'id': 'foo', 'description': 'bar baz'}, positional_metadata={'quality': range(11)}) npt.assert_equal(seq.values, np.array('.ABC123xyz-', dtype='c')) self.assertEqual('.ABC123xyz-', str(seq)) self.assertTrue(seq.has_metadata()) self.assertEqual(seq.metadata, {'id': 'foo', 'description': 'bar baz'}) self.assertTrue(seq.has_positional_metadata()) assert_data_frame_almost_equal( seq.positional_metadata, pd.DataFrame({'quality': range(11)}, index=np.arange(11))) def test_init_handles_missing_metadata_efficiently(self): seq = Sequence('ACGT') # metadata attributes should be None and not initialized to a "missing" # representation self.assertIsNone(seq._metadata) self.assertIsNone(seq._positional_metadata) # initializing from an existing Sequence object should handle metadata # attributes efficiently on both objects new_seq = Sequence(seq) self.assertIsNone(seq._metadata) self.assertIsNone(seq._positional_metadata) self.assertIsNone(new_seq._metadata) self.assertIsNone(new_seq._positional_metadata) self.assertFalse(seq.has_metadata()) self.assertFalse(seq.has_positional_metadata()) self.assertFalse(new_seq.has_metadata()) self.assertFalse(new_seq.has_positional_metadata()) def test_init_empty_sequence(self): # Test constructing an empty sequence using each supported input type. 
for s in (b'', # bytes u'', # unicode np.array('', dtype='c'), # char vector np.fromstring('', dtype=np.uint8), # byte vec Sequence('')): # another Sequence object seq = Sequence(s) self.assertIsInstance(seq.values, np.ndarray) self.assertEqual(seq.values.dtype, '|S1') self.assertEqual(seq.values.shape, (0, )) npt.assert_equal(seq.values, np.array('', dtype='c')) self.assertEqual(str(seq), '') self.assertEqual(len(seq), 0) self.assertFalse(seq.has_metadata()) self.assertEqual(seq.metadata, {}) self.assertFalse(seq.has_positional_metadata()) assert_data_frame_almost_equal(seq.positional_metadata, pd.DataFrame(index=np.arange(0))) def test_init_single_character_sequence(self): for s in (b'A', u'A', np.array('A', dtype='c'), np.fromstring('A', dtype=np.uint8), Sequence('A')): seq = Sequence(s) self.assertIsInstance(seq.values, np.ndarray) self.assertEqual(seq.values.dtype, '|S1') self.assertEqual(seq.values.shape, (1,)) npt.assert_equal(seq.values, np.array('A', dtype='c')) self.assertEqual(str(seq), 'A') self.assertEqual(len(seq), 1) self.assertFalse(seq.has_metadata()) self.assertEqual(seq.metadata, {}) self.assertFalse(seq.has_positional_metadata()) assert_data_frame_almost_equal(seq.positional_metadata, pd.DataFrame(index=np.arange(1))) def test_init_multiple_character_sequence(self): for s in (b'.ABC\t123 xyz-', u'.ABC\t123 xyz-', np.array('.ABC\t123 xyz-', dtype='c'), np.fromstring('.ABC\t123 xyz-', dtype=np.uint8), Sequence('.ABC\t123 xyz-')): seq = Sequence(s) self.assertIsInstance(seq.values, np.ndarray) self.assertEqual(seq.values.dtype, '|S1') self.assertEqual(seq.values.shape, (14,)) npt.assert_equal(seq.values, np.array('.ABC\t123 xyz-', dtype='c')) self.assertEqual(str(seq), '.ABC\t123 xyz-') self.assertEqual(len(seq), 14) self.assertFalse(seq.has_metadata()) self.assertEqual(seq.metadata, {}) self.assertFalse(seq.has_positional_metadata()) assert_data_frame_almost_equal(seq.positional_metadata, pd.DataFrame(index=np.arange(14))) def test_init_from_sequence_object(self): # We're testing this in its simplest form in other tests. This test # exercises more complicated cases of building a sequence from another # sequence. # just the sequence, no other metadata seq = Sequence(
'ACGT') self.assertEqual(Sequence(seq), seq) # sequence with metadata should have everything propagated seq = Sequence('ACGT', metadata={'id': 'foo', 'description': 'bar baz'}, positional_metadata={'quality': range(4)}) self.assertEqual(Sequence(seq), seq) # should be able to override metadata self.assertEqual( Sequence(seq, metadata={'id': 'ab
c', 'description': '123'}, positional_metadata={'quality': [42] * 4}), Sequence('ACGT', metadata={'id': 'abc', 'description': '123'}, positional_metadata={'quality': [42] * 4})) # subclasses work too seq = SequenceSubclass('ACGT', metadata={'id': 'foo', 'description': 'bar baz'}, positional_metadata={'quality': range(4)}) self.assertEqual( Sequence(seq), Sequence('ACGT', metadata={'id': 'foo', 'description': 'bar baz'}, positional_metadata={'quality': range(4)})) def test_init_from_contiguous_sequence_bytes_view(self): bytes = np.array([65, 42, 66, 42, 65], dtype=np.uint8) view = bytes[:3] seq = Sequence(view) # sequence should be what we'd expect
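# The tests above accept several input types because Sequence stores its data as
# a single-byte ('|S1') numpy character vector; a tiny stand-alone equivalent:
import numpy as np

values = np.frombuffer(b'ACGT', dtype='|S1')   # four single-byte elements
assert values.shape == (4,)
assert values[0] == b'A'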
KDB2/veusz
veusz/document/export.py
Python
gpl-2.0
15,098
0.002384
# Copyright (C) 2011 Jeremy S. Sanders # Email: Jeremy Sanders <jeremy@jeremysanders.net> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. ############################################################################## """Routines to export the document.""" from __future__ import division import os.path import random import math import codecs import re from ..compat import crange from .. import qtall as qt4 from .. import utils try: from . import emf_export hasemf = True except ImportError: hasemf = False from . import svg_export from . import selftest_export from . import painthelper # 1m in inch m_inch = 39.370079 def _(text, disambiguation=None, context="Export"): """Translate text.""" return qt4.QCoreApplication.translate(context, text, disambiguation) def scalePDFMediaBox(text, pagewidth, reqdsizes): """Take the PDF file text and adjust the page size. pagewidth: full page width reqdsizes: list of tuples of width, height """ outtext = b'' outidx = 0 for size, match in zip( reqdsizes, re.finditer( br'^/MediaBox \[([0-9]+) ([0-9]+) ([0-9]+) ([0-9]+)\]$', text, re.MULTILINE)): box = [float(x) for x in match.groups()] widthfactor = (box[2]-box[0])/pagewidth newbox = ('/MediaBox [%i %i %i %i]' % ( int(box[0]), int(math.floor(box[3]-widthfactor*size[1])), int(math.ceil(box[0]+widthfactor*size[0])), int(math.ceil(box[3])) )).encode('ascii') outtext += text[outidx:match.start()] + newbox outidx = match.end() outtext += text[outidx:] return outtext def fixupPDFIndices(text): """Fixup index table in PDF. Basically, we need to find the start of each obj in the file These indices are then placed in an xref table at the end The index to the xref table is placed after a startxref """ # find occurences of obj in string indices = {} for m in re.finditer(b'([0-9]+) 0 obj', text): index = int(m.group(1)) indices[index] = m.start(0) # build up xref block (note trailing spaces) xref = [b'xref', ('0 %i' % (len(indices)+1)).encode('ascii'), b'0000000000 65535 f '] for i in crange(len(indices)): xref.append( ('%010i %05i n ' % (indices[i+1], 0)).encode('ascii') ) xref.append(b'trailer\n') xref = b'\n'.join(xref) # replace current xref with this one xref_match = re.search(b'^xref\n.*trailer\n', text, re.DOTALL | re.MULTILINE) xref_index = xref_match.start(0) text = text[:xref_index] + xref + text[xref_match.end(0):] # put the correct index to the xref after startxref startxref_re = re.compile(b'^startxref\n[0-9]+\n', re.DOTALL | re.MULTILINE) text = startxref_re.sub( ('startxref\n%i
\n' % xref_index).encode('ascii'), text) return text def fixupPSBoundingBox(infname, outfname, pagewidth, size): """Make bounding box for EPS
/PS match size given.""" with open(infname, 'rU') as fin: with open(outfname, 'w') as fout: for line in fin: if line[:14] == '%%BoundingBox:': # replace bounding box line by calculated one parts = line.split() widthfactor = float(parts[3]) / pagewidth origheight = float(parts[4]) line = "%s %i %i %i %i\n" % ( parts[0], 0, int(math.floor(origheight-widthfactor*size[1])), int(math.ceil(widthfactor*size[0])), int(math.ceil(origheight)) ) fout.write(line) class Export(object): """Class to do the document exporting. This is split from document to make that class cleaner. """ formats = [ (["bmp"], _("Windows bitmap")), (["eps"], _("Encapsulated Postscript")), (["ps"], _("Postscript")), (["jpg"], _("Jpeg bitmap")), (["pdf"], _("Portable Document Format")), #(["pic"], _("QT Pic format")), (["png"], _("Portable Network Graphics")), (["svg"], _("Scalable Vector Graphics")), (["tiff"], _("Tagged Image File Format bitmap")), (["xpm"], _("X Pixmap")), ] if hasemf: formats.append( (["emf"], _("Windows Enhanced Metafile")) ) formats.sort() def __init__(self, doc, filename, pagenumber, color=True, bitmapdpi=100, antialias=True, quality=85, backcolor='#ffffff00', pdfdpi=150, svgtextastext=False): """Initialise export class. Parameters are: doc: document to write filename: output filename pagenumber: pagenumber to export or list of pages for some formats color: use color or try to use monochrome bitmapdpi: assume this dpi value when writing images antialias: antialias text and lines when writing bitmaps quality: compression factor for bitmaps backcolor: background color default for bitmaps (default transparent). pdfdpi: dpi for pdf and eps files svgtextastext: write text in SVG as text, rather than curves """ self.doc = doc self.filename = filename self.pagenumber = pagenumber self.color = color self.bitmapdpi = bitmapdpi self.antialias = antialias self.quality = quality self.backcolor = backcolor self.pdfdpi = pdfdpi self.svgtextastext = svgtextastext def export(self): """Export the figure to the filename.""" ext = os.path.splitext(self.filename)[1].lower() if ext in ('.eps', '.ps', '.pdf'): self.exportPDFOrPS(ext) elif ext in ('.png', '.jpg', '.jpeg', '.bmp', '.tiff', '.xpm'): self.exportBitmap(ext) elif ext == '.svg': self.exportSVG() elif ext == '.selftest': self.exportSelfTest() elif ext == '.pic': self.exportPIC() elif ext == '.emf' and hasemf: self.exportEMF() else: raise RuntimeError("File type '%s' not supported" % ext) def renderPage(self, page, size, dpi, painter): """Render page using paint helper to painter. This first renders to the helper, then to the painter """ helper = painthelper.PaintHelper(size, dpi=dpi, directpaint=painter) painter.setClipRect( qt4.QRectF( qt4.QPointF(0,0), qt4.QPointF(*size)) ) painter.save() self.doc.paintTo(helper, page) painter.restore() painter.end() def getSinglePage(self): """Check single number of pages or throw exception, else return page number.""" try: if len(self.pagenumber) != 1: raise RuntimeError( 'Can only export a single page in this format') return self.pagenumber[0] except TypeError: return self.pagenumber def exportBitmap(self, ext): """Export to a bitmap format.""" format = ext[1:] # setFormat() doesn't want the leading '.' if format == 'jpeg': format = 'jpg' page = self.getSinglePage() # get size for bitmap's dpi dpi = self.bitmapdpi size = self.doc.pageSize(page, dpi=(dpi,dpi)) # create real ou
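# A self-contained illustration of the MediaBox rewrite idea used by
# scalePDFMediaBox above: locate the /MediaBox entry with a regex and splice in
# recomputed bounds (the byte string and numbers below are arbitrary).
import re

pdf_fragment = b'... /MediaBox [0 0 595 842] ...'
match = re.search(br'/MediaBox \[([0-9]+) ([0-9]+) ([0-9]+) ([0-9]+)\]', pdf_fragment)
box = [float(x) for x in match.groups()]
new_entry = ('/MediaBox [%i %i %i %i]' % (box[0], box[1], box[2], box[3] / 2)).encode('ascii')
patched = pdf_fragment[:match.start()] + new_entry + pdf_fragment[match.end():]
print(patched)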
lifemapper/core
LmWebServer/services/api/v2/layer.py
Python
gpl-3.0
9,222
0
"""This module provides REST services for Layers""" import cherrypy from LmCommon.common.lmconstants import HTTPStatus from LmWebServer.common.lmconstants import HTTPMethod from LmWebServer.services.api.v2.base import LmService from LmWebServer.services.common.access_control import check_user_permission from LmWebServer.services.cp_tools.lm_format import lm_formatter # ............................................................................. @cherrypy.expose @cherrypy.popargs('path_layer_id') class LayerService(LmService): """Class for layers service. """ # ................................ @lm_formatter def GET(self, path_layer_id=None, after_time=None, alt_pred_code=None, before_time=None, date_code=None, epsg_code=None, env_code=None, env_type_id=None, gcm_code=None, layerType=None, limit=100, offset=0, url_user=None, scenario_id=None, squid=None, **params): """GET request. Individual layer, count, or list. """ # Layer type: # 0 - Anything # 1 - Environmental layer # 2 - ? (Not implemented yet) if layerType is None or layerType == 0: if path_layer_id is None: return self._list_layers( self.get_user_id(url_user=url_user), after_time=after_time, before_time=before_time, epsg_code=epsg_code, limit=limit, offset=offset, squid=squid) if path_layer_id.lower() == 'count': return self._count_layers( self.get_user_id(url_user=url_user), after_time=after_time, before_time=before_time, epsg_code=epsg_code, squid=squid) return self._get_layer(path_layer_id, env_layer=False) if path_layer_id is None: return self._list_env_layers( self.get_user_id(url_user=url_user), after_time=after_time, alt_pred_code=alt_pred_code, before_time=before_time, date_code=date_code, env_code=env_code, env_type_id=env_type_id, epsg_code=epsg_code, gcm_code=gcm_code, limit=limit, offset=offset, scenario_id=scenario_id) if path_layer_id.lower() == 'count': return self._count_env_layers( self.get_user_id(url_user=url_user), after_time=after_time, alt_pred_code=alt_pred_code, before_time=before_time, date_code=date_code, env_code=env_code, env_type_id=env_type_id, epsg_code=epsg_code, gcm_code=gcm_code, scenario_code=scenario_id) return self._get_layer(path_layer_id, env_layer=True) # ................................ def _count_env_layers(self, user_id, after_time=None, alt_pred_code=None, before_time=None, date_code=None, env_code=None, env_type_id=None, epsg_code=None, gcm_code=None, scenario_code=None): """Count environmental layer objects matching the specified criteria Args: user_id: The user to list environmental layers for. Note that this may not be the same user logged into the system after_time: Return layers modified after this time (Modified Julian Day) alt_pred_code: Return layers with this alternate prediction code before_time: Return layers modified before this time (Modified Julian Day) date_code: Return layers with this date code env_code: Return layers with this environment code env_type_id: Return layers with this environmental type epsg_code: Return layers with this EPSG code gcm_code: Return layers with this GCM code scenario_id: Return layers from this scenario """ layer_count = self.scribe.count_env_layers( user_id=user_id, env_code=env_code, gcm_code=gcm_code, alt_pred_code=alt_pred_code, date_code=date_code, after_time=after_time, before_time=before_time, epsg=epsg_code, env_type_id=env_type_id, scenario_code=scenario_code) return {'count': layer_count} # ................................ 
def _count_layers(self, user_id, after_time=None, before_time=None, epsg_code=None, squid=None): """Return a count of layers matching the specified criteria Args: user_id: The user to list layers for. Note that this may not be the same user that is logged into the system after_time: List layers modified after this time (Modified Julian Day) before_time: List layers modified before this time (Modified Julian Day) epsg_code: Return layers that have this EPSG code limit: Return this number of layers, at most offset: Offset the returned layers by this number squid: Return layers with this species identifier """ layer_count = self.scribe.count_layers( user_id=user_id, squid=squid, after_time=after_time, before_time=before_time, epsg=epsg_code) return {'count': layer_count} # ................................ def _get_layer(self, path_layer_id, env_layer=False): """Attempt to get a layer """ try: _ = int(path_layer_id) except ValueError: raise cherrypy.HTTPError( HTTPStatus.BAD_REQUEST, '{} is not a valid layer ID'.format(path_layer_id)) if env_layer: lyr = self.scribe.get_env_layer(lyr_id=path_layer_id) else: lyr = self.scribe.get_layer(lyr_id=path_layer_id) if lyr is None: raise cherrypy.HTTPError( HTTPStatus.NOT_FOUND, 'Environmental layer {} was not found'.format(path_layer_id)) if check_user_permission(self.get_user_id(), lyr, HTTPMethod.GET): return lyr raise cherrypy.HTTPError( HTTPStatus.FORBIDDEN, 'User {} does not have permission to access layer {}'.format( self.get_user_id(), path_layer_id)) # ...
............................. def _list_env_layers(self, user_id, after_ti
me=None, alt_pred_code=None, before_time=None, date_code=None, env_code=None, env_type_id=None, epsg_code=None, gcm_code=None, limit=100, offset=0, scenario_id=None): """List environmental layer objects matching the specified criteria Args: user_id: The user to list environmental layers for. Note that this may not be the same user logged into the system after_time: (optional) Return layers modified after this time (Modified Julian Day) alt_pred_code: (optional) Return layers with this alternate prediction code before_time: (optional) Return layers modified before this time (Modified Julian Day) date_code: (optional) Return layers with this date code env_code: (optional) Return layers with this environment code env_type_id: (optional) Return layers with this environmental type epsg_code: (optional) Return layers with this EPSG code gcm_code: (optional) Return layers with this GCM code limit: (optional) Return this number of layers, at most offset: (optional) Offset the returned layers by this number scenario_id: (optional) Return layers from this scenario """ lyr_atoms = self.scribe.list_env_layers( offset, limit, user_id=user_id, env_code=env_code, gcm_code=gcm_code, alt_pred_code=alt_pred_code, date_code=date_code, after_time=after_time, before_time=before_time, epsg=epsg_code, env_type_id=env_type_id) return lyr_atoms # ................................
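The GET dispatch above resolves to three access patterns -- list, count, and single layer -- depending on path_layer_id and layerType. A minimal client-side sketch, assuming a hypothetical service mount point at http://localhost/api/v2/layer (the actual URL prefix is not shown in this record):

import requests

BASE = 'http://localhost/api/v2/layer'  # hypothetical mount point, not taken from the source

# List environmental layers (layerType=1) for one scenario, ten at a time
env_layers = requests.get(
    BASE, params={'layerType': 1, 'scenario_id': 4, 'limit': 10, 'offset': 0}).json()

# Count all layers carrying a given species identifier (squid)
count = requests.get(BASE + '/count', params={'squid': 'abc123'}).json()['count']

# Fetch a single environmental layer by its numeric identifier
layer = requests.get(BASE + '/42', params={'layerType': 1}).json()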
yunojuno/django-request-profiler
tests/utils.py
Python
mit
491
0.004073
from unittest import skipIf from django.conf import settings def skipIfDefaultUser(test_func): """ Skip a test if a default user model is in use. """ return skipIf(settings.AUTH_USER_MODEL == "auth.User", "Default user model in use")( test_func ) def skipIfCustomUser(test_func): """ Skip a test if a custom user model is in u
se. """ return skipIf(settings.AUTH_USER_MODEL != "auth.User", "Custom user model in use")(
test_func )
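A short usage sketch for the two decorators, assuming a hypothetical Django test case in the same suite; each one simply wraps unittest.skipIf as defined above:

from django.test import TestCase

from tests.utils import skipIfCustomUser, skipIfDefaultUser  # module path taken from the record above


class UserModelTests(TestCase):  # hypothetical test case, not part of the source
    @skipIfCustomUser
    def test_runs_only_with_default_user_model(self):
        # skipped whenever AUTH_USER_MODEL points at a custom model
        self.assertTrue(True)

    @skipIfDefaultUser
    def test_runs_only_with_custom_user_model(self):
        # skipped whenever AUTH_USER_MODEL is the stock auth.User
        self.assertTrue(True)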
eduNEXT/edx-platform
openedx/core/djangoapps/oauth_dispatch/tests/test_api.py
Python
agpl-3.0
2,669
0.003372
""" Tests for OAuth Dispatch python API module. """ import unittest from django.conf import settings from django.http import HttpRequest from django.test import TestCase from oauth2_provider.models import AccessToken from common.djangoapps.student.tests.factories import UserFactory OAUTH_PROVIDER_ENABLED = settings.FEATURES.get('ENABLE_OAUTH2_PROVIDER') if OAUTH_PROVIDER_ENABLED: from openedx.core.djangoapps.oauth_dispatch import api from openedx.core.djangoapps.oauth_dispatch.adapters import DOTAdapter from openedx.core.djangoapps.oauth_dispatch.tests.constants import DUMMY_REDIRECT_URL EXPECTED_DEFAULT_EXPIRES_IN = 36000 @unittest.skipUnless(OAUTH_PROVIDER_ENABLED, 'OAuth2 not enabled') class TestOAuthDispatchAPI(TestCase): """ Tests for oauth_dispatch's api.py module. """ def setUp(self): super().setUp() self.adapter = DOTAdapter() self.user = UserFactory() self.client = self.adapter.create_publ
ic_client( name='public app', user=self.user, redirect_uri=DUMMY_REDIRECT_URL
, client_id='public-client-id', ) def _assert_stored_token(self, stored_token_value, expected_token_user, expected_client): stored_access_token = AccessToken.objects.get(token=stored_token_value) assert stored_access_token.user.id == expected_token_user.id assert stored_access_token.application.client_id == expected_client.client_id assert stored_access_token.application.user.id == expected_client.user.id def test_create_token_success(self): token = api.create_dot_access_token(HttpRequest(), self.user, self.client) assert token['access_token'] assert token['refresh_token'] self.assertDictContainsSubset( { 'token_type': 'Bearer', 'expires_in': EXPECTED_DEFAULT_EXPIRES_IN, 'scope': '', }, token, ) self._assert_stored_token(token['access_token'], self.user, self.client) def test_create_token_another_user(self): another_user = UserFactory() token = api.create_dot_access_token(HttpRequest(), another_user, self.client) self._assert_stored_token(token['access_token'], another_user, self.client) def test_create_token_overrides(self): expires_in = 4800 token = api.create_dot_access_token( HttpRequest(), self.user, self.client, expires_in=expires_in, scopes=['profile'], ) self.assertDictContainsSubset({'scope': 'profile'}, token) self.assertDictContainsSubset({'expires_in': expires_in}, token)
raphael0202/spaCy
spacy/ja/tag_map.py
Python
mit
4,024
0.03466
# encoding: utf8 from __future__ import unicode_literals from ..symbols import * TAG_MAP = { # Explanation of Unidic tags: # https://www.gavo.t.u-tokyo.ac.jp/~mine/japanese/nlp+slp/UNIDIC_manual.pdf # Universal Dependencies Mapping: # http://universaldependencies.org/ja/overview/morphology.html # http://universaldependencies.org/ja/pos/all.html "記号,一般,*,*":{POS: PUNCT}, # this includes characters used to represent sounds like ドレミ "記号,文字,*,*":{POS: PUNCT}, # this is for Greek and Latin characters used as sumbols, as in math "感動詞,フィラー,*,*": {POS: INTJ}, "感動詞,一般,*,*": {POS: INTJ}, # this is specifically for unicode full-width space "空白,*,*,*": {POS: X}, "形状詞,一般,*,*":{POS: ADJ}, "形状詞,タリ,*,*":{POS: ADJ}, "形状詞,助動詞語幹,*,*":{POS: ADJ}, "形容詞,一般,*,*":{POS: ADJ}, "形容詞,非自立可能,*,*":{POS: AUX}, # XXX ADJ if alone, AUX otherwise "助詞,格助詞,*,*":{POS: ADP}, "助詞,係助詞,*,*":{POS: ADP}, "助詞,終助詞,*,*":{POS: PART}, "助詞,準体助詞,*,*":{POS: SCONJ}, # の as in 走るのが速い "助詞,接続助詞,*,*":{POS: SCONJ}, # verb ending て "助詞,副助詞,*,*":{POS: PART}, # ばかり, つつ after a verb "助動詞,*,*,*":{POS: AUX}, "接続詞,*,*,*":{POS: SCONJ}, # XXX: might need refinement "接頭辞,*,*,*":{POS: NOUN}, "接尾辞,形状詞的,*,*":{POS: ADJ}, # がち, チック "接尾辞,形容詞的,*,*":{POS: ADJ}, # -らしい "接尾辞,動詞的,*,*":{POS: NOUN}, # -じみ "接尾辞,名詞的,サ変可能,*":{POS: NOUN}, # XXX see 名詞,普通名詞,サ変可能,* "接尾辞,名詞的,一般,*":{POS: NOUN}, "接尾辞,名詞的,助数詞,*":{POS: NOUN}, "接尾辞,名詞的,副詞可能,*":{POS: NOUN}, # -後, -過ぎ "代名詞,*,*,*":{POS: PRON}, "動詞,一般,*,*":{POS: VERB}, "動詞,非自立可能,*,*":{POS: VERB}, # XXX VERB if alone, AUX otherwise "動詞,非自立可能,*,*,AUX":{POS: AUX}, "動詞,非自立可能,*,*,VERB":{POS: VERB}, "副詞,*,*,*":{POS: ADV}, "補助記号,AA,一般,*":{POS: SYM}, # text art "補助記号,AA,顔文字,*":{POS: SYM}, # kaomoji "補助記号,一般,*,*":{POS: SYM}, "補助記号,括弧開,*,*":{POS: PUNCT}, # open bracket "補助記号,括弧閉,*,*":{POS: PUNCT}, # close bracket "補助記号,句点,*,*":{POS: PUNCT}, # period or other EOS marker "補助記号,読点,*,*":{POS: PUNCT}, # comma "名詞,固有名詞,一般,*":{POS: PROPN}, # general proper noun "名詞,固有名詞,人名,一般":{POS: PROPN}, # person's name "名詞,固有名詞,人名,姓":{POS: PROPN}, # surname "名詞,固有名詞,人名,名":{POS: PROPN}, # first name "名詞,固有名詞,地名,一般":{POS: PROPN}, # place name "名詞,固有名詞,地名,国":{POS: PROPN}, # country name "名詞,助動詞語幹,*,*":{POS: AUX}, "名詞,数詞,*,*":{POS: NUM}, # includes Chinese numerals "名詞,普通名詞,サ変可能,*":{POS: NOUN}, # XXX: sometimes VERB in UDv2; suru-verb noun "名詞,普通名詞,サ変可能,*,NOUN":{POS: NOUN}, "名詞,普通名詞,サ変可能,*,VERB":{POS: VE
RB}, "名詞,普通名詞,サ変形状詞可能,*":{POS: NOUN}, # ex: 下手 "名詞,普通名詞,一般,*":{POS: NOUN}, "名詞,普通名詞,形状詞可能,*":{POS: NOUN}, # XXX: sometimes ADJ in UDv2 "名詞,普通名詞,形状詞可能,*,NOUN":{POS: NOUN}, "
名詞,普通名詞,形状詞可能,*,ADJ":{POS: ADJ}, "名詞,普通名詞,助数詞可能,*":{POS: NOUN}, # counter / unit "名詞,普通名詞,副詞可能,*":{POS: NOUN}, "連体詞,*,*,*":{POS: ADJ}, # XXX this has exceptions based on literal token "連体詞,*,*,*,ADJ":{POS: ADJ}, "連体詞,*,*,*,PRON":{POS: PRON}, "連体詞,*,*,*,DET":{POS: DET}, }
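A minimal sketch of consuming this mapping, assuming the tokenizer hands back Unidic tags in the same comma-joined form used as keys above; the NOUN fallback for unmapped tags is an assumption, not spaCy's documented behaviour:

from spacy.ja.tag_map import TAG_MAP  # module path taken from the record above
from spacy.symbols import POS, NOUN


def coarse_pos(unidic_tag):
    """Resolve a comma-joined Unidic tag to its Universal Dependencies POS id."""
    entry = TAG_MAP.get(unidic_tag)
    # Fall back to NOUN for tags missing from the map (an assumption).
    return entry[POS] if entry is not None else NOUN


coarse_pos("名詞,数詞,*,*")  # resolves to NUM via the mapping above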
elkingtowa/pyrake
tests/test_spidermiddleware_urllength.py
Python
mit
685
0.00146
from unittest import TestCase from pyrake.contrib.spidermiddleware.urllength import UrlLengthMiddleware from pyrake.http import Response, Request from pyrake.spider import Spider class TestUrlLengthMiddleware(TestCase): def test_process_spider_output(self):
res = Response('http://pyraketest.org') short_url_req = Request('http://pyraketest.org/') long_url_req = Request('http://pyraketest.org/this_is_a_long_url')
reqs = [short_url_req, long_url_req] mw = UrlLengthMiddleware(maxlength=25) spider = Spider('foo') out = list(mw.process_spider_output(res, reqs, spider)) self.assertEquals(out, [short_url_req])
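The middleware under test is not part of this record; a hedged sketch of the behaviour the assertion implies (requests whose URL exceeds maxlength are dropped, shorter ones pass through), assuming pyrake mirrors Scrapy's spider-middleware interface:

class UrlLengthMiddleware(object):  # simplified sketch, not the pyrake source
    def __init__(self, maxlength):
        self.maxlength = maxlength

    def process_spider_output(self, response, result, spider):
        for request in result:
            # The real implementation also passes through non-Request items
            # and logs dropped URLs; this sketch keeps only the length filter.
            if len(request.url) <= self.maxlength:
                yield request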
jamslevy/gsoc
app/soc/models/priority_group.py
Python
apache-2.0
1,130
0.004425
#!/usr/bin/python2.5 # # Copyright 2009 the Melange authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the speci
fic language governing perm
issions and # limitations under the License. """This module contains the Priority Group Model.""" __authors__ = [ '"Sverre Rabbelier" <sverre@rabbelier.nl>', ] from google.appengine.ext import db from django.utils.translation import ugettext from soc.models import linkable class PriorityGroup(linkable.Linkable): """The PriorityGroup model. """ #: the priority of this group, 0 being lower than 1 priority = db.IntegerProperty(required=False, default=0) #: the human readable name of this priority group name = db.StringProperty(required=False)
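A short sketch of querying the model with the App Engine datastore API; the highest-first ordering is an assumption, since the consuming code is not shown in this record:

from soc.models.priority_group import PriorityGroup  # module path taken from the record above


def groups_by_priority():
    """Yield PriorityGroup entities, highest priority first."""
    query = PriorityGroup.all()
    query.order('-priority')  # descending sort on the IntegerProperty defined above
    for group in query:
        yield group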
samuelshaner/openmc
docs/source/conf.py
Python
mit
7,909
0.006448
# -*- coding: utf-8 -*- # # metasci documentation build configuration file, created by # sphinx-quickstart on Sun Feb 7 22:29:49 2010. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # Determine if we're on Read the Docs server on_rtd = os.environ.get('READTHEDOCS', None) == 'True' # On Read the Docs, we need to mock a few third-party modules so we don't get # ImportErrors when building documentation try: from unittest.mock import MagicMock except ImportError: from mock import Mock as MagicMock MOCK_MODULES = ['numpy', 'numpy.polynomial', 'numpy.polynomial.polynomial', 'h5py', 'pandas', 'opencg'] sys.modules.update((mod_name, MagicMock()) for mod_name in MOCK_MODULES) import numpy as np np.polynomial.Polynomial = MagicMock # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('../sphinxext')) sys.path.insert(0, os.path.abspath('../..')) # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.mathjax', 'sphinx.ext.autosummary', 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode', 'sphinx_numfig', 'notebook_sphinxext'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = u'OpenMC' copyright = u'2011-2016, Massachusetts Institute of Technology' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = "0.8" # The full version, including alpha/beta/rc tags. release = "0.8.0" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = [] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They
are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. #pygments_style = 'sphinx' #pygments_style = 'friendly' #pygments_style = 'bw' #pygments_style = 'fruity' #pygments_style = 'manni' pygments_style = 'tango' # A list of ignored prefixes for module index sorti
ng. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages if not on_rtd: import sphinx_rtd_theme html_theme = 'sphinx_rtd_theme' html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] html_logo = '_images/openmc_logo.png' # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". html_title = "OpenMC Documentation" # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] def setup(app): app.add_stylesheet('theme_overrides.css') # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'openmcdoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'openmc.tex', u'OpenMC Documentation', u'Massachusetts Institute of Technology', 'manual'), ] latex_elements = { 'preamble': r""" \usepackage{enumitem} \usepackage{amsfonts} \usepackage{amsmath} \setlistdepth{99} \usepackage{tikz} \usetikzlibrary{shapes,snakes,shadows,arrows,calc,decorations.markings,patterns,fit,matrix,spy} \usepackage{fixltx2e} \hypersetup{bookmarksdepth=3} \setcounter{tocdepth}{2} \numberwithin{equation}{section} """, 'printindex': r"" } # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. 
#latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True #Autodocumentation Flags #autodoc_member_order = "groupwise" #autoclass_content = "both" autosummary_generate = True napoleon_use_ivar = True intersphinx_mapping = { 'python': ('https://docs.python.org/3', None), 'numpy': ('http://docs.scipy.org/doc/numpy/', None), 'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None), 'matplotlib': ('http://matplotlib.org/', None) }
adobe-mds/dcos-cassandra-service
integration/tests/infinity_commons.py
Python
apache-2.0
1,791
0.002233
import json import shakedown import dcos from dcos import marathon from enum import Enum from tests.command import ( cassandra_api_url, spin, WAIT_TIME_IN_SECONDS ) class PlanState(Enum): ERROR = "ERROR" WAITING = "WAITING" PENDING = "PENDING" IN_PROGRESS = "IN_PROGRESS" COMPLETE = "COMPLETE" def filter_phase(plan, phase_name): for phase in plan['phases']: if phase['name'] == phase_name: return phase return None def get_phase_index(plan, phase_name): idx = 0 for phase in plan['phases']: if phase['name'] == phase_name: return idx else: idx += 1 return -1 counter = 0 def get_and_verify_plan(predicate=lambda r: True, wait_time=WAIT_TIME_IN_SECONDS): global counter plan_url = cassandra_api_url('plan') def fn(): try: return dcos.http.get(plan_url) except dcos.errors.DCOSHTTPException as err: return err.response def success_predicate(result): global counter message = 'Request to {} failed'.format(plan_url) try: body = resu
lt.json() except ValueError: return False, message if counter < 3: counter += 1 pred_res = predicate(body) if pred_res: counter = 0 return pred_res, message return spin(fn, success_predicate, wait_time=wait_time).json() def get_marathon_uri(): """Gets URL to the Marathon instance""" return '{}/mar
athon'.format(shakedown.dcos_url()) def get_marathon_client(): """Gets a marathon client""" return marathon.Client(get_marathon_uri()) def strip_meta(app): app.pop('fetch') app.pop('version') app.pop('versionInfo') return app
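A usage sketch for the plan helper, assuming the module is importable as tests.infinity_commons and that the plan body exposes a top-level 'status' field holding one of the PlanState values (the exact response shape is an assumption):

from tests.infinity_commons import PlanState, filter_phase, get_and_verify_plan

# Block until the service plan reports COMPLETE, then inspect one phase.
plan = get_and_verify_plan(
    predicate=lambda body: body.get('status') == PlanState.COMPLETE.value)

repair_phase = filter_phase(plan, 'Repair')  # 'Repair' is a hypothetical phase name
print(repair_phase)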
MartinOehler/LINBO-ServerGUI
linboweb/linboserver/forms.py
Python
gpl-2.0
1,587
0.006931
# Author: Martin Oehler <oehler@knopper.net> 2013 # License: GPL V2 from django.forms import ModelForm from django.forms import Form from django.forms import ModelChoiceField from django.forms.widgets import RadioSelect from django.forms.widgets import CheckboxSelectMultiple from django.forms.widgets
import TextInput from django.for
ms.widgets import Textarea from django.forms.widgets import DateInput from django.contrib.admin import widgets from linboweb.linboserver.models import partition from linboweb.linboserver.models import partitionSelection from linboweb.linboserver.models import os from linboweb.linboserver.models import vm from linboweb.linboserver.models import client from linboweb.linboserver.models import clientGroup from linboweb.linboserver.models import pxelinuxcfg class partitionForm(ModelForm): class Meta: model = partition class partitionSelectionForm(ModelForm): class Meta: model = partitionSelection class osForm(ModelForm): partitionselection = ModelChoiceField(queryset=partitionSelection.objects.all()) class Meta: model = os class vmForm(ModelForm): class Meta: model = vm class clientForm(ModelForm): pxelinuxconfiguration = ModelChoiceField(queryset=pxelinuxcfg.objects.all()) class Meta: model = client class clientGroupForm(ModelForm): class Meta: model = clientGroup class pxelinuxcfgForm(ModelForm): class Meta: model = pxelinuxcfg widgets = { 'configuration': Textarea(attrs={'cols': 80, 'rows': 40}), }
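A minimal sketch of wiring one of these ModelForms into a Django view; the template name and redirect target are hypothetical, not taken from the source:

from django.shortcuts import redirect, render

from linboweb.linboserver.forms import clientForm  # module path taken from the record above


def edit_client(request):  # hypothetical view
    form = clientForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        form.save()  # persists the client through the underlying ModelForm
        return redirect('client-list')  # hypothetical URL name
    return render(request, 'linboserver/client_form.html', {'form': form})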